Diffstat (limited to 'vendor/memchr/src/arch/generic')
-rw-r--r--  vendor/memchr/src/arch/generic/memchr.rs      1214
-rw-r--r--  vendor/memchr/src/arch/generic/mod.rs            14
-rw-r--r--  vendor/memchr/src/arch/generic/packedpair.rs    317
3 files changed, 0 insertions, 1545 deletions
diff --git a/vendor/memchr/src/arch/generic/memchr.rs b/vendor/memchr/src/arch/generic/memchr.rs
deleted file mode 100644
index 580b3cc..0000000
--- a/vendor/memchr/src/arch/generic/memchr.rs
+++ /dev/null
@@ -1,1214 +0,0 @@
-/*!
-Generic crate-internal routines for the `memchr` family of functions.
-*/
-
-// What follows is a vector algorithm generic over the specific vector
-// type to detect the position of one, two or three needles in a haystack.
-// From what I know, this is a "classic" algorithm, although I don't
-// believe it has been published in any peer reviewed journal. I believe
-// it can be found in places like glibc and Go's standard library. It
-// appears to be well known and is elaborated on in more detail here:
-// https://gms.tf/stdfind-and-memchr-optimizations.html
-//
-// While the routine below is fairly long and perhaps intimidating, the basic
-// idea is actually very simple and can be expressed straightforwardly in
-// pseudo code. The pseudo code below is written for 128 bit vectors, but the
-// actual code below works for anything that implements the Vector trait.
-//
-// needle = (n1 << 15) | (n1 << 14) | ... | (n1 << 1) | n1
-// // Note: shift amount is in bytes
-//
-// while i <= haystack.len() - 16:
-// // A 16 byte vector. Each byte in chunk corresponds to a byte in
-// // the haystack.
-// chunk = haystack[i:i+16]
-// // Compare bytes in needle with bytes in chunk. The result is a 16
-// // byte chunk where each byte is 0xFF if the corresponding bytes
-// // in needle and chunk were equal, or 0x00 otherwise.
-// eqs = cmpeq(needle, chunk)
-// // Return a 32 bit integer where the most significant 16 bits
-// // are always 0 and the lower 16 bits correspond to whether the
-// // most significant bit in the corresponding byte in `eqs` is set.
-// // In other words, `mask as u16` has bit i set if and only if
-// // needle[i] == chunk[i].
-// mask = movemask(eqs)
-//
-// // Mask is 0 if there is no match, and non-zero otherwise.
-// if mask != 0:
-// // trailing_zeros tells us the position of the least significant
-// // bit that is set.
-// return i + trailing_zeros(mask)
-//
-// // haystack length may not be a multiple of 16, so search the rest.
-// while i < haystack.len():
-// if haystack[i] == n1:
-// return i
-//
-// // No match found.
-// return NULL
-//
-// In fact, we could loosely translate the above code to Rust line-for-line
-// and it would be a pretty fast algorithm. But, we pull out all the stops
-// to go as fast as possible:
-//
-// 1. We use aligned loads. That is, we do some finagling to make sure our
-// primary loop not only proceeds in increments of 16 bytes, but that
-// the address of the haystack pointer that we dereference is aligned to
-// 16 bytes. 16 is a magic number here because it is the size of an SSE2
-// 128-bit vector. (For the AVX2 algorithm, 32 is the magic number.)
-// Therefore, to get aligned loads, our pointer's address must be evenly
-// divisible by 16.
-// 2. Our primary loop proceeds 64 bytes at a time instead of 16. It's
-// kind of like loop unrolling, but we combine the equality comparisons
-// using a vector OR such that we only need to extract a single mask to
-// determine whether a match exists or not. If so, then we do some
-// book-keeping to determine the precise location but otherwise mush on.
-// 3. We use our "chunk" comparison routine in as many places as possible,
-// even if it means using unaligned loads. In particular, if haystack
-// starts with an unaligned address, then we do an unaligned load to
-// search the first 16 bytes. We then start our primary loop at the
-// smallest subsequent aligned address, which will actually overlap with
-// previously searched bytes. But we're OK with that. We do a similar
-// dance at the end of our primary loop. Finally, to avoid a
-// byte-at-a-time loop at the end, we do a final 16 byte unaligned load
-// that may overlap with a previous load. This is OK because it converts
-// a loop into a small number of very fast vector instructions. The overlap
-// is OK because we know the place where the overlap occurs does not
-// contain a match.
-//
-// And that's pretty much all there is to it. Note that since the below is
-// generic and since it's meant to be inlined into routines with a
-// `#[target_feature(enable = "...")]` annotation, we must mark all routines as
-// both unsafe and `#[inline(always)]`.
-//
-// The fact that the code below is generic does somewhat inhibit us. For
-// example, I've noticed that introducing an un-inlineable `#[cold]` function to
-// handle the match case in the loop generates tighter assembly, but there is
-// no way to do this in the generic code below because the generic code doesn't
-// know what `target_feature` annotation to apply to the un-inlineable function.
-// We could make such functions part of the `Vector` trait, but we instead live
-// with the slightly sub-optimal codegen for now since it doesn't seem to have
-// a noticeable perf difference.
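-//
-// As a loose, hypothetical illustration (not part of this crate's API), the
-// pseudo code above specializes to SSE2 roughly as follows. The generic code
-// below does the same thing, but abstracted over `Vector` and with the
-// alignment and unrolling tricks described above layered on top:
-//
-// #[cfg(target_arch = "x86_64")]
-// unsafe fn find_sse2_sketch(n1: u8, haystack: &[u8]) -> Option<usize> {
-//     use core::arch::x86_64::*;
-//
-//     // Splat the needle byte across all 16 lanes of a 128-bit vector.
-//     let vn1 = _mm_set1_epi8(n1 as i8);
-//     let mut i = 0;
-//     while i + 16 <= haystack.len() {
-//         // Unaligned 16 byte load of the current chunk.
-//         let chunk =
-//             _mm_loadu_si128(haystack.as_ptr().add(i) as *const __m128i);
-//         let mask = _mm_movemask_epi8(_mm_cmpeq_epi8(chunk, vn1)) as u32;
-//         if mask != 0 {
-//             // The least significant set bit is the first matching lane.
-//             return Some(i + mask.trailing_zeros() as usize);
-//         }
-//         i += 16;
-//     }
-//     // Tail: byte-at-a-time here; the real code below instead does one
-//     // final overlapping vector load.
-//     haystack[i..].iter().position(|&b| b == n1).map(|p| i + p)
-// }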
-
-use crate::{
- ext::Pointer,
- vector::{MoveMask, Vector},
-};
-
-/// Finds all occurrences of a single byte in a haystack.
-#[derive(Clone, Copy, Debug)]
-pub(crate) struct One<V> {
- s1: u8,
- v1: V,
-}
-
-impl<V: Vector> One<V> {
- /// The number of bytes we examine in each iteration of our search loop.
- const LOOP_SIZE: usize = 4 * V::BYTES;
-
- /// Create a new searcher that finds occurrences of the byte given.
- #[inline(always)]
- pub(crate) unsafe fn new(needle: u8) -> One<V> {
- One { s1: needle, v1: V::splat(needle) }
- }
-
- /// Returns the needle given to `One::new`.
- #[inline(always)]
- pub(crate) fn needle1(&self) -> u8 {
- self.s1
- }
-
- /// Return a pointer to the first occurrence of the needle in the given
- /// haystack. If no such occurrence exists, then `None` is returned.
- ///
- /// When a match is found, the pointer returned is guaranteed to be
- /// `>= start` and `< end`.
- ///
- /// # Safety
- ///
- /// * It must be the case that `start < end` and that the distance between
- /// them is at least equal to `V::BYTES`. That is, it must always be valid
- /// to do at least an unaligned load of `V` at `start`.
- /// * Both `start` and `end` must be valid for reads.
- /// * Both `start` and `end` must point to an initialized value.
- /// * Both `start` and `end` must point to the same allocated object and
- /// must either be in bounds or at most one byte past the end of the
- /// allocated object.
- /// * Both `start` and `end` must be _derived from_ a pointer to the same
- /// object.
- /// * The distance between `start` and `end` must not overflow `isize`.
- /// * The distance being in bounds must not rely on "wrapping around" the
- /// address space.
- #[inline(always)]
- pub(crate) unsafe fn find_raw(
- &self,
- start: *const u8,
- end: *const u8,
- ) -> Option<*const u8> {
- // If we want to support vectors bigger than 256 bits, we probably
- // need to move up to using a u64 for the masks used below. Currently
- // they are 32 bits, which means we're SOL for vectors that need masks
- // bigger than 32 bits. Overall unclear until there's a use case.
- debug_assert!(V::BYTES <= 32, "vector cannot be bigger than 32 bytes");
-
- let topos = V::Mask::first_offset;
- let len = end.distance(start);
- debug_assert!(
- len >= V::BYTES,
- "haystack has length {}, but must be at least {}",
- len,
- V::BYTES
- );
-
- // Search a possibly unaligned chunk at `start`. This covers any part
- // of the haystack prior to where aligned loads can start.
- if let Some(cur) = self.search_chunk(start, topos) {
- return Some(cur);
- }
- // Set `cur` to the first V-aligned pointer greater than `start`.
- let mut cur = start.add(V::BYTES - (start.as_usize() & V::ALIGN));
- debug_assert!(cur > start && end.sub(V::BYTES) >= start);
- if len >= Self::LOOP_SIZE {
- while cur <= end.sub(Self::LOOP_SIZE) {
- debug_assert_eq!(0, cur.as_usize() % V::BYTES);
-
- let a = V::load_aligned(cur);
- let b = V::load_aligned(cur.add(1 * V::BYTES));
- let c = V::load_aligned(cur.add(2 * V::BYTES));
- let d = V::load_aligned(cur.add(3 * V::BYTES));
- let eqa = self.v1.cmpeq(a);
- let eqb = self.v1.cmpeq(b);
- let eqc = self.v1.cmpeq(c);
- let eqd = self.v1.cmpeq(d);
- let or1 = eqa.or(eqb);
- let or2 = eqc.or(eqd);
- let or3 = or1.or(or2);
- if or3.movemask_will_have_non_zero() {
- let mask = eqa.movemask();
- if mask.has_non_zero() {
- return Some(cur.add(topos(mask)));
- }
-
- let mask = eqb.movemask();
- if mask.has_non_zero() {
- return Some(cur.add(1 * V::BYTES).add(topos(mask)));
- }
-
- let mask = eqc.movemask();
- if mask.has_non_zero() {
- return Some(cur.add(2 * V::BYTES).add(topos(mask)));
- }
-
- let mask = eqd.movemask();
- debug_assert!(mask.has_non_zero());
- return Some(cur.add(3 * V::BYTES).add(topos(mask)));
- }
- cur = cur.add(Self::LOOP_SIZE);
- }
- }
- // Handle any leftovers after the aligned loop above. We use unaligned
- // loads here, but I believe we are guaranteed that they are aligned
- // since `cur` is aligned.
- while cur <= end.sub(V::BYTES) {
- debug_assert!(end.distance(cur) >= V::BYTES);
- if let Some(cur) = self.search_chunk(cur, topos) {
- return Some(cur);
- }
- cur = cur.add(V::BYTES);
- }
- // Finally handle any remaining bytes less than the size of V. In this
- // case, our pointer may indeed be unaligned and the load may overlap
- // with the previous one. But that's okay since we know the previous
- // load didn't lead to a match (otherwise we wouldn't be here).
- if cur < end {
- debug_assert!(end.distance(cur) < V::BYTES);
- cur = cur.sub(V::BYTES - end.distance(cur));
- debug_assert_eq!(end.distance(cur), V::BYTES);
- return self.search_chunk(cur, topos);
- }
- None
- }
-
- /// Return a pointer to the last occurrence of the needle in the given
- /// haystack. If no such occurrence exists, then `None` is returned.
- ///
- /// When a match is found, the pointer returned is guaranteed to be
- /// `>= start` and `< end`.
- ///
- /// # Safety
- ///
- /// * It must be the case that `start < end` and that the distance between
- /// them is at least equal to `V::BYTES`. That is, it must always be valid
- /// to do at least an unaligned load of `V` at `start`.
- /// * Both `start` and `end` must be valid for reads.
- /// * Both `start` and `end` must point to an initialized value.
- /// * Both `start` and `end` must point to the same allocated object and
- /// must either be in bounds or at most one byte past the end of the
- /// allocated object.
- /// * Both `start` and `end` must be _derived from_ a pointer to the same
- /// object.
- /// * The distance between `start` and `end` must not overflow `isize`.
- /// * The distance being in bounds must not rely on "wrapping around" the
- /// address space.
- #[inline(always)]
- pub(crate) unsafe fn rfind_raw(
- &self,
- start: *const u8,
- end: *const u8,
- ) -> Option<*const u8> {
- // If we want to support vectors bigger than 256 bits, we probably
- // need to move up to using a u64 for the masks used below. Currently
- // they are 32 bits, which means we're SOL for vectors that need masks
- // bigger than 32 bits. Overall unclear until there's a use case.
- debug_assert!(V::BYTES <= 32, "vector cannot be bigger than 32 bytes");
-
- let topos = V::Mask::last_offset;
- let len = end.distance(start);
- debug_assert!(
- len >= V::BYTES,
- "haystack has length {}, but must be at least {}",
- len,
- V::BYTES
- );
-
- if let Some(cur) = self.search_chunk(end.sub(V::BYTES), topos) {
- return Some(cur);
- }
- let mut cur = end.sub(end.as_usize() & V::ALIGN);
- debug_assert!(start <= cur && cur <= end);
- if len >= Self::LOOP_SIZE {
- while cur >= start.add(Self::LOOP_SIZE) {
- debug_assert_eq!(0, cur.as_usize() % V::BYTES);
-
- cur = cur.sub(Self::LOOP_SIZE);
- let a = V::load_aligned(cur);
- let b = V::load_aligned(cur.add(1 * V::BYTES));
- let c = V::load_aligned(cur.add(2 * V::BYTES));
- let d = V::load_aligned(cur.add(3 * V::BYTES));
- let eqa = self.v1.cmpeq(a);
- let eqb = self.v1.cmpeq(b);
- let eqc = self.v1.cmpeq(c);
- let eqd = self.v1.cmpeq(d);
- let or1 = eqa.or(eqb);
- let or2 = eqc.or(eqd);
- let or3 = or1.or(or2);
- if or3.movemask_will_have_non_zero() {
- let mask = eqd.movemask();
- if mask.has_non_zero() {
- return Some(cur.add(3 * V::BYTES).add(topos(mask)));
- }
-
- let mask = eqc.movemask();
- if mask.has_non_zero() {
- return Some(cur.add(2 * V::BYTES).add(topos(mask)));
- }
-
- let mask = eqb.movemask();
- if mask.has_non_zero() {
- return Some(cur.add(1 * V::BYTES).add(topos(mask)));
- }
-
- let mask = eqa.movemask();
- debug_assert!(mask.has_non_zero());
- return Some(cur.add(topos(mask)));
- }
- }
- }
- while cur >= start.add(V::BYTES) {
- debug_assert!(cur.distance(start) >= V::BYTES);
- cur = cur.sub(V::BYTES);
- if let Some(cur) = self.search_chunk(cur, topos) {
- return Some(cur);
- }
- }
- if cur > start {
- debug_assert!(cur.distance(start) < V::BYTES);
- return self.search_chunk(start, topos);
- }
- None
- }
-
- /// Return a count of all matching bytes in the given haystack.
- ///
- /// # Safety
- ///
- /// * It must be the case that `start < end` and that the distance between
- /// them is at least equal to `V::BYTES`. That is, it must always be valid
- /// to do at least an unaligned load of `V` at `start`.
- /// * Both `start` and `end` must be valid for reads.
- /// * Both `start` and `end` must point to an initialized value.
- /// * Both `start` and `end` must point to the same allocated object and
- /// must either be in bounds or at most one byte past the end of the
- /// allocated object.
- /// * Both `start` and `end` must be _derived from_ a pointer to the same
- /// object.
- /// * The distance between `start` and `end` must not overflow `isize`.
- /// * The distance being in bounds must not rely on "wrapping around" the
- /// address space.
- #[inline(always)]
- pub(crate) unsafe fn count_raw(
- &self,
- start: *const u8,
- end: *const u8,
- ) -> usize {
- debug_assert!(V::BYTES <= 32, "vector cannot be bigger than 32 bytes");
-
- let confirm = |b| b == self.needle1();
- let len = end.distance(start);
- debug_assert!(
- len >= V::BYTES,
- "haystack has length {}, but must be at least {}",
- len,
- V::BYTES
- );
-
- // Set `cur` to the first V-aligned pointer greater than `start`.
- let mut cur = start.add(V::BYTES - (start.as_usize() & V::ALIGN));
- // Count any matching bytes before we start our aligned loop.
- let mut count = count_byte_by_byte(start, cur, confirm);
- debug_assert!(cur > start && end.sub(V::BYTES) >= start);
- if len >= Self::LOOP_SIZE {
- while cur <= end.sub(Self::LOOP_SIZE) {
- debug_assert_eq!(0, cur.as_usize() % V::BYTES);
-
- let a = V::load_aligned(cur);
- let b = V::load_aligned(cur.add(1 * V::BYTES));
- let c = V::load_aligned(cur.add(2 * V::BYTES));
- let d = V::load_aligned(cur.add(3 * V::BYTES));
- let eqa = self.v1.cmpeq(a);
- let eqb = self.v1.cmpeq(b);
- let eqc = self.v1.cmpeq(c);
- let eqd = self.v1.cmpeq(d);
- count += eqa.movemask().count_ones();
- count += eqb.movemask().count_ones();
- count += eqc.movemask().count_ones();
- count += eqd.movemask().count_ones();
- cur = cur.add(Self::LOOP_SIZE);
- }
- }
- // Handle any leftovers after the aligned loop above. We use unaligned
- // loads here, but I believe we are guaranteed that they are aligned
- // since `cur` is aligned.
- while cur <= end.sub(V::BYTES) {
- debug_assert!(end.distance(cur) >= V::BYTES);
- let chunk = V::load_unaligned(cur);
- count += self.v1.cmpeq(chunk).movemask().count_ones();
- cur = cur.add(V::BYTES);
- }
- // And finally count any leftovers that weren't caught above.
- count += count_byte_by_byte(cur, end, confirm);
- count
- }
-
- /// Search `V::BYTES` starting at `cur` via an unaligned load.
- ///
- /// `mask_to_offset` should be a function that converts a `movemask` to
- /// an offset such that `cur.add(offset)` corresponds to a pointer to the
- /// match location if one is found. Generally it is expected to be either
- /// `V::Mask::first_offset` or `V::Mask::last_offset`, depending on whether
- /// one is implementing a forward or reverse search, respectively.
- ///
- /// # Safety
- ///
- /// `cur` must be a valid pointer and it must be valid to do an unaligned
- /// load of size `V::BYTES` at `cur`.
- #[inline(always)]
- unsafe fn search_chunk(
- &self,
- cur: *const u8,
- mask_to_offset: impl Fn(V::Mask) -> usize,
- ) -> Option<*const u8> {
- let chunk = V::load_unaligned(cur);
- let mask = self.v1.cmpeq(chunk).movemask();
- if mask.has_non_zero() {
- Some(cur.add(mask_to_offset(mask)))
- } else {
- None
- }
- }
-}
-
-/// Finds all occurrences of two bytes in a haystack.
-///
-/// That is, this reports matches of one of two possible bytes. For example,
-/// searching for `a` or `b` in `afoobar` would report matches at offsets `0`,
-/// `4` and `5`.
-#[derive(Clone, Copy, Debug)]
-pub(crate) struct Two<V> {
- s1: u8,
- s2: u8,
- v1: V,
- v2: V,
-}
-
-impl<V: Vector> Two<V> {
- /// The number of bytes we examine in each iteration of our search loop.
- const LOOP_SIZE: usize = 2 * V::BYTES;
-
- /// Create a new searcher that finds occurrences of the two bytes given.
- #[inline(always)]
- pub(crate) unsafe fn new(needle1: u8, needle2: u8) -> Two<V> {
- Two {
- s1: needle1,
- s2: needle2,
- v1: V::splat(needle1),
- v2: V::splat(needle2),
- }
- }
-
- /// Returns the first needle given to `Two::new`.
- #[inline(always)]
- pub(crate) fn needle1(&self) -> u8 {
- self.s1
- }
-
- /// Returns the second needle given to `Two::new`.
- #[inline(always)]
- pub(crate) fn needle2(&self) -> u8 {
- self.s2
- }
-
- /// Return a pointer to the first occurrence of one of the needles in the
- /// given haystack. If no such occurrence exists, then `None` is returned.
- ///
- /// When a match is found, the pointer returned is guaranteed to be
- /// `>= start` and `< end`.
- ///
- /// # Safety
- ///
- /// * It must be the case that `start < end` and that the distance between
- /// them is at least equal to `V::BYTES`. That is, it must always be valid
- /// to do at least an unaligned load of `V` at `start`.
- /// * Both `start` and `end` must be valid for reads.
- /// * Both `start` and `end` must point to an initialized value.
- /// * Both `start` and `end` must point to the same allocated object and
- /// must either be in bounds or at most one byte past the end of the
- /// allocated object.
- /// * Both `start` and `end` must be _derived from_ a pointer to the same
- /// object.
- /// * The distance between `start` and `end` must not overflow `isize`.
- /// * The distance being in bounds must not rely on "wrapping around" the
- /// address space.
- #[inline(always)]
- pub(crate) unsafe fn find_raw(
- &self,
- start: *const u8,
- end: *const u8,
- ) -> Option<*const u8> {
- // If we want to support vectors bigger than 256 bits, we probably
- // need to move up to using a u64 for the masks used below. Currently
- // they are 32 bits, which means we're SOL for vectors that need masks
- // bigger than 32 bits. Overall unclear until there's a use case.
- debug_assert!(V::BYTES <= 32, "vector cannot be bigger than 32 bytes");
-
- let topos = V::Mask::first_offset;
- let len = end.distance(start);
- debug_assert!(
- len >= V::BYTES,
- "haystack has length {}, but must be at least {}",
- len,
- V::BYTES
- );
-
- // Search a possibly unaligned chunk at `start`. This covers any part
- // of the haystack prior to where aligned loads can start.
- if let Some(cur) = self.search_chunk(start, topos) {
- return Some(cur);
- }
- // Set `cur` to the first V-aligned pointer greater than `start`.
- let mut cur = start.add(V::BYTES - (start.as_usize() & V::ALIGN));
- debug_assert!(cur > start && end.sub(V::BYTES) >= start);
- if len >= Self::LOOP_SIZE {
- while cur <= end.sub(Self::LOOP_SIZE) {
- debug_assert_eq!(0, cur.as_usize() % V::BYTES);
-
- let a = V::load_aligned(cur);
- let b = V::load_aligned(cur.add(V::BYTES));
- let eqa1 = self.v1.cmpeq(a);
- let eqb1 = self.v1.cmpeq(b);
- let eqa2 = self.v2.cmpeq(a);
- let eqb2 = self.v2.cmpeq(b);
- let or1 = eqa1.or(eqb1);
- let or2 = eqa2.or(eqb2);
- let or3 = or1.or(or2);
- if or3.movemask_will_have_non_zero() {
- let mask = eqa1.movemask().or(eqa2.movemask());
- if mask.has_non_zero() {
- return Some(cur.add(topos(mask)));
- }
-
- let mask = eqb1.movemask().or(eqb2.movemask());
- debug_assert!(mask.has_non_zero());
- return Some(cur.add(V::BYTES).add(topos(mask)));
- }
- cur = cur.add(Self::LOOP_SIZE);
- }
- }
- // Handle any leftovers after the aligned loop above. We use unaligned
- // loads here, but I believe we are guaranteed that they are aligned
- // since `cur` is aligned.
- while cur <= end.sub(V::BYTES) {
- debug_assert!(end.distance(cur) >= V::BYTES);
- if let Some(cur) = self.search_chunk(cur, topos) {
- return Some(cur);
- }
- cur = cur.add(V::BYTES);
- }
- // Finally handle any remaining bytes less than the size of V. In this
- // case, our pointer may indeed be unaligned and the load may overlap
- // with the previous one. But that's okay since we know the previous
- // load didn't lead to a match (otherwise we wouldn't be here).
- if cur < end {
- debug_assert!(end.distance(cur) < V::BYTES);
- cur = cur.sub(V::BYTES - end.distance(cur));
- debug_assert_eq!(end.distance(cur), V::BYTES);
- return self.search_chunk(cur, topos);
- }
- None
- }
-
- /// Return a pointer to the last occurrence of one of the needles in the
- /// given haystack. If no such occurrence exists, then `None` is returned.
- ///
- /// When a match is found, the pointer returned is guaranteed to be
- /// `>= start` and `< end`.
- ///
- /// # Safety
- ///
- /// * It must be the case that `start < end` and that the distance between
- /// them is at least equal to `V::BYTES`. That is, it must always be valid
- /// to do at least an unaligned load of `V` at `start`.
- /// * Both `start` and `end` must be valid for reads.
- /// * Both `start` and `end` must point to an initialized value.
- /// * Both `start` and `end` must point to the same allocated object and
- /// must either be in bounds or at most one byte past the end of the
- /// allocated object.
- /// * Both `start` and `end` must be _derived from_ a pointer to the same
- /// object.
- /// * The distance between `start` and `end` must not overflow `isize`.
- /// * The distance being in bounds must not rely on "wrapping around" the
- /// address space.
- #[inline(always)]
- pub(crate) unsafe fn rfind_raw(
- &self,
- start: *const u8,
- end: *const u8,
- ) -> Option<*const u8> {
- // If we want to support vectors bigger than 256 bits, we probably
- // need to move up to using a u64 for the masks used below. Currently
- // they are 32 bits, which means we're SOL for vectors that need masks
- // bigger than 32 bits. Overall unclear until there's a use case.
- debug_assert!(V::BYTES <= 32, "vector cannot be bigger than 32 bytes");
-
- let topos = V::Mask::last_offset;
- let len = end.distance(start);
- debug_assert!(
- len >= V::BYTES,
- "haystack has length {}, but must be at least {}",
- len,
- V::BYTES
- );
-
- if let Some(cur) = self.search_chunk(end.sub(V::BYTES), topos) {
- return Some(cur);
- }
- let mut cur = end.sub(end.as_usize() & V::ALIGN);
- debug_assert!(start <= cur && cur <= end);
- if len >= Self::LOOP_SIZE {
- while cur >= start.add(Self::LOOP_SIZE) {
- debug_assert_eq!(0, cur.as_usize() % V::BYTES);
-
- cur = cur.sub(Self::LOOP_SIZE);
- let a = V::load_aligned(cur);
- let b = V::load_aligned(cur.add(V::BYTES));
- let eqa1 = self.v1.cmpeq(a);
- let eqb1 = self.v1.cmpeq(b);
- let eqa2 = self.v2.cmpeq(a);
- let eqb2 = self.v2.cmpeq(b);
- let or1 = eqa1.or(eqb1);
- let or2 = eqa2.or(eqb2);
- let or3 = or1.or(or2);
- if or3.movemask_will_have_non_zero() {
- let mask = eqb1.movemask().or(eqb2.movemask());
- if mask.has_non_zero() {
- return Some(cur.add(V::BYTES).add(topos(mask)));
- }
-
- let mask = eqa1.movemask().or(eqa2.movemask());
- debug_assert!(mask.has_non_zero());
- return Some(cur.add(topos(mask)));
- }
- }
- }
- while cur >= start.add(V::BYTES) {
- debug_assert!(cur.distance(start) >= V::BYTES);
- cur = cur.sub(V::BYTES);
- if let Some(cur) = self.search_chunk(cur, topos) {
- return Some(cur);
- }
- }
- if cur > start {
- debug_assert!(cur.distance(start) < V::BYTES);
- return self.search_chunk(start, topos);
- }
- None
- }
-
- /// Search `V::BYTES` starting at `cur` via an unaligned load.
- ///
- /// `mask_to_offset` should be a function that converts a `movemask` to
- /// an offset such that `cur.add(offset)` corresponds to a pointer to the
- /// match location if one is found. Generally it is expected to be either
- /// `V::Mask::first_offset` or `V::Mask::last_offset`, depending on whether
- /// one is implementing a forward or reverse search, respectively.
- ///
- /// # Safety
- ///
- /// `cur` must be a valid pointer and it must be valid to do an unaligned
- /// load of size `V::BYTES` at `cur`.
- #[inline(always)]
- unsafe fn search_chunk(
- &self,
- cur: *const u8,
- mask_to_offset: impl Fn(V::Mask) -> usize,
- ) -> Option<*const u8> {
- let chunk = V::load_unaligned(cur);
- let eq1 = self.v1.cmpeq(chunk);
- let eq2 = self.v2.cmpeq(chunk);
- let mask = eq1.or(eq2).movemask();
- if mask.has_non_zero() {
- let mask1 = eq1.movemask();
- let mask2 = eq2.movemask();
- Some(cur.add(mask_to_offset(mask1.or(mask2))))
- } else {
- None
- }
- }
-}
-
-/// Finds all occurrences of three bytes in a haystack.
-///
-/// That is, this reports matches of one of three possible bytes. For example,
-/// searching for `a`, `b` or `o` in `afoobar` would report matches at offsets
-/// `0`, `2`, `3`, `4` and `5`.
-#[derive(Clone, Copy, Debug)]
-pub(crate) struct Three<V> {
- s1: u8,
- s2: u8,
- s3: u8,
- v1: V,
- v2: V,
- v3: V,
-}
-
-impl<V: Vector> Three<V> {
- /// The number of bytes we examine in each iteration of our search loop.
- const LOOP_SIZE: usize = 2 * V::BYTES;
-
- /// Create a new searcher that finds occurrences of the three bytes given.
- #[inline(always)]
- pub(crate) unsafe fn new(
- needle1: u8,
- needle2: u8,
- needle3: u8,
- ) -> Three<V> {
- Three {
- s1: needle1,
- s2: needle2,
- s3: needle3,
- v1: V::splat(needle1),
- v2: V::splat(needle2),
- v3: V::splat(needle3),
- }
- }
-
- /// Returns the first needle given to `Three::new`.
- #[inline(always)]
- pub(crate) fn needle1(&self) -> u8 {
- self.s1
- }
-
- /// Returns the second needle given to `Three::new`.
- #[inline(always)]
- pub(crate) fn needle2(&self) -> u8 {
- self.s2
- }
-
- /// Returns the third needle given to `Three::new`.
- #[inline(always)]
- pub(crate) fn needle3(&self) -> u8 {
- self.s3
- }
-
- /// Return a pointer to the first occurrence of one of the needles in the
- /// given haystack. If no such occurrence exists, then `None` is returned.
- ///
- /// When a match is found, the pointer returned is guaranteed to be
- /// `>= start` and `< end`.
- ///
- /// # Safety
- ///
- /// * It must be the case that `start < end` and that the distance between
- /// them is at least equal to `V::BYTES`. That is, it must always be valid
- /// to do at least an unaligned load of `V` at `start`.
- /// * Both `start` and `end` must be valid for reads.
- /// * Both `start` and `end` must point to an initialized value.
- /// * Both `start` and `end` must point to the same allocated object and
- /// must either be in bounds or at most one byte past the end of the
- /// allocated object.
- /// * Both `start` and `end` must be _derived from_ a pointer to the same
- /// object.
- /// * The distance between `start` and `end` must not overflow `isize`.
- /// * The distance being in bounds must not rely on "wrapping around" the
- /// address space.
- #[inline(always)]
- pub(crate) unsafe fn find_raw(
- &self,
- start: *const u8,
- end: *const u8,
- ) -> Option<*const u8> {
- // If we want to support vectors bigger than 256 bits, we probably
- // need to move up to using a u64 for the masks used below. Currently
- // they are 32 bits, which means we're SOL for vectors that need masks
- // bigger than 32 bits. Overall unclear until there's a use case.
- debug_assert!(V::BYTES <= 32, "vector cannot be bigger than 32 bytes");
-
- let topos = V::Mask::first_offset;
- let len = end.distance(start);
- debug_assert!(
- len >= V::BYTES,
- "haystack has length {}, but must be at least {}",
- len,
- V::BYTES
- );
-
- // Search a possibly unaligned chunk at `start`. This covers any part
- // of the haystack prior to where aligned loads can start.
- if let Some(cur) = self.search_chunk(start, topos) {
- return Some(cur);
- }
- // Set `cur` to the first V-aligned pointer greater than `start`.
- let mut cur = start.add(V::BYTES - (start.as_usize() & V::ALIGN));
- debug_assert!(cur > start && end.sub(V::BYTES) >= start);
- if len >= Self::LOOP_SIZE {
- while cur <= end.sub(Self::LOOP_SIZE) {
- debug_assert_eq!(0, cur.as_usize() % V::BYTES);
-
- let a = V::load_aligned(cur);
- let b = V::load_aligned(cur.add(V::BYTES));
- let eqa1 = self.v1.cmpeq(a);
- let eqb1 = self.v1.cmpeq(b);
- let eqa2 = self.v2.cmpeq(a);
- let eqb2 = self.v2.cmpeq(b);
- let eqa3 = self.v3.cmpeq(a);
- let eqb3 = self.v3.cmpeq(b);
- let or1 = eqa1.or(eqb1);
- let or2 = eqa2.or(eqb2);
- let or3 = eqa3.or(eqb3);
- let or4 = or1.or(or2);
- let or5 = or3.or(or4);
- if or5.movemask_will_have_non_zero() {
- let mask = eqa1
- .movemask()
- .or(eqa2.movemask())
- .or(eqa3.movemask());
- if mask.has_non_zero() {
- return Some(cur.add(topos(mask)));
- }
-
- let mask = eqb1
- .movemask()
- .or(eqb2.movemask())
- .or(eqb3.movemask());
- debug_assert!(mask.has_non_zero());
- return Some(cur.add(V::BYTES).add(topos(mask)));
- }
- cur = cur.add(Self::LOOP_SIZE);
- }
- }
- // Handle any leftovers after the aligned loop above. We use unaligned
- // loads here, but I believe we are guaranteed that they are aligned
- // since `cur` is aligned.
- while cur <= end.sub(V::BYTES) {
- debug_assert!(end.distance(cur) >= V::BYTES);
- if let Some(cur) = self.search_chunk(cur, topos) {
- return Some(cur);
- }
- cur = cur.add(V::BYTES);
- }
- // Finally handle any remaining bytes less than the size of V. In this
- // case, our pointer may indeed be unaligned and the load may overlap
- // with the previous one. But that's okay since we know the previous
- // load didn't lead to a match (otherwise we wouldn't be here).
- if cur < end {
- debug_assert!(end.distance(cur) < V::BYTES);
- cur = cur.sub(V::BYTES - end.distance(cur));
- debug_assert_eq!(end.distance(cur), V::BYTES);
- return self.search_chunk(cur, topos);
- }
- None
- }
-
- /// Return a pointer to the last occurrence of one of the needles in the
- /// given haystack. If no such occurrence exists, then `None` is returned.
- ///
- /// When a match is found, the pointer returned is guaranteed to be
- /// `>= start` and `< end`.
- ///
- /// # Safety
- ///
- /// * It must be the case that `start < end` and that the distance between
- /// them is at least equal to `V::BYTES`. That is, it must always be valid
- /// to do at least an unaligned load of `V` at `start`.
- /// * Both `start` and `end` must be valid for reads.
- /// * Both `start` and `end` must point to an initialized value.
- /// * Both `start` and `end` must point to the same allocated object and
- /// must either be in bounds or at most one byte past the end of the
- /// allocated object.
- /// * Both `start` and `end` must be _derived from_ a pointer to the same
- /// object.
- /// * The distance between `start` and `end` must not overflow `isize`.
- /// * The distance being in bounds must not rely on "wrapping around" the
- /// address space.
- #[inline(always)]
- pub(crate) unsafe fn rfind_raw(
- &self,
- start: *const u8,
- end: *const u8,
- ) -> Option<*const u8> {
- // If we want to support vectors bigger than 256 bits, we probably
- // need to move up to using a u64 for the masks used below. Currently
- // they are 32 bits, which means we're SOL for vectors that need masks
- // bigger than 32 bits. Overall unclear until there's a use case.
- debug_assert!(V::BYTES <= 32, "vector cannot be bigger than 32 bytes");
-
- let topos = V::Mask::last_offset;
- let len = end.distance(start);
- debug_assert!(
- len >= V::BYTES,
- "haystack has length {}, but must be at least {}",
- len,
- V::BYTES
- );
-
- if let Some(cur) = self.search_chunk(end.sub(V::BYTES), topos) {
- return Some(cur);
- }
- let mut cur = end.sub(end.as_usize() & V::ALIGN);
- debug_assert!(start <= cur && cur <= end);
- if len >= Self::LOOP_SIZE {
- while cur >= start.add(Self::LOOP_SIZE) {
- debug_assert_eq!(0, cur.as_usize() % V::BYTES);
-
- cur = cur.sub(Self::LOOP_SIZE);
- let a = V::load_aligned(cur);
- let b = V::load_aligned(cur.add(V::BYTES));
- let eqa1 = self.v1.cmpeq(a);
- let eqb1 = self.v1.cmpeq(b);
- let eqa2 = self.v2.cmpeq(a);
- let eqb2 = self.v2.cmpeq(b);
- let eqa3 = self.v3.cmpeq(a);
- let eqb3 = self.v3.cmpeq(b);
- let or1 = eqa1.or(eqb1);
- let or2 = eqa2.or(eqb2);
- let or3 = eqa3.or(eqb3);
- let or4 = or1.or(or2);
- let or5 = or3.or(or4);
- if or5.movemask_will_have_non_zero() {
- let mask = eqb1
- .movemask()
- .or(eqb2.movemask())
- .or(eqb3.movemask());
- if mask.has_non_zero() {
- return Some(cur.add(V::BYTES).add(topos(mask)));
- }
-
- let mask = eqa1
- .movemask()
- .or(eqa2.movemask())
- .or(eqa3.movemask());
- debug_assert!(mask.has_non_zero());
- return Some(cur.add(topos(mask)));
- }
- }
- }
- while cur >= start.add(V::BYTES) {
- debug_assert!(cur.distance(start) >= V::BYTES);
- cur = cur.sub(V::BYTES);
- if let Some(cur) = self.search_chunk(cur, topos) {
- return Some(cur);
- }
- }
- if cur > start {
- debug_assert!(cur.distance(start) < V::BYTES);
- return self.search_chunk(start, topos);
- }
- None
- }
-
- /// Search `V::BYTES` starting at `cur` via an unaligned load.
- ///
- /// `mask_to_offset` should be a function that converts a `movemask` to
- /// an offset such that `cur.add(offset)` corresponds to a pointer to the
- /// match location if one is found. Generally it is expected to be either
- /// `V::Mask::first_offset` or `V::Mask::last_offset`, depending on whether
- /// one is implementing a forward or reverse search, respectively.
- ///
- /// # Safety
- ///
- /// `cur` must be a valid pointer and it must be valid to do an unaligned
- /// load of size `V::BYTES` at `cur`.
- #[inline(always)]
- unsafe fn search_chunk(
- &self,
- cur: *const u8,
- mask_to_offset: impl Fn(V::Mask) -> usize,
- ) -> Option<*const u8> {
- let chunk = V::load_unaligned(cur);
- let eq1 = self.v1.cmpeq(chunk);
- let eq2 = self.v2.cmpeq(chunk);
- let eq3 = self.v3.cmpeq(chunk);
- let mask = eq1.or(eq2).or(eq3).movemask();
- if mask.has_non_zero() {
- let mask1 = eq1.movemask();
- let mask2 = eq2.movemask();
- let mask3 = eq3.movemask();
- Some(cur.add(mask_to_offset(mask1.or(mask2).or(mask3))))
- } else {
- None
- }
- }
-}
-
-/// An iterator over all occurrences of a set of bytes in a haystack.
-///
-/// This iterator implements the routines necessary to provide a
-/// `DoubleEndedIterator` impl, which means it can also be used to find
-/// occurrences in reverse order.
-///
-/// The lifetime parameters are as follows:
-///
-/// * `'h` refers to the lifetime of the haystack being searched.
-///
-/// This type is intended to be used to implement all iterators for the
-/// `memchr` family of functions. It handles a tiny bit of marginally tricky
-/// raw pointer math, but otherwise expects the caller to provide `find_raw`
-/// and `rfind_raw` routines for each call of `next` and `next_back`,
-/// respectively.
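-///
-/// As a loose illustration (hypothetical code, not any specific facade in
-/// this crate), a forward iterator built on a `One` searcher would drive
-/// this type roughly like so:
-///
-/// ```text
-/// let mut it = Iter::new(haystack);
-/// // SAFETY: `One::find_raw` only returns pointers in `[start, end)` when
-/// // a match is found, which is exactly what `Iter::next` requires.
-/// while let Some(offset) = unsafe { it.next(|s, e| one.find_raw(s, e)) } {
-///     // `offset` is a valid index into `haystack`.
-/// }
-/// ```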
-#[derive(Clone, Debug)]
-pub(crate) struct Iter<'h> {
- /// The original starting point into the haystack. We use this to convert
- /// pointers to offsets.
- original_start: *const u8,
- /// The current starting point into the haystack. That is, where the next
- /// search will begin.
- start: *const u8,
- /// The current ending point into the haystack. That is, where the next
- /// reverse search will begin.
- end: *const u8,
- /// A marker for tracking the lifetime of the original_start/start/end
- /// pointers above, which all point into the haystack.
- haystack: core::marker::PhantomData<&'h [u8]>,
-}
-
-// SAFETY: Iter contains no shared references to anything that performs any
-// interior mutations. Also, the lifetime guarantees that Iter will not outlive
-// the haystack.
-unsafe impl<'h> Send for Iter<'h> {}
-
-// SAFETY: Iter performs no interior mutations, therefore no explicit
-// synchronization is necessary. Also, the lifetime guarantees that Iter will
-// not outlive the haystack.
-unsafe impl<'h> Sync for Iter<'h> {}
-
-impl<'h> Iter<'h> {
- /// Create a new generic memchr iterator.
- #[inline(always)]
- pub(crate) fn new(haystack: &'h [u8]) -> Iter<'h> {
- Iter {
- original_start: haystack.as_ptr(),
- start: haystack.as_ptr(),
- end: haystack.as_ptr().wrapping_add(haystack.len()),
- haystack: core::marker::PhantomData,
- }
- }
-
- /// Returns the next occurrence in the forward direction.
- ///
- /// # Safety
- ///
- /// Callers must ensure that if a pointer is returned from the closure
- /// provided, then it must be greater than or equal to the start pointer
- /// and less than the end pointer.
- #[inline(always)]
- pub(crate) unsafe fn next(
- &mut self,
- mut find_raw: impl FnMut(*const u8, *const u8) -> Option<*const u8>,
- ) -> Option<usize> {
- // SAFETY: Pointers are derived directly from the same &[u8] haystack.
- // We only ever modify start/end corresponding to a matching offset
- // found between start and end. Thus all changes to start/end maintain
- // our safety requirements.
- //
- // The only other assumption we rely on is that the pointer returned
- // by `find_raw` satisfies `self.start <= found < self.end`, and that
- // safety contract is forwarded to the caller.
- let found = find_raw(self.start, self.end)?;
- let result = found.distance(self.original_start);
- self.start = found.add(1);
- Some(result)
- }
-
- /// Returns the number of remaining elements in this iterator.
- #[inline(always)]
- pub(crate) fn count(
- self,
- mut count_raw: impl FnMut(*const u8, *const u8) -> usize,
- ) -> usize {
- // SAFETY: Pointers are derived directly from the same &[u8] haystack.
- // We only ever modify start/end corresponding to a matching offset
- // found between start and end. Thus all changes to start/end maintain
- // our safety requirements.
- count_raw(self.start, self.end)
- }
-
- /// Returns the next occurrence in reverse.
- ///
- /// # Safety
- ///
- /// Callers must ensure that if a pointer is returned from the closure
- /// provided, then it must be greater than or equal to the start pointer
- /// and less than the end pointer.
- #[inline(always)]
- pub(crate) unsafe fn next_back(
- &mut self,
- mut rfind_raw: impl FnMut(*const u8, *const u8) -> Option<*const u8>,
- ) -> Option<usize> {
- // SAFETY: Pointers are derived directly from the same &[u8] haystack.
- // We only ever modify start/end corresponding to a matching offset
- // found between start and end. Thus all changes to start/end maintain
- // our safety requirements.
- //
- // The only other assumption we rely on is that the pointer returned
- // by `rfind_raw` satisfies `self.start <= found < self.end`, and that
- // safety contract is forwarded to the caller.
- let found = rfind_raw(self.start, self.end)?;
- let result = found.distance(self.original_start);
- self.end = found;
- Some(result)
- }
-
- /// Provides an implementation of `Iterator::size_hint`.
- #[inline(always)]
- pub(crate) fn size_hint(&self) -> (usize, Option<usize>) {
- (0, Some(self.end.as_usize().saturating_sub(self.start.as_usize())))
- }
-}
-
-/// Search a slice using a function that operates on raw pointers.
-///
-/// Given a function to search a contiguous sequence of memory for the location
-/// of a non-empty set of bytes, this will execute that search on a slice of
-/// bytes. The pointer returned by the given function will be converted to an
-/// offset relative to the starting point of the given slice. That is, if a
-/// match is found, the offset returned by this routine is guaranteed to be a
-/// valid index into `haystack`.
-///
-/// Callers may use this for a forward or reverse search.
-///
-/// # Safety
-///
-/// Callers must ensure that if a pointer is returned by `find_raw`, then the
-/// pointer must be greater than or equal to the starting pointer and less than
-/// the end pointer.
-#[inline(always)]
-pub(crate) unsafe fn search_slice_with_raw(
- haystack: &[u8],
- mut find_raw: impl FnMut(*const u8, *const u8) -> Option<*const u8>,
-) -> Option<usize> {
- // SAFETY: We rely on `find_raw` to return a correct and valid pointer, but
- // otherwise, `start` and `end` are valid due to the guarantees provided by
- // a &[u8].
- let start = haystack.as_ptr();
- let end = start.add(haystack.len());
- let found = find_raw(start, end)?;
- Some(found.distance(start))
-}
-
-/// Performs a forward byte-at-a-time loop until either `ptr >= end_ptr` or
-/// until `confirm(*ptr)` returns `true`. If the former occurs, then `None` is
-/// returned. If the latter occurs, then the pointer at which `confirm` returns
-/// `true` is returned.
-///
-/// # Safety
-///
-/// Callers must provide pointers that are valid for reads and they must
-/// satisfy `start <= end`.
-#[inline(always)]
-pub(crate) unsafe fn fwd_byte_by_byte<F: Fn(u8) -> bool>(
- start: *const u8,
- end: *const u8,
- confirm: F,
-) -> Option<*const u8> {
- debug_assert!(start <= end);
- let mut ptr = start;
- while ptr < end {
- if confirm(*ptr) {
- return Some(ptr);
- }
- ptr = ptr.offset(1);
- }
- None
-}
-
-/// Performs a reverse byte-at-a-time loop until either `ptr < start_ptr` or
-/// until `confirm(*ptr)` returns `true`. If the former occurs, then `None` is
-/// returned. If the latter occurs, then the pointer at which `confirm` returns
-/// `true` is returned.
-///
-/// # Safety
-///
-/// Callers must provide pointers that are valid for reads and they must
-/// satisfy `start <= end`.
-#[inline(always)]
-pub(crate) unsafe fn rev_byte_by_byte<F: Fn(u8) -> bool>(
- start: *const u8,
- end: *const u8,
- confirm: F,
-) -> Option<*const u8> {
- debug_assert!(start <= end);
-
- let mut ptr = end;
- while ptr > start {
- ptr = ptr.offset(-1);
- if confirm(*ptr) {
- return Some(ptr);
- }
- }
- None
-}
-
-/// Performs a forward byte-at-a-time loop until `ptr >= end_ptr` and returns
-/// the number of times `confirm(*ptr)` returns `true`.
-///
-/// # Safety
-///
-/// Callers must provide pointers that are valid for reads and they must
-/// satisfy `start <= end`.
-#[inline(always)]
-pub(crate) unsafe fn count_byte_by_byte<F: Fn(u8) -> bool>(
- start: *const u8,
- end: *const u8,
- confirm: F,
-) -> usize {
- debug_assert!(start <= end);
- let mut ptr = start;
- let mut count = 0;
- while ptr < end {
- if confirm(*ptr) {
- count += 1;
- }
- ptr = ptr.offset(1);
- }
- count
-}
diff --git a/vendor/memchr/src/arch/generic/mod.rs b/vendor/memchr/src/arch/generic/mod.rs
deleted file mode 100644
index 63ee3f0..0000000
--- a/vendor/memchr/src/arch/generic/mod.rs
+++ /dev/null
@@ -1,14 +0,0 @@
-/*!
-This module defines "generic" routines that can be specialized to specific
-architectures.
-
-We don't expose this module primarily because it would require exposing all
-of the internal infrastructure required to write these generic routines.
-That infrastructure should be treated as an implementation detail so that
-it is allowed to evolve. Instead, what we expose are architecture specific
-instantiations of these generic implementations. The generic code just lets us
-write the code once (usually).
-*/
-
-pub(crate) mod memchr;
-pub(crate) mod packedpair;
diff --git a/vendor/memchr/src/arch/generic/packedpair.rs b/vendor/memchr/src/arch/generic/packedpair.rs
deleted file mode 100644
index 8d97cf2..0000000
--- a/vendor/memchr/src/arch/generic/packedpair.rs
+++ /dev/null
@@ -1,317 +0,0 @@
-/*!
-Generic crate-internal routines for the "packed pair" SIMD algorithm.
-
-The "packed pair" algorithm is based on the [generic SIMD] algorithm. The main
-difference is that it (by default) uses a background distribution of byte
-frequencies to heuristically select the pair of bytes to search for.
-
-[generic SIMD]: http://0x80.pl/articles/simd-strfind.html#first-and-last
-*/
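-
-// As a concrete (purely illustrative) example of the idea: for the needle
-// "foobar", the chosen pair might be the bytes at offsets 1 and 5, i.e. 'o'
-// and 'r' (the exact choice depends on the background frequency ranking). A
-// position `i` in the haystack is then only considered a candidate when
-//
-// haystack[i + 1] == b'o' && haystack[i + 5] == b'r'
-//
-// and the vector code below checks that condition for size(V) positions at
-// once by loading the chunks starting at `i + 1` and `i + 5`, comparing each
-// against the splatted pair byte and AND-ing the two comparison results.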
-
-use crate::{
- arch::all::{is_equal_raw, packedpair::Pair},
- ext::Pointer,
- vector::{MoveMask, Vector},
-};
-
-/// A generic architecture dependent "packed pair" finder.
-///
-/// This finder picks two bytes that it believes have high predictive power
-/// for indicating an overall match of a needle. Depending on whether
-/// `Finder::find` or `Finder::find_prefilter` is used, it reports offsets
-/// where the needle matches or could match. In the prefilter case, candidates
-/// are reported whenever the [`Pair`] of bytes given matches.
-///
-/// This is architecture dependent because it uses specific vector operations
-/// to look for occurrences of the pair of bytes.
-///
-/// This type is not meant to be exported and is instead meant to be used as
-/// the implementation for architecture specific facades. Why? Because it's a
-/// bit of a quirky API that requires `inline(always)` annotations. And pretty
-/// much everything has safety obligations due (at least) to the caller needing
-/// to inline calls into routines marked with
-/// `#[target_feature(enable = "...")]`.
-#[derive(Clone, Copy, Debug)]
-pub(crate) struct Finder<V> {
- pair: Pair,
- v1: V,
- v2: V,
- min_haystack_len: usize,
-}
-
-impl<V: Vector> Finder<V> {
- /// Create a new pair searcher. The searcher returned can either report
- /// exact matches of `needle` or act as a prefilter and report candidate
- /// positions of `needle`.
- ///
- /// # Safety
- ///
- /// Callers must ensure that whatever vector type this routine is called
- /// with is supported by the current environment.
- ///
- /// Callers must also ensure that `needle.len() >= 2`.
- #[inline(always)]
- pub(crate) unsafe fn new(needle: &[u8], pair: Pair) -> Finder<V> {
- let max_index = pair.index1().max(pair.index2());
- let min_haystack_len =
- core::cmp::max(needle.len(), usize::from(max_index) + V::BYTES);
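- // Note: requiring at least `max_index + V::BYTES` bytes ensures that
- // `find_in_chunk` can always do a full unaligned load of `V` starting
- // at `cur + max_index` without reading past the end of the haystack.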
- let v1 = V::splat(needle[usize::from(pair.index1())]);
- let v2 = V::splat(needle[usize::from(pair.index2())]);
- Finder { pair, v1, v2, min_haystack_len }
- }
-
- /// Searches the given haystack for the given needle. The needle given
- /// should be the same as the needle that this finder was initialized
- /// with.
- ///
- /// # Panics
- ///
- /// When `haystack.len()` is less than [`Finder::min_haystack_len`].
- ///
- /// # Safety
- ///
- /// Since this is meant to be used with vector functions, callers need to
- /// specialize this inside of a function with a `target_feature` attribute.
- /// Therefore, callers must ensure that whatever target feature is being
- /// used supports the vector functions that this function is specialized
- /// for. (For the specific vector functions used, see the Vector trait
- /// implementations.)
- #[inline(always)]
- pub(crate) unsafe fn find(
- &self,
- haystack: &[u8],
- needle: &[u8],
- ) -> Option<usize> {
- assert!(
- haystack.len() >= self.min_haystack_len,
- "haystack too small, should be at least {} but got {}",
- self.min_haystack_len,
- haystack.len(),
- );
-
- let all = V::Mask::all_zeros_except_least_significant(0);
- let start = haystack.as_ptr();
- let end = start.add(haystack.len());
- let max = end.sub(self.min_haystack_len);
- let mut cur = start;
-
- // N.B. I did experiment with unrolling the loop to deal with size(V)
- // bytes at a time and 2*size(V) bytes at a time. The double unroll
- // was marginally faster while the quadruple unroll was unambiguously
- // slower. In the end, I decided the complexity from unrolling wasn't
- // worth it. I used the memmem/krate/prebuilt/huge-en/ benchmarks to
- // compare.
- while cur <= max {
- if let Some(chunki) = self.find_in_chunk(needle, cur, end, all) {
- return Some(matched(start, cur, chunki));
- }
- cur = cur.add(V::BYTES);
- }
- if cur < end {
- let remaining = end.distance(cur);
- debug_assert!(
- remaining < self.min_haystack_len,
- "remaining bytes should be smaller than the minimum haystack \
- length of {}, but there are {} bytes remaining",
- self.min_haystack_len,
- remaining,
- );
- if remaining < needle.len() {
- return None;
- }
- debug_assert!(
- max < cur,
- "after main loop, cur should have exceeded max",
- );
- let overlap = cur.distance(max);
- debug_assert!(
- overlap > 0,
- "overlap ({}) must always be non-zero",
- overlap,
- );
- debug_assert!(
- overlap < V::BYTES,
- "overlap ({}) cannot possibly be >= than a vector ({})",
- overlap,
- V::BYTES,
- );
- // The mask has all of its bits set except for the first N least
- // significant bits, where N=overlap. This way, any matches that
- // occur in find_in_chunk within the overlap are automatically
- // ignored.
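- // For example, with V::BYTES = 16 and overlap = 3, the 3 least
- // significant bits of the mask are cleared, so the first 3 positions of
- // this final (overlapping) chunk, which were already covered by earlier
- // iterations, can never be reported again.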
- let mask = V::Mask::all_zeros_except_least_significant(overlap);
- cur = max;
- let m = self.find_in_chunk(needle, cur, end, mask);
- if let Some(chunki) = m {
- return Some(matched(start, cur, chunki));
- }
- }
- None
- }
-
- /// Searches the given haystack for offsets that represent candidate
- /// matches of the `needle` given to this finder's constructor. The offsets
- /// returned, if they are a match, correspond to the starting offset of
- /// `needle` in the given `haystack`.
- ///
- /// # Panics
- ///
- /// When `haystack.len()` is less than [`Finder::min_haystack_len`].
- ///
- /// # Safety
- ///
- /// Since this is meant to be used with vector functions, callers need to
- /// specialize this inside of a function with a `target_feature` attribute.
- /// Therefore, callers must ensure that whatever target feature is being
- /// used supports the vector functions that this function is specialized
- /// for. (For the specific vector functions used, see the Vector trait
- /// implementations.)
- #[inline(always)]
- pub(crate) unsafe fn find_prefilter(
- &self,
- haystack: &[u8],
- ) -> Option<usize> {
- assert!(
- haystack.len() >= self.min_haystack_len,
- "haystack too small, should be at least {} but got {}",
- self.min_haystack_len,
- haystack.len(),
- );
-
- let start = haystack.as_ptr();
- let end = start.add(haystack.len());
- let max = end.sub(self.min_haystack_len);
- let mut cur = start;
-
- // N.B. I did experiment with unrolling the loop to deal with size(V)
- // bytes at a time and 2*size(V) bytes at a time. The double unroll
- // was marginally faster while the quadruple unroll was unambiguously
- // slower. In the end, I decided the complexity from unrolling wasn't
- // worth it. I used the memmem/krate/prebuilt/huge-en/ benchmarks to
- // compare.
- while cur <= max {
- if let Some(chunki) = self.find_prefilter_in_chunk(cur) {
- return Some(matched(start, cur, chunki));
- }
- cur = cur.add(V::BYTES);
- }
- if cur < end {
- // This routine immediately quits if a candidate match is found.
- // That means that if we're here, no candidate matches have been
- // found at or before 'cur'. Thus, we don't need to mask anything
- // out even though we might technically search part of the haystack
- // that we've already searched (because we know it can't match).
- cur = max;
- if let Some(chunki) = self.find_prefilter_in_chunk(cur) {
- return Some(matched(start, cur, chunki));
- }
- }
- None
- }
-
- /// Search for an occurrence of our byte pair from the needle in the chunk
- /// pointed to by cur, with the end of the haystack pointed to by end.
- /// When an occurrence is found, memcmp is run to check if a match occurs
- /// at the corresponding position.
- ///
- /// `mask` should have bits set corresponding to the positions in the chunk
- /// in which matches are considered. This is only used for the last vector
- /// load where the beginning of the vector might have overlapped with the
- /// last load in the main loop. The mask lets us avoid visiting positions
- /// that have already been discarded as matches.
- ///
- /// # Safety
- ///
- /// It must be safe to do an unaligned read of size(V) bytes starting at
- /// both (cur + self.index1) and (cur + self.index2). It must also be safe
- /// to do unaligned loads on cur up to (end - needle.len()).
- #[inline(always)]
- unsafe fn find_in_chunk(
- &self,
- needle: &[u8],
- cur: *const u8,
- end: *const u8,
- mask: V::Mask,
- ) -> Option<usize> {
- let index1 = usize::from(self.pair.index1());
- let index2 = usize::from(self.pair.index2());
- let chunk1 = V::load_unaligned(cur.add(index1));
- let chunk2 = V::load_unaligned(cur.add(index2));
- let eq1 = chunk1.cmpeq(self.v1);
- let eq2 = chunk2.cmpeq(self.v2);
-
- let mut offsets = eq1.and(eq2).movemask().and(mask);
- while offsets.has_non_zero() {
- let offset = offsets.first_offset();
- let cur = cur.add(offset);
- if end.sub(needle.len()) < cur {
- return None;
- }
- if is_equal_raw(needle.as_ptr(), cur, needle.len()) {
- return Some(offset);
- }
- offsets = offsets.clear_least_significant_bit();
- }
- None
- }
-
- /// Search for an occurrence of our byte pair from the needle in the chunk
- /// pointed to by cur, with the end of the haystack pointed to by end.
- /// When an occurrence is found, memcmp is run to check if a match occurs
- /// at the corresponding position.
- ///
- /// # Safety
- ///
- /// It must be safe to do an unaligned read of size(V) bytes starting at
- /// both (cur + self.index1) and (cur + self.index2). It must also be safe
- /// to do unaligned reads on cur up to (end - needle.len()).
- #[inline(always)]
- unsafe fn find_prefilter_in_chunk(&self, cur: *const u8) -> Option<usize> {
- let index1 = usize::from(self.pair.index1());
- let index2 = usize::from(self.pair.index2());
- let chunk1 = V::load_unaligned(cur.add(index1));
- let chunk2 = V::load_unaligned(cur.add(index2));
- let eq1 = chunk1.cmpeq(self.v1);
- let eq2 = chunk2.cmpeq(self.v2);
-
- let offsets = eq1.and(eq2).movemask();
- if !offsets.has_non_zero() {
- return None;
- }
- Some(offsets.first_offset())
- }
-
- /// Returns the pair of offsets (into the needle) used to check as a
- /// predicate before confirming whether a needle exists at a particular
- /// position.
- #[inline]
- pub(crate) fn pair(&self) -> &Pair {
- &self.pair
- }
-
- /// Returns the minimum haystack length that this `Finder` can search.
- ///
- /// Providing a haystack to this `Finder` shorter than this length is
- /// guaranteed to result in a panic.
- #[inline(always)]
- pub(crate) fn min_haystack_len(&self) -> usize {
- self.min_haystack_len
- }
-}
-
-/// Accepts a chunk-relative offset and returns a haystack-relative offset.
-///
-/// This used to be marked `#[cold]` and `#[inline(never)]`, but I couldn't
-/// observe a consistent measurable difference between that and just inlining
-/// it. So we go with inlining it.
-///
-/// # Safety
-///
-/// Same as `ptr::offset_from`, in addition to `cur >= start`.
-#[inline(always)]
-unsafe fn matched(start: *const u8, cur: *const u8, chunki: usize) -> usize {
- cur.distance(start) + chunki
-}
-
-// If you're looking for tests, those are run for each instantiation of the
-// above code. So for example, see arch::x86_64::sse2::packedpair.