// core/slice/ascii.rs
1//! Operations on ASCII `[u8]`.
2
3use core::ascii::EscapeDefault;
4
5use crate::fmt::{self, Write};
6#[cfg(not(all(target_arch = "loongarch64", target_feature = "lsx")))]
7use crate::intrinsics::const_eval_select;
8use crate::{ascii, iter, ops};
9
impl [u8] {
    /// Checks if all bytes in this slice are within the ASCII range.
    ///
    /// An empty slice returns `true`.
    #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
    #[rustc_const_stable(feature = "const_slice_is_ascii", since = "1.74.0")]
    #[must_use]
    #[inline]
    pub const fn is_ascii(&self) -> bool {
        // Dispatches to the target-specific free function at the bottom of
        // this file (SWAR fallback, SSE2, or LSX depending on the target).
        is_ascii(self)
    }

    /// If this slice [`is_ascii`](Self::is_ascii), returns it as a slice of
    /// [ASCII characters](`ascii::Char`), otherwise returns `None`.
    #[unstable(feature = "ascii_char", issue = "110998")]
    #[must_use]
    #[inline]
    pub const fn as_ascii(&self) -> Option<&[ascii::Char]> {
        if self.is_ascii() {
            // SAFETY: Just checked that it's ASCII
            Some(unsafe { self.as_ascii_unchecked() })
        } else {
            None
        }
    }

    /// Converts this slice of bytes into a slice of ASCII characters,
    /// without checking whether they're valid.
    ///
    /// # Safety
    ///
    /// Every byte in the slice must be in `0..=127`, or else this is UB.
    #[unstable(feature = "ascii_char", issue = "110998")]
    #[must_use]
    #[inline]
    pub const unsafe fn as_ascii_unchecked(&self) -> &[ascii::Char] {
        // `ascii::Char` is a `#[repr(u8)]` type whose valid values are
        // exactly 0..=127, so an all-ASCII `[u8]` has the same layout.
        let byte_ptr: *const [u8] = self;
        let ascii_ptr = byte_ptr as *const [ascii::Char];
        // SAFETY: The caller promised all the bytes are ASCII
        unsafe { &*ascii_ptr }
    }

    /// Checks that two slices are an ASCII case-insensitive match.
    ///
    /// Same as `to_ascii_lowercase(a) == to_ascii_lowercase(b)`,
    /// but without allocating and copying temporaries.
    #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
    #[rustc_const_stable(feature = "const_eq_ignore_ascii_case", since = "1.89.0")]
    #[must_use]
    #[inline]
    pub const fn eq_ignore_ascii_case(&self, other: &[u8]) -> bool {
        if self.len() != other.len() {
            return false;
        }

        #[cfg(all(target_arch = "x86_64", target_feature = "sse2"))]
        {
            const CHUNK_SIZE: usize = 16;
            // The following function has two invariants:
            // 1. The slice lengths must be equal, which we checked above.
            // 2. The slice lengths must be greater than or equal to N, which
            //    this if-statement is checking.
            if self.len() >= CHUNK_SIZE {
                return self.eq_ignore_ascii_case_chunks::<CHUNK_SIZE>(other);
            }
        }

        self.eq_ignore_ascii_case_simple(other)
    }

    /// ASCII case-insensitive equality check without chunk-at-a-time
    /// optimization.
    ///
    /// Callers must have already verified that the lengths are equal; this
    /// only walks until one slice pattern stops matching.
    #[inline]
    const fn eq_ignore_ascii_case_simple(&self, other: &[u8]) -> bool {
        // FIXME(const-hack): This implementation can be reverted when
        // `core::iter::zip` is allowed in const. The original implementation:
        // self.len() == other.len() && iter::zip(self, other).all(|(a, b)| a.eq_ignore_ascii_case(b))
        let mut a = self;
        let mut b = other;

        // Pop one byte off the front of each slice per iteration; the loop
        // ends when either slice is empty (same time, since lengths match).
        while let ([first_a, rest_a @ ..], [first_b, rest_b @ ..]) = (a, b) {
            if first_a.eq_ignore_ascii_case(&first_b) {
                a = rest_a;
                b = rest_b;
            } else {
                return false;
            }
        }

        true
    }

    /// Optimized version of `eq_ignore_ascii_case` to process chunks at a time.
    ///
    /// Platforms that have SIMD instructions may benefit from this
    /// implementation over `eq_ignore_ascii_case_simple`.
    ///
    /// # Invariants
    ///
    /// The caller must guarantee that the slices are equal in length, and the
    /// slice lengths are greater than or equal to `N` bytes.
    #[cfg(all(target_arch = "x86_64", target_feature = "sse2"))]
    #[inline]
    const fn eq_ignore_ascii_case_chunks<const N: usize>(&self, other: &[u8]) -> bool {
        // FIXME(const-hack): The while-loops that follow should be replaced by
        // for-loops when available in const.

        let (self_chunks, self_rem) = self.as_chunks::<N>();
        let (other_chunks, _) = other.as_chunks::<N>();

        // Branchless check to encourage auto-vectorization
        #[inline(always)]
        const fn eq_ignore_ascii_inner<const L: usize>(lhs: &[u8; L], rhs: &[u8; L]) -> bool {
            // `&=` instead of `&&` keeps the loop body branch-free so LLVM
            // can vectorize the whole chunk comparison.
            let mut equal_ascii = true;
            let mut j = 0;
            while j < L {
                equal_ascii &= lhs[j].eq_ignore_ascii_case(&rhs[j]);
                j += 1;
            }

            equal_ascii
        }

        // Process the chunks, returning early if an inequality is found
        let mut i = 0;
        while i < self_chunks.len() && i < other_chunks.len() {
            if !eq_ignore_ascii_inner(&self_chunks[i], &other_chunks[i]) {
                return false;
            }
            i += 1;
        }

        // Check the length invariant which is necessary for the tail-handling
        // logic to be correct. This should have been upheld by the caller,
        // otherwise lengths less than N will compare as true without any
        // checking.
        debug_assert!(self.len() >= N);

        // If there are remaining tails, load the last N bytes in the slices to
        // avoid falling back to per-byte checking. (This re-checks up to
        // N - tail-length bytes that the chunk loop already compared, which is
        // harmless.)
        if !self_rem.is_empty() {
            if let (Some(a_rem), Some(b_rem)) = (self.last_chunk::<N>(), other.last_chunk::<N>()) {
                if !eq_ignore_ascii_inner(a_rem, b_rem) {
                    return false;
                }
            }
        }

        true
    }

    /// Converts this slice to its ASCII upper case equivalent in-place.
    ///
    /// ASCII letters 'a' to 'z' are mapped to 'A' to 'Z',
    /// but non-ASCII letters are unchanged.
    ///
    /// To return a new uppercased value without modifying the existing one, use
    /// [`to_ascii_uppercase`].
    ///
    /// [`to_ascii_uppercase`]: #method.to_ascii_uppercase
    #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
    #[rustc_const_stable(feature = "const_make_ascii", since = "1.84.0")]
    #[inline]
    pub const fn make_ascii_uppercase(&mut self) {
        // FIXME(const-hack): We would like to simply iterate using `for` loops but this isn't currently allowed in constant expressions.
        let mut i = 0;
        while i < self.len() {
            let byte = &mut self[i];
            byte.make_ascii_uppercase();
            i += 1;
        }
    }

    /// Converts this slice to its ASCII lower case equivalent in-place.
    ///
    /// ASCII letters 'A' to 'Z' are mapped to 'a' to 'z',
    /// but non-ASCII letters are unchanged.
    ///
    /// To return a new lowercased value without modifying the existing one, use
    /// [`to_ascii_lowercase`].
    ///
    /// [`to_ascii_lowercase`]: #method.to_ascii_lowercase
    #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
    #[rustc_const_stable(feature = "const_make_ascii", since = "1.84.0")]
    #[inline]
    pub const fn make_ascii_lowercase(&mut self) {
        // FIXME(const-hack): We would like to simply iterate using `for` loops but this isn't currently allowed in constant expressions.
        let mut i = 0;
        while i < self.len() {
            let byte = &mut self[i];
            byte.make_ascii_lowercase();
            i += 1;
        }
    }

    /// Returns an iterator that produces an escaped version of this slice,
    /// treating it as an ASCII string.
    ///
    /// # Examples
    ///
    /// ```
    /// let s = b"0\t\r\n'\"\\\x9d";
    /// let escaped = s.escape_ascii().to_string();
    /// assert_eq!(escaped, "0\\t\\r\\n\\'\\\"\\\\\\x9d");
    /// ```
    #[must_use = "this returns the escaped bytes as an iterator, \
                  without modifying the original"]
    #[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
    pub fn escape_ascii(&self) -> EscapeAscii<'_> {
        // Lazily expands each byte into its `ascii::escape_default` sequence.
        EscapeAscii { inner: self.iter().flat_map(EscapeByte) }
    }

    /// Returns a byte slice with leading ASCII whitespace bytes removed.
    ///
    /// 'Whitespace' refers to the definition used by
    /// [`u8::is_ascii_whitespace`].
    ///
    /// # Examples
    ///
    /// ```
    /// assert_eq!(b" \t hello world\n".trim_ascii_start(), b"hello world\n");
    /// assert_eq!(b"  ".trim_ascii_start(), b"");
    /// assert_eq!(b"".trim_ascii_start(), b"");
    /// ```
    #[stable(feature = "byte_slice_trim_ascii", since = "1.80.0")]
    #[rustc_const_stable(feature = "byte_slice_trim_ascii", since = "1.80.0")]
    #[inline]
    pub const fn trim_ascii_start(&self) -> &[u8] {
        let mut bytes = self;
        // Note: A pattern matching based approach (instead of indexing) allows
        // making the function const.
        while let [first, rest @ ..] = bytes {
            if first.is_ascii_whitespace() {
                bytes = rest;
            } else {
                break;
            }
        }
        bytes
    }

    /// Returns a byte slice with trailing ASCII whitespace bytes removed.
    ///
    /// 'Whitespace' refers to the definition used by
    /// [`u8::is_ascii_whitespace`].
    ///
    /// # Examples
    ///
    /// ```
    /// assert_eq!(b"\r hello world\n ".trim_ascii_end(), b"\r hello world");
    /// assert_eq!(b"  ".trim_ascii_end(), b"");
    /// assert_eq!(b"".trim_ascii_end(), b"");
    /// ```
    #[stable(feature = "byte_slice_trim_ascii", since = "1.80.0")]
    #[rustc_const_stable(feature = "byte_slice_trim_ascii", since = "1.80.0")]
    #[inline]
    pub const fn trim_ascii_end(&self) -> &[u8] {
        let mut bytes = self;
        // Note: A pattern matching based approach (instead of indexing) allows
        // making the function const.
        while let [rest @ .., last] = bytes {
            if last.is_ascii_whitespace() {
                bytes = rest;
            } else {
                break;
            }
        }
        bytes
    }

    /// Returns a byte slice with leading and trailing ASCII whitespace bytes
    /// removed.
    ///
    /// 'Whitespace' refers to the definition used by
    /// [`u8::is_ascii_whitespace`].
    ///
    /// # Examples
    ///
    /// ```
    /// assert_eq!(b"\r hello world\n ".trim_ascii(), b"hello world");
    /// assert_eq!(b"  ".trim_ascii(), b"");
    /// assert_eq!(b"".trim_ascii(), b"");
    /// ```
    #[stable(feature = "byte_slice_trim_ascii", since = "1.80.0")]
    #[rustc_const_stable(feature = "byte_slice_trim_ascii", since = "1.80.0")]
    #[inline]
    pub const fn trim_ascii(&self) -> &[u8] {
        self.trim_ascii_start().trim_ascii_end()
    }
}
300
// Generates a zero-sized `EscapeByte` type implementing `Fn(&u8) ->
// ascii::EscapeDefault`, so that `EscapeAscii` can name the closure type of
// its `flat_map` in its struct definition (plain closures have anonymous
// types and could not appear in the `FlatMap` type parameter).
impl_fn_for_zst! {
    #[derive(Clone)]
    struct EscapeByte impl Fn = |byte: &u8| -> ascii::EscapeDefault {
        ascii::escape_default(*byte)
    };
}
307
/// An iterator over the escaped version of a byte slice.
///
/// This `struct` is created by the [`slice::escape_ascii`] method. See its
/// documentation for more information.
#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
#[derive(Clone)]
#[must_use = "iterators are lazy and do nothing unless consumed"]
pub struct EscapeAscii<'a> {
    // Each source byte is lazily expanded into its escape sequence; the
    // named ZST `EscapeByte` stands in for the closure type.
    inner: iter::FlatMap<super::Iter<'a, u8>, ascii::EscapeDefault, EscapeByte>,
}
318
#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
impl<'a> iter::Iterator for EscapeAscii<'a> {
    type Item = u8;

    // Every method simply forwards to the inner flat-map iterator; the
    // explicit `try_fold`/`fold` overrides preserve its internal-iteration
    // specializations rather than falling back to repeated `next` calls.
    #[inline]
    fn next(&mut self) -> Option<u8> {
        self.inner.next()
    }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.inner.size_hint()
    }
    #[inline]
    fn try_fold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
    where
        Fold: FnMut(Acc, Self::Item) -> R,
        R: ops::Try<Output = Acc>,
    {
        self.inner.try_fold(init, fold)
    }
    #[inline]
    fn fold<Acc, Fold>(self, init: Acc, fold: Fold) -> Acc
    where
        Fold: FnMut(Acc, Self::Item) -> Acc,
    {
        self.inner.fold(init, fold)
    }
    #[inline]
    fn last(mut self) -> Option<u8> {
        // The last element is reachable in O(1) from the back instead of
        // draining the whole iterator from the front.
        self.next_back()
    }
}
350
351#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
352impl<'a> iter::DoubleEndedIterator for EscapeAscii<'a> {
353 fn next_back(&mut self) -> Option<u8> {
354 self.inner.next_back()
355 }
356}
#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
// Marker impl: `EscapeAscii` keeps returning `None` once exhausted.
impl<'a> iter::FusedIterator for EscapeAscii<'a> {}
#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
impl<'a> fmt::Display for EscapeAscii<'a> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // disassemble iterator, including front/back parts of flatmap in case it has been partially consumed
        let (front, slice, back) = self.clone().inner.into_parts();
        let front = front.unwrap_or(EscapeDefault::empty());
        let mut bytes = slice.unwrap_or_default().as_slice();
        let back = back.unwrap_or(EscapeDefault::empty());

        // usually empty, so the formatter won't have to do any work
        for byte in front {
            f.write_char(byte as char)?;
        }

        // Bytes outside the printable range, plus the three characters that
        // `ascii::escape_default` escapes within it.
        fn needs_escape(b: u8) -> bool {
            b > 0x7E || b < 0x20 || b == b'\\' || b == b'\'' || b == b'"'
        }

        while bytes.len() > 0 {
            // fast path for the printable, non-escaped subset of ascii
            let prefix = bytes.iter().take_while(|&&b| !needs_escape(b)).count();
            // SAFETY: prefix length was derived by counting bytes in the same slice, so it's in-bounds
            let (prefix, remainder) = unsafe { bytes.split_at_unchecked(prefix) };
            // SAFETY: prefix is a valid utf8 sequence, as it's a subset of ASCII
            let prefix = unsafe { crate::str::from_utf8_unchecked(prefix) };

            f.write_str(prefix)?; // the fast part

            bytes = remainder;

            if let Some(&b) = bytes.first() {
                // `b` is the byte that stopped the prefix scan, so it needs
                // escaping; `EscapeDefault`'s `Display` writes the whole
                // escape sequence as a str in one go.
                fmt::Display::fmt(&ascii::escape_default(b), f)?;
                bytes = &bytes[1..];
            }
        }

        // also usually empty
        for byte in back {
            f.write_char(byte as char)?;
        }
        Ok(())
    }
}
#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
impl<'a> fmt::Debug for EscapeAscii<'a> {
    // Deliberately opaque: the partially-consumed flat-map state inside
    // `inner` is not useful to show, so print `EscapeAscii { .. }`.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("EscapeAscii").finish_non_exhaustive()
    }
}
409
/// ASCII test *without* the chunk-at-a-time optimizations.
///
/// This is carefully structured to produce nice small code -- it's smaller in
/// `-O` than what the "obvious" ways produces under `-C opt-level=s`. If you
/// touch it, be sure to run (and update if needed) the assembly test.
#[unstable(feature = "str_internals", issue = "none")]
#[doc(hidden)]
#[inline]
pub const fn is_ascii_simple(mut bytes: &[u8]) -> bool {
    // Strip ASCII bytes off the end until either the slice is empty
    // (everything was ASCII) or a non-ASCII byte is found, in which case
    // `bytes` is left non-empty and we return false.
    while let [rest @ .., last] = bytes {
        if !last.is_ascii() {
            break;
        }
        bytes = rest;
    }
    bytes.is_empty()
}
427
428/// Optimized ASCII test that will use usize-at-a-time operations instead of
429/// byte-at-a-time operations (when possible).
430///
431/// The algorithm we use here is pretty simple. If `s` is too short, we just
432/// check each byte and be done with it. Otherwise:
433///
434/// - Read the first word with an unaligned load.
435/// - Align the pointer, read subsequent words until end with aligned loads.
436/// - Read the last `usize` from `s` with an unaligned load.
437///
438/// If any of these loads produces something for which `contains_nonascii`
439/// (above) returns true, then we know the answer is false.
440#[cfg(not(any(
441 all(target_arch = "x86_64", target_feature = "sse2"),
442 all(target_arch = "loongarch64", target_feature = "lsx")
443)))]
444#[inline]
445#[rustc_allow_const_fn_unstable(const_eval_select)] // fallback impl has same behavior
446const fn is_ascii(s: &[u8]) -> bool {
447 // The runtime version behaves the same as the compiletime version, it's
448 // just more optimized.
449 const_eval_select!(
450 @capture { s: &[u8] } -> bool:
451 if const {
452 is_ascii_simple(s)
453 } else {
454 /// Returns `true` if any byte in the word `v` is nonascii (>= 128). Snarfed
455 /// from `../str/mod.rs`, which does something similar for utf8 validation.
456 const fn contains_nonascii(v: usize) -> bool {
457 const NONASCII_MASK: usize = usize::repeat_u8(0x80);
458 (NONASCII_MASK & v) != 0
459 }
460
461 const USIZE_SIZE: usize = size_of::<usize>();
462
463 let len = s.len();
464 let align_offset = s.as_ptr().align_offset(USIZE_SIZE);
465
466 // If we wouldn't gain anything from the word-at-a-time implementation, fall
467 // back to a scalar loop.
468 //
469 // We also do this for architectures where `size_of::<usize>()` isn't
470 // sufficient alignment for `usize`, because it's a weird edge case.
471 if len < USIZE_SIZE || len < align_offset || USIZE_SIZE < align_of::<usize>() {
472 return is_ascii_simple(s);
473 }
474
475 // We always read the first word unaligned, which means `align_offset` is
476 // 0, we'd read the same value again for the aligned read.
477 let offset_to_aligned = if align_offset == 0 { USIZE_SIZE } else { align_offset };
478
479 let start = s.as_ptr();
480 // SAFETY: We verify `len < USIZE_SIZE` above.
481 let first_word = unsafe { (start as *const usize).read_unaligned() };
482
483 if contains_nonascii(first_word) {
484 return false;
485 }
486 // We checked this above, somewhat implicitly. Note that `offset_to_aligned`
487 // is either `align_offset` or `USIZE_SIZE`, both of are explicitly checked
488 // above.
489 debug_assert!(offset_to_aligned <= len);
490
491 // SAFETY: word_ptr is the (properly aligned) usize ptr we use to read the
492 // middle chunk of the slice.
493 let mut word_ptr = unsafe { start.add(offset_to_aligned) as *const usize };
494
495 // `byte_pos` is the byte index of `word_ptr`, used for loop end checks.
496 let mut byte_pos = offset_to_aligned;
497
498 // Paranoia check about alignment, since we're about to do a bunch of
499 // unaligned loads. In practice this should be impossible barring a bug in
500 // `align_offset` though.
501 // While this method is allowed to spuriously fail in CTFE, if it doesn't
502 // have alignment information it should have given a `usize::MAX` for
503 // `align_offset` earlier, sending things through the scalar path instead of
504 // this one, so this check should pass if it's reachable.
505 debug_assert!(word_ptr.is_aligned_to(align_of::<usize>()));
506
507 // Read subsequent words until the last aligned word, excluding the last
508 // aligned word by itself to be done in tail check later, to ensure that
509 // tail is always one `usize` at most to extra branch `byte_pos == len`.
510 while byte_pos < len - USIZE_SIZE {
511 // Sanity check that the read is in bounds
512 debug_assert!(byte_pos + USIZE_SIZE <= len);
513 // And that our assumptions about `byte_pos` hold.
514 debug_assert!(word_ptr.cast::<u8>() == start.wrapping_add(byte_pos));
515
516 // SAFETY: We know `word_ptr` is properly aligned (because of
517 // `align_offset`), and we know that we have enough bytes between `word_ptr` and the end
518 let word = unsafe { word_ptr.read() };
519 if contains_nonascii(word) {
520 return false;
521 }
522
523 byte_pos += USIZE_SIZE;
524 // SAFETY: We know that `byte_pos <= len - USIZE_SIZE`, which means that
525 // after this `add`, `word_ptr` will be at most one-past-the-end.
526 word_ptr = unsafe { word_ptr.add(1) };
527 }
528
529 // Sanity check to ensure there really is only one `usize` left. This should
530 // be guaranteed by our loop condition.
531 debug_assert!(byte_pos <= len && len - byte_pos <= USIZE_SIZE);
532
533 // SAFETY: This relies on `len >= USIZE_SIZE`, which we check at the start.
534 let last_word = unsafe { (start.add(len - USIZE_SIZE) as *const usize).read_unaligned() };
535
536 !contains_nonascii(last_word)
537 }
538 )
539}
540
/// Chunk size for SSE2 vectorized ASCII checking (4x 16-byte loads).
///
/// 64 bytes lets `is_ascii_sse2` OR together four 128-bit registers per loop
/// iteration before performing a single `pmovmskb` test.
#[cfg(all(target_arch = "x86_64", target_feature = "sse2"))]
const SSE2_CHUNK_SIZE: usize = 64;
544
545#[cfg(all(target_arch = "x86_64", target_feature = "sse2"))]
546#[inline]
547fn is_ascii_sse2(bytes: &[u8]) -> bool {
548 use crate::arch::x86_64::{__m128i, _mm_loadu_si128, _mm_movemask_epi8, _mm_or_si128};
549
550 let (chunks, rest) = bytes.as_chunks::<SSE2_CHUNK_SIZE>();
551
552 for chunk in chunks {
553 let ptr = chunk.as_ptr();
554 // SAFETY: chunk is 64 bytes. SSE2 is baseline on x86_64.
555 let mask = unsafe {
556 let a1 = _mm_loadu_si128(ptr as *const __m128i);
557 let a2 = _mm_loadu_si128(ptr.add(16) as *const __m128i);
558 let b1 = _mm_loadu_si128(ptr.add(32) as *const __m128i);
559 let b2 = _mm_loadu_si128(ptr.add(48) as *const __m128i);
560 // OR all chunks - if any byte has high bit set, combined will too.
561 let combined = _mm_or_si128(_mm_or_si128(a1, a2), _mm_or_si128(b1, b2));
562 // Create a mask from the MSBs of each byte.
563 // If any byte is >= 128, its MSB is 1, so the mask will be non-zero.
564 _mm_movemask_epi8(combined)
565 };
566 if mask != 0 {
567 return false;
568 }
569 }
570
571 // Handle remaining bytes
572 rest.iter().all(|b| b.is_ascii())
573}
574
/// ASCII test optimized to use the `pmovmskb` instruction on `x86-64`.
///
/// Uses explicit SSE2 intrinsics to prevent LLVM from auto-vectorizing with
/// broken AVX-512 code that extracts mask bits one-by-one.
#[cfg(all(target_arch = "x86_64", target_feature = "sse2"))]
#[inline]
#[rustc_allow_const_fn_unstable(const_eval_select)]
const fn is_ascii(bytes: &[u8]) -> bool {
    const USIZE_SIZE: usize = size_of::<usize>();
    // 0x80 repeated in every byte of the word: AND-ing a word with this is
    // non-zero iff some byte has its high (non-ASCII) bit set.
    const NONASCII_MASK: usize = usize::MAX / 255 * 0x80;

    const_eval_select!(
        @capture { bytes: &[u8] } -> bool:
        if const {
            // CTFE cannot use SIMD; fall back to the simple scalar check.
            is_ascii_simple(bytes)
        } else {
            // For small inputs, use usize-at-a-time processing to avoid SSE2 call overhead.
            if bytes.len() < SSE2_CHUNK_SIZE {
                let chunks = bytes.chunks_exact(USIZE_SIZE);
                let remainder = chunks.remainder();
                for chunk in chunks {
                    // `chunks_exact` guarantees each chunk is exactly
                    // USIZE_SIZE bytes, so the conversion cannot fail.
                    let word = usize::from_ne_bytes(chunk.try_into().unwrap());
                    if (word & NONASCII_MASK) != 0 {
                        return false;
                    }
                }
                return remainder.iter().all(|b| b.is_ascii());
            }

            is_ascii_sse2(bytes)
        }
    )
}
608
/// ASCII test optimized to use the `vmskltz.b` instruction on `loongarch64`.
///
/// Other platforms are not likely to benefit from this code structure, so they
/// use SWAR techniques to test for ASCII in `usize`-sized chunks.
#[cfg(all(target_arch = "loongarch64", target_feature = "lsx"))]
#[inline]
const fn is_ascii(bytes: &[u8]) -> bool {
    // Process chunks of 32 bytes at a time in the fast path to enable
    // auto-vectorization and use of `vmskltz.b`. Two 128-bit vector registers
    // can be OR'd together and then the resulting vector can be tested for
    // non-ASCII bytes.
    const CHUNK_SIZE: usize = 32;

    let mut i = 0;

    while i + CHUNK_SIZE <= bytes.len() {
        let chunk_end = i + CHUNK_SIZE;

        // Get LLVM to produce a `vmskltz.b` instruction on loongarch64 which
        // creates a mask from the most significant bit of each byte.
        // ASCII bytes are less than 128 (0x80), so their most significant
        // bit is unset. (The counting loop below is written this way
        // deliberately so LLVM recognizes the pattern; don't "simplify" it.)
        let mut count = 0;
        while i < chunk_end {
            count += bytes[i].is_ascii() as u8;
            i += 1;
        }

        // All bytes should be <= 127 so count is equal to chunk size.
        if count != CHUNK_SIZE as u8 {
            return false;
        }
    }

    // Process the remaining `bytes.len() % CHUNK_SIZE` bytes.
    // The branchless `&=` accumulation keeps this tail loop simple for the
    // optimizer as well.
    let mut is_ascii = true;
    while i < bytes.len() {
        is_ascii &= bytes[i].is_ascii();
        i += 1;
    }

    is_ascii
}