1#![allow(non_camel_case_types)]
7#![allow(unused_imports)]
8
9use crate::{core_arch::simd, intrinsics::simd::*, marker::Sized, mem, ptr};
10
11#[cfg(test)]
12use stdarch_test::assert_instr;
13
types! {
    #![stable(feature = "wasm_simd", since = "1.54.0")]

    /// WASM-specific 128-bit wide SIMD vector type.
    ///
    /// The lane interpretation (count, width, signedness, int vs. float) is
    /// determined by each intrinsic operating on it, not by the type itself;
    /// the `conversions!` block below reinterprets it freely.
    pub struct v128(4 x i32);
}
40
// Generates zero-cost bit-reinterpretation conversions between the public
// `v128` type and the internal typed vectors in `core_arch::simd`.
// For each `(name = ty)` pair this emits:
//   * `v128::$name(self) -> $ty` — view a `v128` as `$ty`
//   * `$ty::v128(self) -> v128`  — view `$ty` back as `v128`
macro_rules! conversions {
    ($(($name:ident = $ty:ty))*) => {
        impl v128 {
            $(
                #[inline(always)]
                pub(crate) fn $name(self) -> $ty {
                    // SAFETY: both sides are 128-bit vector types, so the
                    // transmute is a plain same-size bit reinterpretation
                    // (size equality is checked by the compiler).
                    unsafe { mem::transmute(self) }
                }
            )*
        }
        $(
            impl $ty {
                #[inline(always)]
                pub(crate) const fn v128(self) -> v128 {
                    // SAFETY: same-size (128-bit) bit reinterpretation.
                    unsafe { mem::transmute(self) }
                }
            }
        )*
    }
}
61
// Instantiate the `v128` <-> typed-vector conversions for every lane
// interpretation used by the intrinsics in this module.
conversions! {
    (as_u8x16 = simd::u8x16)
    (as_u16x8 = simd::u16x8)
    (as_u32x4 = simd::u32x4)
    (as_u64x2 = simd::u64x2)
    (as_i8x16 = simd::i8x16)
    (as_i16x8 = simd::i16x8)
    (as_i32x4 = simd::i32x4)
    (as_i64x2 = simd::i64x2)
    (as_f32x4 = simd::f32x4)
    (as_f64x2 = simd::f64x2)
}
74
// Raw LLVM intrinsic declarations backing the intrinsics below that have no
// generic `simd_*` platform-intrinsic equivalent. The `link_name`s are
// resolved by LLVM itself; `improper_ctypes` is allowed because SIMD vector
// types are not FFI-safe in the usual sense, but these declarations never
// cross a real ABI boundary.
#[allow(improper_ctypes)]
unsafe extern "unadjusted" {
    #[link_name = "llvm.wasm.swizzle"]
    fn llvm_swizzle(a: simd::i8x16, b: simd::i8x16) -> simd::i8x16;

    // v128-wide bitwise helpers (lane type is irrelevant, i8x16 is used).
    #[link_name = "llvm.wasm.bitselect.v16i8"]
    fn llvm_bitselect(a: simd::i8x16, b: simd::i8x16, c: simd::i8x16) -> simd::i8x16;
    #[link_name = "llvm.wasm.anytrue.v16i8"]
    fn llvm_any_true_i8x16(x: simd::i8x16) -> i32;

    // 8-bit lane operations.
    #[link_name = "llvm.wasm.alltrue.v16i8"]
    fn llvm_i8x16_all_true(x: simd::i8x16) -> i32;
    #[link_name = "llvm.wasm.bitmask.v16i8"]
    fn llvm_bitmask_i8x16(a: simd::i8x16) -> i32;
    #[link_name = "llvm.wasm.avgr.unsigned.v16i8"]
    fn llvm_avgr_u_i8x16(a: simd::i8x16, b: simd::i8x16) -> simd::i8x16;

    // 16-bit lane operations.
    #[link_name = "llvm.wasm.extadd.pairwise.signed.v8i16"]
    fn llvm_i16x8_extadd_pairwise_i8x16_s(x: simd::i8x16) -> simd::i16x8;
    #[link_name = "llvm.wasm.extadd.pairwise.unsigned.v8i16"]
    fn llvm_i16x8_extadd_pairwise_i8x16_u(x: simd::i8x16) -> simd::i16x8;
    #[link_name = "llvm.wasm.q15mulr.sat.signed"]
    fn llvm_q15mulr(a: simd::i16x8, b: simd::i16x8) -> simd::i16x8;
    #[link_name = "llvm.wasm.alltrue.v8i16"]
    fn llvm_i16x8_all_true(x: simd::i16x8) -> i32;
    #[link_name = "llvm.wasm.bitmask.v8i16"]
    fn llvm_bitmask_i16x8(a: simd::i16x8) -> i32;
    #[link_name = "llvm.wasm.avgr.unsigned.v8i16"]
    fn llvm_avgr_u_i16x8(a: simd::i16x8, b: simd::i16x8) -> simd::i16x8;

    // 32-bit lane operations.
    #[link_name = "llvm.wasm.extadd.pairwise.signed.v4i32"]
    fn llvm_i32x4_extadd_pairwise_i16x8_s(x: simd::i16x8) -> simd::i32x4;
    #[link_name = "llvm.wasm.extadd.pairwise.unsigned.v4i32"]
    fn llvm_i32x4_extadd_pairwise_i16x8_u(x: simd::i16x8) -> simd::i32x4;
    #[link_name = "llvm.wasm.alltrue.v4i32"]
    fn llvm_i32x4_all_true(x: simd::i32x4) -> i32;
    #[link_name = "llvm.wasm.bitmask.v4i32"]
    fn llvm_bitmask_i32x4(a: simd::i32x4) -> i32;
    #[link_name = "llvm.wasm.dot"]
    fn llvm_i32x4_dot_i16x8_s(a: simd::i16x8, b: simd::i16x8) -> simd::i32x4;

    // 64-bit lane operations.
    #[link_name = "llvm.wasm.alltrue.v2i64"]
    fn llvm_i64x2_all_true(x: simd::i64x2) -> i32;
    #[link_name = "llvm.wasm.bitmask.v2i64"]
    fn llvm_bitmask_i64x2(a: simd::i64x2) -> i32;

    // f32x4 operations that need LLVM's generic float intrinsics.
    #[link_name = "llvm.nearbyint.v4f32"]
    fn llvm_f32x4_nearest(x: simd::f32x4) -> simd::f32x4;
    #[link_name = "llvm.minimum.v4f32"]
    fn llvm_f32x4_min(x: simd::f32x4, y: simd::f32x4) -> simd::f32x4;
    #[link_name = "llvm.maximum.v4f32"]
    fn llvm_f32x4_max(x: simd::f32x4, y: simd::f32x4) -> simd::f32x4;

    // f64x2 operations that need LLVM's generic float intrinsics.
    #[link_name = "llvm.nearbyint.v2f64"]
    fn llvm_f64x2_nearest(x: simd::f64x2) -> simd::f64x2;
    #[link_name = "llvm.minimum.v2f64"]
    fn llvm_f64x2_min(x: simd::f64x2, y: simd::f64x2) -> simd::f64x2;
    #[link_name = "llvm.maximum.v2f64"]
    fn llvm_f64x2_max(x: simd::f64x2, y: simd::f64x2) -> simd::f64x2;
}
135
/// Loads a `v128` vector from the given heap address.
///
/// # Safety
/// `m` must be valid for a 16-byte read; no alignment is required
/// (the load is performed with `read_unaligned`).
#[inline]
#[cfg_attr(test, assert_instr(v128.load))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_load(m: *const v128) -> v128 {
    m.read_unaligned()
}
166
/// Loads eight 8-bit integers and sign-extends each one to a 16-bit lane.
///
/// # Safety
/// `m` must be valid for an 8-byte read; no alignment is required.
#[inline]
#[cfg_attr(test, assert_instr(v128.load8x8_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load8x8_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn i16x8_load_extend_i8x8(m: *const i8) -> v128 {
    let m = m.cast::<simd::i8x8>().read_unaligned();
    simd_cast::<_, simd::i16x8>(m).v128()
}
184
/// Loads eight 8-bit integers and zero-extends each one to a 16-bit lane.
///
/// # Safety
/// `m` must be valid for an 8-byte read; no alignment is required.
#[inline]
#[cfg_attr(test, assert_instr(v128.load8x8_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load8x8_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn i16x8_load_extend_u8x8(m: *const u8) -> v128 {
    let m = m.cast::<simd::u8x8>().read_unaligned();
    simd_cast::<_, simd::u16x8>(m).v128()
}

// Same operation under the unsigned-result naming convention.
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i16x8_load_extend_u8x8 as u16x8_load_extend_u8x8;
205
/// Loads four 16-bit integers and sign-extends each one to a 32-bit lane.
///
/// # Safety
/// `m` must be valid for an 8-byte read; no alignment is required.
#[inline]
#[cfg_attr(test, assert_instr(v128.load16x4_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load16x4_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn i32x4_load_extend_i16x4(m: *const i16) -> v128 {
    let m = m.cast::<simd::i16x4>().read_unaligned();
    simd_cast::<_, simd::i32x4>(m).v128()
}
223
/// Loads four 16-bit integers and zero-extends each one to a 32-bit lane.
///
/// # Safety
/// `m` must be valid for an 8-byte read; no alignment is required.
#[inline]
#[cfg_attr(test, assert_instr(v128.load16x4_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load16x4_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn i32x4_load_extend_u16x4(m: *const u16) -> v128 {
    let m = m.cast::<simd::u16x4>().read_unaligned();
    simd_cast::<_, simd::u32x4>(m).v128()
}

// Same operation under the unsigned-result naming convention.
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i32x4_load_extend_u16x4 as u32x4_load_extend_u16x4;
244
/// Loads two 32-bit integers and sign-extends each one to a 64-bit lane.
///
/// # Safety
/// `m` must be valid for an 8-byte read; no alignment is required.
#[inline]
#[cfg_attr(test, assert_instr(v128.load32x2_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load32x2_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn i64x2_load_extend_i32x2(m: *const i32) -> v128 {
    let m = m.cast::<simd::i32x2>().read_unaligned();
    simd_cast::<_, simd::i64x2>(m).v128()
}
262
/// Loads two 32-bit integers and zero-extends each one to a 64-bit lane.
///
/// # Safety
/// `m` must be valid for an 8-byte read; no alignment is required.
#[inline]
#[cfg_attr(test, assert_instr(v128.load32x2_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load32x2_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn i64x2_load_extend_u32x2(m: *const u32) -> v128 {
    let m = m.cast::<simd::u32x2>().read_unaligned();
    simd_cast::<_, simd::u64x2>(m).v128()
}

// Same operation under the unsigned-result naming convention.
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i64x2_load_extend_u32x2 as u64x2_load_extend_u32x2;
283
/// Loads a single 8-bit value and splats it to all 16 lanes.
///
/// # Safety
/// `m` must be valid for a 1-byte read.
#[inline]
#[cfg_attr(test, assert_instr(v128.load8_splat))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load8_splat"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_load8_splat(m: *const u8) -> v128 {
    u8x16_splat(*m)
}
304
/// Loads a single 16-bit value and splats it to all 8 lanes.
///
/// # Safety
/// `m` must be valid for a 2-byte read; no alignment is required.
#[inline]
#[cfg_attr(test, assert_instr(v128.load16_splat))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load16_splat"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_load16_splat(m: *const u16) -> v128 {
    u16x8_splat(ptr::read_unaligned(m))
}
325
/// Loads a single 32-bit value and splats it to all 4 lanes.
///
/// # Safety
/// `m` must be valid for a 4-byte read; no alignment is required.
#[inline]
#[cfg_attr(test, assert_instr(v128.load32_splat))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load32_splat"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_load32_splat(m: *const u32) -> v128 {
    u32x4_splat(ptr::read_unaligned(m))
}
346
/// Loads a single 64-bit value and splats it to both lanes.
///
/// # Safety
/// `m` must be valid for an 8-byte read; no alignment is required.
#[inline]
#[cfg_attr(test, assert_instr(v128.load64_splat))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load64_splat"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_load64_splat(m: *const u64) -> v128 {
    u64x2_splat(ptr::read_unaligned(m))
}
367
/// Loads a 32-bit value into the low lane and zeroes the remaining lanes.
///
/// # Safety
/// `m` must be valid for a 4-byte read; no alignment is required.
#[inline]
#[cfg_attr(test, assert_instr(v128.load32_zero))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load32_zero"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_load32_zero(m: *const u32) -> v128 {
    u32x4(ptr::read_unaligned(m), 0, 0, 0)
}
388
/// Loads a 64-bit value into the low lane and zeroes the high lane.
///
/// # Safety
/// `m` must be valid for an 8-byte read; no alignment is required.
#[inline]
#[cfg_attr(test, assert_instr(v128.load64_zero))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load64_zero"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_load64_zero(m: *const u64) -> v128 {
    u64x2_replace_lane::<0>(u64x2(0, 0), ptr::read_unaligned(m))
}
409
/// Stores a `v128` vector to the given heap address.
///
/// # Safety
/// `m` must be valid for a 16-byte write; no alignment is required
/// (the store is performed with `write_unaligned`).
#[inline]
#[cfg_attr(test, assert_instr(v128.store))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.store"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_store(m: *mut v128, a: v128) {
    m.write_unaligned(a)
}
440
/// Loads an 8-bit value from `m` and inserts it into lane `L` of `v`,
/// leaving the other lanes unchanged.
///
/// # Safety
/// `m` must be valid for a 1-byte read.
#[inline]
#[cfg_attr(test, assert_instr(v128.load8_lane, L = 0))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load8_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_load8_lane<const L: usize>(v: v128, m: *const u8) -> v128 {
    u8x16_replace_lane::<L>(v, *m)
}
460
/// Loads a 16-bit value from `m` and inserts it into lane `L` of `v`,
/// leaving the other lanes unchanged.
///
/// # Safety
/// `m` must be valid for a 2-byte read; no alignment is required.
#[inline]
#[cfg_attr(test, assert_instr(v128.load16_lane, L = 0))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load16_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_load16_lane<const L: usize>(v: v128, m: *const u16) -> v128 {
    u16x8_replace_lane::<L>(v, ptr::read_unaligned(m))
}
480
/// Loads a 32-bit value from `m` and inserts it into lane `L` of `v`,
/// leaving the other lanes unchanged.
///
/// # Safety
/// `m` must be valid for a 4-byte read; no alignment is required.
#[inline]
#[cfg_attr(test, assert_instr(v128.load32_lane, L = 0))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load32_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_load32_lane<const L: usize>(v: v128, m: *const u32) -> v128 {
    u32x4_replace_lane::<L>(v, ptr::read_unaligned(m))
}
500
/// Loads a 64-bit value from `m` and inserts it into lane `L` of `v`,
/// leaving the other lane unchanged.
///
/// # Safety
/// `m` must be valid for an 8-byte read; no alignment is required.
#[inline]
#[cfg_attr(test, assert_instr(v128.load64_lane, L = 0))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load64_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_load64_lane<const L: usize>(v: v128, m: *const u64) -> v128 {
    u64x2_replace_lane::<L>(v, ptr::read_unaligned(m))
}
520
/// Stores the 8-bit value of lane `L` of `v` to `m`.
///
/// # Safety
/// `m` must be valid for a 1-byte write.
#[inline]
#[cfg_attr(test, assert_instr(v128.store8_lane, L = 0))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.store8_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_store8_lane<const L: usize>(v: v128, m: *mut u8) {
    *m = u8x16_extract_lane::<L>(v);
}
540
/// Stores the 16-bit value of lane `L` of `v` to `m`.
///
/// # Safety
/// `m` must be valid for a 2-byte write; no alignment is required.
#[inline]
#[cfg_attr(test, assert_instr(v128.store16_lane, L = 0))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.store16_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_store16_lane<const L: usize>(v: v128, m: *mut u16) {
    ptr::write_unaligned(m, u16x8_extract_lane::<L>(v))
}
560
/// Stores the 32-bit value of lane `L` of `v` to `m`.
///
/// # Safety
/// `m` must be valid for a 4-byte write; no alignment is required.
#[inline]
#[cfg_attr(test, assert_instr(v128.store32_lane, L = 0))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.store32_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_store32_lane<const L: usize>(v: v128, m: *mut u32) {
    ptr::write_unaligned(m, u32x4_extract_lane::<L>(v))
}
580
/// Stores the 64-bit value of lane `L` of `v` to `m`.
///
/// # Safety
/// `m` must be valid for an 8-byte write; no alignment is required.
#[inline]
#[cfg_attr(test, assert_instr(v128.store64_lane, L = 0))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.store64_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_store64_lane<const L: usize>(v: v128, m: *mut u64) {
    ptr::write_unaligned(m, u64x2_extract_lane::<L>(v))
}
600
/// Materializes a SIMD value from the provided sixteen 8-bit lane values.
///
/// When the arguments are constant this lowers to a single `v128.const`
/// instruction (as checked by `assert_instr`).
#[inline]
#[cfg_attr(
    test,
    assert_instr(
        v128.const,
        a0 = 0,
        a1 = 1,
        a2 = 2,
        a3 = 3,
        a4 = 4,
        a5 = 5,
        a6 = 6,
        a7 = 7,
        a8 = 8,
        a9 = 9,
        a10 = 10,
        a11 = 11,
        a12 = 12,
        a13 = 13,
        a14 = 14,
        a15 = 15,
    )
)]
#[doc(alias("v128.const"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
#[rustc_const_stable(feature = "wasm_simd", since = "1.54.0")]
#[target_feature(enable = "simd128")]
pub const fn i8x16(
    a0: i8,
    a1: i8,
    a2: i8,
    a3: i8,
    a4: i8,
    a5: i8,
    a6: i8,
    a7: i8,
    a8: i8,
    a9: i8,
    a10: i8,
    a11: i8,
    a12: i8,
    a13: i8,
    a14: i8,
    a15: i8,
) -> v128 {
    simd::i8x16::new(
        a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15,
    )
    .v128()
}
655
/// Materializes a SIMD value from the provided sixteen unsigned 8-bit
/// lane values.
#[inline]
#[doc(alias("v128.const"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
#[rustc_const_stable(feature = "wasm_simd", since = "1.54.0")]
#[target_feature(enable = "simd128")]
pub const fn u8x16(
    a0: u8,
    a1: u8,
    a2: u8,
    a3: u8,
    a4: u8,
    a5: u8,
    a6: u8,
    a7: u8,
    a8: u8,
    a9: u8,
    a10: u8,
    a11: u8,
    a12: u8,
    a13: u8,
    a14: u8,
    a15: u8,
) -> v128 {
    simd::u8x16::new(
        a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15,
    )
    .v128()
}
688
/// Materializes a SIMD value from the provided eight 16-bit lane values.
///
/// When the arguments are constant this lowers to a single `v128.const`
/// instruction (as checked by `assert_instr`).
#[inline]
#[cfg_attr(
    test,
    assert_instr(
        v128.const,
        a0 = 0,
        a1 = 1,
        a2 = 2,
        a3 = 3,
        a4 = 4,
        a5 = 5,
        a6 = 6,
        a7 = 7,
    )
)]
#[doc(alias("v128.const"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
#[rustc_const_stable(feature = "wasm_simd", since = "1.54.0")]
#[target_feature(enable = "simd128")]
pub const fn i16x8(a0: i16, a1: i16, a2: i16, a3: i16, a4: i16, a5: i16, a6: i16, a7: i16) -> v128 {
    simd::i16x8::new(a0, a1, a2, a3, a4, a5, a6, a7).v128()
}
715
/// Materializes a SIMD value from the provided eight unsigned 16-bit
/// lane values.
#[inline]
#[doc(alias("v128.const"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
#[rustc_const_stable(feature = "wasm_simd", since = "1.54.0")]
#[target_feature(enable = "simd128")]
pub const fn u16x8(a0: u16, a1: u16, a2: u16, a3: u16, a4: u16, a5: u16, a6: u16, a7: u16) -> v128 {
    simd::u16x8::new(a0, a1, a2, a3, a4, a5, a6, a7).v128()
}
728
/// Materializes a SIMD value from the provided four 32-bit lane values.
///
/// When the arguments are constant this lowers to a single `v128.const`
/// instruction (as checked by `assert_instr`).
#[inline]
#[cfg_attr(test, assert_instr(v128.const, a0 = 0, a1 = 1, a2 = 2, a3 = 3))]
#[doc(alias("v128.const"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
#[rustc_const_stable(feature = "wasm_simd", since = "1.54.0")]
#[target_feature(enable = "simd128")]
pub const fn i32x4(a0: i32, a1: i32, a2: i32, a3: i32) -> v128 {
    simd::i32x4::new(a0, a1, a2, a3).v128()
}
742
/// Materializes a SIMD value from the provided four unsigned 32-bit
/// lane values.
#[inline]
#[doc(alias("v128.const"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
#[rustc_const_stable(feature = "wasm_simd", since = "1.54.0")]
#[target_feature(enable = "simd128")]
pub const fn u32x4(a0: u32, a1: u32, a2: u32, a3: u32) -> v128 {
    simd::u32x4::new(a0, a1, a2, a3).v128()
}
755
/// Materializes a SIMD value from the provided two 64-bit lane values.
///
/// When the arguments are constant this lowers to a single `v128.const`
/// instruction (as checked by `assert_instr`).
#[inline]
#[cfg_attr(test, assert_instr(v128.const, a0 = 1, a1 = 2))]
#[doc(alias("v128.const"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
#[rustc_const_stable(feature = "wasm_simd", since = "1.54.0")]
#[target_feature(enable = "simd128")]
pub const fn i64x2(a0: i64, a1: i64) -> v128 {
    simd::i64x2::new(a0, a1).v128()
}
769
/// Materializes a SIMD value from the provided two unsigned 64-bit
/// lane values.
#[inline]
#[doc(alias("v128.const"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
#[rustc_const_stable(feature = "wasm_simd", since = "1.54.0")]
#[target_feature(enable = "simd128")]
pub const fn u64x2(a0: u64, a1: u64) -> v128 {
    simd::u64x2::new(a0, a1).v128()
}
782
/// Materializes a SIMD value from the provided four 32-bit float
/// lane values.
#[inline]
#[cfg_attr(test, assert_instr(v128.const, a0 = 0.0, a1 = 1.0, a2 = 2.0, a3 = 3.0))]
#[doc(alias("v128.const"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
#[rustc_const_stable(feature = "wasm_simd_const", since = "1.56.0")]
#[target_feature(enable = "simd128")]
pub const fn f32x4(a0: f32, a1: f32, a2: f32, a3: f32) -> v128 {
    simd::f32x4::new(a0, a1, a2, a3).v128()
}
796
/// Materializes a SIMD value from the provided two 64-bit float
/// lane values.
#[inline]
#[cfg_attr(test, assert_instr(v128.const, a0 = 0.0, a1 = 1.0))]
#[doc(alias("v128.const"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
#[rustc_const_stable(feature = "wasm_simd_const", since = "1.56.0")]
#[target_feature(enable = "simd128")]
pub const fn f64x2(a0: f64, a1: f64) -> v128 {
    simd::f64x2::new(a0, a1).v128()
}
810
/// Shuffles two vectors as sixteen 8-bit lanes, selecting each output
/// lane by a compile-time index into the 32-lane concatenation of `a`
/// (indices 0..16) and `b` (indices 16..32).
///
/// All indices are validated at compile time by `static_assert!`.
#[inline]
#[cfg_attr(test,
    assert_instr(
        i8x16.shuffle,
        I0 = 0,
        I1 = 2,
        I2 = 4,
        I3 = 6,
        I4 = 8,
        I5 = 10,
        I6 = 12,
        I7 = 14,
        I8 = 16,
        I9 = 18,
        I10 = 20,
        I11 = 22,
        I12 = 24,
        I13 = 26,
        I14 = 28,
        I15 = 30,
    )
)]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.shuffle"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_shuffle<
    const I0: usize,
    const I1: usize,
    const I2: usize,
    const I3: usize,
    const I4: usize,
    const I5: usize,
    const I6: usize,
    const I7: usize,
    const I8: usize,
    const I9: usize,
    const I10: usize,
    const I11: usize,
    const I12: usize,
    const I13: usize,
    const I14: usize,
    const I15: usize,
>(
    a: v128,
    b: v128,
) -> v128 {
    // Each index must address one of the 32 concatenated input lanes.
    static_assert!(I0 < 32);
    static_assert!(I1 < 32);
    static_assert!(I2 < 32);
    static_assert!(I3 < 32);
    static_assert!(I4 < 32);
    static_assert!(I5 < 32);
    static_assert!(I6 < 32);
    static_assert!(I7 < 32);
    static_assert!(I8 < 32);
    static_assert!(I9 < 32);
    static_assert!(I10 < 32);
    static_assert!(I11 < 32);
    static_assert!(I12 < 32);
    static_assert!(I13 < 32);
    static_assert!(I14 < 32);
    static_assert!(I15 < 32);
    // SAFETY: all indices were statically checked to be in range.
    let shuf: simd::u8x16 = unsafe {
        simd_shuffle!(
            a.as_u8x16(),
            b.as_u8x16(),
            [
                I0 as u32, I1 as u32, I2 as u32, I3 as u32, I4 as u32, I5 as u32, I6 as u32,
                I7 as u32, I8 as u32, I9 as u32, I10 as u32, I11 as u32, I12 as u32, I13 as u32,
                I14 as u32, I15 as u32,
            ],
        )
    };
    shuf.v128()
}

// Same operation under the unsigned naming convention (lane signedness is
// irrelevant to a shuffle).
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i8x16_shuffle as u8x16_shuffle;
903
/// Shuffles two vectors as eight 16-bit lanes, selecting each output lane
/// by a compile-time index into the 16-lane concatenation of `a`
/// (indices 0..8) and `b` (indices 8..16).
///
/// Lowers to `i8x16.shuffle` with widened indices; all indices are
/// validated at compile time by `static_assert!`.
#[inline]
#[cfg_attr(test,
    assert_instr(
        i8x16.shuffle,
        I0 = 0,
        I1 = 2,
        I2 = 4,
        I3 = 6,
        I4 = 8,
        I5 = 10,
        I6 = 12,
        I7 = 14,
    )
)]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.shuffle"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_shuffle<
    const I0: usize,
    const I1: usize,
    const I2: usize,
    const I3: usize,
    const I4: usize,
    const I5: usize,
    const I6: usize,
    const I7: usize,
>(
    a: v128,
    b: v128,
) -> v128 {
    // Each index must address one of the 16 concatenated input lanes.
    static_assert!(I0 < 16);
    static_assert!(I1 < 16);
    static_assert!(I2 < 16);
    static_assert!(I3 < 16);
    static_assert!(I4 < 16);
    static_assert!(I5 < 16);
    static_assert!(I6 < 16);
    static_assert!(I7 < 16);
    // SAFETY: all indices were statically checked to be in range.
    let shuf: simd::u16x8 = unsafe {
        simd_shuffle!(
            a.as_u16x8(),
            b.as_u16x8(),
            [
                I0 as u32, I1 as u32, I2 as u32, I3 as u32, I4 as u32, I5 as u32, I6 as u32,
                I7 as u32,
            ],
        )
    };
    shuf.v128()
}

// Same operation under the unsigned naming convention.
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i16x8_shuffle as u16x8_shuffle;
964
/// Shuffles two vectors as four 32-bit lanes, selecting each output lane
/// by a compile-time index into the 8-lane concatenation of `a`
/// (indices 0..4) and `b` (indices 4..8).
///
/// Lowers to `i8x16.shuffle` with widened indices.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.shuffle, I0 = 0, I1 = 2, I2 = 4, I3 = 6))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.shuffle"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_shuffle<const I0: usize, const I1: usize, const I2: usize, const I3: usize>(
    a: v128,
    b: v128,
) -> v128 {
    // Each index must address one of the 8 concatenated input lanes.
    static_assert!(I0 < 8);
    static_assert!(I1 < 8);
    static_assert!(I2 < 8);
    static_assert!(I3 < 8);
    // SAFETY: all indices were statically checked to be in range.
    let shuf: simd::u32x4 = unsafe {
        simd_shuffle!(
            a.as_u32x4(),
            b.as_u32x4(),
            [I0 as u32, I1 as u32, I2 as u32, I3 as u32],
        )
    };
    shuf.v128()
}

// Same operation under the unsigned naming convention.
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i32x4_shuffle as u32x4_shuffle;
997
/// Shuffles two vectors as two 64-bit lanes, selecting each output lane
/// by a compile-time index into the 4-lane concatenation of `a`
/// (indices 0..2) and `b` (indices 2..4).
///
/// Lowers to `i8x16.shuffle` with widened indices.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.shuffle, I0 = 0, I1 = 2))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.shuffle"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_shuffle<const I0: usize, const I1: usize>(a: v128, b: v128) -> v128 {
    // Each index must address one of the 4 concatenated input lanes.
    static_assert!(I0 < 4);
    static_assert!(I1 < 4);
    // SAFETY: both indices were statically checked to be in range.
    let shuf: simd::u64x2 =
        unsafe { simd_shuffle!(a.as_u64x2(), b.as_u64x2(), [I0 as u32, I1 as u32]) };
    shuf.v128()
}

// Same operation under the unsigned naming convention.
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i64x2_shuffle as u64x2_shuffle;
1020
/// Extracts lane `N` of `a` as a signed 8-bit integer
/// (`N` is checked at compile time).
#[inline]
#[cfg_attr(test, assert_instr(i8x16.extract_lane_s, N = 3))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.extract_lane_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_extract_lane<const N: usize>(a: v128) -> i8 {
    static_assert!(N < 16);
    // SAFETY: `N` was statically checked to be in range.
    unsafe { simd_extract!(a.as_i8x16(), N as u32) }
}
1034
/// Extracts lane `N` of `a` as an unsigned 8-bit integer
/// (`N` is checked at compile time).
#[inline]
#[cfg_attr(test, assert_instr(i8x16.extract_lane_u, N = 3))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.extract_lane_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u8x16_extract_lane<const N: usize>(a: v128) -> u8 {
    static_assert!(N < 16);
    // SAFETY: `N` was statically checked to be in range.
    unsafe { simd_extract!(a.as_u8x16(), N as u32) }
}
1048
/// Returns `a` with 8-bit lane `N` replaced by `val`
/// (`N` is checked at compile time).
#[inline]
#[cfg_attr(test, assert_instr(i8x16.replace_lane, N = 2))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.replace_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_replace_lane<const N: usize>(a: v128, val: i8) -> v128 {
    static_assert!(N < 16);
    // SAFETY: `N` was statically checked to be in range.
    unsafe { simd_insert!(a.as_i8x16(), N as u32, val).v128() }
}
1062
/// Returns `a` with unsigned 8-bit lane `N` replaced by `val`
/// (`N` is checked at compile time).
#[inline]
#[cfg_attr(test, assert_instr(i8x16.replace_lane, N = 2))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.replace_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u8x16_replace_lane<const N: usize>(a: v128, val: u8) -> v128 {
    static_assert!(N < 16);
    // SAFETY: `N` was statically checked to be in range.
    unsafe { simd_insert!(a.as_u8x16(), N as u32, val).v128() }
}
1076
/// Extracts lane `N` of `a` as a signed 16-bit integer
/// (`N` is checked at compile time).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.extract_lane_s, N = 2))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.extract_lane_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_extract_lane<const N: usize>(a: v128) -> i16 {
    static_assert!(N < 8);
    // SAFETY: `N` was statically checked to be in range.
    unsafe { simd_extract!(a.as_i16x8(), N as u32) }
}
1090
/// Extracts lane `N` of `a` as an unsigned 16-bit integer
/// (`N` is checked at compile time).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.extract_lane_u, N = 2))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.extract_lane_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u16x8_extract_lane<const N: usize>(a: v128) -> u16 {
    static_assert!(N < 8);
    // SAFETY: `N` was statically checked to be in range.
    unsafe { simd_extract!(a.as_u16x8(), N as u32) }
}
1104
/// Returns `a` with 16-bit lane `N` replaced by `val`
/// (`N` is checked at compile time).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.replace_lane, N = 2))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.replace_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_replace_lane<const N: usize>(a: v128, val: i16) -> v128 {
    static_assert!(N < 8);
    // SAFETY: `N` was statically checked to be in range.
    unsafe { simd_insert!(a.as_i16x8(), N as u32, val).v128() }
}
1118
/// Returns `a` with unsigned 16-bit lane `N` replaced by `val`
/// (`N` is checked at compile time).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.replace_lane, N = 2))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.replace_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u16x8_replace_lane<const N: usize>(a: v128, val: u16) -> v128 {
    static_assert!(N < 8);
    // SAFETY: `N` was statically checked to be in range.
    unsafe { simd_insert!(a.as_u16x8(), N as u32, val).v128() }
}
1132
/// Extracts lane `N` of `a` as a 32-bit integer
/// (`N` is checked at compile time).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.extract_lane, N = 2))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.extract_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_extract_lane<const N: usize>(a: v128) -> i32 {
    static_assert!(N < 4);
    // SAFETY: `N` was statically checked to be in range.
    unsafe { simd_extract!(a.as_i32x4(), N as u32) }
}
1146
/// Extracts lane `N` of `a` as an unsigned 32-bit integer.
///
/// Delegates to [`i32x4_extract_lane`] (which enforces `N < 4`) and
/// reinterprets the bits as unsigned.
#[inline]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.extract_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u32x4_extract_lane<const N: usize>(a: v128) -> u32 {
    i32x4_extract_lane::<N>(a) as u32
}
1158
/// Returns `a` with 32-bit lane `N` replaced by `val`
/// (`N` is checked at compile time).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.replace_lane, N = 2))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.replace_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_replace_lane<const N: usize>(a: v128, val: i32) -> v128 {
    static_assert!(N < 4);
    // SAFETY: `N` was statically checked to be in range.
    unsafe { simd_insert!(a.as_i32x4(), N as u32, val).v128() }
}
1172
/// Returns `a` with unsigned 32-bit lane `N` replaced by `val`.
///
/// Delegates to [`i32x4_replace_lane`] (which enforces `N < 4`) with the
/// bits of `val` reinterpreted as signed.
#[inline]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.replace_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u32x4_replace_lane<const N: usize>(a: v128, val: u32) -> v128 {
    i32x4_replace_lane::<N>(a, val as i32)
}
1184
/// Extracts lane `N` of `a` as a 64-bit integer
/// (`N` is checked at compile time).
#[inline]
#[cfg_attr(test, assert_instr(i64x2.extract_lane, N = 1))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.extract_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_extract_lane<const N: usize>(a: v128) -> i64 {
    static_assert!(N < 2);
    // SAFETY: `N` was statically checked to be in range.
    unsafe { simd_extract!(a.as_i64x2(), N as u32) }
}
1198
/// Extracts lane `N` of `a` as an unsigned 64-bit integer.
///
/// Delegates to [`i64x2_extract_lane`] (which enforces `N < 2`) and
/// reinterprets the bits as unsigned.
#[inline]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.extract_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u64x2_extract_lane<const N: usize>(a: v128) -> u64 {
    i64x2_extract_lane::<N>(a) as u64
}
1210
/// Returns `a` with 64-bit lane `N` replaced by `val`
/// (`N` is checked at compile time).
#[inline]
#[cfg_attr(test, assert_instr(i64x2.replace_lane, N = 0))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.replace_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_replace_lane<const N: usize>(a: v128, val: i64) -> v128 {
    static_assert!(N < 2);
    // SAFETY: `N` was statically checked to be in range.
    unsafe { simd_insert!(a.as_i64x2(), N as u32, val).v128() }
}
1224
/// Returns `a` with unsigned 64-bit lane `N` replaced by `val`.
///
/// Delegates to [`i64x2_replace_lane`] (which enforces `N < 2`) with the
/// bits of `val` reinterpreted as signed.
#[inline]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.replace_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u64x2_replace_lane<const N: usize>(a: v128, val: u64) -> v128 {
    i64x2_replace_lane::<N>(a, val as i64)
}
1236
/// Extracts lane `N` of `a` as a 32-bit float
/// (`N` is checked at compile time).
#[inline]
#[cfg_attr(test, assert_instr(f32x4.extract_lane, N = 1))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.extract_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_extract_lane<const N: usize>(a: v128) -> f32 {
    static_assert!(N < 4);
    // SAFETY: `N` was statically checked to be in range.
    unsafe { simd_extract!(a.as_f32x4(), N as u32) }
}
1250
/// Returns `a` with 32-bit float lane `N` replaced by `val`
/// (`N` is checked at compile time).
#[inline]
#[cfg_attr(test, assert_instr(f32x4.replace_lane, N = 1))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.replace_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_replace_lane<const N: usize>(a: v128, val: f32) -> v128 {
    static_assert!(N < 4);
    // SAFETY: `N` was statically checked to be in range.
    unsafe { simd_insert!(a.as_f32x4(), N as u32, val).v128() }
}
1264
/// Extracts lane `N` of `a` as a 64-bit float
/// (`N` is checked at compile time).
#[inline]
#[cfg_attr(test, assert_instr(f64x2.extract_lane, N = 1))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.extract_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_extract_lane<const N: usize>(a: v128) -> f64 {
    static_assert!(N < 2);
    // SAFETY: `N` was statically checked to be in range.
    unsafe { simd_extract!(a.as_f64x2(), N as u32) }
}
1278
/// Returns `a` with 64-bit float lane `N` replaced by `val`
/// (`N` is checked at compile time).
#[inline]
#[cfg_attr(test, assert_instr(f64x2.replace_lane, N = 1))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.replace_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_replace_lane<const N: usize>(a: v128, val: f64) -> v128 {
    static_assert!(N < 2);
    // SAFETY: `N` was statically checked to be in range.
    unsafe { simd_insert!(a.as_f64x2(), N as u32, val).v128() }
}
1292
/// Selects 8-bit lanes of `a` using the runtime lane indices in `s`
/// (the `i8x16.swizzle` instruction, via its LLVM intrinsic).
#[inline]
#[cfg_attr(test, assert_instr(i8x16.swizzle))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.swizzle"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_swizzle(a: v128, s: v128) -> v128 {
    // SAFETY: the intrinsic is a pure lane permutation; any inputs are valid.
    unsafe { llvm_swizzle(a.as_i8x16(), s.as_i8x16()).v128() }
}

// Same operation under the unsigned naming convention.
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i8x16_swizzle as u8x16_swizzle;
1309
/// Creates a vector with all sixteen 8-bit lanes set to `a`.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.splat))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.splat"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_splat(a: i8) -> v128 {
    simd::i8x16::splat(a).v128()
}
1321
/// Creates a vector with all sixteen unsigned 8-bit lanes set to `a`.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.splat))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.splat"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u8x16_splat(a: u8) -> v128 {
    simd::u8x16::splat(a).v128()
}
1333
/// Creates a vector with all eight 16-bit lanes set to `a`.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.splat))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.splat"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_splat(a: i16) -> v128 {
    simd::i16x8::splat(a).v128()
}
1345
/// Creates a vector with all eight unsigned 16-bit lanes set to `a`.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.splat))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.splat"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u16x8_splat(a: u16) -> v128 {
    simd::u16x8::splat(a).v128()
}
1357
/// Creates a vector with all four 32-bit lanes set to `a`.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.splat))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.splat"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_splat(a: i32) -> v128 {
    simd::i32x4::splat(a).v128()
}
1369
/// Creates a vector with all four unsigned 32-bit lanes set to `a`.
///
/// Delegates to [`i32x4_splat`] with the bits reinterpreted as signed.
#[inline]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.splat"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u32x4_splat(a: u32) -> v128 {
    i32x4_splat(a as i32)
}
1380
/// Creates a vector with both 64-bit lanes set to `a`.
#[inline]
#[cfg_attr(test, assert_instr(i64x2.splat))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.splat"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_splat(a: i64) -> v128 {
    simd::i64x2::splat(a).v128()
}
1392
1393#[inline]
1397#[target_feature(enable = "simd128")]
1398#[doc(alias("u64x2.splat"))]
1399#[stable(feature = "wasm_simd", since = "1.54.0")]
1400pub fn u64x2_splat(a: u64) -> v128 {
1401 i64x2_splat(a as i64)
1402}
1403
/// Creates a vector with all four 32-bit float lanes set to `a`.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.splat))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.splat"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_splat(a: f32) -> v128 {
    simd::f32x4::splat(a).v128()
}
1415
/// Creates a vector with both 64-bit float lanes set to `a`.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.splat))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.splat"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_splat(a: f64) -> v128 {
    simd::f64x2::splat(a).v128()
}
1427
/// Lane-wise equality of two vectors of sixteen 8-bit integers: each
/// result lane is all ones where the lanes are equal, all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.eq))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.eq"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_eq(a: v128, b: v128) -> v128 {
    // SAFETY: `simd_eq` is a pure lane-wise comparison; any inputs are valid.
    unsafe { simd_eq::<_, simd::i8x16>(a.as_i8x16(), b.as_i8x16()).v128() }
}
1441
/// Lane-wise inequality of two vectors of sixteen 8-bit integers: each
/// result lane is all ones where the lanes differ, all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.ne))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.ne"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_ne(a: v128, b: v128) -> v128 {
    // SAFETY: `simd_ne` is a pure lane-wise comparison; any inputs are valid.
    unsafe { simd_ne::<_, simd::i8x16>(a.as_i8x16(), b.as_i8x16()).v128() }
}
1455
// Equality/inequality are sign-agnostic, so the unsigned names are plain
// re-exports of the signed intrinsics.
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i8x16_eq as u8x16_eq;
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i8x16_ne as u8x16_ne;
1460
/// Lane-wise signed less-than of two vectors of sixteen 8-bit integers:
/// each result lane is all ones where `a < b`, all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.lt_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.lt_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_lt(a: v128, b: v128) -> v128 {
    // SAFETY: `simd_lt` is a pure lane-wise comparison; any inputs are valid.
    unsafe { simd_lt::<_, simd::i8x16>(a.as_i8x16(), b.as_i8x16()).v128() }
}
1474
/// Lane-wise unsigned less-than of two vectors of sixteen 8-bit integers:
/// each result lane is all ones where `a < b`, all zeros otherwise.
/// (The `u8x16` views make the comparison unsigned.)
#[inline]
#[cfg_attr(test, assert_instr(i8x16.lt_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.lt_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u8x16_lt(a: v128, b: v128) -> v128 {
    // SAFETY: `simd_lt` is a pure lane-wise comparison; any inputs are valid.
    unsafe { simd_lt::<_, simd::i8x16>(a.as_u8x16(), b.as_u8x16()).v128() }
}
1488
1489#[inline]
1495#[cfg_attr(test, assert_instr(i8x16.gt_s))]
1496#[target_feature(enable = "simd128")]
1497#[doc(alias("i8x16.gt_s"))]
1498#[stable(feature = "wasm_simd", since = "1.54.0")]
1499pub fn i8x16_gt(a: v128, b: v128) -> v128 {
1500 unsafe { simd_gt::<_, simd::i8x16>(a.as_i8x16(), b.as_i8x16()).v128() }
1501}
1502
1503#[inline]
1509#[cfg_attr(test, assert_instr(i8x16.gt_u))]
1510#[target_feature(enable = "simd128")]
1511#[doc(alias("i8x16.gt_u"))]
1512#[stable(feature = "wasm_simd", since = "1.54.0")]
1513pub fn u8x16_gt(a: v128, b: v128) -> v128 {
1514 unsafe { simd_gt::<_, simd::i8x16>(a.as_u8x16(), b.as_u8x16()).v128() }
1515}
1516
1517#[inline]
1523#[cfg_attr(test, assert_instr(i8x16.le_s))]
1524#[target_feature(enable = "simd128")]
1525#[doc(alias("i8x16.le_s"))]
1526#[stable(feature = "wasm_simd", since = "1.54.0")]
1527pub fn i8x16_le(a: v128, b: v128) -> v128 {
1528 unsafe { simd_le::<_, simd::i8x16>(a.as_i8x16(), b.as_i8x16()).v128() }
1529}
1530
1531#[inline]
1537#[cfg_attr(test, assert_instr(i8x16.le_u))]
1538#[target_feature(enable = "simd128")]
1539#[doc(alias("i8x16.le_u"))]
1540#[stable(feature = "wasm_simd", since = "1.54.0")]
1541pub fn u8x16_le(a: v128, b: v128) -> v128 {
1542 unsafe { simd_le::<_, simd::i8x16>(a.as_u8x16(), b.as_u8x16()).v128() }
1543}
1544
1545#[inline]
1551#[cfg_attr(test, assert_instr(i8x16.ge_s))]
1552#[target_feature(enable = "simd128")]
1553#[doc(alias("i8x16.ge_s"))]
1554#[stable(feature = "wasm_simd", since = "1.54.0")]
1555pub fn i8x16_ge(a: v128, b: v128) -> v128 {
1556 unsafe { simd_ge::<_, simd::i8x16>(a.as_i8x16(), b.as_i8x16()).v128() }
1557}
1558
1559#[inline]
1565#[cfg_attr(test, assert_instr(i8x16.ge_u))]
1566#[target_feature(enable = "simd128")]
1567#[doc(alias("i8x16.ge_u"))]
1568#[stable(feature = "wasm_simd", since = "1.54.0")]
1569pub fn u8x16_ge(a: v128, b: v128) -> v128 {
1570 unsafe { simd_ge::<_, simd::i8x16>(a.as_u8x16(), b.as_u8x16()).v128() }
1571}
1572
/// Compares two 128-bit vectors as if they were two vectors of 8 sixteen-bit
/// integers.
///
/// Returns a new vector where each lane is all ones if the corresponding input
/// lanes were equal, or all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.eq))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.eq"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_eq(a: v128, b: v128) -> v128 {
    unsafe { simd_eq::<_, simd::i16x8>(a.as_i16x8(), b.as_i16x8()).v128() }
}

/// Compares two 128-bit vectors as if they were two vectors of 8 sixteen-bit
/// integers.
///
/// Returns a new vector where each lane is all ones if the corresponding input
/// lanes were not equal, or all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.ne))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.ne"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_ne(a: v128, b: v128) -> v128 {
    unsafe { simd_ne::<_, simd::i16x8>(a.as_i16x8(), b.as_i16x8()).v128() }
}

// Equality is sign-agnostic, so the unsigned names are plain re-exports.
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i16x8_eq as u16x8_eq;
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i16x8_ne as u16x8_ne;
1605
/// Compares two 128-bit vectors as if they were two vectors of 8 sixteen-bit
/// signed integers.
///
/// Returns a new vector where each lane is all ones if the lane-wise left
/// element is less than the right element, or all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.lt_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.lt_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_lt(a: v128, b: v128) -> v128 {
    unsafe { simd_lt::<_, simd::i16x8>(a.as_i16x8(), b.as_i16x8()).v128() }
}

/// Compares two 128-bit vectors as if they were two vectors of 8 sixteen-bit
/// unsigned integers.
///
/// Returns a new vector where each lane is all ones if the lane-wise left
/// element is less than the right element, or all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.lt_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.lt_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u16x8_lt(a: v128, b: v128) -> v128 {
    unsafe { simd_lt::<_, simd::i16x8>(a.as_u16x8(), b.as_u16x8()).v128() }
}

/// Compares two 128-bit vectors as if they were two vectors of 8 sixteen-bit
/// signed integers.
///
/// Returns a new vector where each lane is all ones if the lane-wise left
/// element is greater than the right element, or all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.gt_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.gt_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_gt(a: v128, b: v128) -> v128 {
    unsafe { simd_gt::<_, simd::i16x8>(a.as_i16x8(), b.as_i16x8()).v128() }
}

/// Compares two 128-bit vectors as if they were two vectors of 8 sixteen-bit
/// unsigned integers.
///
/// Returns a new vector where each lane is all ones if the lane-wise left
/// element is greater than the right element, or all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.gt_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.gt_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u16x8_gt(a: v128, b: v128) -> v128 {
    unsafe { simd_gt::<_, simd::i16x8>(a.as_u16x8(), b.as_u16x8()).v128() }
}

/// Compares two 128-bit vectors as if they were two vectors of 8 sixteen-bit
/// signed integers.
///
/// Returns a new vector where each lane is all ones if the lane-wise left
/// element is less than or equal to the right element, or all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.le_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.le_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_le(a: v128, b: v128) -> v128 {
    unsafe { simd_le::<_, simd::i16x8>(a.as_i16x8(), b.as_i16x8()).v128() }
}

/// Compares two 128-bit vectors as if they were two vectors of 8 sixteen-bit
/// unsigned integers.
///
/// Returns a new vector where each lane is all ones if the lane-wise left
/// element is less than or equal to the right element, or all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.le_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.le_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u16x8_le(a: v128, b: v128) -> v128 {
    unsafe { simd_le::<_, simd::i16x8>(a.as_u16x8(), b.as_u16x8()).v128() }
}

/// Compares two 128-bit vectors as if they were two vectors of 8 sixteen-bit
/// signed integers.
///
/// Returns a new vector where each lane is all ones if the lane-wise left
/// element is greater than or equal to the right element, or all zeros
/// otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.ge_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.ge_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_ge(a: v128, b: v128) -> v128 {
    unsafe { simd_ge::<_, simd::i16x8>(a.as_i16x8(), b.as_i16x8()).v128() }
}

/// Compares two 128-bit vectors as if they were two vectors of 8 sixteen-bit
/// unsigned integers.
///
/// Returns a new vector where each lane is all ones if the lane-wise left
/// element is greater than or equal to the right element, or all zeros
/// otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.ge_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.ge_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u16x8_ge(a: v128, b: v128) -> v128 {
    unsafe { simd_ge::<_, simd::i16x8>(a.as_u16x8(), b.as_u16x8()).v128() }
}
1717
/// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
/// integers.
///
/// Returns a new vector where each lane is all ones if the corresponding input
/// lanes were equal, or all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.eq))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.eq"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_eq(a: v128, b: v128) -> v128 {
    unsafe { simd_eq::<_, simd::i32x4>(a.as_i32x4(), b.as_i32x4()).v128() }
}

/// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
/// integers.
///
/// Returns a new vector where each lane is all ones if the corresponding input
/// lanes were not equal, or all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.ne))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.ne"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_ne(a: v128, b: v128) -> v128 {
    unsafe { simd_ne::<_, simd::i32x4>(a.as_i32x4(), b.as_i32x4()).v128() }
}

// Equality is sign-agnostic, so the unsigned names are plain re-exports.
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i32x4_eq as u32x4_eq;
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i32x4_ne as u32x4_ne;
1750
/// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
/// signed integers.
///
/// Returns a new vector where each lane is all ones if the lane-wise left
/// element is less than the right element, or all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.lt_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.lt_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_lt(a: v128, b: v128) -> v128 {
    unsafe { simd_lt::<_, simd::i32x4>(a.as_i32x4(), b.as_i32x4()).v128() }
}

/// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
/// unsigned integers.
///
/// Returns a new vector where each lane is all ones if the lane-wise left
/// element is less than the right element, or all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.lt_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.lt_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u32x4_lt(a: v128, b: v128) -> v128 {
    unsafe { simd_lt::<_, simd::i32x4>(a.as_u32x4(), b.as_u32x4()).v128() }
}

/// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
/// signed integers.
///
/// Returns a new vector where each lane is all ones if the lane-wise left
/// element is greater than the right element, or all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.gt_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.gt_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_gt(a: v128, b: v128) -> v128 {
    unsafe { simd_gt::<_, simd::i32x4>(a.as_i32x4(), b.as_i32x4()).v128() }
}

/// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
/// unsigned integers.
///
/// Returns a new vector where each lane is all ones if the lane-wise left
/// element is greater than the right element, or all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.gt_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.gt_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u32x4_gt(a: v128, b: v128) -> v128 {
    unsafe { simd_gt::<_, simd::i32x4>(a.as_u32x4(), b.as_u32x4()).v128() }
}

/// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
/// signed integers.
///
/// Returns a new vector where each lane is all ones if the lane-wise left
/// element is less than or equal to the right element, or all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.le_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.le_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_le(a: v128, b: v128) -> v128 {
    unsafe { simd_le::<_, simd::i32x4>(a.as_i32x4(), b.as_i32x4()).v128() }
}

/// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
/// unsigned integers.
///
/// Returns a new vector where each lane is all ones if the lane-wise left
/// element is less than or equal to the right element, or all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.le_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.le_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u32x4_le(a: v128, b: v128) -> v128 {
    unsafe { simd_le::<_, simd::i32x4>(a.as_u32x4(), b.as_u32x4()).v128() }
}

/// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
/// signed integers.
///
/// Returns a new vector where each lane is all ones if the lane-wise left
/// element is greater than or equal to the right element, or all zeros
/// otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.ge_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.ge_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_ge(a: v128, b: v128) -> v128 {
    unsafe { simd_ge::<_, simd::i32x4>(a.as_i32x4(), b.as_i32x4()).v128() }
}

/// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
/// unsigned integers.
///
/// Returns a new vector where each lane is all ones if the lane-wise left
/// element is greater than or equal to the right element, or all zeros
/// otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.ge_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.ge_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u32x4_ge(a: v128, b: v128) -> v128 {
    unsafe { simd_ge::<_, simd::i32x4>(a.as_u32x4(), b.as_u32x4()).v128() }
}
1862
/// Compares two 128-bit vectors as if they were two vectors of 2 sixty-four-bit
/// integers.
///
/// Returns a new vector where each lane is all ones if the corresponding input
/// lanes were equal, or all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i64x2.eq))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.eq"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_eq(a: v128, b: v128) -> v128 {
    unsafe { simd_eq::<_, simd::i64x2>(a.as_i64x2(), b.as_i64x2()).v128() }
}

/// Compares two 128-bit vectors as if they were two vectors of 2 sixty-four-bit
/// integers.
///
/// Returns a new vector where each lane is all ones if the corresponding input
/// lanes were not equal, or all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i64x2.ne))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.ne"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_ne(a: v128, b: v128) -> v128 {
    unsafe { simd_ne::<_, simd::i64x2>(a.as_i64x2(), b.as_i64x2()).v128() }
}

// Equality is sign-agnostic, so the unsigned names are plain re-exports.
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i64x2_eq as u64x2_eq;
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i64x2_ne as u64x2_ne;
1895
/// Compares two 128-bit vectors as if they were two vectors of 2 sixty-four-bit
/// signed integers.
///
/// Returns a new vector where each lane is all ones if the lane-wise left
/// element is less than the right element, or all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i64x2.lt_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.lt_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_lt(a: v128, b: v128) -> v128 {
    unsafe { simd_lt::<_, simd::i64x2>(a.as_i64x2(), b.as_i64x2()).v128() }
}

/// Compares two 128-bit vectors as if they were two vectors of 2 sixty-four-bit
/// signed integers.
///
/// Returns a new vector where each lane is all ones if the lane-wise left
/// element is greater than the right element, or all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i64x2.gt_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.gt_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_gt(a: v128, b: v128) -> v128 {
    unsafe { simd_gt::<_, simd::i64x2>(a.as_i64x2(), b.as_i64x2()).v128() }
}

/// Compares two 128-bit vectors as if they were two vectors of 2 sixty-four-bit
/// signed integers.
///
/// Returns a new vector where each lane is all ones if the lane-wise left
/// element is less than or equal to the right element, or all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i64x2.le_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.le_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_le(a: v128, b: v128) -> v128 {
    unsafe { simd_le::<_, simd::i64x2>(a.as_i64x2(), b.as_i64x2()).v128() }
}

/// Compares two 128-bit vectors as if they were two vectors of 2 sixty-four-bit
/// signed integers.
///
/// Returns a new vector where each lane is all ones if the lane-wise left
/// element is greater than or equal to the right element, or all zeros
/// otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i64x2.ge_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.ge_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_ge(a: v128, b: v128) -> v128 {
    unsafe { simd_ge::<_, simd::i64x2>(a.as_i64x2(), b.as_i64x2()).v128() }
}
1951
/// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
/// floating point numbers.
///
/// Returns a new vector where each lane is all ones if the corresponding input
/// lanes were equal, or all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.eq))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.eq"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_eq(a: v128, b: v128) -> v128 {
    unsafe { simd_eq::<_, simd::i32x4>(a.as_f32x4(), b.as_f32x4()).v128() }
}

/// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
/// floating point numbers.
///
/// Returns a new vector where each lane is all ones if the corresponding input
/// lanes were not equal, or all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.ne))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.ne"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_ne(a: v128, b: v128) -> v128 {
    unsafe { simd_ne::<_, simd::i32x4>(a.as_f32x4(), b.as_f32x4()).v128() }
}

/// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
/// floating point numbers.
///
/// Returns a new vector where each lane is all ones if the lane-wise left
/// element is less than the right element, or all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.lt))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.lt"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_lt(a: v128, b: v128) -> v128 {
    unsafe { simd_lt::<_, simd::i32x4>(a.as_f32x4(), b.as_f32x4()).v128() }
}

/// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
/// floating point numbers.
///
/// Returns a new vector where each lane is all ones if the lane-wise left
/// element is greater than the right element, or all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.gt))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.gt"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_gt(a: v128, b: v128) -> v128 {
    unsafe { simd_gt::<_, simd::i32x4>(a.as_f32x4(), b.as_f32x4()).v128() }
}

/// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
/// floating point numbers.
///
/// Returns a new vector where each lane is all ones if the lane-wise left
/// element is less than or equal to the right element, or all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.le))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.le"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_le(a: v128, b: v128) -> v128 {
    unsafe { simd_le::<_, simd::i32x4>(a.as_f32x4(), b.as_f32x4()).v128() }
}

/// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
/// floating point numbers.
///
/// Returns a new vector where each lane is all ones if the lane-wise left
/// element is greater than or equal to the right element, or all zeros
/// otherwise.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.ge))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.ge"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_ge(a: v128, b: v128) -> v128 {
    unsafe { simd_ge::<_, simd::i32x4>(a.as_f32x4(), b.as_f32x4()).v128() }
}
2035
/// Compares two 128-bit vectors as if they were two vectors of 2 sixty-four-bit
/// floating point numbers.
///
/// Returns a new vector where each lane is all ones if the corresponding input
/// lanes were equal, or all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.eq))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.eq"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_eq(a: v128, b: v128) -> v128 {
    unsafe { simd_eq::<_, simd::i64x2>(a.as_f64x2(), b.as_f64x2()).v128() }
}

/// Compares two 128-bit vectors as if they were two vectors of 2 sixty-four-bit
/// floating point numbers.
///
/// Returns a new vector where each lane is all ones if the corresponding input
/// lanes were not equal, or all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.ne))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.ne"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_ne(a: v128, b: v128) -> v128 {
    unsafe { simd_ne::<_, simd::i64x2>(a.as_f64x2(), b.as_f64x2()).v128() }
}

/// Compares two 128-bit vectors as if they were two vectors of 2 sixty-four-bit
/// floating point numbers.
///
/// Returns a new vector where each lane is all ones if the lane-wise left
/// element is less than the right element, or all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.lt))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.lt"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_lt(a: v128, b: v128) -> v128 {
    unsafe { simd_lt::<_, simd::i64x2>(a.as_f64x2(), b.as_f64x2()).v128() }
}

/// Compares two 128-bit vectors as if they were two vectors of 2 sixty-four-bit
/// floating point numbers.
///
/// Returns a new vector where each lane is all ones if the lane-wise left
/// element is greater than the right element, or all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.gt))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.gt"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_gt(a: v128, b: v128) -> v128 {
    unsafe { simd_gt::<_, simd::i64x2>(a.as_f64x2(), b.as_f64x2()).v128() }
}

/// Compares two 128-bit vectors as if they were two vectors of 2 sixty-four-bit
/// floating point numbers.
///
/// Returns a new vector where each lane is all ones if the lane-wise left
/// element is less than or equal to the right element, or all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.le))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.le"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_le(a: v128, b: v128) -> v128 {
    unsafe { simd_le::<_, simd::i64x2>(a.as_f64x2(), b.as_f64x2()).v128() }
}

/// Compares two 128-bit vectors as if they were two vectors of 2 sixty-four-bit
/// floating point numbers.
///
/// Returns a new vector where each lane is all ones if the lane-wise left
/// element is greater than or equal to the right element, or all zeros
/// otherwise.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.ge))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.ge"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_ge(a: v128, b: v128) -> v128 {
    unsafe { simd_ge::<_, simd::i64x2>(a.as_f64x2(), b.as_f64x2()).v128() }
}
2119
/// Flips each bit of the 128-bit input vector.
#[inline]
#[cfg_attr(test, assert_instr(v128.not))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.not"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn v128_not(a: v128) -> v128 {
    // XOR against all-ones computes bitwise NOT; the lane interpretation is
    // irrelevant for a bitwise operation.
    unsafe { simd_xor(a.as_i64x2(), simd::i64x2::new(!0, !0)).v128() }
}

/// Performs a bitwise and of the two input 128-bit vectors, returning the
/// resulting vector.
#[inline]
#[cfg_attr(test, assert_instr(v128.and))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.and"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn v128_and(a: v128, b: v128) -> v128 {
    unsafe { simd_and(a.as_i64x2(), b.as_i64x2()).v128() }
}

/// Bitwise AND of bits of `a` and the logical inverse of bits of `b`.
///
/// This operation is equivalent to `v128.and(a, v128.not(b))`.
#[inline]
#[cfg_attr(test, assert_instr(v128.andnot))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.andnot"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn v128_andnot(a: v128, b: v128) -> v128 {
    unsafe {
        simd_and(
            a.as_i64x2(),
            // `b ^ -1` flips every bit of `b` (bitwise NOT).
            simd_xor(b.as_i64x2(), simd::i64x2::new(-1, -1)),
        )
        .v128()
    }
}

/// Performs a bitwise or of the two input 128-bit vectors, returning the
/// resulting vector.
#[inline]
#[cfg_attr(test, assert_instr(v128.or))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.or"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn v128_or(a: v128, b: v128) -> v128 {
    unsafe { simd_or(a.as_i64x2(), b.as_i64x2()).v128() }
}

/// Performs a bitwise xor of the two input 128-bit vectors, returning the
/// resulting vector.
#[inline]
#[cfg_attr(test, assert_instr(v128.xor))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.xor"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn v128_xor(a: v128, b: v128) -> v128 {
    unsafe { simd_xor(a.as_i64x2(), b.as_i64x2()).v128() }
}
2180
/// Use the bitmask in `c` to select bits from `v1` when 1 and `v2` when 0.
#[inline]
#[cfg_attr(test, assert_instr(v128.bitselect))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.bitselect"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn v128_bitselect(v1: v128, v2: v128, c: v128) -> v128 {
    unsafe { llvm_bitselect(v1.as_i8x16(), v2.as_i8x16(), c.as_i8x16()).v128() }
}

/// Returns `true` if any bit in `a` is set, or `false` otherwise.
#[inline]
#[cfg_attr(test, assert_instr(v128.any_true))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.any_true"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn v128_any_true(a: v128) -> bool {
    unsafe { llvm_any_true_i8x16(a.as_i8x16()) != 0 }
}
2200
/// Lane-wise wrapping absolute value of a vector of 16 eight-bit signed
/// integers.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.abs))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.abs"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_abs(a: v128) -> v128 {
    unsafe {
        let a = a.as_i8x16();
        let zero = simd::i8x16::ZERO;
        // `select(a < 0, 0 - a, a)`: negative lanes are negated (wrapping, so
        // i8::MIN maps to itself), others pass through unchanged.
        simd_select::<simd::m8x16, simd::i8x16>(simd_lt(a, zero), simd_sub(zero, a), a).v128()
    }
}

/// Negates a 128-bit vector interpreted as 16 eight-bit signed integers.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.neg))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.neg"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_neg(a: v128) -> v128 {
    unsafe { simd_mul(a.as_i8x16(), simd::i8x16::splat(-1)).v128() }
}

/// Counts the number of bits set to one within each lane of 16 eight-bit
/// integers.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.popcnt))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.popcnt"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_popcnt(v: v128) -> v128 {
    unsafe { simd_ctpop(v.as_i8x16()).v128() }
}

// Population count is sign-agnostic, so the unsigned name is a re-export.
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i8x16_popcnt as u8x16_popcnt;

/// Returns `true` if all lanes of the 16 eight-bit integer lanes are non-zero,
/// or `false` otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.all_true))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.all_true"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_all_true(a: v128) -> bool {
    unsafe { llvm_i8x16_all_true(a.as_i8x16()) != 0 }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i8x16_all_true as u8x16_all_true;

/// Extracts the high bit of each of the 16 eight-bit lanes and packs them
/// into a 16-bit mask; bit `i` of the result is the high bit of lane `i`.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.bitmask))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.bitmask"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_bitmask(a: v128) -> u16 {
    unsafe { llvm_bitmask_i8x16(a.as_i8x16()) as u16 }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i8x16_bitmask as u8x16_bitmask;
2264
/// Converts two input vectors of 8 sixteen-bit lanes each into a single
/// vector of 16 eight-bit signed integer lanes.
///
/// Each sixteen-bit lane is saturated to the `i8` range before truncation:
/// values above `i8::MAX` become `i8::MAX`, values below `i8::MIN` become
/// `i8::MIN`. Lanes of `a` form the low half of the result, lanes of `b` the
/// high half.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.narrow_i16x8_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.narrow_i16x8_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_narrow_i16x8(a: v128, b: v128) -> v128 {
    unsafe {
        // Concatenate the two 8-lane inputs into one 16-lane vector.
        let v: simd::i16x16 = simd_shuffle!(
            a.as_i16x8(),
            b.as_i16x8(),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        );

        let max = simd_splat(i16::from(i8::MAX));
        let min = simd_splat(i16::from(i8::MIN));

        // Clamp into [i8::MIN, i8::MAX] so the subsequent cast cannot wrap.
        let v = simd_select(simd_gt::<_, simd::i16x16>(v, max), max, v);
        let v = simd_select(simd_lt::<_, simd::i16x16>(v, min), min, v);

        let v: simd::i8x16 = simd_cast(v);

        v.v128()
    }
}

/// Converts two input vectors of 8 sixteen-bit lanes each into a single
/// vector of 16 eight-bit unsigned integer lanes.
///
/// Each sixteen-bit lane (interpreted as signed) is saturated to the `u8`
/// range before truncation: values above `u8::MAX` become `u8::MAX`,
/// negative values become 0. Lanes of `a` form the low half of the result,
/// lanes of `b` the high half.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.narrow_i16x8_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.narrow_i16x8_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u8x16_narrow_i16x8(a: v128, b: v128) -> v128 {
    unsafe {
        // Concatenate the two 8-lane inputs into one 16-lane vector.
        let v: simd::i16x16 = simd_shuffle!(
            a.as_i16x8(),
            b.as_i16x8(),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        );

        let max = simd_splat(i16::from(u8::MAX));
        let min = simd_splat(i16::from(u8::MIN));

        // Clamp into [0, u8::MAX] so the subsequent cast cannot wrap.
        let v = simd_select(simd_gt::<_, simd::i16x16>(v, max), max, v);
        let v = simd_select(simd_lt::<_, simd::i16x16>(v, min), min, v);

        let v: simd::u8x16 = simd_cast(v);

        v.v128()
    }
}
2324
/// Shifts each lane to the left by the specified number of bits.
///
/// Only the low bits of the shift amount are used: the amount is taken
/// modulo the lane width of 8 (hence the `& 0x7` mask).
#[inline]
#[cfg_attr(test, assert_instr(i8x16.shl))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.shl"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_shl(a: v128, amt: u32) -> v128 {
    unsafe { simd_shl(a.as_i8x16(), simd::i8x16::splat((amt & 0x7) as i8)).v128() }
}

// Left shift is sign-agnostic, so the unsigned name is a re-export.
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i8x16_shl as u8x16_shl;

/// Shifts each lane to the right by the specified number of bits, sign
/// extending (arithmetic shift).
///
/// Only the low bits of the shift amount are used: the amount is taken
/// modulo the lane width of 8 (hence the `& 0x7` mask).
#[inline]
#[cfg_attr(test, assert_instr(i8x16.shr_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.shr_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_shr(a: v128, amt: u32) -> v128 {
    // `simd_shr` on a signed lane type is an arithmetic shift.
    unsafe { simd_shr(a.as_i8x16(), simd::i8x16::splat((amt & 0x7) as i8)).v128() }
}

/// Shifts each lane to the right by the specified number of bits, shifting in
/// zeros (logical shift).
///
/// Only the low bits of the shift amount are used: the amount is taken
/// modulo the lane width of 8 (hence the `& 0x7` mask).
#[inline]
#[cfg_attr(test, assert_instr(i8x16.shr_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.shr_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u8x16_shr(a: v128, amt: u32) -> v128 {
    // `simd_shr` on an unsigned lane type is a logical shift.
    unsafe { simd_shr(a.as_u8x16(), simd::u8x16::splat((amt & 0x7) as u8)).v128() }
}
2390
/// Adds two 128-bit vectors as if they were two packed sixteen 8-bit integers,
/// with wrapping on overflow.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.add))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.add"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_add(a: v128, b: v128) -> v128 {
    unsafe { simd_add(a.as_i8x16(), b.as_i8x16()).v128() }
}

// Wrapping addition is sign-agnostic, so the unsigned name is a re-export.
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i8x16_add as u8x16_add;

/// Adds two 128-bit vectors as if they were two packed sixteen 8-bit signed
/// integers, saturating on overflow to `i8::MAX` / `i8::MIN`.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.add_sat_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.add_sat_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_add_sat(a: v128, b: v128) -> v128 {
    unsafe { simd_saturating_add(a.as_i8x16(), b.as_i8x16()).v128() }
}

/// Adds two 128-bit vectors as if they were two packed sixteen 8-bit unsigned
/// integers, saturating on overflow to `u8::MAX`.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.add_sat_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.add_sat_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u8x16_add_sat(a: v128, b: v128) -> v128 {
    unsafe { simd_saturating_add(a.as_u8x16(), b.as_u8x16()).v128() }
}

/// Subtracts two 128-bit vectors as if they were two packed sixteen 8-bit
/// integers, with wrapping on overflow.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.sub))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.sub"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_sub(a: v128, b: v128) -> v128 {
    unsafe { simd_sub(a.as_i8x16(), b.as_i8x16()).v128() }
}

// Wrapping subtraction is sign-agnostic, so the unsigned name is a re-export.
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i8x16_sub as u8x16_sub;

/// Subtracts two 128-bit vectors as if they were two packed sixteen 8-bit
/// signed integers, saturating on overflow to `i8::MAX` / `i8::MIN`.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.sub_sat_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.sub_sat_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_sub_sat(a: v128, b: v128) -> v128 {
    unsafe { simd_saturating_sub(a.as_i8x16(), b.as_i8x16()).v128() }
}

/// Subtracts two 128-bit vectors as if they were two packed sixteen 8-bit
/// unsigned integers, saturating on underflow to 0.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.sub_sat_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.sub_sat_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u8x16_sub_sat(a: v128, b: v128) -> v128 {
    unsafe { simd_saturating_sub(a.as_u8x16(), b.as_u8x16()).v128() }
}
2460
/// Compares lane-wise signed integers, and returns the minimum of
/// each pair.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.min_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.min_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_min(a: v128, b: v128) -> v128 {
    let a = a.as_i8x16();
    let b = b.as_i8x16();
    // `select(a < b, a, b)` is the lane-wise minimum.
    unsafe { simd_select::<simd::i8x16, _>(simd_lt(a, b), a, b).v128() }
}

/// Compares lane-wise unsigned integers, and returns the minimum of
/// each pair.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.min_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.min_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u8x16_min(a: v128, b: v128) -> v128 {
    let a = a.as_u8x16();
    let b = b.as_u8x16();
    unsafe { simd_select::<simd::i8x16, _>(simd_lt(a, b), a, b).v128() }
}

/// Compares lane-wise signed integers, and returns the maximum of
/// each pair.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.max_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.max_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_max(a: v128, b: v128) -> v128 {
    let a = a.as_i8x16();
    let b = b.as_i8x16();
    // `select(a > b, a, b)` is the lane-wise maximum.
    unsafe { simd_select::<simd::i8x16, _>(simd_gt(a, b), a, b).v128() }
}

/// Compares lane-wise unsigned integers, and returns the maximum of
/// each pair.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.max_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.max_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u8x16_max(a: v128, b: v128) -> v128 {
    let a = a.as_u8x16();
    let b = b.as_u8x16();
    unsafe { simd_select::<simd::i8x16, _>(simd_gt(a, b), a, b).v128() }
}
2512
/// Lane-wise rounding average of two vectors of 16 unsigned eight-bit
/// integers.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.avgr_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.avgr_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u8x16_avgr(a: v128, b: v128) -> v128 {
    // The intrinsic takes i8x16 arguments but treats the lanes as unsigned.
    unsafe { llvm_avgr_u_i8x16(a.as_i8x16(), b.as_i8x16()).v128() }
}

/// Integer extended pairwise addition producing extended results
/// (twice wider results than the inputs), interpreting lanes as eight-bit
/// signed integers.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.extadd_pairwise_i8x16_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.extadd_pairwise_i8x16_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_extadd_pairwise_i8x16(a: v128) -> v128 {
    unsafe { llvm_i16x8_extadd_pairwise_i8x16_s(a.as_i8x16()).v128() }
}

/// Integer extended pairwise addition producing extended results
/// (twice wider results than the inputs), interpreting lanes as eight-bit
/// unsigned integers.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.extadd_pairwise_i8x16_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.extadd_pairwise_i8x16_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_extadd_pairwise_u8x16(a: v128) -> v128 {
    unsafe { llvm_i16x8_extadd_pairwise_i8x16_u(a.as_i8x16()).v128() }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i16x8_extadd_pairwise_u8x16 as u16x8_extadd_pairwise_u8x16;
2547
/// Lane-wise wrapping absolute value of a vector of 8 sixteen-bit signed
/// integers.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.abs))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.abs"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_abs(a: v128) -> v128 {
    let a = a.as_i16x8();
    let zero = simd::i16x8::ZERO;
    unsafe {
        // `select(a < 0, 0 - a, a)`: negative lanes are negated (wrapping, so
        // i16::MIN maps to itself), others pass through unchanged.
        simd_select::<simd::m16x8, simd::i16x8>(simd_lt(a, zero), simd_sub(zero, a), a).v128()
    }
}

/// Negates a 128-bit vector interpreted as 8 sixteen-bit signed integers.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.neg))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.neg"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_neg(a: v128) -> v128 {
    unsafe { simd_mul(a.as_i16x8(), simd::i16x8::splat(-1)).v128() }
}

/// Lane-wise saturating rounding multiplication in Q15 fixed-point format.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.q15mulr_sat_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.q15mulr_sat_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_q15mulr_sat(a: v128, b: v128) -> v128 {
    unsafe { llvm_q15mulr(a.as_i16x8(), b.as_i16x8()).v128() }
}

/// Returns `true` if all lanes of the 8 sixteen-bit integer lanes are
/// non-zero, or `false` otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.all_true))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.all_true"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_all_true(a: v128) -> bool {
    unsafe { llvm_i16x8_all_true(a.as_i16x8()) != 0 }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i16x8_all_true as u16x8_all_true;

/// Extracts the high bit of each of the 8 sixteen-bit lanes and packs them
/// into an 8-bit mask; bit `i` of the result is the high bit of lane `i`.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.bitmask))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.bitmask"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_bitmask(a: v128) -> u8 {
    unsafe { llvm_bitmask_i16x8(a.as_i16x8()) as u8 }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i16x8_bitmask as u16x8_bitmask;
2608
/// Converts two input vectors of 4 thirty-two-bit lanes each into a single
/// vector of 8 sixteen-bit signed integer lanes.
///
/// Each thirty-two-bit lane is saturated to the `i16` range before
/// truncation: values above `i16::MAX` become `i16::MAX`, values below
/// `i16::MIN` become `i16::MIN`. Lanes of `a` form the low half of the
/// result, lanes of `b` the high half.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.narrow_i32x4_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.narrow_i32x4_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_narrow_i32x4(a: v128, b: v128) -> v128 {
    unsafe {
        // Concatenate the two 4-lane inputs into one 8-lane vector.
        let v: simd::i32x8 = simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]);

        let max = simd_splat(i32::from(i16::MAX));
        let min = simd_splat(i32::from(i16::MIN));

        // Clamp into [i16::MIN, i16::MAX] so the subsequent cast cannot wrap.
        let v = simd_select(simd_gt::<_, simd::i32x8>(v, max), max, v);
        let v = simd_select(simd_lt::<_, simd::i32x8>(v, min), min, v);

        let v: simd::i16x8 = simd_cast(v);

        v.v128()
    }
}

/// Converts two input vectors of 4 thirty-two-bit lanes each into a single
/// vector of 8 sixteen-bit unsigned integer lanes.
///
/// Each thirty-two-bit lane (interpreted as signed) is saturated to the
/// `u16` range before truncation: values above `u16::MAX` become `u16::MAX`,
/// negative values become 0. Lanes of `a` form the low half of the result,
/// lanes of `b` the high half.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.narrow_i32x4_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.narrow_i32x4_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u16x8_narrow_i32x4(a: v128, b: v128) -> v128 {
    unsafe {
        // Concatenate the two 4-lane inputs into one 8-lane vector.
        let v: simd::i32x8 = simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]);

        let max = simd_splat(i32::from(u16::MAX));
        let min = simd_splat(i32::from(u16::MIN));

        // Clamp into [0, u16::MAX] so the subsequent cast cannot wrap.
        let v = simd_select(simd_gt::<_, simd::i32x8>(v, max), max, v);
        let v = simd_select(simd_lt::<_, simd::i32x8>(v, min), min, v);

        let v: simd::u16x8 = simd_cast(v);

        v.v128()
    }
}
2660
/// Converts the low 8 eight-bit signed lanes of the input to 8 sixteen-bit
/// lanes, sign extending each value.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.extend_low_i8x16_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.extend_low_i8x16_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_extend_low_i8x16(a: v128) -> v128 {
    unsafe {
        // Take lanes 0..8, then widen; the signed cast sign-extends.
        simd_cast::<simd::i8x8, simd::i16x8>(simd_shuffle!(
            a.as_i8x16(),
            a.as_i8x16(),
            [0, 1, 2, 3, 4, 5, 6, 7],
        ))
        .v128()
    }
}

/// Converts the high 8 eight-bit signed lanes of the input to 8 sixteen-bit
/// lanes, sign extending each value.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.extend_high_i8x16_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.extend_high_i8x16_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_extend_high_i8x16(a: v128) -> v128 {
    unsafe {
        // Take lanes 8..16, then widen; the signed cast sign-extends.
        simd_cast::<simd::i8x8, simd::i16x8>(simd_shuffle!(
            a.as_i8x16(),
            a.as_i8x16(),
            [8, 9, 10, 11, 12, 13, 14, 15],
        ))
        .v128()
    }
}

/// Converts the low 8 eight-bit unsigned lanes of the input to 8 sixteen-bit
/// lanes, zero extending each value.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.extend_low_i8x16_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.extend_low_i8x16_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_extend_low_u8x16(a: v128) -> v128 {
    unsafe {
        // Take lanes 0..8, then widen; the unsigned cast zero-extends.
        simd_cast::<simd::u8x8, simd::u16x8>(simd_shuffle!(
            a.as_u8x16(),
            a.as_u8x16(),
            [0, 1, 2, 3, 4, 5, 6, 7],
        ))
        .v128()
    }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i16x8_extend_low_u8x16 as u16x8_extend_low_u8x16;

/// Converts the high 8 eight-bit unsigned lanes of the input to 8 sixteen-bit
/// lanes, zero extending each value.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.extend_high_i8x16_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.extend_high_i8x16_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_extend_high_u8x16(a: v128) -> v128 {
    unsafe {
        // Take lanes 8..16, then widen; the unsigned cast zero-extends.
        simd_cast::<simd::u8x8, simd::u16x8>(simd_shuffle!(
            a.as_u8x16(),
            a.as_u8x16(),
            [8, 9, 10, 11, 12, 13, 14, 15],
        ))
        .v128()
    }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i16x8_extend_high_u8x16 as u16x8_extend_high_u8x16;
2738
/// Shifts each 16-bit lane left by `amt` bits.
///
/// Only the low 4 bits of `amt` are used (the shift count is taken modulo
/// the lane width, 16), mirroring the wasm `i16x8.shl` semantics.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.shl))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.shl"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_shl(a: v128, amt: u32) -> v128 {
    unsafe { simd_shl(a.as_i16x8(), simd::i16x8::splat((amt & 0xf) as i16)).v128() }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i16x8_shl as u16x8_shl;
2756
/// Arithmetically (sign-preserving) shifts each 16-bit lane right by `amt`
/// bits. Only the low 4 bits of `amt` are used (count modulo 16).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.shr_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.shr_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_shr(a: v128, amt: u32) -> v128 {
    unsafe { simd_shr(a.as_i16x8(), simd::i16x8::splat((amt & 0xf) as i16)).v128() }
}
2772
/// Logically (zero-filling) shifts each 16-bit lane right by `amt` bits.
/// Only the low 4 bits of `amt` are used (count modulo 16).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.shr_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.shr_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u16x8_shr(a: v128, amt: u32) -> v128 {
    unsafe { simd_shr(a.as_u16x8(), simd::u16x8::splat((amt & 0xf) as u16)).v128() }
}
2788
/// Lane-wise addition of two vectors of eight 16-bit integers
/// (`i16x8.add`; overflow wraps, so the signed/unsigned forms coincide).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.add))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.add"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_add(a: v128, b: v128) -> v128 {
    unsafe { simd_add(a.as_i16x8(), b.as_i16x8()).v128() }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i16x8_add as u16x8_add;
2801
/// Lane-wise saturating addition of two vectors of eight signed 16-bit
/// integers: results clamp to the `i16` range instead of wrapping.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.add_sat_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.add_sat_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_add_sat(a: v128, b: v128) -> v128 {
    unsafe { simd_saturating_add(a.as_i16x8(), b.as_i16x8()).v128() }
}
2812
/// Lane-wise saturating addition of two vectors of eight unsigned 16-bit
/// integers: results clamp to the `u16` range instead of wrapping.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.add_sat_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.add_sat_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u16x8_add_sat(a: v128, b: v128) -> v128 {
    unsafe { simd_saturating_add(a.as_u16x8(), b.as_u16x8()).v128() }
}
2823
/// Lane-wise subtraction of two vectors of eight 16-bit integers
/// (`i16x8.sub`; overflow wraps, so the signed/unsigned forms coincide).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.sub))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.sub"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_sub(a: v128, b: v128) -> v128 {
    unsafe { simd_sub(a.as_i16x8(), b.as_i16x8()).v128() }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i16x8_sub as u16x8_sub;
2836
/// Lane-wise saturating subtraction of two vectors of eight signed 16-bit
/// integers: results clamp to the `i16` range instead of wrapping.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.sub_sat_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.sub_sat_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_sub_sat(a: v128, b: v128) -> v128 {
    unsafe { simd_saturating_sub(a.as_i16x8(), b.as_i16x8()).v128() }
}
2847
/// Lane-wise saturating subtraction of two vectors of eight unsigned 16-bit
/// integers: results clamp at zero instead of wrapping.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.sub_sat_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.sub_sat_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u16x8_sub_sat(a: v128, b: v128) -> v128 {
    unsafe { simd_saturating_sub(a.as_u16x8(), b.as_u16x8()).v128() }
}
2858
/// Lane-wise multiplication of two vectors of eight 16-bit integers
/// (`i16x8.mul`; low 16 bits of each product are kept, so the
/// signed/unsigned forms coincide).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.mul))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.mul"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_mul(a: v128, b: v128) -> v128 {
    unsafe { simd_mul(a.as_i16x8(), b.as_i16x8()).v128() }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i16x8_mul as u16x8_mul;
2872
/// Lane-wise minimum of two vectors of eight signed 16-bit integers.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.min_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.min_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_min(a: v128, b: v128) -> v128 {
    let a = a.as_i16x8();
    let b = b.as_i16x8();
    // Per lane: pick `a` where `a < b` (signed), otherwise `b`.
    unsafe { simd_select::<simd::i16x8, _>(simd_lt(a, b), a, b).v128() }
}
2885
/// Lane-wise minimum of two vectors of eight unsigned 16-bit integers.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.min_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.min_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u16x8_min(a: v128, b: v128) -> v128 {
    let a = a.as_u16x8();
    let b = b.as_u16x8();
    // Per lane: pick `a` where `a < b` (unsigned compare), otherwise `b`.
    unsafe { simd_select::<simd::i16x8, _>(simd_lt(a, b), a, b).v128() }
}
2898
/// Lane-wise maximum of two vectors of eight signed 16-bit integers.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.max_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.max_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_max(a: v128, b: v128) -> v128 {
    let a = a.as_i16x8();
    let b = b.as_i16x8();
    // Per lane: pick `a` where `a > b` (signed), otherwise `b`.
    unsafe { simd_select::<simd::i16x8, _>(simd_gt(a, b), a, b).v128() }
}
2911
/// Lane-wise maximum of two vectors of eight unsigned 16-bit integers.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.max_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.max_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u16x8_max(a: v128, b: v128) -> v128 {
    let a = a.as_u16x8();
    let b = b.as_u16x8();
    // Per lane: pick `a` where `a > b` (unsigned compare), otherwise `b`.
    unsafe { simd_select::<simd::i16x8, _>(simd_gt(a, b), a, b).v128() }
}
2924
/// Lane-wise rounding average of two vectors of eight unsigned 16-bit
/// integers (`i16x8.avgr_u`), delegated to the LLVM wasm intrinsic.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.avgr_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.avgr_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u16x8_avgr(a: v128, b: v128) -> v128 {
    unsafe { llvm_avgr_u_i16x8(a.as_i16x8(), b.as_i16x8()).v128() }
}
2934
/// Sign-extends the low eight 8-bit lanes of each input to 16 bits and
/// multiplies them lane-wise (`i16x8.extmul_low_i8x16_s`); products cannot
/// overflow the widened lanes.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.extmul_low_i8x16_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.extmul_low_i8x16_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_extmul_low_i8x16(a: v128, b: v128) -> v128 {
    unsafe {
        // Widen lanes 0..=7 of each operand with sign extension, then multiply.
        let lhs = simd_cast::<simd::i8x8, simd::i16x8>(simd_shuffle!(
            a.as_i8x16(),
            a.as_i8x16(),
            [0, 1, 2, 3, 4, 5, 6, 7],
        ));
        let rhs = simd_cast::<simd::i8x8, simd::i16x8>(simd_shuffle!(
            b.as_i8x16(),
            b.as_i8x16(),
            [0, 1, 2, 3, 4, 5, 6, 7],
        ));
        simd_mul(lhs, rhs).v128()
    }
}
2959
/// Sign-extends the high eight 8-bit lanes of each input to 16 bits and
/// multiplies them lane-wise (`i16x8.extmul_high_i8x16_s`).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.extmul_high_i8x16_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.extmul_high_i8x16_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_extmul_high_i8x16(a: v128, b: v128) -> v128 {
    unsafe {
        // Widen lanes 8..=15 of each operand with sign extension, then multiply.
        let lhs = simd_cast::<simd::i8x8, simd::i16x8>(simd_shuffle!(
            a.as_i8x16(),
            a.as_i8x16(),
            [8, 9, 10, 11, 12, 13, 14, 15],
        ));
        let rhs = simd_cast::<simd::i8x8, simd::i16x8>(simd_shuffle!(
            b.as_i8x16(),
            b.as_i8x16(),
            [8, 9, 10, 11, 12, 13, 14, 15],
        ));
        simd_mul(lhs, rhs).v128()
    }
}
2984
/// Zero-extends the low eight 8-bit lanes of each input to 16 bits and
/// multiplies them lane-wise (`i16x8.extmul_low_i8x16_u`).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.extmul_low_i8x16_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.extmul_low_i8x16_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_extmul_low_u8x16(a: v128, b: v128) -> v128 {
    unsafe {
        // Widen lanes 0..=7 of each operand with zero extension, then multiply.
        let lhs = simd_cast::<simd::u8x8, simd::u16x8>(simd_shuffle!(
            a.as_u8x16(),
            a.as_u8x16(),
            [0, 1, 2, 3, 4, 5, 6, 7],
        ));
        let rhs = simd_cast::<simd::u8x8, simd::u16x8>(simd_shuffle!(
            b.as_u8x16(),
            b.as_u8x16(),
            [0, 1, 2, 3, 4, 5, 6, 7],
        ));
        simd_mul(lhs, rhs).v128()
    }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i16x8_extmul_low_u8x16 as u16x8_extmul_low_u8x16;
3012
/// Zero-extends the high eight 8-bit lanes of each input to 16 bits and
/// multiplies them lane-wise (`i16x8.extmul_high_i8x16_u`).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.extmul_high_i8x16_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.extmul_high_i8x16_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_extmul_high_u8x16(a: v128, b: v128) -> v128 {
    unsafe {
        // Widen lanes 8..=15 of each operand with zero extension, then multiply.
        let lhs = simd_cast::<simd::u8x8, simd::u16x8>(simd_shuffle!(
            a.as_u8x16(),
            a.as_u8x16(),
            [8, 9, 10, 11, 12, 13, 14, 15],
        ));
        let rhs = simd_cast::<simd::u8x8, simd::u16x8>(simd_shuffle!(
            b.as_u8x16(),
            b.as_u8x16(),
            [8, 9, 10, 11, 12, 13, 14, 15],
        ));
        simd_mul(lhs, rhs).v128()
    }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i16x8_extmul_high_u8x16 as u16x8_extmul_high_u8x16;
3040
/// Adds adjacent pairs of signed 16-bit lanes, producing four 32-bit lanes
/// (`i32x4.extadd_pairwise_i16x8_s`), via the LLVM wasm intrinsic.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.extadd_pairwise_i16x8_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.extadd_pairwise_i16x8_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_extadd_pairwise_i16x8(a: v128) -> v128 {
    unsafe { llvm_i32x4_extadd_pairwise_i16x8_s(a.as_i16x8()).v128() }
}
3051
/// Adds adjacent pairs of unsigned 16-bit lanes, producing four 32-bit lanes
/// (`i32x4.extadd_pairwise_i16x8_u`), via the LLVM wasm intrinsic.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.extadd_pairwise_i16x8_u))]
#[doc(alias("i32x4.extadd_pairwise_i16x8_u"))]
#[target_feature(enable = "simd128")]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_extadd_pairwise_u16x8(a: v128) -> v128 {
    // The intrinsic's signature takes i16x8; lane bits are reinterpreted,
    // the unsigned pairwise semantics come from the LLVM intrinsic itself.
    unsafe { llvm_i32x4_extadd_pairwise_i16x8_u(a.as_i16x8()).v128() }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i32x4_extadd_pairwise_u16x8 as u32x4_extadd_pairwise_u16x8;
3065
/// Lane-wise absolute value of a vector of four signed 32-bit integers.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.abs))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.abs"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_abs(a: v128) -> v128 {
    let a = a.as_i32x4();
    let zero = simd::i32x4::ZERO;
    unsafe {
        // Per lane: where a < 0 take 0 - a, else a (i32::MIN wraps to itself).
        simd_select::<simd::m32x4, simd::i32x4>(simd_lt(a, zero), simd_sub(zero, a), a).v128()
    }
}
3079
/// Lane-wise wrapping negation of a vector of four 32-bit integers.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.neg))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.neg"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_neg(a: v128) -> v128 {
    // Multiply by -1 rather than simd_neg so LLVM selects `i32x4.neg`.
    unsafe { simd_mul(a.as_i32x4(), simd::i32x4::splat(-1)).v128() }
}
3089
/// Returns `true` if every 32-bit lane of `a` is nonzero (`i32x4.all_true`).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.all_true))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.all_true"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_all_true(a: v128) -> bool {
    unsafe { llvm_i32x4_all_true(a.as_i32x4()) != 0 }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i32x4_all_true as u32x4_all_true;
3102
/// Gathers the most significant bit of each 32-bit lane into the low 4 bits
/// of the result (`i32x4.bitmask`).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.bitmask))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.bitmask"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_bitmask(a: v128) -> u8 {
    // Only 4 mask bits are produced, so the i32 result fits in a u8.
    unsafe { llvm_bitmask_i32x4(a.as_i32x4()) as u8 }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i32x4_bitmask as u32x4_bitmask;
3116
/// Sign-extends the low four lanes of a vector of eight 16-bit integers
/// into four 32-bit lanes (`i32x4.extend_low_i16x8_s`).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.extend_low_i16x8_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.extend_low_i16x8_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_extend_low_i16x8(a: v128) -> v128 {
    unsafe {
        // Select lanes 0..=3, then widen each i16 to i32 with sign extension.
        simd_cast::<simd::i16x4, simd::i32x4>(simd_shuffle!(
            a.as_i16x8(),
            a.as_i16x8(),
            [0, 1, 2, 3]
        ))
        .v128()
    }
}
3134
/// Sign-extends the high four lanes of a vector of eight 16-bit integers
/// into four 32-bit lanes (`i32x4.extend_high_i16x8_s`).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.extend_high_i16x8_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.extend_high_i16x8_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_extend_high_i16x8(a: v128) -> v128 {
    unsafe {
        // Select lanes 4..=7, then widen each i16 to i32 with sign extension.
        simd_cast::<simd::i16x4, simd::i32x4>(simd_shuffle!(
            a.as_i16x8(),
            a.as_i16x8(),
            [4, 5, 6, 7]
        ))
        .v128()
    }
}
3152
/// Zero-extends the low four lanes of a vector of eight 16-bit integers
/// into four 32-bit lanes (`i32x4.extend_low_i16x8_u`).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.extend_low_i16x8_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.extend_low_i16x8_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_extend_low_u16x8(a: v128) -> v128 {
    unsafe {
        // Select lanes 0..=3, then widen each u16 to u32 with zero extension.
        simd_cast::<simd::u16x4, simd::u32x4>(simd_shuffle!(
            a.as_u16x8(),
            a.as_u16x8(),
            [0, 1, 2, 3]
        ))
        .v128()
    }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i32x4_extend_low_u16x8 as u32x4_extend_low_u16x8;
3173
/// Zero-extends the high four lanes of a vector of eight 16-bit integers
/// into four 32-bit lanes (`i32x4.extend_high_i16x8_u`).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.extend_high_i16x8_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.extend_high_i16x8_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_extend_high_u16x8(a: v128) -> v128 {
    unsafe {
        // Select lanes 4..=7, then widen each u16 to u32 with zero extension.
        simd_cast::<simd::u16x4, simd::u32x4>(simd_shuffle!(
            a.as_u16x8(),
            a.as_u16x8(),
            [4, 5, 6, 7]
        ))
        .v128()
    }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i32x4_extend_high_u16x8 as u32x4_extend_high_u16x8;
3194
/// Shifts each 32-bit lane left by `amt` bits.
///
/// Only the low 5 bits of `amt` are used (count modulo 32), mirroring the
/// wasm `i32x4.shl` semantics.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.shl))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.shl"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_shl(a: v128, amt: u32) -> v128 {
    unsafe { simd_shl(a.as_i32x4(), simd::i32x4::splat((amt & 0x1f) as i32)).v128() }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i32x4_shl as u32x4_shl;
3212
/// Arithmetically (sign-preserving) shifts each 32-bit lane right by `amt`
/// bits. Only the low 5 bits of `amt` are used (count modulo 32).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.shr_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.shr_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_shr(a: v128, amt: u32) -> v128 {
    unsafe { simd_shr(a.as_i32x4(), simd::i32x4::splat((amt & 0x1f) as i32)).v128() }
}
3228
/// Logically (zero-filling) shifts each 32-bit lane right by `amt` bits.
/// Only the low 5 bits of `amt` are used (count modulo 32).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.shr_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.shr_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u32x4_shr(a: v128, amt: u32) -> v128 {
    unsafe { simd_shr(a.as_u32x4(), simd::u32x4::splat(amt & 0x1f)).v128() }
}
3244
/// Lane-wise addition of two vectors of four 32-bit integers
/// (`i32x4.add`; overflow wraps, so the signed/unsigned forms coincide).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.add))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.add"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_add(a: v128, b: v128) -> v128 {
    unsafe { simd_add(a.as_i32x4(), b.as_i32x4()).v128() }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i32x4_add as u32x4_add;
3257
/// Lane-wise subtraction of two vectors of four 32-bit integers
/// (`i32x4.sub`; overflow wraps, so the signed/unsigned forms coincide).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.sub))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.sub"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_sub(a: v128, b: v128) -> v128 {
    unsafe { simd_sub(a.as_i32x4(), b.as_i32x4()).v128() }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i32x4_sub as u32x4_sub;
3270
/// Lane-wise multiplication of two vectors of four 32-bit integers
/// (`i32x4.mul`; low 32 bits of each product are kept, so the
/// signed/unsigned forms coincide).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.mul))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.mul"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_mul(a: v128, b: v128) -> v128 {
    unsafe { simd_mul(a.as_i32x4(), b.as_i32x4()).v128() }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i32x4_mul as u32x4_mul;
3284
/// Lane-wise minimum of two vectors of four signed 32-bit integers.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.min_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.min_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_min(a: v128, b: v128) -> v128 {
    let a = a.as_i32x4();
    let b = b.as_i32x4();
    // Per lane: pick `a` where `a < b` (signed), otherwise `b`.
    unsafe { simd_select::<simd::i32x4, _>(simd_lt(a, b), a, b).v128() }
}
3297
/// Lane-wise minimum of two vectors of four unsigned 32-bit integers.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.min_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.min_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u32x4_min(a: v128, b: v128) -> v128 {
    let a = a.as_u32x4();
    let b = b.as_u32x4();
    // Per lane: pick `a` where `a < b` (unsigned compare), otherwise `b`.
    unsafe { simd_select::<simd::i32x4, _>(simd_lt(a, b), a, b).v128() }
}
3310
/// Lane-wise maximum of two vectors of four signed 32-bit integers.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.max_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.max_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_max(a: v128, b: v128) -> v128 {
    let a = a.as_i32x4();
    let b = b.as_i32x4();
    // Per lane: pick `a` where `a > b` (signed), otherwise `b`.
    unsafe { simd_select::<simd::i32x4, _>(simd_gt(a, b), a, b).v128() }
}
3323
/// Lane-wise maximum of two vectors of four unsigned 32-bit integers.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.max_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.max_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u32x4_max(a: v128, b: v128) -> v128 {
    let a = a.as_u32x4();
    let b = b.as_u32x4();
    // Per lane: pick `a` where `a > b` (unsigned compare), otherwise `b`.
    unsafe { simd_select::<simd::i32x4, _>(simd_gt(a, b), a, b).v128() }
}
3336
/// Multiplies corresponding signed 16-bit lanes and adds adjacent pairs of
/// the 32-bit products (`i32x4.dot_i16x8_s`), via the LLVM wasm intrinsic.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.dot_i16x8_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.dot_i16x8_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_dot_i16x8(a: v128, b: v128) -> v128 {
    unsafe { llvm_i32x4_dot_i16x8_s(a.as_i16x8(), b.as_i16x8()).v128() }
}
3347
/// Sign-extends the low four 16-bit lanes of each input to 32 bits and
/// multiplies them lane-wise (`i32x4.extmul_low_i16x8_s`).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.extmul_low_i16x8_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.extmul_low_i16x8_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_extmul_low_i16x8(a: v128, b: v128) -> v128 {
    unsafe {
        // Widen lanes 0..=3 of each operand with sign extension, then multiply.
        let lhs = simd_cast::<simd::i16x4, simd::i32x4>(simd_shuffle!(
            a.as_i16x8(),
            a.as_i16x8(),
            [0, 1, 2, 3]
        ));
        let rhs = simd_cast::<simd::i16x4, simd::i32x4>(simd_shuffle!(
            b.as_i16x8(),
            b.as_i16x8(),
            [0, 1, 2, 3]
        ));
        simd_mul(lhs, rhs).v128()
    }
}
3372
/// Sign-extends the high four 16-bit lanes of each input to 32 bits and
/// multiplies them lane-wise (`i32x4.extmul_high_i16x8_s`).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.extmul_high_i16x8_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.extmul_high_i16x8_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_extmul_high_i16x8(a: v128, b: v128) -> v128 {
    unsafe {
        // Widen lanes 4..=7 of each operand with sign extension, then multiply.
        let lhs = simd_cast::<simd::i16x4, simd::i32x4>(simd_shuffle!(
            a.as_i16x8(),
            a.as_i16x8(),
            [4, 5, 6, 7]
        ));
        let rhs = simd_cast::<simd::i16x4, simd::i32x4>(simd_shuffle!(
            b.as_i16x8(),
            b.as_i16x8(),
            [4, 5, 6, 7]
        ));
        simd_mul(lhs, rhs).v128()
    }
}
3397
/// Zero-extends the low four 16-bit lanes of each input to 32 bits and
/// multiplies them lane-wise (`i32x4.extmul_low_i16x8_u`).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.extmul_low_i16x8_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.extmul_low_i16x8_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_extmul_low_u16x8(a: v128, b: v128) -> v128 {
    unsafe {
        // Widen lanes 0..=3 of each operand with zero extension, then multiply.
        let lhs = simd_cast::<simd::u16x4, simd::u32x4>(simd_shuffle!(
            a.as_u16x8(),
            a.as_u16x8(),
            [0, 1, 2, 3]
        ));
        let rhs = simd_cast::<simd::u16x4, simd::u32x4>(simd_shuffle!(
            b.as_u16x8(),
            b.as_u16x8(),
            [0, 1, 2, 3]
        ));
        simd_mul(lhs, rhs).v128()
    }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i32x4_extmul_low_u16x8 as u32x4_extmul_low_u16x8;
3425
/// Zero-extends the high four 16-bit lanes of each input to 32 bits and
/// multiplies them lane-wise (`i32x4.extmul_high_i16x8_u`).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.extmul_high_i16x8_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.extmul_high_i16x8_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_extmul_high_u16x8(a: v128, b: v128) -> v128 {
    unsafe {
        // Widen lanes 4..=7 of each operand with zero extension, then multiply.
        let lhs = simd_cast::<simd::u16x4, simd::u32x4>(simd_shuffle!(
            a.as_u16x8(),
            a.as_u16x8(),
            [4, 5, 6, 7]
        ));
        let rhs = simd_cast::<simd::u16x4, simd::u32x4>(simd_shuffle!(
            b.as_u16x8(),
            b.as_u16x8(),
            [4, 5, 6, 7]
        ));
        simd_mul(lhs, rhs).v128()
    }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i32x4_extmul_high_u16x8 as u32x4_extmul_high_u16x8;
3453
/// Lane-wise absolute value of a vector of two signed 64-bit integers.
#[inline]
#[cfg_attr(test, assert_instr(i64x2.abs))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.abs"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_abs(a: v128) -> v128 {
    let a = a.as_i64x2();
    let zero = simd::i64x2::ZERO;
    unsafe {
        // Per lane: where a < 0 take 0 - a, else a (i64::MIN wraps to itself).
        simd_select::<simd::m64x2, simd::i64x2>(simd_lt(a, zero), simd_sub(zero, a), a).v128()
    }
}
3467
/// Lane-wise wrapping negation of a vector of two 64-bit integers.
#[inline]
#[cfg_attr(test, assert_instr(i64x2.neg))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.neg"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_neg(a: v128) -> v128 {
    // Multiply by -1 rather than simd_neg so LLVM selects `i64x2.neg`.
    unsafe { simd_mul(a.as_i64x2(), simd::i64x2::splat(-1)).v128() }
}
3477
/// Returns `true` if every 64-bit lane of `a` is nonzero (`i64x2.all_true`).
#[inline]
#[cfg_attr(test, assert_instr(i64x2.all_true))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.all_true"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_all_true(a: v128) -> bool {
    unsafe { llvm_i64x2_all_true(a.as_i64x2()) != 0 }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i64x2_all_true as u64x2_all_true;
3490
/// Gathers the most significant bit of each 64-bit lane into the low 2 bits
/// of the result (`i64x2.bitmask`).
#[inline]
#[cfg_attr(test, assert_instr(i64x2.bitmask))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.bitmask"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_bitmask(a: v128) -> u8 {
    // Only 2 mask bits are produced, so the i32 result fits in a u8.
    unsafe { llvm_bitmask_i64x2(a.as_i64x2()) as u8 }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i64x2_bitmask as u64x2_bitmask;
3504
/// Sign-extends the low two lanes of a vector of four 32-bit integers into
/// two 64-bit lanes (`i64x2.extend_low_i32x4_s`).
#[inline]
#[cfg_attr(test, assert_instr(i64x2.extend_low_i32x4_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.extend_low_i32x4_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_extend_low_i32x4(a: v128) -> v128 {
    unsafe {
        // Select lanes 0..=1, then widen each i32 to i64 with sign extension.
        simd_cast::<simd::i32x2, simd::i64x2>(simd_shuffle!(a.as_i32x4(), a.as_i32x4(), [0, 1]))
            .v128()
    }
}
3518
/// Sign-extends the high two lanes of a vector of four 32-bit integers into
/// two 64-bit lanes (`i64x2.extend_high_i32x4_s`).
#[inline]
#[cfg_attr(test, assert_instr(i64x2.extend_high_i32x4_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.extend_high_i32x4_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_extend_high_i32x4(a: v128) -> v128 {
    unsafe {
        // Select lanes 2..=3, then widen each i32 to i64 with sign extension.
        simd_cast::<simd::i32x2, simd::i64x2>(simd_shuffle!(a.as_i32x4(), a.as_i32x4(), [2, 3]))
            .v128()
    }
}
3532
/// Zero-extends the low two lanes of a vector of four 32-bit integers into
/// two 64-bit lanes (`i64x2.extend_low_i32x4_u`).
#[inline]
#[cfg_attr(test, assert_instr(i64x2.extend_low_i32x4_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.extend_low_i32x4_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_extend_low_u32x4(a: v128) -> v128 {
    unsafe {
        // Select lanes 0..=1 as u32, then widen; u32 -> i64 zero-extends.
        simd_cast::<simd::u32x2, simd::i64x2>(simd_shuffle!(a.as_u32x4(), a.as_u32x4(), [0, 1]))
            .v128()
    }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i64x2_extend_low_u32x4 as u64x2_extend_low_u32x4;
3549
/// Zero-extends the high two lanes of a vector of four 32-bit integers into
/// two 64-bit lanes (`i64x2.extend_high_i32x4_u`).
#[inline]
#[cfg_attr(test, assert_instr(i64x2.extend_high_i32x4_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.extend_high_i32x4_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_extend_high_u32x4(a: v128) -> v128 {
    unsafe {
        // Select lanes 2..=3 as u32, then widen; u32 -> i64 zero-extends.
        simd_cast::<simd::u32x2, simd::i64x2>(simd_shuffle!(a.as_u32x4(), a.as_u32x4(), [2, 3]))
            .v128()
    }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i64x2_extend_high_u32x4 as u64x2_extend_high_u32x4;
3566
/// Shifts each 64-bit lane left by `amt` bits.
///
/// Only the low 6 bits of `amt` are used (count modulo 64), mirroring the
/// wasm `i64x2.shl` semantics.
#[inline]
#[cfg_attr(test, assert_instr(i64x2.shl))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.shl"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_shl(a: v128, amt: u32) -> v128 {
    unsafe { simd_shl(a.as_i64x2(), simd::i64x2::splat((amt & 0x3f) as i64)).v128() }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i64x2_shl as u64x2_shl;
3584
/// Arithmetically (sign-preserving) shifts each 64-bit lane right by `amt`
/// bits. Only the low 6 bits of `amt` are used (count modulo 64).
#[inline]
#[cfg_attr(test, assert_instr(i64x2.shr_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.shr_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_shr(a: v128, amt: u32) -> v128 {
    unsafe { simd_shr(a.as_i64x2(), simd::i64x2::splat((amt & 0x3f) as i64)).v128() }
}
3600
/// Logically (zero-filling) shifts each 64-bit lane right by `amt` bits.
/// Only the low 6 bits of `amt` are used (count modulo 64).
#[inline]
#[cfg_attr(test, assert_instr(i64x2.shr_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.shr_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u64x2_shr(a: v128, amt: u32) -> v128 {
    unsafe { simd_shr(a.as_u64x2(), simd::u64x2::splat((amt & 0x3f) as u64)).v128() }
}
3616
/// Lane-wise addition of two vectors of two 64-bit integers
/// (`i64x2.add`; overflow wraps, so the signed/unsigned forms coincide).
#[inline]
#[cfg_attr(test, assert_instr(i64x2.add))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.add"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_add(a: v128, b: v128) -> v128 {
    unsafe { simd_add(a.as_i64x2(), b.as_i64x2()).v128() }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i64x2_add as u64x2_add;
3629
/// Lane-wise subtraction of two vectors of two 64-bit integers
/// (`i64x2.sub`; overflow wraps, so the signed/unsigned forms coincide).
#[inline]
#[cfg_attr(test, assert_instr(i64x2.sub))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.sub"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_sub(a: v128, b: v128) -> v128 {
    unsafe { simd_sub(a.as_i64x2(), b.as_i64x2()).v128() }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i64x2_sub as u64x2_sub;
3642
/// Lane-wise multiplication of two vectors of two 64-bit integers
/// (`i64x2.mul`; low 64 bits of each product are kept, so the
/// signed/unsigned forms coincide).
#[inline]
#[cfg_attr(test, assert_instr(i64x2.mul))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.mul"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_mul(a: v128, b: v128) -> v128 {
    unsafe { simd_mul(a.as_i64x2(), b.as_i64x2()).v128() }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i64x2_mul as u64x2_mul;
3655
/// Sign-extends the low two 32-bit lanes of each input to 64 bits and
/// multiplies them lane-wise (`i64x2.extmul_low_i32x4_s`).
#[inline]
#[cfg_attr(test, assert_instr(i64x2.extmul_low_i32x4_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.extmul_low_i32x4_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_extmul_low_i32x4(a: v128, b: v128) -> v128 {
    unsafe {
        // Widen lanes 0..=1 of each operand with sign extension, then multiply.
        let lhs = simd_cast::<simd::i32x2, simd::i64x2>(simd_shuffle!(
            a.as_i32x4(),
            a.as_i32x4(),
            [0, 1]
        ));
        let rhs = simd_cast::<simd::i32x2, simd::i64x2>(simd_shuffle!(
            b.as_i32x4(),
            b.as_i32x4(),
            [0, 1]
        ));
        simd_mul(lhs, rhs).v128()
    }
}
3680
/// Sign-extends the high two 32-bit lanes of each input to 64 bits and
/// multiplies them lane-wise (`i64x2.extmul_high_i32x4_s`).
#[inline]
#[cfg_attr(test, assert_instr(i64x2.extmul_high_i32x4_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.extmul_high_i32x4_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_extmul_high_i32x4(a: v128, b: v128) -> v128 {
    unsafe {
        // Widen lanes 2..=3 of each operand with sign extension, then multiply.
        let lhs = simd_cast::<simd::i32x2, simd::i64x2>(simd_shuffle!(
            a.as_i32x4(),
            a.as_i32x4(),
            [2, 3]
        ));
        let rhs = simd_cast::<simd::i32x2, simd::i64x2>(simd_shuffle!(
            b.as_i32x4(),
            b.as_i32x4(),
            [2, 3]
        ));
        simd_mul(lhs, rhs).v128()
    }
}
3705
/// Zero-extends the low two 32-bit lanes of each input to 64 bits and
/// multiplies them lane-wise (`i64x2.extmul_low_i32x4_u`).
#[inline]
#[cfg_attr(test, assert_instr(i64x2.extmul_low_i32x4_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.extmul_low_i32x4_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_extmul_low_u32x4(a: v128, b: v128) -> v128 {
    unsafe {
        // Widen lanes 0..=1 of each operand with zero extension, then multiply.
        let lhs = simd_cast::<simd::u32x2, simd::u64x2>(simd_shuffle!(
            a.as_u32x4(),
            a.as_u32x4(),
            [0, 1]
        ));
        let rhs = simd_cast::<simd::u32x2, simd::u64x2>(simd_shuffle!(
            b.as_u32x4(),
            b.as_u32x4(),
            [0, 1]
        ));
        simd_mul(lhs, rhs).v128()
    }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i64x2_extmul_low_u32x4 as u64x2_extmul_low_u32x4;
3733
/// Zero-extends the high two 32-bit lanes of each input to 64 bits and
/// multiplies them lane-wise (`i64x2.extmul_high_i32x4_u`).
#[inline]
#[cfg_attr(test, assert_instr(i64x2.extmul_high_i32x4_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.extmul_high_i32x4_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_extmul_high_u32x4(a: v128, b: v128) -> v128 {
    unsafe {
        // Widen lanes 2..=3 of each operand with zero extension, then multiply.
        let lhs = simd_cast::<simd::u32x2, simd::u64x2>(simd_shuffle!(
            a.as_u32x4(),
            a.as_u32x4(),
            [2, 3]
        ));
        let rhs = simd_cast::<simd::u32x2, simd::u64x2>(simd_shuffle!(
            b.as_u32x4(),
            b.as_u32x4(),
            [2, 3]
        ));
        simd_mul(lhs, rhs).v128()
    }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i64x2_extmul_high_u32x4 as u64x2_extmul_high_u32x4;
3761
/// Lane-wise rounding up (toward positive infinity) of four 32-bit floats.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.ceil))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.ceil"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_ceil(a: v128) -> v128 {
    unsafe { simd_ceil(a.as_f32x4()).v128() }
}
3771
/// Lane-wise rounding down (toward negative infinity) of four 32-bit floats.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.floor))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.floor"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_floor(a: v128) -> v128 {
    unsafe { simd_floor(a.as_f32x4()).v128() }
}
3781
/// Lane-wise rounding toward zero of four 32-bit floats.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.trunc))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.trunc"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_trunc(a: v128) -> v128 {
    unsafe { simd_trunc(a.as_f32x4()).v128() }
}
3792
/// Lane-wise rounding to the nearest integral value of four 32-bit floats,
/// via the LLVM wasm `f32x4.nearest` intrinsic (ties to even).
#[inline]
#[cfg_attr(test, assert_instr(f32x4.nearest))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.nearest"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_nearest(a: v128) -> v128 {
    unsafe { llvm_f32x4_nearest(a.as_f32x4()).v128() }
}
3803
/// Lane-wise absolute value of four 32-bit floats.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.abs))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.abs"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_abs(a: v128) -> v128 {
    unsafe { simd_fabs(a.as_f32x4()).v128() }
}
3814
/// Lane-wise negation of four 32-bit floats.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.neg))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.neg"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_neg(a: v128) -> v128 {
    unsafe { simd_neg(a.as_f32x4()).v128() }
}
3825
/// Lane-wise square root of four 32-bit floats.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.sqrt))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.sqrt"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_sqrt(a: v128) -> v128 {
    unsafe { simd_fsqrt(a.as_f32x4()).v128() }
}
3836
/// Lane-wise addition of two vectors of four 32-bit floats.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.add))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.add"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_add(a: v128, b: v128) -> v128 {
    unsafe { simd_add(a.as_f32x4(), b.as_f32x4()).v128() }
}
3847
/// Lane-wise subtraction of two vectors of four 32-bit floats.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.sub))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.sub"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_sub(a: v128, b: v128) -> v128 {
    unsafe { simd_sub(a.as_f32x4(), b.as_f32x4()).v128() }
}
3858
/// Lane-wise multiplication of two vectors of four 32-bit floats.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.mul))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.mul"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_mul(a: v128, b: v128) -> v128 {
    unsafe { simd_mul(a.as_f32x4(), b.as_f32x4()).v128() }
}
3869
/// Lane-wise division of two vectors of four 32-bit floats (`a / b`).
#[inline]
#[cfg_attr(test, assert_instr(f32x4.div))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.div"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_div(a: v128, b: v128) -> v128 {
    unsafe { simd_div(a.as_f32x4(), b.as_f32x4()).v128() }
}
3880
/// Lane-wise minimum of two vectors of four 32-bit floats, via the LLVM
/// wasm `f32x4.min` intrinsic (NaN-propagating per the wasm spec).
#[inline]
#[cfg_attr(test, assert_instr(f32x4.min))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.min"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_min(a: v128, b: v128) -> v128 {
    unsafe { llvm_f32x4_min(a.as_f32x4(), b.as_f32x4()).v128() }
}
3891
/// Lane-wise maximum of two vectors of four 32-bit floats, via the LLVM
/// wasm `f32x4.max` intrinsic (NaN-propagating per the wasm spec).
#[inline]
#[cfg_attr(test, assert_instr(f32x4.max))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.max"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_max(a: v128, b: v128) -> v128 {
    unsafe { llvm_f32x4_max(a.as_f32x4(), b.as_f32x4()).v128() }
}
3902
/// Lane-wise pseudo-minimum of four 32-bit floats: per lane, returns
/// `b` if `b < a`, otherwise `a` (so NaN/zero handling follows the
/// comparison, unlike `f32x4_min`).
#[inline]
#[cfg_attr(test, assert_instr(f32x4.pmin))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.pmin"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_pmin(a: v128, b: v128) -> v128 {
    unsafe {
        simd_select::<simd::m32x4, simd::f32x4>(
            simd_lt(b.as_f32x4(), a.as_f32x4()),
            b.as_f32x4(),
            a.as_f32x4(),
        )
        .v128()
    }
}
3919
3920#[inline]
3922#[cfg_attr(test, assert_instr(f32x4.pmax))]
3923#[target_feature(enable = "simd128")]
3924#[doc(alias("f32x4.pmax"))]
3925#[stable(feature = "wasm_simd", since = "1.54.0")]
3926pub fn f32x4_pmax(a: v128, b: v128) -> v128 {
3927 unsafe {
3928 simd_select::<simd::m32x4, simd::f32x4>(
3929 simd_lt(a.as_f32x4(), b.as_f32x4()),
3930 b.as_f32x4(),
3931 a.as_f32x4(),
3932 )
3933 .v128()
3934 }
3935}
3936
/// Lane-wise rounding to the smallest integral value not smaller than the
/// input, for a 128-bit vector of two 64-bit floating point numbers.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.ceil))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.ceil"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_ceil(a: v128) -> v128 {
    unsafe { simd_ceil(a.as_f64x2()).v128() }
}

/// Lane-wise rounding to the largest integral value not greater than the
/// input, for a 128-bit vector of two 64-bit floating point numbers.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.floor))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.floor"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_floor(a: v128) -> v128 {
    unsafe { simd_floor(a.as_f64x2()).v128() }
}

/// Lane-wise rounding toward zero to the nearest integral value, for a
/// 128-bit vector of two 64-bit floating point numbers.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.trunc))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.trunc"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_trunc(a: v128) -> v128 {
    unsafe { simd_trunc(a.as_f64x2()).v128() }
}

/// Lane-wise rounding to the nearest integral value (ties round to even,
/// per the wasm `f64x2.nearest` instruction), for a 128-bit vector of two
/// 64-bit floating point numbers. Uses an LLVM intrinsic so the exact
/// round-to-even semantics are preserved.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.nearest))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.nearest"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_nearest(a: v128) -> v128 {
    unsafe { llvm_f64x2_nearest(a.as_f64x2()).v128() }
}
3978
/// Lane-wise absolute value of a 128-bit vector interpreted as two 64-bit
/// floating point numbers.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.abs))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.abs"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_abs(a: v128) -> v128 {
    unsafe { simd_fabs(a.as_f64x2()).v128() }
}

/// Lane-wise negation of a 128-bit vector interpreted as two 64-bit floating
/// point numbers.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.neg))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.neg"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_neg(a: v128) -> v128 {
    unsafe { simd_neg(a.as_f64x2()).v128() }
}

/// Lane-wise square root of a 128-bit vector interpreted as two 64-bit
/// floating point numbers.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.sqrt))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.sqrt"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_sqrt(a: v128) -> v128 {
    unsafe { simd_fsqrt(a.as_f64x2()).v128() }
}
4011
/// Lane-wise addition of two 128-bit vectors interpreted as two 64-bit
/// floating point numbers.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.add))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.add"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_add(a: v128, b: v128) -> v128 {
    unsafe { simd_add(a.as_f64x2(), b.as_f64x2()).v128() }
}

/// Lane-wise subtraction of two 128-bit vectors interpreted as two 64-bit
/// floating point numbers.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.sub))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.sub"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_sub(a: v128, b: v128) -> v128 {
    unsafe { simd_sub(a.as_f64x2(), b.as_f64x2()).v128() }
}

/// Lane-wise multiplication of two 128-bit vectors interpreted as two 64-bit
/// floating point numbers.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.mul))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.mul"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_mul(a: v128, b: v128) -> v128 {
    unsafe { simd_mul(a.as_f64x2(), b.as_f64x2()).v128() }
}

/// Lane-wise division of two 128-bit vectors interpreted as two 64-bit
/// floating point numbers.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.div))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.div"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_div(a: v128, b: v128) -> v128 {
    unsafe { simd_div(a.as_f64x2(), b.as_f64x2()).v128() }
}
4055
/// Lane-wise minimum of two 128-bit vectors interpreted as two 64-bit
/// floating point numbers.
///
/// Implemented via an LLVM intrinsic so the IEEE `min` semantics of the
/// `f64x2.min` instruction (NaN-propagating) are preserved exactly.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.min))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.min"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_min(a: v128, b: v128) -> v128 {
    unsafe { llvm_f64x2_min(a.as_f64x2(), b.as_f64x2()).v128() }
}

/// Lane-wise maximum of two 128-bit vectors interpreted as two 64-bit
/// floating point numbers.
///
/// Implemented via an LLVM intrinsic so the IEEE `max` semantics of the
/// `f64x2.max` instruction (NaN-propagating) are preserved exactly.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.max))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.max"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_max(a: v128, b: v128) -> v128 {
    unsafe { llvm_f64x2_max(a.as_f64x2(), b.as_f64x2()).v128() }
}
4077
/// Lane-wise pseudo-minimum of two 128-bit vectors interpreted as two 64-bit
/// floating point numbers: each lane is `if b < a { b } else { a }`.
///
/// Unlike [`f64x2_min`], this matches the `f64x2.pmin` instruction, which does
/// not canonicalize NaNs. NOTE: the exact `simd_select(simd_lt(b, a), b, a)`
/// shape is what LLVM pattern-matches into a single `f64x2.pmin` — do not
/// restructure it.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.pmin))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.pmin"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_pmin(a: v128, b: v128) -> v128 {
    unsafe {
        simd_select::<simd::m64x2, simd::f64x2>(
            simd_lt(b.as_f64x2(), a.as_f64x2()),
            b.as_f64x2(),
            a.as_f64x2(),
        )
        .v128()
    }
}

/// Lane-wise pseudo-maximum of two 128-bit vectors interpreted as two 64-bit
/// floating point numbers: each lane is `if a < b { b } else { a }`.
///
/// Unlike [`f64x2_max`], this matches the `f64x2.pmax` instruction; see
/// [`f64x2_pmin`] for why the select/compare shape must stay as written.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.pmax))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.pmax"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_pmax(a: v128, b: v128) -> v128 {
    unsafe {
        simd_select::<simd::m64x2, simd::f64x2>(
            simd_lt(a.as_f64x2(), b.as_f64x2()),
            b.as_f64x2(),
            a.as_f64x2(),
        )
        .v128()
    }
}
4111
/// Saturating conversion of the four 32-bit float lanes into four signed
/// 32-bit integers.
///
/// `simd_as` implements the `i32x4.trunc_sat_f32x4_s` semantics: values are
/// truncated toward zero, out-of-range values saturate to `i32::MIN`/`i32::MAX`,
/// and NaN becomes 0.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.trunc_sat_f32x4_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.trunc_sat_f32x4_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_trunc_sat_f32x4(a: v128) -> v128 {
    unsafe { simd_as::<simd::f32x4, simd::i32x4>(a.as_f32x4()).v128() }
}

/// Saturating conversion of the four 32-bit float lanes into four unsigned
/// 32-bit integers.
///
/// `simd_as` implements the `i32x4.trunc_sat_f32x4_u` semantics: values are
/// truncated toward zero, out-of-range values saturate to `0`/`u32::MAX`,
/// and NaN becomes 0.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.trunc_sat_f32x4_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.trunc_sat_f32x4_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u32x4_trunc_sat_f32x4(a: v128) -> v128 {
    unsafe { simd_as::<simd::f32x4, simd::u32x4>(a.as_f32x4()).v128() }
}
4139
/// Converts the four signed 32-bit integer lanes into four 32-bit floating
/// point numbers.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.convert_i32x4_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.convert_i32x4_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_convert_i32x4(a: v128) -> v128 {
    unsafe { simd_cast::<_, simd::f32x4>(a.as_i32x4()).v128() }
}

/// Converts the four unsigned 32-bit integer lanes into four 32-bit floating
/// point numbers.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.convert_i32x4_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.convert_i32x4_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_convert_u32x4(a: v128) -> v128 {
    unsafe { simd_cast::<_, simd::f32x4>(a.as_u32x4()).v128() }
}
4161
/// Saturating conversion of the two 64-bit float lanes into two signed 32-bit
/// integers, placed in the low two lanes of the result; the high two lanes
/// are zero.
///
/// The conversion truncates toward zero and saturates (`simd_as` semantics);
/// the shuffle with `i32x2::ZERO` supplies the zeroed upper half.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.trunc_sat_f64x2_s_zero))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.trunc_sat_f64x2_s_zero"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_trunc_sat_f64x2_zero(a: v128) -> v128 {
    let ret: simd::i32x4 = unsafe {
        simd_shuffle!(
            simd_as::<simd::f64x2, simd::i32x2>(a.as_f64x2()),
            simd::i32x2::ZERO,
            [0, 1, 2, 3],
        )
    };
    ret.v128()
}

/// Saturating conversion of the two 64-bit float lanes into two unsigned
/// 32-bit integers, placed in the low two lanes of the result; the high two
/// lanes are zero.
///
/// The conversion truncates toward zero and saturates (`simd_as` semantics);
/// the shuffle with `u32x2::ZERO` supplies the zeroed upper half.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.trunc_sat_f64x2_u_zero))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.trunc_sat_f64x2_u_zero"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u32x4_trunc_sat_f64x2_zero(a: v128) -> v128 {
    let ret: simd::u32x4 = unsafe {
        simd_shuffle!(
            simd_as::<simd::f64x2, simd::u32x2>(a.as_f64x2()),
            simd::u32x2::ZERO,
            [0, 1, 2, 3],
        )
    };
    ret.v128()
}
4209
/// Converts the low two signed 32-bit integer lanes into two 64-bit floating
/// point numbers (the high two lanes are ignored).
#[inline]
#[cfg_attr(test, assert_instr(f64x2.convert_low_i32x4_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.convert_low_i32x4_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_convert_low_i32x4(a: v128) -> v128 {
    unsafe {
        // Shuffle extracts lanes 0 and 1, then each i32 widens to f64.
        simd_cast::<simd::i32x2, simd::f64x2>(simd_shuffle!(a.as_i32x4(), a.as_i32x4(), [0, 1],))
            .v128()
    }
}

/// Converts the low two unsigned 32-bit integer lanes into two 64-bit
/// floating point numbers (the high two lanes are ignored).
#[inline]
#[cfg_attr(test, assert_instr(f64x2.convert_low_i32x4_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.convert_low_i32x4_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_convert_low_u32x4(a: v128) -> v128 {
    unsafe {
        // Shuffle extracts lanes 0 and 1, then each u32 widens to f64.
        simd_cast::<simd::u32x2, simd::f64x2>(simd_shuffle!(a.as_u32x4(), a.as_u32x4(), [0, 1],))
            .v128()
    }
}
4235
/// Converts the two 64-bit float lanes into two 32-bit floating point
/// numbers, placed in the low two lanes of the result; the high two lanes
/// are zero.
///
/// The shuffle widens the input to an `f64x4` whose upper half is
/// `f64x2::ZERO`, so the narrowing cast yields zeros in the upper lanes.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.demote_f64x2_zero))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.demote_f64x2_zero"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_demote_f64x2_zero(a: v128) -> v128 {
    unsafe {
        simd_cast::<simd::f64x4, simd::f32x4>(simd_shuffle!(
            a.as_f64x2(),
            simd::f64x2::ZERO,
            [0, 1, 2, 3]
        ))
        .v128()
    }
}
4256
4257#[inline]
4260#[cfg_attr(test, assert_instr(f64x2.promote_low_f32x4))]
4261#[target_feature(enable = "simd128")]
4262#[doc(alias("f32x4.promote_low_f32x4"))]
4263#[stable(feature = "wasm_simd", since = "1.54.0")]
4264pub fn f64x2_promote_low_f32x4(a: v128) -> v128 {
4265 unsafe {
4266 simd_cast::<simd::f32x2, simd::f64x2>(simd_shuffle!(a.as_f32x4(), a.as_f32x4(), [0, 1]))
4267 .v128()
4268 }
4269}
4270
4271#[cfg(test)]
4272mod tests {
4273 use super::*;
4274 use core::ops::{Add, Div, Mul, Neg, Sub};
4275
4276 use std::fmt::Debug;
4277 use std::mem::transmute;
4278 use std::num::Wrapping;
4279 use std::prelude::v1::*;
4280
4281 const _C1: v128 = i8x16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
4282 const _C2: v128 = u8x16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
4283 const _C3: v128 = i16x8(0, 1, 2, 3, 4, 5, 6, 7);
4284 const _C4: v128 = u16x8(0, 1, 2, 3, 4, 5, 6, 7);
4285 const _C5: v128 = i32x4(0, 1, 2, 3);
4286 const _C6: v128 = u32x4(0, 1, 2, 3);
4287 const _C7: v128 = i64x2(0, 1);
4288 const _C8: v128 = u64x2(0, 1);
4289 const _C9: v128 = f32x4(0.0, 1.0, 2.0, 3.0);
4290 const _C10: v128 = f64x2(0.0, 1.0);
4291
4292 fn compare_bytes(a: v128, b: v128) {
4293 let a: [u8; 16] = unsafe { transmute(a) };
4294 let b: [u8; 16] = unsafe { transmute(b) };
4295 assert_eq!(a, b);
4296 }
4297
4298 #[test]
4299 fn test_load() {
4300 unsafe {
4301 let arr: [i32; 4] = [0, 1, 2, 3];
4302 let vec = v128_load(arr.as_ptr() as *const v128);
4303 compare_bytes(vec, i32x4(0, 1, 2, 3));
4304 }
4305 }
4306
4307 #[test]
4308 fn test_load_extend() {
4309 unsafe {
4310 let arr: [i8; 8] = [-3, -2, -1, 0, 1, 2, 3, 4];
4311 let vec = i16x8_load_extend_i8x8(arr.as_ptr());
4312 compare_bytes(vec, i16x8(-3, -2, -1, 0, 1, 2, 3, 4));
4313 let vec = i16x8_load_extend_u8x8(arr.as_ptr() as *const u8);
4314 compare_bytes(vec, i16x8(253, 254, 255, 0, 1, 2, 3, 4));
4315
4316 let arr: [i16; 4] = [-1, 0, 1, 2];
4317 let vec = i32x4_load_extend_i16x4(arr.as_ptr());
4318 compare_bytes(vec, i32x4(-1, 0, 1, 2));
4319 let vec = i32x4_load_extend_u16x4(arr.as_ptr() as *const u16);
4320 compare_bytes(vec, i32x4(65535, 0, 1, 2));
4321
4322 let arr: [i32; 2] = [-1, 1];
4323 let vec = i64x2_load_extend_i32x2(arr.as_ptr());
4324 compare_bytes(vec, i64x2(-1, 1));
4325 let vec = i64x2_load_extend_u32x2(arr.as_ptr() as *const u32);
4326 compare_bytes(vec, i64x2(u32::max_value().into(), 1));
4327 }
4328 }
4329
4330 #[test]
4331 fn test_load_splat() {
4332 unsafe {
4333 compare_bytes(v128_load8_splat(&8), i8x16_splat(8));
4334 compare_bytes(v128_load16_splat(&9), i16x8_splat(9));
4335 compare_bytes(v128_load32_splat(&10), i32x4_splat(10));
4336 compare_bytes(v128_load64_splat(&11), i64x2_splat(11));
4337 }
4338 }
4339
4340 #[test]
4341 fn test_load_zero() {
4342 unsafe {
4343 compare_bytes(v128_load32_zero(&10), i32x4(10, 0, 0, 0));
4344 compare_bytes(v128_load64_zero(&11), i64x2(11, 0));
4345 }
4346 }
4347
4348 #[test]
4349 fn test_store() {
4350 unsafe {
4351 let mut spot = i8x16_splat(0);
4352 v128_store(&mut spot, i8x16_splat(1));
4353 compare_bytes(spot, i8x16_splat(1));
4354 }
4355 }
4356
4357 #[test]
4358 fn test_load_lane() {
4359 unsafe {
4360 let zero = i8x16_splat(0);
4361 compare_bytes(
4362 v128_load8_lane::<2>(zero, &1),
4363 i8x16_replace_lane::<2>(zero, 1),
4364 );
4365
4366 compare_bytes(
4367 v128_load16_lane::<2>(zero, &1),
4368 i16x8_replace_lane::<2>(zero, 1),
4369 );
4370
4371 compare_bytes(
4372 v128_load32_lane::<2>(zero, &1),
4373 i32x4_replace_lane::<2>(zero, 1),
4374 );
4375
4376 compare_bytes(
4377 v128_load64_lane::<1>(zero, &1),
4378 i64x2_replace_lane::<1>(zero, 1),
4379 );
4380 }
4381 }
4382
4383 #[test]
4384 fn test_store_lane() {
4385 unsafe {
4386 let mut spot = 0;
4387 let zero = i8x16_splat(0);
4388 v128_store8_lane::<5>(i8x16_replace_lane::<5>(zero, 7), &mut spot);
4389 assert_eq!(spot, 7);
4390
4391 let mut spot = 0;
4392 v128_store16_lane::<5>(i16x8_replace_lane::<5>(zero, 7), &mut spot);
4393 assert_eq!(spot, 7);
4394
4395 let mut spot = 0;
4396 v128_store32_lane::<3>(i32x4_replace_lane::<3>(zero, 7), &mut spot);
4397 assert_eq!(spot, 7);
4398
4399 let mut spot = 0;
4400 v128_store64_lane::<0>(i64x2_replace_lane::<0>(zero, 7), &mut spot);
4401 assert_eq!(spot, 7);
4402 }
4403 }
4404
4405 #[test]
4406 fn test_i8x16() {
4407 const A: v128 = super::i8x16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
4408 compare_bytes(A, A);
4409
4410 const _: v128 = i16x8(0, 1, 2, 3, 4, 5, 6, 7);
4411 const _: v128 = i32x4(0, 1, 2, 3);
4412 const _: v128 = i64x2(0, 1);
4413 const _: v128 = f32x4(0., 1., 2., 3.);
4414 const _: v128 = f64x2(0., 1.);
4415
4416 let bytes: [i16; 8] = unsafe { mem::transmute(i16x8(-1, -2, -3, -4, -5, -6, -7, -8)) };
4417 assert_eq!(bytes, [-1, -2, -3, -4, -5, -6, -7, -8]);
4418 let bytes: [i8; 16] = unsafe {
4419 mem::transmute(i8x16(
4420 -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16,
4421 ))
4422 };
4423 assert_eq!(
4424 bytes,
4425 [
4426 -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16
4427 ]
4428 );
4429 }
4430
4431 #[test]
4432 fn test_shuffle() {
4433 let vec_a = i8x16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
4434 let vec_b = i8x16(
4435 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
4436 );
4437
4438 let vec_r = i8x16_shuffle::<0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30>(
4439 vec_a, vec_b,
4440 );
4441 let vec_e = i8x16(0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
4442 compare_bytes(vec_r, vec_e);
4443
4444 let vec_a = i16x8(0, 1, 2, 3, 4, 5, 6, 7);
4445 let vec_b = i16x8(8, 9, 10, 11, 12, 13, 14, 15);
4446 let vec_r = i16x8_shuffle::<0, 8, 2, 10, 4, 12, 6, 14>(vec_a, vec_b);
4447 let vec_e = i16x8(0, 8, 2, 10, 4, 12, 6, 14);
4448 compare_bytes(vec_r, vec_e);
4449
4450 let vec_a = i32x4(0, 1, 2, 3);
4451 let vec_b = i32x4(4, 5, 6, 7);
4452 let vec_r = i32x4_shuffle::<0, 4, 2, 6>(vec_a, vec_b);
4453 let vec_e = i32x4(0, 4, 2, 6);
4454 compare_bytes(vec_r, vec_e);
4455
4456 let vec_a = i64x2(0, 1);
4457 let vec_b = i64x2(2, 3);
4458 let vec_r = i64x2_shuffle::<0, 2>(vec_a, vec_b);
4459 let vec_e = i64x2(0, 2);
4460 compare_bytes(vec_r, vec_e);
4461 }
4462
4463 macro_rules! test_extract {
4465 (
4466 name: $test_id:ident,
4467 extract: $extract:ident,
4468 replace: $replace:ident,
4469 elem: $elem:ty,
4470 count: $count:expr,
4471 indices: [$($idx:expr),*],
4472 ) => {
4473 #[test]
4474 fn $test_id() {
4475 unsafe {
4476 let arr: [$elem; $count] = [123 as $elem; $count];
4477 let vec: v128 = transmute(arr);
4478 $(
4479 assert_eq!($extract::<$idx>(vec), 123 as $elem);
4480 )*
4481
4482 let arr: [$elem; $count] = [$($idx as $elem),*];
4485 let vec: v128 = transmute(arr);
4486 $(
4487 assert_eq!($extract::<$idx>(vec), $idx as $elem);
4488
4489 let tmp = $replace::<$idx>(vec, 124 as $elem);
4490 assert_eq!($extract::<$idx>(tmp), 124 as $elem);
4491 )*
4492 }
4493 }
4494 }
4495 }
4496
4497 test_extract! {
4498 name: test_i8x16_extract_replace,
4499 extract: i8x16_extract_lane,
4500 replace: i8x16_replace_lane,
4501 elem: i8,
4502 count: 16,
4503 indices: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
4504 }
4505 test_extract! {
4506 name: test_i16x8_extract_replace,
4507 extract: i16x8_extract_lane,
4508 replace: i16x8_replace_lane,
4509 elem: i16,
4510 count: 8,
4511 indices: [0, 1, 2, 3, 4, 5, 6, 7],
4512 }
4513 test_extract! {
4514 name: test_i32x4_extract_replace,
4515 extract: i32x4_extract_lane,
4516 replace: i32x4_replace_lane,
4517 elem: i32,
4518 count: 4,
4519 indices: [0, 1, 2, 3],
4520 }
4521 test_extract! {
4522 name: test_i64x2_extract_replace,
4523 extract: i64x2_extract_lane,
4524 replace: i64x2_replace_lane,
4525 elem: i64,
4526 count: 2,
4527 indices: [0, 1],
4528 }
4529 test_extract! {
4530 name: test_f32x4_extract_replace,
4531 extract: f32x4_extract_lane,
4532 replace: f32x4_replace_lane,
4533 elem: f32,
4534 count: 4,
4535 indices: [0, 1, 2, 3],
4536 }
4537 test_extract! {
4538 name: test_f64x2_extract_replace,
4539 extract: f64x2_extract_lane,
4540 replace: f64x2_replace_lane,
4541 elem: f64,
4542 count: 2,
4543 indices: [0, 1],
4544 }
4545
4546 #[test]
4547 #[rustfmt::skip]
4548 fn test_swizzle() {
4549 compare_bytes(
4550 i8x16_swizzle(
4551 i32x4(1, 2, 3, 4),
4552 i8x16(
4553 32, 31, 30, 29,
4554 0, 1, 2, 3,
4555 12, 13, 14, 15,
4556 0, 4, 8, 12),
4557 ),
4558 i32x4(0, 1, 4, 0x04030201),
4559 );
4560 }
4561
4562 macro_rules! test_splat {
4563 ($test_id:ident: $val:expr => $($vals:expr),*) => {
4564 #[test]
4565 fn $test_id() {
4566 let a = super::$test_id($val);
4567 let b = u8x16($($vals as u8),*);
4568 compare_bytes(a, b);
4569 }
4570 }
4571 }
4572
4573 mod splats {
4574 use super::*;
4575 test_splat!(i8x16_splat: 42 => 42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42);
4576 test_splat!(i16x8_splat: 42 => 42, 0, 42, 0, 42, 0, 42, 0, 42, 0, 42, 0, 42, 0, 42, 0);
4577 test_splat!(i32x4_splat: 42 => 42, 0, 0, 0, 42, 0, 0, 0, 42, 0, 0, 0, 42, 0, 0, 0);
4578 test_splat!(i64x2_splat: 42 => 42, 0, 0, 0, 0, 0, 0, 0, 42, 0, 0, 0, 0, 0, 0, 0);
4579 test_splat!(f32x4_splat: 42. => 0, 0, 40, 66, 0, 0, 40, 66, 0, 0, 40, 66, 0, 0, 40, 66);
4580 test_splat!(f64x2_splat: 42. => 0, 0, 0, 0, 0, 0, 69, 64, 0, 0, 0, 0, 0, 0, 69, 64);
4581 }
4582
4583 #[test]
4584 fn test_bitmasks() {
4585 let zero = i8x16_splat(0);
4586 let ones = i8x16_splat(!0);
4587
4588 assert_eq!(i8x16_bitmask(zero), 0);
4589 assert_eq!(i8x16_bitmask(ones), 0xffff);
4590 assert_eq!(i8x16_bitmask(i8x16_splat(i8::MAX)), 0);
4591 assert_eq!(i8x16_bitmask(i8x16_splat(i8::MIN)), 0xffff);
4592 assert_eq!(i8x16_bitmask(i8x16_replace_lane::<1>(zero, -1)), 0b10);
4593
4594 assert_eq!(i16x8_bitmask(zero), 0);
4595 assert_eq!(i16x8_bitmask(ones), 0xff);
4596 assert_eq!(i16x8_bitmask(i16x8_splat(i16::MAX)), 0);
4597 assert_eq!(i16x8_bitmask(i16x8_splat(i16::MIN)), 0xff);
4598 assert_eq!(i16x8_bitmask(i16x8_replace_lane::<1>(zero, -1)), 0b10);
4599
4600 assert_eq!(i32x4_bitmask(zero), 0);
4601 assert_eq!(i32x4_bitmask(ones), 0b1111);
4602 assert_eq!(i32x4_bitmask(i32x4_splat(i32::MAX)), 0);
4603 assert_eq!(i32x4_bitmask(i32x4_splat(i32::MIN)), 0b1111);
4604 assert_eq!(i32x4_bitmask(i32x4_replace_lane::<1>(zero, -1)), 0b10);
4605
4606 assert_eq!(i64x2_bitmask(zero), 0);
4607 assert_eq!(i64x2_bitmask(ones), 0b11);
4608 assert_eq!(i64x2_bitmask(i64x2_splat(i64::MAX)), 0);
4609 assert_eq!(i64x2_bitmask(i64x2_splat(i64::MIN)), 0b11);
4610 assert_eq!(i64x2_bitmask(i64x2_replace_lane::<1>(zero, -1)), 0b10);
4611 }
4612
4613 #[test]
4614 fn test_narrow() {
4615 let zero = i8x16_splat(0);
4616 let ones = i8x16_splat(!0);
4617
4618 compare_bytes(i8x16_narrow_i16x8(zero, zero), zero);
4619 compare_bytes(u8x16_narrow_i16x8(zero, zero), zero);
4620 compare_bytes(i8x16_narrow_i16x8(ones, ones), ones);
4621 compare_bytes(u8x16_narrow_i16x8(ones, ones), zero);
4622
4623 compare_bytes(
4624 i8x16_narrow_i16x8(
4625 i16x8(
4626 0,
4627 1,
4628 2,
4629 -1,
4630 i8::MIN.into(),
4631 i8::MAX.into(),
4632 u8::MIN.into(),
4633 u8::MAX.into(),
4634 ),
4635 i16x8(
4636 i16::MIN,
4637 i16::MAX,
4638 u16::MIN as i16,
4639 u16::MAX as i16,
4640 0,
4641 0,
4642 0,
4643 0,
4644 ),
4645 ),
4646 i8x16(0, 1, 2, -1, -128, 127, 0, 127, -128, 127, 0, -1, 0, 0, 0, 0),
4647 );
4648
4649 compare_bytes(
4650 u8x16_narrow_i16x8(
4651 i16x8(
4652 0,
4653 1,
4654 2,
4655 -1,
4656 i8::MIN.into(),
4657 i8::MAX.into(),
4658 u8::MIN.into(),
4659 u8::MAX.into(),
4660 ),
4661 i16x8(
4662 i16::MIN,
4663 i16::MAX,
4664 u16::MIN as i16,
4665 u16::MAX as i16,
4666 0,
4667 0,
4668 0,
4669 0,
4670 ),
4671 ),
4672 i8x16(0, 1, 2, 0, 0, 127, 0, -1, 0, -1, 0, 0, 0, 0, 0, 0),
4673 );
4674
4675 compare_bytes(i16x8_narrow_i32x4(zero, zero), zero);
4676 compare_bytes(u16x8_narrow_i32x4(zero, zero), zero);
4677 compare_bytes(i16x8_narrow_i32x4(ones, ones), ones);
4678 compare_bytes(u16x8_narrow_i32x4(ones, ones), zero);
4679
4680 compare_bytes(
4681 i16x8_narrow_i32x4(
4682 i32x4(0, -1, i16::MIN.into(), i16::MAX.into()),
4683 i32x4(i32::MIN, i32::MAX, u32::MIN as i32, u32::MAX as i32),
4684 ),
4685 i16x8(0, -1, i16::MIN, i16::MAX, i16::MIN, i16::MAX, 0, -1),
4686 );
4687
4688 compare_bytes(
4689 u16x8_narrow_i32x4(
4690 i32x4(u16::MAX.into(), -1, i16::MIN.into(), i16::MAX.into()),
4691 i32x4(i32::MIN, i32::MAX, u32::MIN as i32, u32::MAX as i32),
4692 ),
4693 i16x8(-1, 0, 0, i16::MAX, 0, -1, 0, 0),
4694 );
4695 }
4696
4697 #[test]
4698 fn test_extend() {
4699 let zero = i8x16_splat(0);
4700 let ones = i8x16_splat(!0);
4701
4702 compare_bytes(i16x8_extend_low_i8x16(zero), zero);
4703 compare_bytes(i16x8_extend_high_i8x16(zero), zero);
4704 compare_bytes(i16x8_extend_low_u8x16(zero), zero);
4705 compare_bytes(i16x8_extend_high_u8x16(zero), zero);
4706 compare_bytes(i16x8_extend_low_i8x16(ones), ones);
4707 compare_bytes(i16x8_extend_high_i8x16(ones), ones);
4708 let halves = u16x8_splat(u8::MAX.into());
4709 compare_bytes(i16x8_extend_low_u8x16(ones), halves);
4710 compare_bytes(i16x8_extend_high_u8x16(ones), halves);
4711
4712 compare_bytes(i32x4_extend_low_i16x8(zero), zero);
4713 compare_bytes(i32x4_extend_high_i16x8(zero), zero);
4714 compare_bytes(i32x4_extend_low_u16x8(zero), zero);
4715 compare_bytes(i32x4_extend_high_u16x8(zero), zero);
4716 compare_bytes(i32x4_extend_low_i16x8(ones), ones);
4717 compare_bytes(i32x4_extend_high_i16x8(ones), ones);
4718 let halves = u32x4_splat(u16::MAX.into());
4719 compare_bytes(i32x4_extend_low_u16x8(ones), halves);
4720 compare_bytes(i32x4_extend_high_u16x8(ones), halves);
4721
4722 compare_bytes(i64x2_extend_low_i32x4(zero), zero);
4723 compare_bytes(i64x2_extend_high_i32x4(zero), zero);
4724 compare_bytes(i64x2_extend_low_u32x4(zero), zero);
4725 compare_bytes(i64x2_extend_high_u32x4(zero), zero);
4726 compare_bytes(i64x2_extend_low_i32x4(ones), ones);
4727 compare_bytes(i64x2_extend_high_i32x4(ones), ones);
4728 let halves = i64x2_splat(u32::MAX.into());
4729 compare_bytes(u64x2_extend_low_u32x4(ones), halves);
4730 compare_bytes(u64x2_extend_high_u32x4(ones), halves);
4731 }
4732
4733 #[test]
4734 fn test_dot() {
4735 let zero = i8x16_splat(0);
4736 let ones = i8x16_splat(!0);
4737 let two = i32x4_splat(2);
4738 compare_bytes(i32x4_dot_i16x8(zero, zero), zero);
4739 compare_bytes(i32x4_dot_i16x8(ones, ones), two);
4740 }
4741
4742 macro_rules! test_binop {
4743 (
4744 $($name:ident => {
4745 $([$($vec1:tt)*] ($op:ident | $f:ident) [$($vec2:tt)*],)*
4746 })*
4747 ) => ($(
4748 #[test]
4749 fn $name() {
4750 unsafe {
4751 $(
4752 let v1 = [$($vec1)*];
4753 let v2 = [$($vec2)*];
4754 let v1_v128: v128 = mem::transmute(v1);
4755 let v2_v128: v128 = mem::transmute(v2);
4756 let v3_v128 = super::$f(v1_v128, v2_v128);
4757 let mut v3 = [$($vec1)*];
4758 let _ignore = v3;
4759 v3 = mem::transmute(v3_v128);
4760
4761 for (i, actual) in v3.iter().enumerate() {
4762 let expected = v1[i].$op(v2[i]);
4763 assert_eq!(*actual, expected);
4764 }
4765 )*
4766 }
4767 }
4768 )*)
4769 }
4770
4771 macro_rules! test_unop {
4772 (
4773 $($name:ident => {
4774 $(($op:ident | $f:ident) [$($vec1:tt)*],)*
4775 })*
4776 ) => ($(
4777 #[test]
4778 fn $name() {
4779 unsafe {
4780 $(
4781 let v1 = [$($vec1)*];
4782 let v1_v128: v128 = mem::transmute(v1);
4783 let v2_v128 = super::$f(v1_v128);
4784 let mut v2 = [$($vec1)*];
4785 let _ignore = v2;
4786 v2 = mem::transmute(v2_v128);
4787
4788 for (i, actual) in v2.iter().enumerate() {
4789 let expected = v1[i].$op();
4790 assert_eq!(*actual, expected);
4791 }
4792 )*
4793 }
4794 }
4795 )*)
4796 }
4797
4798 trait Avgr: Sized {
4799 fn avgr(self, other: Self) -> Self;
4800 }
4801
4802 macro_rules! impl_avgr {
4803 ($($i:ident)*) => ($(impl Avgr for $i {
4804 fn avgr(self, other: Self) -> Self {
4805 ((self as u64 + other as u64 + 1) / 2) as $i
4806 }
4807 })*)
4808 }
4809
4810 impl_avgr!(u8 u16);
4811
4812 test_binop! {
4813 test_i8x16_add => {
4814 [0i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
4815 (wrapping_add | i8x16_add)
4816 [1i8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
4817
4818 [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
4819 (wrapping_add | i8x16_add)
4820 [-2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -18],
4821
4822 [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
4823 (wrapping_add | i8x16_add)
4824 [127, -44, 43, 126, 4, 2, 9, -3, -59, -43, 39, -69, 79, -3, 9, -24],
4825 }
4826
4827 test_i8x16_add_sat_s => {
4828 [0i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
4829 (saturating_add | i8x16_add_sat)
4830 [1i8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
4831
4832 [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
4833 (saturating_add | i8x16_add_sat)
4834 [-2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -18],
4835
4836 [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
4837 (saturating_add | i8x16_add_sat)
4838 [127, -44, 43, 126, 4, 2, 9, -3, -59, -43, 39, -69, 79, -3, 9, -24],
4839 }
4840
4841 test_i8x16_add_sat_u => {
4842 [0u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
4843 (saturating_add | u8x16_add_sat)
4844 [1u8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
4845
4846 [1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
4847 (saturating_add | u8x16_add_sat)
4848 [255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, 240],
4849
4850 [1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
4851 (saturating_add | u8x16_add_sat)
4852 [127, -44i8 as u8, 43, 126, 4, 2, 9, -3i8 as u8, -59i8 as u8, -43i8 as u8, 39, -69i8 as u8, 79, -3i8 as u8, 9, -24i8 as u8],
4853 }
4854
4855 test_i8x16_sub => {
4856 [0i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
4857 (wrapping_sub | i8x16_sub)
4858 [1i8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
4859
4860 [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
4861 (wrapping_sub | i8x16_sub)
4862 [-2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -18],
4863
4864 [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
4865 (wrapping_sub | i8x16_sub)
4866 [-127, -44, 43, 126, 4, 2, 9, -3, -59, -43, 39, -69, 79, -3, 4, 8],
4867 }
4868
4869 test_i8x16_sub_sat_s => {
4870 [0i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
4871 (saturating_sub | i8x16_sub_sat)
4872 [1i8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
4873
4874 [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
4875 (saturating_sub | i8x16_sub_sat)
4876 [-2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -18],
4877
4878 [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
4879 (saturating_sub | i8x16_sub_sat)
4880 [-127, -44, 43, 126, 4, 2, 9, -3, -59, -43, 39, -69, 79, -3, 4, 8],
4881 }
4882
4883 test_i8x16_sub_sat_u => {
4884 [0u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
4885 (saturating_sub | u8x16_sub_sat)
4886 [1u8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
4887
4888 [1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
4889 (saturating_sub | u8x16_sub_sat)
4890 [255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, 240],
4891
4892 [1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
4893 (saturating_sub | u8x16_sub_sat)
4894 [127, -44i8 as u8, 43, 126, 4, 2, 9, -3i8 as u8, -59i8 as u8, -43i8 as u8, 39, -69i8 as u8, 79, -3i8 as u8, 9, -24i8 as u8],
4895 }
4896
4897 test_i8x16_min_s => {
4898 [0i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
4899 (min | i8x16_min)
4900 [1i8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
4901
4902 [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
4903 (min | i8x16_min)
4904 [-2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -18],
4905
4906 [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
4907 (min | i8x16_min)
4908 [-127, -44, 43, 126, 4, 2, 9, -3, -59, -43, 39, -69, 79, -3, 4, 8],
4909 }
4910
4911 test_i8x16_min_u => {
4912 [0u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
4913 (min | u8x16_min)
4914 [1u8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
4915
4916 [1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
4917 (min | u8x16_min)
4918 [255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, 240],
4919
4920 [1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
4921 (min | u8x16_min)
4922 [127, -44i8 as u8, 43, 126, 4, 2, 9, -3i8 as u8, -59i8 as u8, -43i8 as u8, 39, -69i8 as u8, 79, -3i8 as u8, 9, -24i8 as u8],
4923 }
4924
4925 test_i8x16_max_s => {
4926 [0i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
4927 (max | i8x16_max)
4928 [1i8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
4929
4930 [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
4931 (max | i8x16_max)
4932 [-2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -18],
4933
4934 [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
4935 (max | i8x16_max)
4936 [-127, -44, 43, 126, 4, 2, 9, -3, -59, -43, 39, -69, 79, -3, 4, 8],
4937 }
4938
4939 test_i8x16_max_u => {
4940 [0u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
4941 (max | u8x16_max)
4942 [1u8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
4943
4944 [1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
4945 (max | u8x16_max)
4946 [255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, 240],
4947
4948 [1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
4949 (max | u8x16_max)
4950 [127, -44i8 as u8, 43, 126, 4, 2, 9, -3i8 as u8, -59i8 as u8, -43i8 as u8, 39, -69i8 as u8, 79, -3i8 as u8, 9, -24i8 as u8],
4951 }
4952
4953 test_i8x16_avgr_u => {
4954 [0u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
4955 (avgr | u8x16_avgr)
4956 [1u8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
4957
4958 [1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
4959 (avgr | u8x16_avgr)
4960 [255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, 240],
4961
4962 [1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
4963 (avgr | u8x16_avgr)
4964 [127, -44i8 as u8, 43, 126, 4, 2, 9, -3i8 as u8, -59i8 as u8, -43i8 as u8, 39, -69i8 as u8, 79, -3i8 as u8, 9, -24i8 as u8],
4965 }
4966
4967 test_i16x8_add => {
4968 [0i16, 0, 0, 0, 0, 0, 0, 0]
4969 (wrapping_add | i16x8_add)
4970 [1i16, 1, 1, 1, 1, 1, 1, 1],
4971
4972 [1i16, 2, 3, 4, 5, 6, 7, 8]
4973 (wrapping_add | i16x8_add)
4974 [32767, 8, -2494,-4, 4882, -4, 848, 3830],
4975 }
4976
4977 test_i16x8_add_sat_s => {
4978 [0i16, 0, 0, 0, 0, 0, 0, 0]
4979 (saturating_add | i16x8_add_sat)
4980 [1i16, 1, 1, 1, 1, 1, 1, 1],
4981
4982 [1i16, 2, 3, 4, 5, 6, 7, 8]
4983 (saturating_add | i16x8_add_sat)
4984 [32767, 8, -2494,-4, 4882, -4, 848, 3830],
4985 }
4986
4987 test_i16x8_add_sat_u => {
4988 [0u16, 0, 0, 0, 0, 0, 0, 0]
4989 (saturating_add | u16x8_add_sat)
4990 [1u16, 1, 1, 1, 1, 1, 1, 1],
4991
4992 [1u16, 2, 3, 4, 5, 6, 7, 8]
4993 (saturating_add | u16x8_add_sat)
4994 [32767, 8, -2494i16 as u16,-4i16 as u16, 4882, -4i16 as u16, 848, 3830],
4995 }
4996
4997 test_i16x8_sub => {
4998 [0i16, 0, 0, 0, 0, 0, 0, 0]
4999 (wrapping_sub | i16x8_sub)
5000 [1i16, 1, 1, 1, 1, 1, 1, 1],
5001
5002 [1i16, 2, 3, 4, 5, 6, 7, 8]
5003 (wrapping_sub | i16x8_sub)
5004 [32767, 8, -2494,-4, 4882, -4, 848, 3830],
5005 }
5006
5007 test_i16x8_sub_sat_s => {
5008 [0i16, 0, 0, 0, 0, 0, 0, 0]
5009 (saturating_sub | i16x8_sub_sat)
5010 [1i16, 1, 1, 1, 1, 1, 1, 1],
5011
5012 [1i16, 2, 3, 4, 5, 6, 7, 8]
5013 (saturating_sub | i16x8_sub_sat)
5014 [32767, 8, -2494,-4, 4882, -4, 848, 3830],
5015 }
5016
5017 test_i16x8_sub_sat_u => {
5018 [0u16, 0, 0, 0, 0, 0, 0, 0]
5019 (saturating_sub | u16x8_sub_sat)
5020 [1u16, 1, 1, 1, 1, 1, 1, 1],
5021
5022 [1u16, 2, 3, 4, 5, 6, 7, 8]
5023 (saturating_sub | u16x8_sub_sat)
5024 [32767, 8, -2494i16 as u16,-4i16 as u16, 4882, -4i16 as u16, 848, 3830],
5025 }
5026
5027 test_i16x8_mul => {
5028 [0i16, 0, 0, 0, 0, 0, 0, 0]
5029 (wrapping_mul | i16x8_mul)
5030 [1i16, 1, 1, 1, 1, 1, 1, 1],
5031
5032 [1i16, 2, 3, 4, 5, 6, 7, 8]
5033 (wrapping_mul | i16x8_mul)
5034 [32767, 8, -2494,-4, 4882, -4, 848, 3830],
5035 }
5036
5037 test_i16x8_min_s => {
5038 [0i16, 0, 0, 0, 0, 0, 0, 0]
5039 (min | i16x8_min)
5040 [1i16, 1, 1, 1, 1, 1, 1, 1],
5041
5042 [1i16, 2, 3, 4, 5, 6, 7, 8]
5043 (min | i16x8_min)
5044 [32767, 8, -2494,-4, 4882, -4, 848, 3830],
5045 }
5046
5047 test_i16x8_min_u => {
5048 [0u16, 0, 0, 0, 0, 0, 0, 0]
5049 (min | u16x8_min)
5050 [1u16, 1, 1, 1, 1, 1, 1, 1],
5051
5052 [1u16, 2, 3, 4, 5, 6, 7, 8]
5053 (min | u16x8_min)
5054 [32767, 8, -2494i16 as u16,-4i16 as u16, 4882, -4i16 as u16, 848, 3830],
5055 }
5056
5057 test_i16x8_max_s => {
5058 [0i16, 0, 0, 0, 0, 0, 0, 0]
5059 (max | i16x8_max)
5060 [1i16, 1, 1, 1, 1, 1, 1, 1],
5061
5062 [1i16, 2, 3, 4, 5, 6, 7, 8]
5063 (max | i16x8_max)
5064 [32767, 8, -2494,-4, 4882, -4, 848, 3830],
5065 }
5066
5067 test_i16x8_max_u => {
5068 [0u16, 0, 0, 0, 0, 0, 0, 0]
5069 (max | u16x8_max)
5070 [1u16, 1, 1, 1, 1, 1, 1, 1],
5071
5072 [1u16, 2, 3, 4, 5, 6, 7, 8]
5073 (max | u16x8_max)
5074 [32767, 8, -2494i16 as u16,-4i16 as u16, 4882, -4i16 as u16, 848, 3830],
5075 }
5076
5077 test_i16x8_avgr_u => {
5078 [0u16, 0, 0, 0, 0, 0, 0, 0]
5079 (avgr | u16x8_avgr)
5080 [1u16, 1, 1, 1, 1, 1, 1, 1],
5081
5082 [1u16, 2, 3, 4, 5, 6, 7, 8]
5083 (avgr | u16x8_avgr)
5084 [32767, 8, -2494i16 as u16,-4i16 as u16, 4882, -4i16 as u16, 848, 3830],
5085 }
5086
5087 test_i32x4_add => {
5088 [0i32, 0, 0, 0] (wrapping_add | i32x4_add) [1, 2, 3, 4],
5089 [1i32, 1283, i32::MAX, i32::MIN]
5090 (wrapping_add | i32x4_add)
5091 [i32::MAX; 4],
5092 }
5093
5094 test_i32x4_sub => {
5095 [0i32, 0, 0, 0] (wrapping_sub | i32x4_sub) [1, 2, 3, 4],
5096 [1i32, 1283, i32::MAX, i32::MIN]
5097 (wrapping_sub | i32x4_sub)
5098 [i32::MAX; 4],
5099 }
5100
5101 test_i32x4_mul => {
5102 [0i32, 0, 0, 0] (wrapping_mul | i32x4_mul) [1, 2, 3, 4],
5103 [1i32, 1283, i32::MAX, i32::MIN]
5104 (wrapping_mul | i32x4_mul)
5105 [i32::MAX; 4],
5106 }
5107
5108 test_i32x4_min_s => {
5109 [0i32, 0, 0, 0] (min | i32x4_min) [1, 2, 3, 4],
5110 [1i32, 1283, i32::MAX, i32::MIN]
5111 (min | i32x4_min)
5112 [i32::MAX; 4],
5113 }
5114
5115 test_i32x4_min_u => {
5116 [0u32, 0, 0, 0] (min | u32x4_min) [1, 2, 3, 4],
5117 [1u32, 1283, i32::MAX as u32, i32::MIN as u32]
5118 (min | u32x4_min)
5119 [i32::MAX as u32; 4],
5120 }
5121
5122 test_i32x4_max_s => {
5123 [0i32, 0, 0, 0] (max | i32x4_max) [1, 2, 3, 4],
5124 [1i32, 1283, i32::MAX, i32::MIN]
5125 (max | i32x4_max)
5126 [i32::MAX; 4],
5127 }
5128
5129 test_i32x4_max_u => {
5130 [0u32, 0, 0, 0] (max | u32x4_max) [1, 2, 3, 4],
5131 [1u32, 1283, i32::MAX as u32, i32::MIN as u32]
5132 (max | u32x4_max)
5133 [i32::MAX as u32; 4],
5134 }
5135
5136 test_i64x2_add => {
5137 [0i64, 0] (wrapping_add | i64x2_add) [1, 2],
5138 [i64::MIN, i64::MAX] (wrapping_add | i64x2_add) [i64::MAX, i64::MIN],
5139 [i64::MAX; 2] (wrapping_add | i64x2_add) [i64::MAX; 2],
5140 [-4i64, -4] (wrapping_add | i64x2_add) [800, 939],
5141 }
5142
5143 test_i64x2_sub => {
5144 [0i64, 0] (wrapping_sub | i64x2_sub) [1, 2],
5145 [i64::MIN, i64::MAX] (wrapping_sub | i64x2_sub) [i64::MAX, i64::MIN],
5146 [i64::MAX; 2] (wrapping_sub | i64x2_sub) [i64::MAX; 2],
5147 [-4i64, -4] (wrapping_sub | i64x2_sub) [800, 939],
5148 }
5149
5150 test_i64x2_mul => {
5151 [0i64, 0] (wrapping_mul | i64x2_mul) [1, 2],
5152 [i64::MIN, i64::MAX] (wrapping_mul | i64x2_mul) [i64::MAX, i64::MIN],
5153 [i64::MAX; 2] (wrapping_mul | i64x2_mul) [i64::MAX; 2],
5154 [-4i64, -4] (wrapping_mul | i64x2_mul) [800, 939],
5155 }
5156
5157 test_f32x4_add => {
5158 [-1.0f32, 2.0, 3.0, 4.0] (add | f32x4_add) [1., 2., 0., 0.],
5159 [f32::INFINITY, -0.0, f32::NEG_INFINITY, 3.0]
5160 (add | f32x4_add)
5161 [1., 2., 0., 0.],
5162 }
5163
5164 test_f32x4_sub => {
5165 [-1.0f32, 2.0, 3.0, 4.0] (sub | f32x4_sub) [1., 2., 0., 0.],
5166 [f32::INFINITY, -0.0, f32::NEG_INFINITY, 3.0]
5167 (sub | f32x4_sub)
5168 [1., 2., 0., 0.],
5169 }
5170
5171 test_f32x4_mul => {
5172 [-1.0f32, 2.0, 3.0, 4.0] (mul | f32x4_mul) [1., 2., 0., 0.],
5173 [f32::INFINITY, -0.0, f32::NEG_INFINITY, 3.0]
5174 (mul | f32x4_mul)
5175 [1., 2., 1., 0.],
5176 }
5177
5178 test_f32x4_div => {
5179 [-1.0f32, 2.0, 3.0, 4.0] (div | f32x4_div) [1., 2., 0., 0.],
5180 [f32::INFINITY, -0.0, f32::NEG_INFINITY, 3.0]
5181 (div | f32x4_div)
5182 [1., 2., 0., 0.],
5183 }
5184
5185 test_f32x4_min => {
5186 [-1.0f32, 2.0, 3.0, 4.0] (min | f32x4_min) [1., 2., 0., 0.],
5187 [f32::INFINITY, -0.0, f32::NEG_INFINITY, 3.0]
5188 (min | f32x4_min)
5189 [1., 2., 0., 0.],
5190 }
5191
5192 test_f32x4_max => {
5193 [-1.0f32, 2.0, 3.0, 4.0] (max | f32x4_max) [1., 2., 0., 0.],
5194 [f32::INFINITY, -0.0, f32::NEG_INFINITY, 3.0]
5195 (max | f32x4_max)
5196 [1., 2., 0., 0.],
5197 }
5198
5199 test_f32x4_pmin => {
5200 [-1.0f32, 2.0, 3.0, 4.0] (min | f32x4_pmin) [1., 2., 0., 0.],
5201 [f32::INFINITY, -0.0, f32::NEG_INFINITY, 3.0]
5202 (min | f32x4_pmin)
5203 [1., 2., 0., 0.],
5204 }
5205
5206 test_f32x4_pmax => {
5207 [-1.0f32, 2.0, 3.0, 4.0] (max | f32x4_pmax) [1., 2., 0., 0.],
5208 [f32::INFINITY, -0.0, f32::NEG_INFINITY, 3.0]
5209 (max | f32x4_pmax)
5210 [1., 2., 0., 0.],
5211 }
5212
5213 test_f64x2_add => {
5214 [-1.0f64, 2.0] (add | f64x2_add) [1., 2.],
5215 [f64::INFINITY, f64::NEG_INFINITY] (add | f64x2_add) [1., 2.],
5216 }
5217
5218 test_f64x2_sub => {
5219 [-1.0f64, 2.0] (sub | f64x2_sub) [1., 2.],
5220 [f64::INFINITY, f64::NEG_INFINITY] (sub | f64x2_sub) [1., 2.],
5221 }
5222
5223 test_f64x2_mul => {
5224 [-1.0f64, 2.0] (mul | f64x2_mul) [1., 2.],
5225 [f64::INFINITY, f64::NEG_INFINITY] (mul | f64x2_mul) [1., 2.],
5226 }
5227
5228 test_f64x2_div => {
5229 [-1.0f64, 2.0] (div | f64x2_div) [1., 2.],
5230 [f64::INFINITY, f64::NEG_INFINITY] (div | f64x2_div) [1., 2.],
5231 }
5232
5233 test_f64x2_min => {
5234 [-1.0f64, 2.0] (min | f64x2_min) [1., 2.],
5235 [f64::INFINITY, f64::NEG_INFINITY] (min | f64x2_min) [1., 2.],
5236 }
5237
5238 test_f64x2_max => {
5239 [-1.0f64, 2.0] (max | f64x2_max) [1., 2.],
5240 [f64::INFINITY, f64::NEG_INFINITY] (max | f64x2_max) [1., 2.],
5241 }
5242
5243 test_f64x2_pmin => {
5244 [-1.0f64, 2.0] (min | f64x2_pmin) [1., 2.],
5245 [f64::INFINITY, f64::NEG_INFINITY] (min | f64x2_pmin) [1., 2.],
5246 }
5247
5248 test_f64x2_pmax => {
5249 [-1.0f64, 2.0] (max | f64x2_pmax) [1., 2.],
5250 [f64::INFINITY, f64::NEG_INFINITY] (max | f64x2_pmax) [1., 2.],
5251 }
5252 }
5253
// Lane-wise unary operations. Each entry pairs the scalar function that
// produces the expected lane value with the SIMD intrinsic under test:
// `(scalar_op | simd_op) [input lanes]`. The `test_unop!` macro is defined
// earlier in this file; presumably it maps the scalar op over the lanes to
// build the expected vector — confirm against its definition.
test_unop! {
    test_i8x16_abs => {
        (wrapping_abs | i8x16_abs)
        [1i8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],

        (wrapping_abs | i8x16_abs)
        [-2i8, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -18],

        // Includes i8::MIN (-128): wrapping_abs(-128) == -128, which pins
        // the wrapping behavior of the SIMD abs on the most negative lane.
        (wrapping_abs | i8x16_abs)
        [-127i8, -44, 43, 126, 4, -128, 127, -59, -43, 39, -69, 79, -3, 35, 83, 13],
    }

    test_i8x16_neg => {
        (wrapping_neg | i8x16_neg)
        [1i8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],

        (wrapping_neg | i8x16_neg)
        [-2i8, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -18],

        // -128 negates to itself under wrapping semantics.
        (wrapping_neg | i8x16_neg)
        [-127i8, -44, 43, 126, 4, -128, 127, -59, -43, 39, -69, 79, -3, 35, 83, 13],
    }

    test_i16x8_abs => {
        (wrapping_abs | i16x8_abs) [1i16, 1, 1, 1, 1, 1, 1, 1],
        // `!0` is -1; `0x7fff` is i16::MAX.
        (wrapping_abs | i16x8_abs) [2i16, 0x7fff, !0, 4, 42, -5, 33, -4847],
    }

    test_i16x8_neg => {
        (wrapping_neg | i16x8_neg) [1i16, 1, 1, 1, 1, 1, 1, 1],
        (wrapping_neg | i16x8_neg) [2i16, 0x7fff, !0, 4, 42, -5, 33, -4847],
    }

    test_i32x4_abs => {
        (wrapping_abs | i32x4_abs) [1i32, 2, 3, 4],
        (wrapping_abs | i32x4_abs) [i32::MIN, i32::MAX, 0, 4],
    }

    test_i32x4_neg => {
        (wrapping_neg | i32x4_neg) [1i32, 2, 3, 4],
        (wrapping_neg | i32x4_neg) [i32::MIN, i32::MAX, 0, 4],
    }

    test_i64x2_abs => {
        (wrapping_abs | i64x2_abs) [1i64, 2],
        (wrapping_abs | i64x2_abs) [i64::MIN, i64::MAX],
    }

    test_i64x2_neg => {
        (wrapping_neg | i64x2_neg) [1i64, 2],
        (wrapping_neg | i64x2_neg) [i64::MIN, i64::MAX],
    }

    // Float rounding ops: inputs cover negatives, infinities, and signed
    // zero so sign preservation is checked as well.
    test_f32x4_ceil => {
        (ceil | f32x4_ceil) [1.0f32, 2., 2.5, 3.3],
        (ceil | f32x4_ceil) [0.0, -0.3, f32::INFINITY, -0.0],
    }

    test_f32x4_floor => {
        (floor | f32x4_floor) [1.0f32, 2., 2.5, 3.3],
        (floor | f32x4_floor) [0.0, -0.3, f32::INFINITY, -0.0],
    }

    test_f32x4_trunc => {
        (trunc | f32x4_trunc) [1.0f32, 2., 2.5, 3.3],
        (trunc | f32x4_trunc) [0.0, -0.3, f32::INFINITY, -0.0],
    }

    test_f32x4_nearest => {
        // NOTE(review): scalar `round` rounds ties away from zero, while a
        // ties-to-even "nearest" would differ on exact .5 inputs; the inputs
        // here avoid .5 ties so both agree — keep that property when adding
        // cases, or verify the tie behavior of `f32x4_nearest` first.
        (round | f32x4_nearest) [1.0f32, 2., 2.6, 3.3],
        (round | f32x4_nearest) [0.0, -0.3, f32::INFINITY, -0.0],
    }

    test_f32x4_abs => {
        (abs | f32x4_abs) [1.0f32, 2., 2.6, 3.3],
        (abs | f32x4_abs) [0.0, -0.3, f32::INFINITY, -0.0],
    }

    test_f32x4_neg => {
        (neg | f32x4_neg) [1.0f32, 2., 2.6, 3.3],
        (neg | f32x4_neg) [0.0, -0.3, f32::INFINITY, -0.0],
    }

    // sqrt inputs are all non-negative, so no NaN results are involved.
    test_f32x4_sqrt => {
        (sqrt | f32x4_sqrt) [1.0f32, 2., 2.6, 3.3],
        (sqrt | f32x4_sqrt) [0.0, 0.3, f32::INFINITY, 0.1],
    }

    test_f64x2_ceil => {
        (ceil | f64x2_ceil) [1.0f64, 2.3],
        (ceil | f64x2_ceil) [f64::INFINITY, -0.1],
    }

    test_f64x2_floor => {
        (floor | f64x2_floor) [1.0f64, 2.3],
        (floor | f64x2_floor) [f64::INFINITY, -0.1],
    }

    test_f64x2_trunc => {
        (trunc | f64x2_trunc) [1.0f64, 2.3],
        (trunc | f64x2_trunc) [f64::INFINITY, -0.1],
    }

    test_f64x2_nearest => {
        // Same tie caveat as test_f32x4_nearest above: no .5 inputs.
        (round | f64x2_nearest) [1.0f64, 2.3],
        (round | f64x2_nearest) [f64::INFINITY, -0.1],
    }

    test_f64x2_abs => {
        (abs | f64x2_abs) [1.0f64, 2.3],
        (abs | f64x2_abs) [f64::INFINITY, -0.1],
    }

    test_f64x2_neg => {
        (neg | f64x2_neg) [1.0f64, 2.3],
        (neg | f64x2_neg) [f64::INFINITY, -0.1],
    }

    test_f64x2_sqrt => {
        (sqrt | f64x2_sqrt) [1.0f64, 2.3],
        (sqrt | f64x2_sqrt) [f64::INFINITY, 0.1],
    }
}
5377
// Compile-time dispatch on an element-type *identifier*: expands to `true`
// for the two floating-point lane types and `false` for any other ident.
// `test_bop!` uses this to decide whether results need NaN-aware comparison.
macro_rules! floating_point {
    (f32) => { true };
    (f64) => { true };
    ($id:ident) => { false };
}
5389
/// Gives integer lane types an `is_nan` so `test_bop!`'s float branch can
/// call `.is_nan()` uniformly on result lanes (e.g. the i32 masks produced
/// by f32x4 comparisons). The default body reports `false`, which is always
/// correct for integers; f32/f64 resolve to their inherent `is_nan` first.
trait IsNan: Sized {
    fn is_nan(self) -> bool {
        false
    }
}

// One empty impl per integer lane type, all inheriting the default.
macro_rules! impl_is_nan {
    ($($int:ty)*) => {$( impl IsNan for $int {} )*}
}
impl_is_nan!(i8 i16 i32 i64);
5399
// Generates a #[test] for a lane-wise binary operation: transmutes the two
// input arrays to v128, applies `$binary_op`, and compares every result lane
// against `$out`.
//
// The first rule is shorthand for when the output element type equals the
// input element type; it forwards to the second rule with `$oty = $ety`.
macro_rules! test_bop {
    ($id:ident[$ety:ident; $ecount:expr] |
     $binary_op:ident [$op_test_id:ident] :
     ([$($in_a:expr),*], [$($in_b:expr),*]) => [$($out:expr),*]) => {
        test_bop!(
            $id[$ety; $ecount] => $ety | $binary_op [ $op_test_id ]:
            ([$($in_a),*], [$($in_b),*]) => [$($out),*]
        );

    };
    // Second rule: `$oty` is an explicit output element type (used e.g. for
    // float comparisons, which take f32/f64 lanes but produce i32/i64 masks).
    ($id:ident[$ety:ident; $ecount:expr] => $oty:ident |
     $binary_op:ident [$op_test_id:ident] :
     ([$($in_a:expr),*], [$($in_b:expr),*]) => [$($out:expr),*]) => {
        #[test]
        fn $op_test_id() {
            unsafe {
                let a_input: [$ety; $ecount] = [$($in_a),*];
                let b_input: [$ety; $ecount] = [$($in_b),*];
                let output: [$oty; $ecount] = [$($out),*];

                let a_vec_in: v128 = transmute(a_input);
                let b_vec_in: v128 = transmute(b_input);
                let vec_res: v128 = $binary_op(a_vec_in, b_vec_in);

                let res: [$oty; $ecount] = transmute(vec_res);

                // Integer inputs: exact whole-array comparison.
                if !floating_point!($ety) {
                    assert_eq!(res, output);
                } else {
                    // Float inputs: NaN != NaN, so compare NaN-ness first and
                    // only compare values on non-NaN lanes. (`is_nan` here may
                    // be the `IsNan` trait method when `$oty` is an integer
                    // mask type — it then always returns false.)
                    for i in 0..$ecount {
                        let r = res[i];
                        let o = output[i];
                        assert_eq!(r.is_nan(), o.is_nan());
                        if !r.is_nan() {
                            assert_eq!(r, o);
                        }
                    }
                }
            }
        }
    }
}
5442
// Generates a #[test] for a vector × scalar binary operation (the shift
// intrinsics): `$in_b` is passed through unchanged as the scalar second
// operand, and every output lane must equal the matching entry of `$out`.
macro_rules! test_bops {
    ($id:ident[$ety:ident; $ecount:expr] |
     $binary_op:ident [$op_test_id:ident]:
     ([$($in_a:expr),*], $in_b:expr) => [$($out:expr),*]) => {
        #[test]
        fn $op_test_id() {
            unsafe {
                let lanes: [$ety; $ecount] = [$($in_a),*];
                let expected: [$ety; $ecount] = [$($out),*];
                let actual: [$ety; $ecount] =
                    transmute($binary_op(transmute::<_, v128>(lanes), $in_b));
                assert_eq!(actual, expected);
            }
        }
    }
}
5462
// Generates a #[test] for a lane-wise unary operation: the input array is
// transmuted to v128, `$unary_op` is applied, and every result lane must
// equal the matching entry of `$out`.
macro_rules! test_uop {
    ($id:ident[$ety:ident; $ecount:expr] |
     $unary_op:ident [$op_test_id:ident]: [$($in_a:expr),*] => [$($out:expr),*]) => {
        #[test]
        fn $op_test_id() {
            unsafe {
                let lanes: [$ety; $ecount] = [$($in_a),*];
                let expected: [$ety; $ecount] = [$($out),*];
                let actual: [$ety; $ecount] =
                    transmute($unary_op(transmute::<_, v128>(lanes)));
                assert_eq!(actual, expected);
            }
        }
    }
}
5481
// Lane-wise left shift by a scalar amount. Shifted-out bits are discarded,
// so MAX << 1 wraps to -2 in the signed lane interpretation.
test_bops!(i8x16[i8; 16] | i8x16_shl[i8x16_shl_test]:
           ([0, -1, 2, 3, 4, 5, 6, i8::MAX, 1, 1, 1, 1, 1, 1, 1, 1], 1) =>
           [0, -2, 4, 6, 8, 10, 12, -2, 2, 2, 2, 2, 2, 2, 2, 2]);
test_bops!(i16x8[i16; 8] | i16x8_shl[i16x8_shl_test]:
           ([0, -1, 2, 3, 4, 5, 6, i16::MAX], 1) =>
           [0, -2, 4, 6, 8, 10, 12, -2]);
test_bops!(i32x4[i32; 4] | i32x4_shl[i32x4_shl_test]:
           ([0, -1, 2, 3], 1) => [0, -2, 4, 6]);
test_bops!(i64x2[i64; 2] | i64x2_shl[i64x2_shl_test]:
           ([0, -1], 1) => [0, -2]);
5492
// Arithmetic (sign-extending) right shift: -1 shifted stays -1.
test_bops!(i8x16[i8; 16] | i8x16_shr[i8x16_shr_s_test]:
           ([0, -1, 2, 3, 4, 5, 6, i8::MAX, 1, 1, 1, 1, 1, 1, 1, 1], 1) =>
           [0, -1, 1, 1, 2, 2, 3, 63, 0, 0, 0, 0, 0, 0, 0, 0]);
test_bops!(i16x8[i16; 8] | i16x8_shr[i16x8_shr_s_test]:
           ([0, -1, 2, 3, 4, 5, 6, i16::MAX], 1) =>
           [0, -1, 1, 1, 2, 2, 3, i16::MAX / 2]);
test_bops!(i32x4[i32; 4] | i32x4_shr[i32x4_shr_s_test]:
           ([0, -1, 2, 3], 1) => [0, -1, 1, 1]);
test_bops!(i64x2[i64; 2] | i64x2_shr[i64x2_shr_s_test]:
           ([0, -1], 1) => [0, -1]);
5503
5504 test_bops!(i8x16[i8; 16] | u8x16_shr[i8x16_uhr_u_test]:
5505 ([0, -1, 2, 3, 4, 5, 6, i8::MAX, 1, 1, 1, 1, 1, 1, 1, 1], 1) =>
5506 [0, i8::MAX, 1, 1, 2, 2, 3, 63, 0, 0, 0, 0, 0, 0, 0, 0]);
5507 test_bops!(i16x8[i16; 8] | u16x8_shr[i16x8_uhr_u_test]:
5508 ([0, -1, 2, 3, 4, 5, 6, i16::MAX], 1) =>
5509 [0, i16::MAX, 1, 1, 2, 2, 3, i16::MAX / 2]);
5510 test_bops!(i32x4[i32; 4] | u32x4_shr[i32x4_uhr_u_test]:
5511 ([0, -1, 2, 3], 1) => [0, i32::MAX, 1, 1]);
5512 test_bops!(i64x2[i64; 2] | u64x2_shr[i64x2_uhr_u_test]:
5513 ([0, -1], 1) => [0, i64::MAX]);
5514
#[test]
fn v128_bitwise_logical_ops() {
    unsafe {
        // Three bit patterns: lanes alternating all-ones/all-zeros, every
        // bit set, and every bit clear.
        let mixed: v128 = transmute([u32::MAX, 0, u32::MAX, 0]);
        let ones: v128 = transmute([u32::MAX; 4]);
        let zeros: v128 = transmute([0u32; 4]);

        // AND is the identity with itself and with all-ones.
        compare_bytes(v128_and(mixed, mixed), mixed);
        compare_bytes(v128_and(mixed, ones), mixed);

        // ANDNOT (a & !b): all-ones or self as the mask clears everything;
        // an all-zeros mask clears nothing.
        compare_bytes(v128_andnot(mixed, ones), zeros);
        compare_bytes(v128_andnot(mixed, mixed), zeros);
        compare_bytes(v128_andnot(mixed, zeros), mixed);

        // OR, NOT, XOR basics.
        compare_bytes(v128_or(mixed, ones), ones);
        compare_bytes(v128_not(ones), zeros);
        compare_bytes(v128_xor(mixed, zeros), mixed);

        // bitselect(a, b, mask): result takes bits of `a` where the mask
        // bit is set and bits of `b` where it is clear, as the three
        // expectations below demonstrate.
        compare_bytes(v128_bitselect(ones, zeros, ones), ones);
        compare_bytes(v128_bitselect(ones, zeros, zeros), zeros);
        compare_bytes(v128_bitselect(ones, zeros, mixed), mixed);
    }
}
5551
// Generates a #[test] covering the boolean reductions for one lane width:
// `$any` (any_true) and `$all` (all_true) run against an all-true vector,
// an all-false vector, and an alternating vector.
//
// Fix: `$any` was captured by the matcher but never used in the expansion,
// so the any_true intrinsics were never exercised (unused metavariables
// compile silently). The any_true assertions are restored below.
macro_rules! test_bool_red {
    ([$test_id:ident, $any:ident, $all:ident] | [$($true:expr),*] | [$($false:expr),*] | [$($alt:expr),*]) => {
        #[test]
        fn $test_id() {
            unsafe {
                let vec_a: v128 = transmute([$($true),*]); // every lane true
                let vec_b: v128 = transmute([$($false),*]); // every lane false
                let vec_c: v128 = transmute([$($alt),*]); // alternating lanes

                // any_true: true whenever at least one lane is non-zero.
                assert_eq!($any(vec_a), true);
                assert_eq!($any(vec_b), false);
                assert_eq!($any(vec_c), true);

                // all_true: true only when every lane is non-zero.
                assert_eq!($all(vec_a), true);
                assert_eq!($all(vec_b), false);
                assert_eq!($all(vec_c), false);
            }
        }
    }
}
5573
// Instantiate the boolean-reduction tests for every integer lane width,
// with all-true, all-false, and alternating lane vectors.
test_bool_red!(
    [i8x16_boolean_reductions, v128_any_true, i8x16_all_true]
    | [1_i8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
    | [0_i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    | [1_i8, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0]
);
test_bool_red!(
    [i16x8_boolean_reductions, v128_any_true, i16x8_all_true]
    | [1_i16, 1, 1, 1, 1, 1, 1, 1]
    | [0_i16, 0, 0, 0, 0, 0, 0, 0]
    | [1_i16, 0, 1, 0, 1, 0, 1, 0]
);
test_bool_red!(
    [i32x4_boolean_reductions, v128_any_true, i32x4_all_true]
    | [1_i32, 1, 1, 1]
    | [0_i32, 0, 0, 0]
    | [1_i32, 0, 1, 0]
);
test_bool_red!(
    [i64x2_boolean_reductions, v128_any_true, i64x2_all_true]
    | [1_i64, 1]
    | [0_i64, 0]
    | [1_i64, 0]
);
5598
// Lane-wise comparisons: each result lane is an all-ones mask (-1) when the
// predicate holds and 0 otherwise. Float inputs produce integer mask lanes,
// hence the `=> i32` / `=> i64` output-type arrows.

// Equality.
test_bop!(i8x16[i8; 16] | i8x16_eq[i8x16_eq_test]:
          ([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
           [0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, 15]) =>
          [-1, 0, -1, 0 ,-1, 0, -1, -1, -1, 0, -1, 0 ,-1, 0, -1, -1]);
test_bop!(i16x8[i16; 8] | i16x8_eq[i16x8_eq_test]:
          ([0, 1, 2, 3, 4, 5, 6, 7], [0, 2, 2, 4, 4, 6, 6, 7]) =>
          [-1, 0, -1, 0 ,-1, 0, -1, -1]);
test_bop!(i32x4[i32; 4] | i32x4_eq[i32x4_eq_test]:
          ([0, 1, 2, 3], [0, 2, 2, 4]) => [-1, 0, -1, 0]);
test_bop!(i64x2[i64; 2] | i64x2_eq[i64x2_eq_test]:
          ([0, 1], [0, 2]) => [-1, 0]);
test_bop!(f32x4[f32; 4] => i32 | f32x4_eq[f32x4_eq_test]:
          ([0., 1., 2., 3.], [0., 2., 2., 4.]) => [-1, 0, -1, 0]);
test_bop!(f64x2[f64; 2] => i64 | f64x2_eq[f64x2_eq_test]: ([0., 1.], [0., 2.]) => [-1, 0]);

// Inequality (complement of the equality masks above).
test_bop!(i8x16[i8; 16] | i8x16_ne[i8x16_ne_test]:
          ([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
           [0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, 15]) =>
          [0, -1, 0, -1 ,0, -1, 0, 0, 0, -1, 0, -1 ,0, -1, 0, 0]);
test_bop!(i16x8[i16; 8] | i16x8_ne[i16x8_ne_test]:
          ([0, 1, 2, 3, 4, 5, 6, 7], [0, 2, 2, 4, 4, 6, 6, 7]) =>
          [0, -1, 0, -1 ,0, -1, 0, 0]);
test_bop!(i32x4[i32; 4] | i32x4_ne[i32x4_ne_test]:
          ([0, 1, 2, 3], [0, 2, 2, 4]) => [0, -1, 0, -1]);
test_bop!(i64x2[i64; 2] | i64x2_ne[i64x2_ne_test]:
          ([0, 1], [0, 2]) => [0, -1]);
test_bop!(f32x4[f32; 4] => i32 | f32x4_ne[f32x4_ne_test]:
          ([0., 1., 2., 3.], [0., 2., 2., 4.]) => [0, -1, 0, -1]);
test_bop!(f64x2[f64; 2] => i64 | f64x2_ne[f64x2_ne_test]: ([0., 1.], [0., 2.]) => [0, -1]);

// Less-than. The signed/unsigned pairs share inputs containing a negative
// lane, so the two variants expect different masks there (the negative lane
// reinterprets as a large unsigned value).
test_bop!(i8x16[i8; 16] | i8x16_lt[i8x16_lt_s_test]:
          ([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, -12, 13, 14, 15],
           [0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, 15]) =>
          [0, -1, 0, -1 ,0, -1, 0, 0, 0, -1, 0, -1, -1, -1, 0, 0]);
test_bop!(i8x16[i8; 16] | u8x16_lt[i8x16_lt_u_test]:
          ([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, -12, 13, 14, 15],
           [0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, 15]) =>
          [0, -1, 0, -1 ,0, -1, 0, 0, 0, -1, 0, -1 ,0, -1, 0, 0]);
test_bop!(i16x8[i16; 8] | i16x8_lt[i16x8_lt_s_test]:
          ([0, 1, 2, 3, 4, 5, 6, -7], [0, 2, 2, 4, 4, 6, 6, 7]) =>
          [0, -1, 0, -1 ,0, -1, 0, -1]);
test_bop!(i16x8[i16; 8] | u16x8_lt[i16x8_lt_u_test]:
          ([0, 1, 2, 3, 4, 5, 6, -7], [0, 2, 2, 4, 4, 6, 6, 7]) =>
          [0, -1, 0, -1 ,0, -1, 0, 0]);
test_bop!(i32x4[i32; 4] | i32x4_lt[i32x4_lt_s_test]:
          ([-1, 1, 2, 3], [0, 2, 2, 4]) => [-1, -1, 0, -1]);
test_bop!(i32x4[i32; 4] | u32x4_lt[i32x4_lt_u_test]:
          ([-1, 1, 2, 3], [0, 2, 2, 4]) => [0, -1, 0, -1]);
test_bop!(i64x2[i64; 2] | i64x2_lt[i64x2_lt_s_test]:
          ([-1, 3], [0, 2]) => [-1, 0]);
test_bop!(f32x4[f32; 4] => i32 | f32x4_lt[f32x4_lt_test]:
          ([0., 1., 2., 3.], [0., 2., 2., 4.]) => [0, -1, 0, -1]);
test_bop!(f64x2[f64; 2] => i64 | f64x2_lt[f64x2_lt_test]: ([0., 1.], [0., 2.]) => [0, -1]);

// Greater-than.
test_bop!(i8x16[i8; 16] | i8x16_gt[i8x16_gt_s_test]:
          ([0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, -15],
           [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) =>
          [0, -1, 0, -1 ,0, -1, 0, 0, 0, -1, 0, -1 ,0, -1, 0, 0]);
test_bop!(i8x16[i8; 16] | u8x16_gt[i8x16_gt_u_test]:
          ([0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, -15],
           [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) =>
          [0, -1, 0, -1 ,0, -1, 0, 0, 0, -1, 0, -1 ,0, -1, 0, -1]);
test_bop!(i16x8[i16; 8] | i16x8_gt[i16x8_gt_s_test]:
          ([0, 2, 2, 4, 4, 6, 6, -7], [0, 1, 2, 3, 4, 5, 6, 7]) =>
          [0, -1, 0, -1 ,0, -1, 0, 0]);
test_bop!(i16x8[i16; 8] | u16x8_gt[i16x8_gt_u_test]:
          ([0, 2, 2, 4, 4, 6, 6, -7], [0, 1, 2, 3, 4, 5, 6, 7]) =>
          [0, -1, 0, -1 ,0, -1, 0, -1]);
test_bop!(i32x4[i32; 4] | i32x4_gt[i32x4_gt_s_test]:
          ([0, 2, 2, -4], [0, 1, 2, 3]) => [0, -1, 0, 0]);
test_bop!(i32x4[i32; 4] | u32x4_gt[i32x4_gt_u_test]:
          ([0, 2, 2, -4], [0, 1, 2, 3]) => [0, -1, 0, -1]);
test_bop!(i64x2[i64; 2] | i64x2_gt[i64x2_gt_s_test]:
          ([-1, 2], [0, 1]) => [0, -1]);
test_bop!(f32x4[f32; 4] => i32 | f32x4_gt[f32x4_gt_test]:
          ([0., 2., 2., 4.], [0., 1., 2., 3.]) => [0, -1, 0, -1]);
test_bop!(f64x2[f64; 2] => i64 | f64x2_gt[f64x2_gt_test]: ([0., 2.], [0., 1.]) => [0, -1]);

// Greater-or-equal.
test_bop!(i8x16[i8; 16] | i8x16_ge[i8x16_ge_s_test]:
          ([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, -15],
           [0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, 15]) =>
          [-1, 0, -1, 0 ,-1, 0, -1, -1, -1, 0, -1, 0 ,-1, 0, -1, 0]);
test_bop!(i8x16[i8; 16] | u8x16_ge[i8x16_ge_u_test]:
          ([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, -15],
           [0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, 15]) =>
          [-1, 0, -1, 0 ,-1, 0, -1, -1, -1, 0, -1, 0 ,-1, 0, -1, -1]);
test_bop!(i16x8[i16; 8] | i16x8_ge[i16x8_ge_s_test]:
          ([0, 1, 2, 3, 4, 5, 6, -7], [0, 2, 2, 4, 4, 6, 6, 7]) =>
          [-1, 0, -1, 0 ,-1, 0, -1, 0]);
test_bop!(i16x8[i16; 8] | u16x8_ge[i16x8_ge_u_test]:
          ([0, 1, 2, 3, 4, 5, 6, -7], [0, 2, 2, 4, 4, 6, 6, 7]) =>
          [-1, 0, -1, 0 ,-1, 0, -1, -1]);
test_bop!(i32x4[i32; 4] | i32x4_ge[i32x4_ge_s_test]:
          ([0, 1, 2, -3], [0, 2, 2, 4]) => [-1, 0, -1, 0]);
test_bop!(i32x4[i32; 4] | u32x4_ge[i32x4_ge_u_test]:
          ([0, 1, 2, -3], [0, 2, 2, 4]) => [-1, 0, -1, -1]);
test_bop!(i64x2[i64; 2] | i64x2_ge[i64x2_ge_s_test]:
          ([0, 1], [-1, 2]) => [-1, 0]);
test_bop!(f32x4[f32; 4] => i32 | f32x4_ge[f32x4_ge_test]:
          ([0., 1., 2., 3.], [0., 2., 2., 4.]) => [-1, 0, -1, 0]);
test_bop!(f64x2[f64; 2] => i64 | f64x2_ge[f64x2_ge_test]: ([0., 1.], [0., 2.]) => [-1, 0]);

// Less-or-equal, integer variants.
test_bop!(i8x16[i8; 16] | i8x16_le[i8x16_le_s_test]:
          ([0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, -15],
           [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
          ) =>
          [-1, 0, -1, 0 ,-1, 0, -1, -1, -1, 0, -1, 0 ,-1, 0, -1, -1]);
test_bop!(i8x16[i8; 16] | u8x16_le[i8x16_le_u_test]:
          ([0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, -15],
           [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
          ) =>
          [-1, 0, -1, 0 ,-1, 0, -1, -1, -1, 0, -1, 0 ,-1, 0, -1, 0]);
test_bop!(i16x8[i16; 8] | i16x8_le[i16x8_le_s_test]:
          ([0, 2, 2, 4, 4, 6, 6, -7], [0, 1, 2, 3, 4, 5, 6, 7]) =>
          [-1, 0, -1, 0 ,-1, 0, -1, -1]);
test_bop!(i16x8[i16; 8] | u16x8_le[i16x8_le_u_test]:
          ([0, 2, 2, 4, 4, 6, 6, -7], [0, 1, 2, 3, 4, 5, 6, 7]) =>
          [-1, 0, -1, 0 ,-1, 0, -1, 0]);
test_bop!(i32x4[i32; 4] | i32x4_le[i32x4_le_s_test]:
          ([0, 2, 2, -4], [0, 1, 2, 3]) => [-1, 0, -1, -1]);
test_bop!(i32x4[i32; 4] | u32x4_le[i32x4_le_u_test]:
          ([0, 2, 2, -4], [0, 1, 2, 3]) => [-1, 0, -1, 0]);
test_bop!(i64x2[i64; 2] | i64x2_le[i64x2_le_s_test]:
          ([0, 2], [0, 1]) => [-1, 0]);
5723 test_bop!(f32x4[f32; 4] => i32 | f32x4_le[f32x4_le_test]:
5724 ([0., 2., 2., 4.], [0., 1., 2., 3.]) => [-1, 0, -1, -0]);
5725 test_bop!(f64x2[f64; 2] => i64 | f64x2_le[f64x2_le_test]: ([0., 2.], [0., 1.]) => [-1, 0]);
5726
// f32x4 arithmetic with explicit expected lanes. The `*_nan` variants put a
// NaN in one operand lane and expect a NaN result lane there (`test_bop!`
// compares NaN-ness rather than value for NaN lanes).
test_uop!(f32x4[f32; 4] | f32x4_neg[f32x4_neg_test]: [0., 1., 2., 3.] => [ 0., -1., -2., -3.]);
test_uop!(f32x4[f32; 4] | f32x4_abs[f32x4_abs_test]: [0., -1., 2., -3.] => [ 0., 1., 2., 3.]);
test_bop!(f32x4[f32; 4] | f32x4_min[f32x4_min_test]:
          ([0., -1., 7., 8.], [1., -3., -4., 10.]) => [0., -3., -4., 8.]);
test_bop!(f32x4[f32; 4] | f32x4_min[f32x4_min_test_nan]:
          ([0., -1., 7., 8.], [1., -3., -4., f32::NAN])
          => [0., -3., -4., f32::NAN]);
test_bop!(f32x4[f32; 4] | f32x4_max[f32x4_max_test]:
          ([0., -1., 7., 8.], [1., -3., -4., 10.]) => [1., -1., 7., 10.]);
test_bop!(f32x4[f32; 4] | f32x4_max[f32x4_max_test_nan]:
          ([0., -1., 7., 8.], [1., -3., -4., f32::NAN])
          => [1., -1., 7., f32::NAN]);
test_bop!(f32x4[f32; 4] | f32x4_add[f32x4_add_test]:
          ([0., -1., 7., 8.], [1., -3., -4., 10.]) => [1., -4., 3., 18.]);
test_bop!(f32x4[f32; 4] | f32x4_sub[f32x4_sub_test]:
          ([0., -1., 7., 8.], [1., -3., -4., 10.]) => [-1., 2., 11., -2.]);
test_bop!(f32x4[f32; 4] | f32x4_mul[f32x4_mul_test]:
          ([0., -1., 7., 8.], [1., -3., -4., 10.]) => [0., 3., -28., 80.]);
test_bop!(f32x4[f32; 4] | f32x4_div[f32x4_div_test]:
          ([0., -8., 70., 8.], [1., 4., 10., 2.]) => [0., -2., 7., 4.]);
5747
// f64x2 arithmetic, mirroring the f32x4 cases above (NaN lanes in the
// `*_nan` variants are compared by NaN-ness).
test_uop!(f64x2[f64; 2] | f64x2_neg[f64x2_neg_test]: [0., 1.] => [ 0., -1.]);
test_uop!(f64x2[f64; 2] | f64x2_abs[f64x2_abs_test]: [0., -1.] => [ 0., 1.]);
test_bop!(f64x2[f64; 2] | f64x2_min[f64x2_min_test]:
          ([0., -1.], [1., -3.]) => [0., -3.]);
test_bop!(f64x2[f64; 2] | f64x2_min[f64x2_min_test_nan]:
          ([7., 8.], [-4., f64::NAN])
          => [ -4., f64::NAN]);
test_bop!(f64x2[f64; 2] | f64x2_max[f64x2_max_test]:
          ([0., -1.], [1., -3.]) => [1., -1.]);
test_bop!(f64x2[f64; 2] | f64x2_max[f64x2_max_test_nan]:
          ([7., 8.], [ -4., f64::NAN])
          => [7., f64::NAN]);
test_bop!(f64x2[f64; 2] | f64x2_add[f64x2_add_test]:
          ([0., -1.], [1., -3.]) => [1., -4.]);
test_bop!(f64x2[f64; 2] | f64x2_sub[f64x2_sub_test]:
          ([0., -1.], [1., -3.]) => [-1., 2.]);
test_bop!(f64x2[f64; 2] | f64x2_mul[f64x2_mul_test]:
          ([0., -1.], [1., -3.]) => [0., 3.]);
test_bop!(f64x2[f64; 2] | f64x2_div[f64x2_div_test]:
          ([0., -8.], [1., 4.]) => [0., -2.]);
5768
// Generates a #[test] for a lane-wise conversion intrinsic: `$conv_id`
// applied to `$from` (transmuted to v128) must be byte-identical to `$to`.
// `$to_ty` names the output vector type for documentation at the call site.
macro_rules! test_conv {
    ($test_id:ident | $conv_id:ident | $to_ty:ident | $from:expr, $to:expr) => {
        #[test]
        fn $test_id() {
            unsafe {
                let input: v128 = transmute($from);
                let expected: v128 = transmute($to);
                compare_bytes($conv_id(input), expected);
            }
        }
    };
}
5784
// Signed and unsigned i32 -> f32 lane conversions; the expected lanes use
// the same scalar `as` casts, so rounding of u32::MAX matches by construction.
test_conv!(
    f32x4_convert_s_i32x4 | f32x4_convert_i32x4 | f32x4 | [1_i32, 2, 3, 4],
    [1_f32, 2., 3., 4.]
);
test_conv!(
    f32x4_convert_u_i32x4 | f32x4_convert_u32x4 | f32x4 | [u32::MAX, 2, 3, 4],
    [u32::MAX as f32, 2., 3., 4.]
);
5793
#[test]
fn test_conversions() {
    // float -> int truncation saturates: -inf clamps to the type's minimum
    // (0 for unsigned), +inf clamps to the maximum, and NaN becomes 0.
    compare_bytes(
        i32x4_trunc_sat_f32x4(f32x4(1., f32::NEG_INFINITY, f32::INFINITY, f32::NAN)),
        i32x4(1, i32::MIN, i32::MAX, 0),
    );
    compare_bytes(
        u32x4_trunc_sat_f32x4(f32x4(1., f32::NEG_INFINITY, f32::INFINITY, f32::NAN)),
        u32x4(1, 0, u32::MAX, 0),
    );
    // int -> f64 conversion reads only the two low i32/u32 lanes; every
    // i32/u32 is exactly representable as f64 (`f64::from` is lossless).
    compare_bytes(f64x2_convert_low_i32x4(i32x4(1, 2, 3, 4)), f64x2(1., 2.));
    compare_bytes(
        f64x2_convert_low_i32x4(i32x4(i32::MIN, i32::MAX, 3, 4)),
        f64x2(f64::from(i32::MIN), f64::from(i32::MAX)),
    );
    compare_bytes(f64x2_convert_low_u32x4(u32x4(1, 2, 3, 4)), f64x2(1., 2.));
    compare_bytes(
        f64x2_convert_low_u32x4(u32x4(u32::MIN, u32::MAX, 3, 4)),
        f64x2(f64::from(u32::MIN), f64::from(u32::MAX)),
    );

    // f64 -> i32/u32 saturating truncation writes the two converted lanes
    // and zeroes the upper two lanes of the result.
    compare_bytes(
        i32x4_trunc_sat_f64x2_zero(f64x2(1., f64::NEG_INFINITY)),
        i32x4(1, i32::MIN, 0, 0),
    );
    compare_bytes(
        i32x4_trunc_sat_f64x2_zero(f64x2(f64::NAN, f64::INFINITY)),
        i32x4(0, i32::MAX, 0, 0),
    );
    compare_bytes(
        u32x4_trunc_sat_f64x2_zero(f64x2(1., f64::NEG_INFINITY)),
        u32x4(1, 0, 0, 0),
    );
    compare_bytes(
        u32x4_trunc_sat_f64x2_zero(f64x2(f64::NAN, f64::INFINITY)),
        u32x4(0, u32::MAX, 0, 0),
    );
}
5832
#[test]
fn test_popcnt() {
    unsafe {
        // Splat every possible byte value and check each lane against the
        // scalar `count_ones`.
        for i in 0..=255 {
            compare_bytes(
                i8x16_popcnt(u8x16_splat(i)),
                u8x16_splat(i.count_ones() as u8),
            )
        }

        // Also check vectors with distinct per-lane values.
        let vectors = [
            [0u8, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
            [
                100, 200, 50, 0, 10, 7, 38, 185, 192, 3, 34, 85, 93, 7, 31, 99,
            ],
        ];

        for vector in vectors.iter() {
            // Expected vector built lane-by-lane from the scalar popcount.
            compare_bytes(
                i8x16_popcnt(transmute(*vector)),
                i8x16(
                    vector[0].count_ones() as i8,
                    vector[1].count_ones() as i8,
                    vector[2].count_ones() as i8,
                    vector[3].count_ones() as i8,
                    vector[4].count_ones() as i8,
                    vector[5].count_ones() as i8,
                    vector[6].count_ones() as i8,
                    vector[7].count_ones() as i8,
                    vector[8].count_ones() as i8,
                    vector[9].count_ones() as i8,
                    vector[10].count_ones() as i8,
                    vector[11].count_ones() as i8,
                    vector[12].count_ones() as i8,
                    vector[13].count_ones() as i8,
                    vector[14].count_ones() as i8,
                    vector[15].count_ones() as i8,
                ),
            )
        }
    }
}
5875
#[test]
fn test_promote_demote() {
    // Lane pairs covering normals, NaN, infinities, and signed zero.
    let cases = [
        [1., 2.],
        [f64::NAN, f64::INFINITY],
        [100., 201.],
        [0., -0.],
        [f64::NEG_INFINITY, 0.],
    ];

    for [x, y] in cases {
        // Demote narrows each f64 lane to f32 and zeroes the upper lanes.
        compare_bytes(
            f32x4_demote_f64x2_zero(f64x2(x, y)),
            f32x4(x as f32, y as f32, 0., 0.),
        );
        // Promote widens the two low f32 lanes back to f64.
        compare_bytes(
            f64x2_promote_low_f32x4(f32x4(x as f32, y as f32, 0., 0.)),
            f64x2(x, y),
        );
    }
}
5897
#[test]
fn test_extmul() {
    // Exercises every extended-multiply family: the low (resp. high) half of
    // the input lanes is widened to the `to` type and multiplied pairwise;
    // each result lane is checked against the scalar
    // `(a as wide).wrapping_mul(b as wide)`.
    macro_rules! test {
        ($(
            $ctor:ident {
                from: $from:ident,
                to: $to:ident,
                low: $low:ident,
                high: $high:ident,
            } => {
                $(([$($a:tt)*] * [$($b:tt)*]))*
            }
        )*) => ($(
            $(unsafe {
                // The same token lists are reused both as plain arrays (for
                // the scalar reference computation) and as arguments to the
                // `$ctor` vector constructor — hence the `tt` captures.
                let a: [$from; 16 / mem::size_of::<$from>()] = [$($a)*];
                let b: [$from; 16 / mem::size_of::<$from>()] = [$($b)*];
                let low = mem::transmute::<_, [$to; 16 / mem::size_of::<$to>()]>($low($ctor($($a)*), $ctor($($b)*)));
                let high = mem::transmute::<_, [$to; 16 / mem::size_of::<$to>()]>($high($ctor($($a)*), $ctor($($b)*)));

                let half = a.len() / 2;
                for i in 0..half {
                    assert_eq!(
                        (a[i] as $to).wrapping_mul((b[i] as $to)),
                        low[i],
                        "expected {} * {}", a[i] as $to, b[i] as $to,
                    );
                    assert_eq!(
                        (a[half + i] as $to).wrapping_mul((b[half + i] as $to)),
                        high[i],
                        "expected {} * {}", a[half + i] as $to, b[half + i] as $to,
                    );
                }
            })*
        )*)
    }
    // Inputs include zeros, negatives (for signed variants), and the type's
    // MIN/MAX extremes.
    test! {
        i8x16 {
            from: i8,
            to: i16,
            low: i16x8_extmul_low_i8x16,
            high: i16x8_extmul_high_i8x16,
        } => {
            (
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                    *
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
            )
            (
                [-1, -2, 3, 100, 124, -38, 33, 87, 92, 108, 22, 8, -43, -128, 22, 0]
                    *
                [-5, -2, 6, 10, 45, -4, 4, -2, 0, 88, 92, -102, -98, 83, 73, 54]
            )
        }
        u8x16 {
            from: u8,
            to: u16,
            low: u16x8_extmul_low_u8x16,
            high: u16x8_extmul_high_u8x16,
        } => {
            (
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                    *
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
            )
            (
                [1, 2, 3, 100, 124, 38, 33, 87, 92, 198, 22, 8, 43, 128, 22, 0]
                    *
                [5, 200, 6, 10, 45, 248, 4, 2, 0, 2, 92, 102, 234, 83, 73, 54]
            )
        }
        i16x8 {
            from: i16,
            to: i32,
            low: i32x4_extmul_low_i16x8,
            high: i32x4_extmul_high_i16x8,
        } => {
            (
                [0, 0, 0, 0, 0, 0, 0, 0]
                    *
                [0, 0, 0, 0, 0, 0, 0, 0]
            )
            (
                [-1, 0, i16::MAX, 19931, -2259, 64, 200, 87]
                    *
                [1, 1, i16::MIN, 29391, 105, 2, 100, -2]
            )
        }
        u16x8 {
            from: u16,
            to: u32,
            low: u32x4_extmul_low_u16x8,
            high: u32x4_extmul_high_u16x8,
        } => {
            (
                [0, 0, 0, 0, 0, 0, 0, 0]
                    *
                [0, 0, 0, 0, 0, 0, 0, 0]
            )
            (
                [1, 0, u16::MAX, 19931, 2259, 64, 200, 87]
                    *
                [1, 1, 3, 29391, 105, 2, 100, 2]
            )
        }
        i32x4 {
            from: i32,
            to: i64,
            low: i64x2_extmul_low_i32x4,
            high: i64x2_extmul_high_i32x4,
        } => {
            (
                [0, 0, 0, 0]
                    *
                [0, 0, 0, 0]
            )
            (
                [-1, 0, i32::MAX, 19931]
                    *
                [1, 1, i32::MIN, 29391]
            )
            (
                [i32::MAX, 3003183, 3 << 20, 0xffffff]
                    *
                [i32::MAX, i32::MIN, -40042, 300]
            )
        }
        u32x4 {
            from: u32,
            to: u64,
            low: u64x2_extmul_low_u32x4,
            high: u64x2_extmul_high_u32x4,
        } => {
            (
                [0, 0, 0, 0]
                    *
                [0, 0, 0, 0]
            )
            (
                [1, 0, u32::MAX, 19931]
                    *
                [1, 1, 3, 29391]
            )
            (
                [u32::MAX, 3003183, 3 << 20, 0xffffff]
                    *
                [u32::MAX, 3000, 40042, 300]
            )
        }
    }
}
6048
6049 #[test]
6050 fn test_q15mulr_sat_s() {
6051 fn test(a: [i16; 8], b: [i16; 8]) {
6052 let a_v = i16x8(a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7]);
6053 let b_v = i16x8(b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]);
6054 let result = i16x8_q15mulr_sat(a_v, b_v);
6055 let result = unsafe { mem::transmute::<v128, [i16; 8]>(result) };
6056
6057 for (i, (a, b)) in a.iter().zip(&b).enumerate() {
6058 assert_eq!(
6059 result[i],
6060 (((*a as i32) * (*b as i32) + 0x4000) >> 15) as i16
6061 );
6062 }
6063 }
6064
6065 test([0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]);
6066 test([1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1]);
6067 test(
6068 [-1, 100, 2003, -29494, 12, 128, 994, 1],
6069 [-4049, 8494, -10483, 0, 5, 2222, 883, -9],
6070 );
6071 }
6072
    /// Checks the `extadd_pairwise` family of intrinsics: each pair of
    /// adjacent input lanes is widened and summed, halving the lane count
    /// and doubling the lane width.
    #[test]
    fn test_extadd() {
        // For each intrinsic: reinterpret the scalar array `$a` as a v128,
        // run the intrinsic, then compare each result lane against the
        // widened sum of the corresponding adjacent input pair.
        macro_rules! test {
            ($(
                $func:ident {
                    from: $from:ident,
                    to: $to:ident,
                } => {
                    $([$($a:tt)*])*
                }
            )*) => ($(
                $(unsafe {
                    let a: [$from; 16 / mem::size_of::<$from>()] = [$($a)*];
                    let a_v = mem::transmute::<_, v128>(a);
                    let r = mem::transmute::<v128, [$to; 16 / mem::size_of::<$to>()]>($func(a_v));

                    let half = a.len() / 2;
                    for i in 0..half {
                        // Lanes are widened *before* adding, so the scalar
                        // model widens first too; the widened pair sum always
                        // fits in `$to`, `wrapping_add` just makes the
                        // no-overflow intent explicit.
                        assert_eq!(
                            (a[2 * i] as $to).wrapping_add((a[2 * i + 1] as $to)),
                            r[i],
                            "failed {} + {} != {}",
                            a[2 * i] as $to,
                            a[2 * i + 1] as $to,
                            r[i],
                        );
                    }
                })*
            )*)
        }
        // Each arm lists input vectors covering zeros, mixed signs, and
        // extreme lane values for the signed and unsigned variants.
        test! {
            i16x8_extadd_pairwise_i8x16 {
                from: i8,
                to: i16,
            } => {
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                [-1, -2, 3, 100, 124, -38, 33, 87, 92, 108, 22, 8, -43, -128, 22, 0]
                [-5, -2, 6, 10, 45, -4, 4, -2, 0, 88, 92, -102, -98, 83, 73, 54]
            }
            i16x8_extadd_pairwise_u8x16 {
                from: u8,
                to: i16,
            } => {
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                [1, 2, 3, 100, 124, 38, 33, 87, 92, 198, 22, 8, 43, 128, 22, 0]
                [5, 200, 6, 10, 45, 248, 4, 2, 0, 2, 92, 102, 234, 83, 73, 54]
            }
            i32x4_extadd_pairwise_i16x8 {
                from: i16,
                to: i32,
            } => {
                [0, 0, 0, 0, 0, 0, 0, 0]
                [-1, 0, i16::MAX, 19931, -2259, 64, 200, 87]
                [1, 1, i16::MIN, 29391, 105, 2, 100, -2]
            }
            i32x4_extadd_pairwise_u16x8 {
                from: u16,
                to: i32,
            } => {
                [0, 0, 0, 0, 0, 0, 0, 0]
                [1, 0, u16::MAX, 19931, 2259, 64, 200, 87]
                [1, 1, 3, 29391, 105, 2, 100, 2]
            }
        }
    }
6138}