1#![allow(non_camel_case_types)]
7#![allow(unused_imports)]
8
9use crate::{core_arch::simd, intrinsics::simd::*, marker::Sized, mem, ptr};
10
11#[cfg(test)]
12use stdarch_test::assert_instr;
13
types! {
    #![stable(feature = "wasm_simd", since = "1.54.0")]

    /// WASM-specific 128-bit wide SIMD vector type.
    pub struct v128(4 x i32);
}
40
// Generates bit-preserving conversions between `v128` and the internal
// `core_arch::simd` vector types: a `v128::$name()` accessor per target type
// plus a `$ty::v128()` constructor. All of these types are 128 bits wide, so
// each conversion is a plain `transmute`.
macro_rules! conversions {
    ($(($name:ident = $ty:ty))*) => {
        impl v128 {
            $(
                // Reinterpret the 128 bits of `self` as `$ty` (no-op at runtime).
                #[inline(always)]
                pub(crate) fn $name(self) -> $ty {
                    unsafe { mem::transmute(self) }
                }
            )*
        }
        $(
            impl $ty {
                // Reinterpret the 128 bits of `self` as a `v128` (no-op at runtime).
                #[inline(always)]
                pub(crate) const fn v128(self) -> v128 {
                    unsafe { mem::transmute(self) }
                }
            }
        )*
    }
}
61
// Instantiate the conversion helpers for every lane interpretation of `v128`.
conversions! {
    (as_u8x16 = simd::u8x16)
    (as_u16x8 = simd::u16x8)
    (as_u32x4 = simd::u32x4)
    (as_u64x2 = simd::u64x2)
    (as_i8x16 = simd::i8x16)
    (as_i16x8 = simd::i16x8)
    (as_i32x4 = simd::i32x4)
    (as_i64x2 = simd::i64x2)
    (as_f32x4 = simd::f32x4)
    (as_f64x2 = simd::f64x2)
}
74
75#[allow(improper_ctypes)]
76unsafe extern "C" {
77    #[link_name = "llvm.wasm.swizzle"]
78    fn llvm_swizzle(a: simd::i8x16, b: simd::i8x16) -> simd::i8x16;
79
80    #[link_name = "llvm.wasm.bitselect.v16i8"]
81    fn llvm_bitselect(a: simd::i8x16, b: simd::i8x16, c: simd::i8x16) -> simd::i8x16;
82    #[link_name = "llvm.wasm.anytrue.v16i8"]
83    fn llvm_any_true_i8x16(x: simd::i8x16) -> i32;
84
85    #[link_name = "llvm.wasm.alltrue.v16i8"]
86    fn llvm_i8x16_all_true(x: simd::i8x16) -> i32;
87    #[link_name = "llvm.wasm.bitmask.v16i8"]
88    fn llvm_bitmask_i8x16(a: simd::i8x16) -> i32;
89    #[link_name = "llvm.wasm.narrow.signed.v16i8.v8i16"]
90    fn llvm_narrow_i8x16_s(a: simd::i16x8, b: simd::i16x8) -> simd::i8x16;
91    #[link_name = "llvm.wasm.narrow.unsigned.v16i8.v8i16"]
92    fn llvm_narrow_i8x16_u(a: simd::i16x8, b: simd::i16x8) -> simd::i8x16;
93    #[link_name = "llvm.wasm.avgr.unsigned.v16i8"]
94    fn llvm_avgr_u_i8x16(a: simd::i8x16, b: simd::i8x16) -> simd::i8x16;
95
96    #[link_name = "llvm.wasm.extadd.pairwise.signed.v8i16"]
97    fn llvm_i16x8_extadd_pairwise_i8x16_s(x: simd::i8x16) -> simd::i16x8;
98    #[link_name = "llvm.wasm.extadd.pairwise.unsigned.v8i16"]
99    fn llvm_i16x8_extadd_pairwise_i8x16_u(x: simd::i8x16) -> simd::i16x8;
100    #[link_name = "llvm.wasm.q15mulr.sat.signed"]
101    fn llvm_q15mulr(a: simd::i16x8, b: simd::i16x8) -> simd::i16x8;
102    #[link_name = "llvm.wasm.alltrue.v8i16"]
103    fn llvm_i16x8_all_true(x: simd::i16x8) -> i32;
104    #[link_name = "llvm.wasm.bitmask.v8i16"]
105    fn llvm_bitmask_i16x8(a: simd::i16x8) -> i32;
106    #[link_name = "llvm.wasm.narrow.signed.v8i16.v4i32"]
107    fn llvm_narrow_i16x8_s(a: simd::i32x4, b: simd::i32x4) -> simd::i16x8;
108    #[link_name = "llvm.wasm.narrow.unsigned.v8i16.v4i32"]
109    fn llvm_narrow_i16x8_u(a: simd::i32x4, b: simd::i32x4) -> simd::i16x8;
110    #[link_name = "llvm.wasm.avgr.unsigned.v8i16"]
111    fn llvm_avgr_u_i16x8(a: simd::i16x8, b: simd::i16x8) -> simd::i16x8;
112
113    #[link_name = "llvm.wasm.extadd.pairwise.signed.v16i8"]
114    fn llvm_i32x4_extadd_pairwise_i16x8_s(x: simd::i16x8) -> simd::i32x4;
115    #[link_name = "llvm.wasm.extadd.pairwise.unsigned.v16i8"]
116    fn llvm_i32x4_extadd_pairwise_i16x8_u(x: simd::i16x8) -> simd::i32x4;
117    #[link_name = "llvm.wasm.alltrue.v4i32"]
118    fn llvm_i32x4_all_true(x: simd::i32x4) -> i32;
119    #[link_name = "llvm.wasm.bitmask.v4i32"]
120    fn llvm_bitmask_i32x4(a: simd::i32x4) -> i32;
121    #[link_name = "llvm.wasm.dot"]
122    fn llvm_i32x4_dot_i16x8_s(a: simd::i16x8, b: simd::i16x8) -> simd::i32x4;
123
124    #[link_name = "llvm.wasm.alltrue.v2i64"]
125    fn llvm_i64x2_all_true(x: simd::i64x2) -> i32;
126    #[link_name = "llvm.wasm.bitmask.v2i64"]
127    fn llvm_bitmask_i64x2(a: simd::i64x2) -> i32;
128
129    #[link_name = "llvm.nearbyint.v4f32"]
130    fn llvm_f32x4_nearest(x: simd::f32x4) -> simd::f32x4;
131    #[link_name = "llvm.minimum.v4f32"]
132    fn llvm_f32x4_min(x: simd::f32x4, y: simd::f32x4) -> simd::f32x4;
133    #[link_name = "llvm.maximum.v4f32"]
134    fn llvm_f32x4_max(x: simd::f32x4, y: simd::f32x4) -> simd::f32x4;
135
136    #[link_name = "llvm.nearbyint.v2f64"]
137    fn llvm_f64x2_nearest(x: simd::f64x2) -> simd::f64x2;
138    #[link_name = "llvm.minimum.v2f64"]
139    fn llvm_f64x2_min(x: simd::f64x2, y: simd::f64x2) -> simd::f64x2;
140    #[link_name = "llvm.maximum.v2f64"]
141    fn llvm_f64x2_max(x: simd::f64x2, y: simd::f64x2) -> simd::f64x2;
142}
143
/// Helper wrapper for unaligned loads/stores: `repr(packed)` lowers the
/// alignment requirement of `T` to 1 byte, so reads/writes through a
/// `*const Unaligned<T>` are valid for any address.
#[repr(packed)]
#[derive(Copy)]
struct Unaligned<T>(T);
147
148impl<T: Copy> Clone for Unaligned<T> {
149    fn clone(&self) -> Unaligned<T> {
150        *self
151    }
152}
153
/// Loads a `v128` vector from the given heap address.
///
/// The pointer does not need to be 16-byte aligned: the read is performed
/// through the `repr(packed)` `Unaligned` wrapper.
#[inline]
#[cfg_attr(test, assert_instr(v128.load))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_load(m: *const v128) -> v128 {
    (*(m as *const Unaligned<v128>)).0
}
184
/// Loads eight 8-bit integers (unaligned) and sign-extends each to a 16-bit
/// lane of the returned vector.
#[inline]
#[cfg_attr(test, assert_instr(v128.load8x8_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load8x8_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn i16x8_load_extend_i8x8(m: *const i8) -> v128 {
    let m = *(m as *const Unaligned<simd::i8x8>);
    simd_cast::<_, simd::i16x8>(m.0).v128()
}
202
/// Loads eight 8-bit integers (unaligned) and zero-extends each to a 16-bit
/// lane of the returned vector.
#[inline]
#[cfg_attr(test, assert_instr(v128.load8x8_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load8x8_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn i16x8_load_extend_u8x8(m: *const u8) -> v128 {
    let m = *(m as *const Unaligned<simd::u8x8>);
    simd_cast::<_, simd::u16x8>(m.0).v128()
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i16x8_load_extend_u8x8 as u16x8_load_extend_u8x8;
223
/// Loads four 16-bit integers (unaligned) and sign-extends each to a 32-bit
/// lane of the returned vector.
#[inline]
#[cfg_attr(test, assert_instr(v128.load16x4_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load16x4_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn i32x4_load_extend_i16x4(m: *const i16) -> v128 {
    let m = *(m as *const Unaligned<simd::i16x4>);
    simd_cast::<_, simd::i32x4>(m.0).v128()
}
241
/// Loads four 16-bit integers (unaligned) and zero-extends each to a 32-bit
/// lane of the returned vector.
#[inline]
#[cfg_attr(test, assert_instr(v128.load16x4_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load16x4_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn i32x4_load_extend_u16x4(m: *const u16) -> v128 {
    let m = *(m as *const Unaligned<simd::u16x4>);
    simd_cast::<_, simd::u32x4>(m.0).v128()
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i32x4_load_extend_u16x4 as u32x4_load_extend_u16x4;
262
/// Loads two 32-bit integers (unaligned) and sign-extends each to a 64-bit
/// lane of the returned vector.
#[inline]
#[cfg_attr(test, assert_instr(v128.load32x2_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load32x2_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn i64x2_load_extend_i32x2(m: *const i32) -> v128 {
    let m = *(m as *const Unaligned<simd::i32x2>);
    simd_cast::<_, simd::i64x2>(m.0).v128()
}
280
/// Loads two 32-bit integers (unaligned) and zero-extends each to a 64-bit
/// lane of the returned vector.
#[inline]
#[cfg_attr(test, assert_instr(v128.load32x2_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load32x2_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn i64x2_load_extend_u32x2(m: *const u32) -> v128 {
    let m = *(m as *const Unaligned<simd::u32x2>);
    simd_cast::<_, simd::u64x2>(m.0).v128()
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i64x2_load_extend_u32x2 as u64x2_load_extend_u32x2;
301
/// Loads a single byte and replicates it to all 16 lanes of the result.
#[inline]
#[cfg_attr(test, assert_instr(v128.load8_splat))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load8_splat"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_load8_splat(m: *const u8) -> v128 {
    u8x16_splat(*m)
}
322
/// Loads a 16-bit value (unaligned) and replicates it to all 8 lanes.
#[inline]
#[cfg_attr(test, assert_instr(v128.load16_splat))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load16_splat"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_load16_splat(m: *const u16) -> v128 {
    u16x8_splat(ptr::read_unaligned(m))
}
343
/// Loads a 32-bit value (unaligned) and replicates it to all 4 lanes.
#[inline]
#[cfg_attr(test, assert_instr(v128.load32_splat))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load32_splat"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_load32_splat(m: *const u32) -> v128 {
    u32x4_splat(ptr::read_unaligned(m))
}
364
/// Loads a 64-bit value (unaligned) and replicates it to both lanes.
#[inline]
#[cfg_attr(test, assert_instr(v128.load64_splat))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load64_splat"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_load64_splat(m: *const u64) -> v128 {
    u64x2_splat(ptr::read_unaligned(m))
}
385
/// Loads a 32-bit value (unaligned) into lane 0 and zeroes the other lanes.
#[inline]
#[cfg_attr(test, assert_instr(v128.load32_zero))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load32_zero"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_load32_zero(m: *const u32) -> v128 {
    u32x4(ptr::read_unaligned(m), 0, 0, 0)
}
406
/// Loads a 64-bit value (unaligned) into lane 0 and zeroes the other lane.
#[inline]
#[cfg_attr(test, assert_instr(v128.load64_zero))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load64_zero"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_load64_zero(m: *const u64) -> v128 {
    u64x2_replace_lane::<0>(u64x2(0, 0), ptr::read_unaligned(m))
}
427
/// Stores a `v128` vector to the given heap address.
///
/// The pointer does not need to be 16-byte aligned: the write goes through
/// the `repr(packed)` `Unaligned` wrapper.
#[inline]
#[cfg_attr(test, assert_instr(v128.store))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.store"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_store(m: *mut v128, a: v128) {
    *(m as *mut Unaligned<v128>) = Unaligned(a);
}
458
/// Loads one byte and replaces lane `L` of `v` with it; `L < 16` is enforced
/// by the `static_assert!` inside `u8x16_replace_lane`.
#[inline]
#[cfg_attr(test, assert_instr(v128.load8_lane, L = 0))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load8_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_load8_lane<const L: usize>(v: v128, m: *const u8) -> v128 {
    u8x16_replace_lane::<L>(v, *m)
}
478
/// Loads a 16-bit value (unaligned) and replaces lane `L` of `v` with it.
#[inline]
#[cfg_attr(test, assert_instr(v128.load16_lane, L = 0))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load16_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_load16_lane<const L: usize>(v: v128, m: *const u16) -> v128 {
    u16x8_replace_lane::<L>(v, ptr::read_unaligned(m))
}
498
/// Loads a 32-bit value (unaligned) and replaces lane `L` of `v` with it.
#[inline]
#[cfg_attr(test, assert_instr(v128.load32_lane, L = 0))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load32_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_load32_lane<const L: usize>(v: v128, m: *const u32) -> v128 {
    u32x4_replace_lane::<L>(v, ptr::read_unaligned(m))
}
518
/// Loads a 64-bit value (unaligned) and replaces lane `L` of `v` with it.
#[inline]
#[cfg_attr(test, assert_instr(v128.load64_lane, L = 0))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.load64_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_load64_lane<const L: usize>(v: v128, m: *const u64) -> v128 {
    u64x2_replace_lane::<L>(v, ptr::read_unaligned(m))
}
538
/// Stores lane `L` of `v` as a single byte to `m`.
#[inline]
#[cfg_attr(test, assert_instr(v128.store8_lane, L = 0))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.store8_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_store8_lane<const L: usize>(v: v128, m: *mut u8) {
    *m = u8x16_extract_lane::<L>(v);
}
558
/// Stores lane `L` of `v` as a 16-bit value to `m` (unaligned write).
#[inline]
#[cfg_attr(test, assert_instr(v128.store16_lane, L = 0))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.store16_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_store16_lane<const L: usize>(v: v128, m: *mut u16) {
    ptr::write_unaligned(m, u16x8_extract_lane::<L>(v))
}
578
/// Stores lane `L` of `v` as a 32-bit value to `m` (unaligned write).
#[inline]
#[cfg_attr(test, assert_instr(v128.store32_lane, L = 0))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.store32_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_store32_lane<const L: usize>(v: v128, m: *mut u32) {
    ptr::write_unaligned(m, u32x4_extract_lane::<L>(v))
}
598
/// Stores lane `L` of `v` as a 64-bit value to `m` (unaligned write).
#[inline]
#[cfg_attr(test, assert_instr(v128.store64_lane, L = 0))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.store64_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub unsafe fn v128_store64_lane<const L: usize>(v: v128, m: *mut u64) {
    ptr::write_unaligned(m, u64x2_extract_lane::<L>(v))
}
618
/// Materializes a SIMD value from the provided sixteen 8-bit operands.
#[inline]
#[cfg_attr(
    test,
    assert_instr(
        v128.const,
        a0 = 0,
        a1 = 1,
        a2 = 2,
        a3 = 3,
        a4 = 4,
        a5 = 5,
        a6 = 6,
        a7 = 7,
        a8 = 8,
        a9 = 9,
        a10 = 10,
        a11 = 11,
        a12 = 12,
        a13 = 13,
        a14 = 14,
        a15 = 15,
    )
)]
#[doc(alias("v128.const"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
#[rustc_const_stable(feature = "wasm_simd", since = "1.54.0")]
#[target_feature(enable = "simd128")]
pub const fn i8x16(
    a0: i8,
    a1: i8,
    a2: i8,
    a3: i8,
    a4: i8,
    a5: i8,
    a6: i8,
    a7: i8,
    a8: i8,
    a9: i8,
    a10: i8,
    a11: i8,
    a12: i8,
    a13: i8,
    a14: i8,
    a15: i8,
) -> v128 {
    simd::i8x16::new(
        a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15,
    )
    .v128()
}
673
/// Materializes a SIMD value from the provided sixteen unsigned 8-bit operands.
#[inline]
#[doc(alias("v128.const"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
#[rustc_const_stable(feature = "wasm_simd", since = "1.54.0")]
#[target_feature(enable = "simd128")]
pub const fn u8x16(
    a0: u8,
    a1: u8,
    a2: u8,
    a3: u8,
    a4: u8,
    a5: u8,
    a6: u8,
    a7: u8,
    a8: u8,
    a9: u8,
    a10: u8,
    a11: u8,
    a12: u8,
    a13: u8,
    a14: u8,
    a15: u8,
) -> v128 {
    simd::u8x16::new(
        a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15,
    )
    .v128()
}
706
/// Materializes a SIMD value from the provided eight 16-bit operands.
#[inline]
#[cfg_attr(
    test,
    assert_instr(
        v128.const,
        a0 = 0,
        a1 = 1,
        a2 = 2,
        a3 = 3,
        a4 = 4,
        a5 = 5,
        a6 = 6,
        a7 = 7,
    )
)]
#[doc(alias("v128.const"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
#[rustc_const_stable(feature = "wasm_simd", since = "1.54.0")]
#[target_feature(enable = "simd128")]
pub const fn i16x8(a0: i16, a1: i16, a2: i16, a3: i16, a4: i16, a5: i16, a6: i16, a7: i16) -> v128 {
    simd::i16x8::new(a0, a1, a2, a3, a4, a5, a6, a7).v128()
}
733
/// Materializes a SIMD value from the provided eight unsigned 16-bit operands.
#[inline]
#[doc(alias("v128.const"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
#[rustc_const_stable(feature = "wasm_simd", since = "1.54.0")]
#[target_feature(enable = "simd128")]
pub const fn u16x8(a0: u16, a1: u16, a2: u16, a3: u16, a4: u16, a5: u16, a6: u16, a7: u16) -> v128 {
    simd::u16x8::new(a0, a1, a2, a3, a4, a5, a6, a7).v128()
}
746
/// Materializes a SIMD value from the provided four 32-bit operands.
#[inline]
#[cfg_attr(test, assert_instr(v128.const, a0 = 0, a1 = 1, a2 = 2, a3 = 3))]
#[doc(alias("v128.const"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
#[rustc_const_stable(feature = "wasm_simd", since = "1.54.0")]
#[target_feature(enable = "simd128")]
pub const fn i32x4(a0: i32, a1: i32, a2: i32, a3: i32) -> v128 {
    simd::i32x4::new(a0, a1, a2, a3).v128()
}
760
/// Materializes a SIMD value from the provided four unsigned 32-bit operands.
#[inline]
#[doc(alias("v128.const"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
#[rustc_const_stable(feature = "wasm_simd", since = "1.54.0")]
#[target_feature(enable = "simd128")]
pub const fn u32x4(a0: u32, a1: u32, a2: u32, a3: u32) -> v128 {
    simd::u32x4::new(a0, a1, a2, a3).v128()
}
773
/// Materializes a SIMD value from the provided two 64-bit operands.
#[inline]
#[cfg_attr(test, assert_instr(v128.const, a0 = 1, a1 = 2))]
#[doc(alias("v128.const"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
#[rustc_const_stable(feature = "wasm_simd", since = "1.54.0")]
#[target_feature(enable = "simd128")]
pub const fn i64x2(a0: i64, a1: i64) -> v128 {
    simd::i64x2::new(a0, a1).v128()
}
787
/// Materializes a SIMD value from the provided two unsigned 64-bit operands.
#[inline]
#[doc(alias("v128.const"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
#[rustc_const_stable(feature = "wasm_simd", since = "1.54.0")]
#[target_feature(enable = "simd128")]
pub const fn u64x2(a0: u64, a1: u64) -> v128 {
    simd::u64x2::new(a0, a1).v128()
}
800
/// Materializes a SIMD value from the provided four 32-bit float operands.
#[inline]
#[cfg_attr(test, assert_instr(v128.const, a0 = 0.0, a1 = 1.0, a2 = 2.0, a3 = 3.0))]
#[doc(alias("v128.const"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
#[rustc_const_stable(feature = "wasm_simd_const", since = "1.56.0")]
#[target_feature(enable = "simd128")]
pub const fn f32x4(a0: f32, a1: f32, a2: f32, a3: f32) -> v128 {
    simd::f32x4::new(a0, a1, a2, a3).v128()
}
814
/// Materializes a SIMD value from the provided two 64-bit float operands.
#[inline]
#[cfg_attr(test, assert_instr(v128.const, a0 = 0.0, a1 = 1.0))]
#[doc(alias("v128.const"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
#[rustc_const_stable(feature = "wasm_simd_const", since = "1.56.0")]
#[target_feature(enable = "simd128")]
pub const fn f64x2(a0: f64, a1: f64) -> v128 {
    simd::f64x2::new(a0, a1).v128()
}
828
/// Returns a new vector whose 16 byte lanes are selected from the 32 byte
/// lanes of the concatenation of `a` (indices 0..16) and `b` (indices
/// 16..32); each index must be below 32 (compile-time checked).
#[inline]
#[cfg_attr(test,
    assert_instr(
        i8x16.shuffle,
        I0 = 0,
        I1 = 2,
        I2 = 4,
        I3 = 6,
        I4 = 8,
        I5 = 10,
        I6 = 12,
        I7 = 14,
        I8 = 16,
        I9 = 18,
        I10 = 20,
        I11 = 22,
        I12 = 24,
        I13 = 26,
        I14 = 28,
        I15 = 30,
    )
)]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.shuffle"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_shuffle<
    const I0: usize,
    const I1: usize,
    const I2: usize,
    const I3: usize,
    const I4: usize,
    const I5: usize,
    const I6: usize,
    const I7: usize,
    const I8: usize,
    const I9: usize,
    const I10: usize,
    const I11: usize,
    const I12: usize,
    const I13: usize,
    const I14: usize,
    const I15: usize,
>(
    a: v128,
    b: v128,
) -> v128 {
    static_assert!(I0 < 32);
    static_assert!(I1 < 32);
    static_assert!(I2 < 32);
    static_assert!(I3 < 32);
    static_assert!(I4 < 32);
    static_assert!(I5 < 32);
    static_assert!(I6 < 32);
    static_assert!(I7 < 32);
    static_assert!(I8 < 32);
    static_assert!(I9 < 32);
    static_assert!(I10 < 32);
    static_assert!(I11 < 32);
    static_assert!(I12 < 32);
    static_assert!(I13 < 32);
    static_assert!(I14 < 32);
    static_assert!(I15 < 32);
    let shuf: simd::u8x16 = unsafe {
        simd_shuffle!(
            a.as_u8x16(),
            b.as_u8x16(),
            [
                I0 as u32, I1 as u32, I2 as u32, I3 as u32, I4 as u32, I5 as u32, I6 as u32,
                I7 as u32, I8 as u32, I9 as u32, I10 as u32, I11 as u32, I12 as u32, I13 as u32,
                I14 as u32, I15 as u32,
            ],
        )
    };
    shuf.v128()
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i8x16_shuffle as u8x16_shuffle;
921
/// Returns a new vector whose 8 lanes of 16 bits are selected from the 16
/// lanes of the concatenation of `a` (indices 0..8) and `b` (indices 8..16);
/// each index must be below 16 (compile-time checked). Still encoded as the
/// single `i8x16.shuffle` wasm instruction.
#[inline]
#[cfg_attr(test,
    assert_instr(
        i8x16.shuffle,
        I0 = 0,
        I1 = 2,
        I2 = 4,
        I3 = 6,
        I4 = 8,
        I5 = 10,
        I6 = 12,
        I7 = 14,
    )
)]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.shuffle"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_shuffle<
    const I0: usize,
    const I1: usize,
    const I2: usize,
    const I3: usize,
    const I4: usize,
    const I5: usize,
    const I6: usize,
    const I7: usize,
>(
    a: v128,
    b: v128,
) -> v128 {
    static_assert!(I0 < 16);
    static_assert!(I1 < 16);
    static_assert!(I2 < 16);
    static_assert!(I3 < 16);
    static_assert!(I4 < 16);
    static_assert!(I5 < 16);
    static_assert!(I6 < 16);
    static_assert!(I7 < 16);
    let shuf: simd::u16x8 = unsafe {
        simd_shuffle!(
            a.as_u16x8(),
            b.as_u16x8(),
            [
                I0 as u32, I1 as u32, I2 as u32, I3 as u32, I4 as u32, I5 as u32, I6 as u32,
                I7 as u32,
            ],
        )
    };
    shuf.v128()
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i16x8_shuffle as u16x8_shuffle;
982
/// Returns a new vector whose 4 lanes of 32 bits are selected from the 8
/// lanes of the concatenation of `a` (indices 0..4) and `b` (indices 4..8);
/// each index must be below 8 (compile-time checked).
#[inline]
#[cfg_attr(test, assert_instr(i8x16.shuffle, I0 = 0, I1 = 2, I2 = 4, I3 = 6))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.shuffle"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_shuffle<const I0: usize, const I1: usize, const I2: usize, const I3: usize>(
    a: v128,
    b: v128,
) -> v128 {
    static_assert!(I0 < 8);
    static_assert!(I1 < 8);
    static_assert!(I2 < 8);
    static_assert!(I3 < 8);
    let shuf: simd::u32x4 = unsafe {
        simd_shuffle!(
            a.as_u32x4(),
            b.as_u32x4(),
            [I0 as u32, I1 as u32, I2 as u32, I3 as u32],
        )
    };
    shuf.v128()
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i32x4_shuffle as u32x4_shuffle;
1015
/// Returns a new vector whose 2 lanes of 64 bits are selected from the 4
/// lanes of the concatenation of `a` (indices 0..2) and `b` (indices 2..4);
/// each index must be below 4 (compile-time checked).
#[inline]
#[cfg_attr(test, assert_instr(i8x16.shuffle, I0 = 0, I1 = 2))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.shuffle"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_shuffle<const I0: usize, const I1: usize>(a: v128, b: v128) -> v128 {
    static_assert!(I0 < 4);
    static_assert!(I1 < 4);
    let shuf: simd::u64x2 =
        unsafe { simd_shuffle!(a.as_u64x2(), b.as_u64x2(), [I0 as u32, I1 as u32]) };
    shuf.v128()
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i64x2_shuffle as u64x2_shuffle;
1038
/// Extracts lane `N` of `a` as a signed 8-bit integer (`N < 16`, checked at
/// compile time).
#[inline]
#[cfg_attr(test, assert_instr(i8x16.extract_lane_s, N = 3))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.extract_lane_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_extract_lane<const N: usize>(a: v128) -> i8 {
    static_assert!(N < 16);
    unsafe { simd_extract!(a.as_i8x16(), N as u32) }
}
1052
/// Extracts lane `N` of `a` as an unsigned 8-bit integer (`N < 16`, checked
/// at compile time).
#[inline]
#[cfg_attr(test, assert_instr(i8x16.extract_lane_u, N = 3))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.extract_lane_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u8x16_extract_lane<const N: usize>(a: v128) -> u8 {
    static_assert!(N < 16);
    unsafe { simd_extract!(a.as_u8x16(), N as u32) }
}
1066
/// Returns `a` with lane `N` replaced by `val` (`N < 16`, checked at compile
/// time).
#[inline]
#[cfg_attr(test, assert_instr(i8x16.replace_lane, N = 2))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.replace_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_replace_lane<const N: usize>(a: v128, val: i8) -> v128 {
    static_assert!(N < 16);
    unsafe { simd_insert!(a.as_i8x16(), N as u32, val).v128() }
}
1080
/// Returns `a` with lane `N` replaced by `val` (`N < 16`, checked at compile
/// time).
#[inline]
#[cfg_attr(test, assert_instr(i8x16.replace_lane, N = 2))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.replace_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u8x16_replace_lane<const N: usize>(a: v128, val: u8) -> v128 {
    static_assert!(N < 16);
    unsafe { simd_insert!(a.as_u8x16(), N as u32, val).v128() }
}
1094
/// Extracts lane `N` of `a` as a signed 16-bit integer (`N < 8`, checked at
/// compile time).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.extract_lane_s, N = 2))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.extract_lane_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_extract_lane<const N: usize>(a: v128) -> i16 {
    static_assert!(N < 8);
    unsafe { simd_extract!(a.as_i16x8(), N as u32) }
}
1108
/// Extracts lane `N` of `a` as an unsigned 16-bit integer (`N < 8`, checked
/// at compile time).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.extract_lane_u, N = 2))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.extract_lane_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u16x8_extract_lane<const N: usize>(a: v128) -> u16 {
    static_assert!(N < 8);
    unsafe { simd_extract!(a.as_u16x8(), N as u32) }
}
1122
/// Returns `a` with lane `N` replaced by `val` (`N < 8`, checked at compile
/// time).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.replace_lane, N = 2))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.replace_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_replace_lane<const N: usize>(a: v128, val: i16) -> v128 {
    static_assert!(N < 8);
    unsafe { simd_insert!(a.as_i16x8(), N as u32, val).v128() }
}
1136
/// Returns `a` with lane `N` replaced by `val` (`N < 8`, checked at compile
/// time).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.replace_lane, N = 2))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.replace_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u16x8_replace_lane<const N: usize>(a: v128, val: u16) -> v128 {
    static_assert!(N < 8);
    unsafe { simd_insert!(a.as_u16x8(), N as u32, val).v128() }
}
1150
/// Extracts lane `N` of `a` as a 32-bit integer (`N < 4`, checked at compile
/// time).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.extract_lane, N = 2))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.extract_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_extract_lane<const N: usize>(a: v128) -> i32 {
    static_assert!(N < 4);
    unsafe { simd_extract!(a.as_i32x4(), N as u32) }
}
1164
/// Extracts lane `N` of `a` as an unsigned 32-bit integer; a bit-preserving
/// reinterpretation of the signed extract (same instruction).
#[inline]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.extract_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u32x4_extract_lane<const N: usize>(a: v128) -> u32 {
    i32x4_extract_lane::<N>(a) as u32
}
1176
/// Returns `a` with lane `N` replaced by `val` (`N < 4`, checked at compile
/// time).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.replace_lane, N = 2))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.replace_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_replace_lane<const N: usize>(a: v128, val: i32) -> v128 {
    static_assert!(N < 4);
    unsafe { simd_insert!(a.as_i32x4(), N as u32, val).v128() }
}
1190
/// Returns `a` with lane `N` replaced by `val`; a bit-preserving wrapper over
/// the signed replace (same instruction).
#[inline]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.replace_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u32x4_replace_lane<const N: usize>(a: v128, val: u32) -> v128 {
    i32x4_replace_lane::<N>(a, val as i32)
}
1202
/// Extracts lane `N` of `a` as a 64-bit integer (`N < 2`, checked at compile
/// time).
#[inline]
#[cfg_attr(test, assert_instr(i64x2.extract_lane, N = 1))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.extract_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_extract_lane<const N: usize>(a: v128) -> i64 {
    static_assert!(N < 2);
    unsafe { simd_extract!(a.as_i64x2(), N as u32) }
}
1216
/// Extracts lane `N` of `a` as an unsigned 64-bit integer; a bit-preserving
/// reinterpretation of the signed extract (same instruction).
#[inline]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.extract_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u64x2_extract_lane<const N: usize>(a: v128) -> u64 {
    i64x2_extract_lane::<N>(a) as u64
}
1228
/// Returns `a` with lane `N` replaced by `val` (`N < 2`, checked at compile
/// time).
#[inline]
#[cfg_attr(test, assert_instr(i64x2.replace_lane, N = 0))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.replace_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_replace_lane<const N: usize>(a: v128, val: i64) -> v128 {
    static_assert!(N < 2);
    unsafe { simd_insert!(a.as_i64x2(), N as u32, val).v128() }
}
1242
/// Returns `a` with lane `N` replaced by `val`; a bit-preserving wrapper over
/// the signed replace (same instruction).
#[inline]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.replace_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u64x2_replace_lane<const N: usize>(a: v128, val: u64) -> v128 {
    i64x2_replace_lane::<N>(a, val as i64)
}
1254
/// Extracts lane `N` of `a` as a 32-bit float (`N < 4`, checked at compile
/// time).
#[inline]
#[cfg_attr(test, assert_instr(f32x4.extract_lane, N = 1))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.extract_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_extract_lane<const N: usize>(a: v128) -> f32 {
    static_assert!(N < 4);
    unsafe { simd_extract!(a.as_f32x4(), N as u32) }
}
1268
/// Returns `a` with lane `N` replaced by `val` (`N < 4`, checked at compile
/// time).
#[inline]
#[cfg_attr(test, assert_instr(f32x4.replace_lane, N = 1))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.replace_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_replace_lane<const N: usize>(a: v128, val: f32) -> v128 {
    static_assert!(N < 4);
    unsafe { simd_insert!(a.as_f32x4(), N as u32, val).v128() }
}
1282
/// Extracts lane `N` of `a` as a 64-bit float (`N < 2`, checked at compile
/// time).
#[inline]
#[cfg_attr(test, assert_instr(f64x2.extract_lane, N = 1))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.extract_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_extract_lane<const N: usize>(a: v128) -> f64 {
    static_assert!(N < 2);
    unsafe { simd_extract!(a.as_f64x2(), N as u32) }
}
1296
/// Returns `a` with lane `N` replaced by `val` (`N < 2`, checked at compile
/// time).
#[inline]
#[cfg_attr(test, assert_instr(f64x2.replace_lane, N = 1))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.replace_lane"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_replace_lane<const N: usize>(a: v128, val: f64) -> v128 {
    static_assert!(N < 2);
    unsafe { simd_insert!(a.as_f64x2(), N as u32, val).v128() }
}
1310
/// Selects 8-bit lanes of `a` using the byte indices in `s` (delegates to the
/// `llvm.wasm.swizzle` intrinsic, which defines out-of-range index behavior).
#[inline]
#[cfg_attr(test, assert_instr(i8x16.swizzle))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.swizzle"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_swizzle(a: v128, s: v128) -> v128 {
    unsafe { llvm_swizzle(a.as_i8x16(), s.as_i8x16()).v128() }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i8x16_swizzle as u8x16_swizzle;
1327
/// Creates a vector with `a` replicated to all 16 lanes.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.splat))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.splat"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_splat(a: i8) -> v128 {
    simd::i8x16::splat(a).v128()
}
1339
/// Creates a vector with `a` replicated to all 16 lanes.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.splat))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.splat"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u8x16_splat(a: u8) -> v128 {
    simd::u8x16::splat(a).v128()
}
1351
/// Creates a vector with `a` replicated to all 8 lanes.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.splat))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.splat"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_splat(a: i16) -> v128 {
    simd::i16x8::splat(a).v128()
}
1363
/// Creates a vector with `a` replicated to all 8 lanes.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.splat))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.splat"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u16x8_splat(a: u16) -> v128 {
    simd::u16x8::splat(a).v128()
}
1375
/// Creates a vector with `a` replicated to all 4 lanes.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.splat))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.splat"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_splat(a: i32) -> v128 {
    simd::i32x4::splat(a).v128()
}
1387
/// Creates a vector with `a` replicated to all 4 lanes; bit-preserving
/// wrapper over `i32x4_splat` (same instruction).
#[inline]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.splat"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u32x4_splat(a: u32) -> v128 {
    i32x4_splat(a as i32)
}
1398
/// Creates a vector with `a` replicated to both lanes.
#[inline]
#[cfg_attr(test, assert_instr(i64x2.splat))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.splat"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_splat(a: i64) -> v128 {
    simd::i64x2::splat(a).v128()
}
1410
1411#[inline]
1415#[target_feature(enable = "simd128")]
1416#[doc(alias("u64x2.splat"))]
1417#[stable(feature = "wasm_simd", since = "1.54.0")]
1418pub fn u64x2_splat(a: u64) -> v128 {
1419    i64x2_splat(a as i64)
1420}
1421
/// Creates a vector with identical lanes.
///
/// Constructs a vector with `a` replicated to all 4 lanes.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.splat))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.splat"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_splat(a: f32) -> v128 {
    simd::f32x4::splat(a).v128()
}

/// Creates a vector with identical lanes.
///
/// Constructs a vector with `a` replicated to all 2 lanes.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.splat))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.splat"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_splat(a: f64) -> v128 {
    simd::f64x2::splat(a).v128()
}
1445
/// Compares two 128-bit vectors as if they were two vectors of 16 eight-bit
/// integers, returning a lane-wise `a == b` mask (all ones where equal, all
/// zeros otherwise).
#[inline]
#[cfg_attr(test, assert_instr(i8x16.eq))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.eq"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_eq(a: v128, b: v128) -> v128 {
    unsafe { simd_eq::<_, simd::i8x16>(a.as_i8x16(), b.as_i8x16()).v128() }
}

/// Compares two 128-bit vectors as if they were two vectors of 16 eight-bit
/// integers, returning a lane-wise `a != b` mask (all ones where not equal,
/// all zeros otherwise).
#[inline]
#[cfg_attr(test, assert_instr(i8x16.ne))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.ne"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_ne(a: v128, b: v128) -> v128 {
    unsafe { simd_ne::<_, simd::i8x16>(a.as_i8x16(), b.as_i8x16()).v128() }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i8x16_eq as u8x16_eq;
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i8x16_ne as u8x16_ne;

/// Compares lanes of two vectors of 16 eight-bit signed integers, returning a
/// lane-wise `a < b` mask.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.lt_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.lt_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_lt(a: v128, b: v128) -> v128 {
    unsafe { simd_lt::<_, simd::i8x16>(a.as_i8x16(), b.as_i8x16()).v128() }
}

/// Compares lanes of two vectors of 16 eight-bit unsigned integers, returning
/// a lane-wise `a < b` mask.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.lt_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.lt_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u8x16_lt(a: v128, b: v128) -> v128 {
    unsafe { simd_lt::<_, simd::i8x16>(a.as_u8x16(), b.as_u8x16()).v128() }
}

/// Compares lanes of two vectors of 16 eight-bit signed integers, returning a
/// lane-wise `a > b` mask.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.gt_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.gt_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_gt(a: v128, b: v128) -> v128 {
    unsafe { simd_gt::<_, simd::i8x16>(a.as_i8x16(), b.as_i8x16()).v128() }
}

/// Compares lanes of two vectors of 16 eight-bit unsigned integers, returning
/// a lane-wise `a > b` mask.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.gt_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.gt_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u8x16_gt(a: v128, b: v128) -> v128 {
    unsafe { simd_gt::<_, simd::i8x16>(a.as_u8x16(), b.as_u8x16()).v128() }
}

/// Compares lanes of two vectors of 16 eight-bit signed integers, returning a
/// lane-wise `a <= b` mask.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.le_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.le_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_le(a: v128, b: v128) -> v128 {
    unsafe { simd_le::<_, simd::i8x16>(a.as_i8x16(), b.as_i8x16()).v128() }
}

/// Compares lanes of two vectors of 16 eight-bit unsigned integers, returning
/// a lane-wise `a <= b` mask.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.le_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.le_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u8x16_le(a: v128, b: v128) -> v128 {
    unsafe { simd_le::<_, simd::i8x16>(a.as_u8x16(), b.as_u8x16()).v128() }
}

/// Compares lanes of two vectors of 16 eight-bit signed integers, returning a
/// lane-wise `a >= b` mask.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.ge_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.ge_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_ge(a: v128, b: v128) -> v128 {
    unsafe { simd_ge::<_, simd::i8x16>(a.as_i8x16(), b.as_i8x16()).v128() }
}

/// Compares lanes of two vectors of 16 eight-bit unsigned integers, returning
/// a lane-wise `a >= b` mask.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.ge_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.ge_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u8x16_ge(a: v128, b: v128) -> v128 {
    unsafe { simd_ge::<_, simd::i8x16>(a.as_u8x16(), b.as_u8x16()).v128() }
}
1590
/// Compares two 128-bit vectors as if they were two vectors of 8 sixteen-bit
/// integers, returning a lane-wise `a == b` mask (all ones where equal, all
/// zeros otherwise).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.eq))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.eq"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_eq(a: v128, b: v128) -> v128 {
    unsafe { simd_eq::<_, simd::i16x8>(a.as_i16x8(), b.as_i16x8()).v128() }
}

/// Compares two 128-bit vectors as if they were two vectors of 8 sixteen-bit
/// integers, returning a lane-wise `a != b` mask.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.ne))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.ne"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_ne(a: v128, b: v128) -> v128 {
    unsafe { simd_ne::<_, simd::i16x8>(a.as_i16x8(), b.as_i16x8()).v128() }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i16x8_eq as u16x8_eq;
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i16x8_ne as u16x8_ne;

/// Compares lanes of two vectors of 8 sixteen-bit signed integers, returning
/// a lane-wise `a < b` mask.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.lt_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.lt_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_lt(a: v128, b: v128) -> v128 {
    unsafe { simd_lt::<_, simd::i16x8>(a.as_i16x8(), b.as_i16x8()).v128() }
}

/// Compares lanes of two vectors of 8 sixteen-bit unsigned integers,
/// returning a lane-wise `a < b` mask.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.lt_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.lt_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u16x8_lt(a: v128, b: v128) -> v128 {
    unsafe { simd_lt::<_, simd::i16x8>(a.as_u16x8(), b.as_u16x8()).v128() }
}

/// Compares lanes of two vectors of 8 sixteen-bit signed integers, returning
/// a lane-wise `a > b` mask.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.gt_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.gt_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_gt(a: v128, b: v128) -> v128 {
    unsafe { simd_gt::<_, simd::i16x8>(a.as_i16x8(), b.as_i16x8()).v128() }
}

/// Compares lanes of two vectors of 8 sixteen-bit unsigned integers,
/// returning a lane-wise `a > b` mask.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.gt_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.gt_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u16x8_gt(a: v128, b: v128) -> v128 {
    unsafe { simd_gt::<_, simd::i16x8>(a.as_u16x8(), b.as_u16x8()).v128() }
}

/// Compares lanes of two vectors of 8 sixteen-bit signed integers, returning
/// a lane-wise `a <= b` mask.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.le_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.le_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_le(a: v128, b: v128) -> v128 {
    unsafe { simd_le::<_, simd::i16x8>(a.as_i16x8(), b.as_i16x8()).v128() }
}

/// Compares lanes of two vectors of 8 sixteen-bit unsigned integers,
/// returning a lane-wise `a <= b` mask.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.le_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.le_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u16x8_le(a: v128, b: v128) -> v128 {
    unsafe { simd_le::<_, simd::i16x8>(a.as_u16x8(), b.as_u16x8()).v128() }
}

/// Compares lanes of two vectors of 8 sixteen-bit signed integers, returning
/// a lane-wise `a >= b` mask.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.ge_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.ge_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_ge(a: v128, b: v128) -> v128 {
    unsafe { simd_ge::<_, simd::i16x8>(a.as_i16x8(), b.as_i16x8()).v128() }
}

/// Compares lanes of two vectors of 8 sixteen-bit unsigned integers,
/// returning a lane-wise `a >= b` mask.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.ge_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.ge_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u16x8_ge(a: v128, b: v128) -> v128 {
    unsafe { simd_ge::<_, simd::i16x8>(a.as_u16x8(), b.as_u16x8()).v128() }
}
1735
/// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
/// integers, returning a lane-wise `a == b` mask (all ones where equal, all
/// zeros otherwise).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.eq))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.eq"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_eq(a: v128, b: v128) -> v128 {
    unsafe { simd_eq::<_, simd::i32x4>(a.as_i32x4(), b.as_i32x4()).v128() }
}

/// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
/// integers, returning a lane-wise `a != b` mask.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.ne))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.ne"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_ne(a: v128, b: v128) -> v128 {
    unsafe { simd_ne::<_, simd::i32x4>(a.as_i32x4(), b.as_i32x4()).v128() }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i32x4_eq as u32x4_eq;
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i32x4_ne as u32x4_ne;

/// Compares lanes of two vectors of 4 thirty-two-bit signed integers,
/// returning a lane-wise `a < b` mask.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.lt_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.lt_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_lt(a: v128, b: v128) -> v128 {
    unsafe { simd_lt::<_, simd::i32x4>(a.as_i32x4(), b.as_i32x4()).v128() }
}

/// Compares lanes of two vectors of 4 thirty-two-bit unsigned integers,
/// returning a lane-wise `a < b` mask.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.lt_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.lt_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u32x4_lt(a: v128, b: v128) -> v128 {
    unsafe { simd_lt::<_, simd::i32x4>(a.as_u32x4(), b.as_u32x4()).v128() }
}

/// Compares lanes of two vectors of 4 thirty-two-bit signed integers,
/// returning a lane-wise `a > b` mask.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.gt_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.gt_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_gt(a: v128, b: v128) -> v128 {
    unsafe { simd_gt::<_, simd::i32x4>(a.as_i32x4(), b.as_i32x4()).v128() }
}

/// Compares lanes of two vectors of 4 thirty-two-bit unsigned integers,
/// returning a lane-wise `a > b` mask.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.gt_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.gt_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u32x4_gt(a: v128, b: v128) -> v128 {
    unsafe { simd_gt::<_, simd::i32x4>(a.as_u32x4(), b.as_u32x4()).v128() }
}

/// Compares lanes of two vectors of 4 thirty-two-bit signed integers,
/// returning a lane-wise `a <= b` mask.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.le_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.le_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_le(a: v128, b: v128) -> v128 {
    unsafe { simd_le::<_, simd::i32x4>(a.as_i32x4(), b.as_i32x4()).v128() }
}

/// Compares lanes of two vectors of 4 thirty-two-bit unsigned integers,
/// returning a lane-wise `a <= b` mask.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.le_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.le_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u32x4_le(a: v128, b: v128) -> v128 {
    unsafe { simd_le::<_, simd::i32x4>(a.as_u32x4(), b.as_u32x4()).v128() }
}

/// Compares lanes of two vectors of 4 thirty-two-bit signed integers,
/// returning a lane-wise `a >= b` mask.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.ge_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.ge_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_ge(a: v128, b: v128) -> v128 {
    unsafe { simd_ge::<_, simd::i32x4>(a.as_i32x4(), b.as_i32x4()).v128() }
}

/// Compares lanes of two vectors of 4 thirty-two-bit unsigned integers,
/// returning a lane-wise `a >= b` mask.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.ge_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.ge_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u32x4_ge(a: v128, b: v128) -> v128 {
    unsafe { simd_ge::<_, simd::i32x4>(a.as_u32x4(), b.as_u32x4()).v128() }
}
1880
/// Compares two 128-bit vectors as if they were two vectors of 2 sixty-four-bit
/// integers, returning a lane-wise `a == b` mask (all ones where equal, all
/// zeros otherwise).
#[inline]
#[cfg_attr(test, assert_instr(i64x2.eq))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.eq"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_eq(a: v128, b: v128) -> v128 {
    unsafe { simd_eq::<_, simd::i64x2>(a.as_i64x2(), b.as_i64x2()).v128() }
}

/// Compares two 128-bit vectors as if they were two vectors of 2 sixty-four-bit
/// integers, returning a lane-wise `a != b` mask.
#[inline]
#[cfg_attr(test, assert_instr(i64x2.ne))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.ne"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_ne(a: v128, b: v128) -> v128 {
    unsafe { simd_ne::<_, simd::i64x2>(a.as_i64x2(), b.as_i64x2()).v128() }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i64x2_eq as u64x2_eq;
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i64x2_ne as u64x2_ne;

/// Compares lanes of two vectors of 2 sixty-four-bit signed integers,
/// returning a lane-wise `a < b` mask.
#[inline]
#[cfg_attr(test, assert_instr(i64x2.lt_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.lt_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_lt(a: v128, b: v128) -> v128 {
    unsafe { simd_lt::<_, simd::i64x2>(a.as_i64x2(), b.as_i64x2()).v128() }
}

/// Compares lanes of two vectors of 2 sixty-four-bit signed integers,
/// returning a lane-wise `a > b` mask.
#[inline]
#[cfg_attr(test, assert_instr(i64x2.gt_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.gt_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_gt(a: v128, b: v128) -> v128 {
    unsafe { simd_gt::<_, simd::i64x2>(a.as_i64x2(), b.as_i64x2()).v128() }
}

/// Compares lanes of two vectors of 2 sixty-four-bit signed integers,
/// returning a lane-wise `a <= b` mask.
#[inline]
#[cfg_attr(test, assert_instr(i64x2.le_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.le_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_le(a: v128, b: v128) -> v128 {
    unsafe { simd_le::<_, simd::i64x2>(a.as_i64x2(), b.as_i64x2()).v128() }
}

/// Compares lanes of two vectors of 2 sixty-four-bit signed integers,
/// returning a lane-wise `a >= b` mask.
#[inline]
#[cfg_attr(test, assert_instr(i64x2.ge_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.ge_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_ge(a: v128, b: v128) -> v128 {
    unsafe { simd_ge::<_, simd::i64x2>(a.as_i64x2(), b.as_i64x2()).v128() }
}
1969
/// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
/// floating point numbers, returning an integer mask vector with all ones in
/// each lane where `a == b` and all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.eq))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.eq"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_eq(a: v128, b: v128) -> v128 {
    unsafe { simd_eq::<_, simd::i32x4>(a.as_f32x4(), b.as_f32x4()).v128() }
}

/// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
/// floating point numbers, returning a lane-wise `a != b` mask.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.ne))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.ne"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_ne(a: v128, b: v128) -> v128 {
    unsafe { simd_ne::<_, simd::i32x4>(a.as_f32x4(), b.as_f32x4()).v128() }
}

/// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
/// floating point numbers, returning a lane-wise `a < b` mask.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.lt))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.lt"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_lt(a: v128, b: v128) -> v128 {
    unsafe { simd_lt::<_, simd::i32x4>(a.as_f32x4(), b.as_f32x4()).v128() }
}

/// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
/// floating point numbers, returning a lane-wise `a > b` mask.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.gt))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.gt"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_gt(a: v128, b: v128) -> v128 {
    unsafe { simd_gt::<_, simd::i32x4>(a.as_f32x4(), b.as_f32x4()).v128() }
}

/// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
/// floating point numbers, returning a lane-wise `a <= b` mask.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.le))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.le"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_le(a: v128, b: v128) -> v128 {
    unsafe { simd_le::<_, simd::i32x4>(a.as_f32x4(), b.as_f32x4()).v128() }
}

/// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
/// floating point numbers, returning a lane-wise `a >= b` mask.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.ge))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.ge"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_ge(a: v128, b: v128) -> v128 {
    unsafe { simd_ge::<_, simd::i32x4>(a.as_f32x4(), b.as_f32x4()).v128() }
}
2053
/// Compares two 128-bit vectors as if they were two vectors of 2 sixty-four-bit
/// floating point numbers, returning an integer mask vector with all ones in
/// each lane where `a == b` and all zeros otherwise.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.eq))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.eq"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_eq(a: v128, b: v128) -> v128 {
    unsafe { simd_eq::<_, simd::i64x2>(a.as_f64x2(), b.as_f64x2()).v128() }
}

/// Compares two 128-bit vectors as if they were two vectors of 2 sixty-four-bit
/// floating point numbers, returning a lane-wise `a != b` mask.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.ne))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.ne"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_ne(a: v128, b: v128) -> v128 {
    unsafe { simd_ne::<_, simd::i64x2>(a.as_f64x2(), b.as_f64x2()).v128() }
}

/// Compares two 128-bit vectors as if they were two vectors of 2 sixty-four-bit
/// floating point numbers, returning a lane-wise `a < b` mask.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.lt))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.lt"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_lt(a: v128, b: v128) -> v128 {
    unsafe { simd_lt::<_, simd::i64x2>(a.as_f64x2(), b.as_f64x2()).v128() }
}

/// Compares two 128-bit vectors as if they were two vectors of 2 sixty-four-bit
/// floating point numbers, returning a lane-wise `a > b` mask.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.gt))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.gt"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_gt(a: v128, b: v128) -> v128 {
    unsafe { simd_gt::<_, simd::i64x2>(a.as_f64x2(), b.as_f64x2()).v128() }
}

/// Compares two 128-bit vectors as if they were two vectors of 2 sixty-four-bit
/// floating point numbers, returning a lane-wise `a <= b` mask.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.le))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.le"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_le(a: v128, b: v128) -> v128 {
    unsafe { simd_le::<_, simd::i64x2>(a.as_f64x2(), b.as_f64x2()).v128() }
}

/// Compares two 128-bit vectors as if they were two vectors of 2 sixty-four-bit
/// floating point numbers, returning a lane-wise `a >= b` mask.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.ge))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.ge"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_ge(a: v128, b: v128) -> v128 {
    unsafe { simd_ge::<_, simd::i64x2>(a.as_f64x2(), b.as_f64x2()).v128() }
}
2137
/// Flips each bit of the 128-bit input vector.
#[inline]
#[cfg_attr(test, assert_instr(v128.not))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.not"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn v128_not(a: v128) -> v128 {
    // Implemented as XOR against an all-ones vector.
    unsafe { simd_xor(a.as_i64x2(), simd::i64x2::new(!0, !0)).v128() }
}

/// Performs a bitwise and of the two input 128-bit vectors, returning the
/// resulting vector.
#[inline]
#[cfg_attr(test, assert_instr(v128.and))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.and"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn v128_and(a: v128, b: v128) -> v128 {
    unsafe { simd_and(a.as_i64x2(), b.as_i64x2()).v128() }
}

/// Bitwise AND of bits of `a` and the logical inverse of bits of `b`.
///
/// This operation is equivalent to `v128.and(a, v128.not(b))`.
#[inline]
#[cfg_attr(test, assert_instr(v128.andnot))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.andnot"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn v128_andnot(a: v128, b: v128) -> v128 {
    unsafe {
        simd_and(
            a.as_i64x2(),
            // `b ^ -1` flips every bit of `b` (i.e. `!b`).
            simd_xor(b.as_i64x2(), simd::i64x2::new(-1, -1)),
        )
        .v128()
    }
}

/// Performs a bitwise or of the two input 128-bit vectors, returning the
/// resulting vector.
#[inline]
#[cfg_attr(test, assert_instr(v128.or))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.or"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn v128_or(a: v128, b: v128) -> v128 {
    unsafe { simd_or(a.as_i64x2(), b.as_i64x2()).v128() }
}

/// Performs a bitwise xor of the two input 128-bit vectors, returning the
/// resulting vector.
#[inline]
#[cfg_attr(test, assert_instr(v128.xor))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.xor"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn v128_xor(a: v128, b: v128) -> v128 {
    unsafe { simd_xor(a.as_i64x2(), b.as_i64x2()).v128() }
}

/// Use the bitmask in `c` to select bits from `v1` where the bitmask is 1 and
/// bits from `v2` where the bitmask is 0 (per the `v128.bitselect`
/// instruction).
#[inline]
#[cfg_attr(test, assert_instr(v128.bitselect))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.bitselect"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn v128_bitselect(v1: v128, v2: v128, c: v128) -> v128 {
    unsafe { llvm_bitselect(v1.as_i8x16(), v2.as_i8x16(), c.as_i8x16()).v128() }
}

/// Returns `true` if any bit in `a` is set, or `false` if the vector is all
/// zeros (per the `v128.any_true` instruction).
#[inline]
#[cfg_attr(test, assert_instr(v128.any_true))]
#[target_feature(enable = "simd128")]
#[doc(alias("v128.any_true"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn v128_any_true(a: v128) -> bool {
    unsafe { llvm_any_true_i8x16(a.as_i8x16()) != 0 }
}
2218
/// Lane-wise wrapping absolute value of a vector of 16 eight-bit signed
/// integers (note that `i8::MIN` wraps to itself, since `0 - i8::MIN`
/// overflows).
#[inline]
#[cfg_attr(test, assert_instr(i8x16.abs))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.abs"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_abs(a: v128) -> v128 {
    unsafe {
        let a = a.as_i8x16();
        let zero = simd::i8x16::ZERO;
        // Select `0 - a` for negative lanes, `a` otherwise.
        simd_select::<simd::m8x16, simd::i8x16>(simd_lt(a, zero), simd_sub(zero, a), a).v128()
    }
}

/// Negates each lane of a vector of 16 eight-bit signed integers.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.neg))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.neg"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_neg(a: v128) -> v128 {
    // Multiplication by -1 is used so LLVM pattern-matches `i8x16.neg`.
    unsafe { simd_mul(a.as_i8x16(), simd::i8x16::splat(-1)).v128() }
}

/// Counts the number of bits set within each lane of a vector of 16 eight-bit
/// integers.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.popcnt))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.popcnt"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_popcnt(v: v128) -> v128 {
    unsafe { simd_ctpop(v.as_i8x16()).v128() }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i8x16_popcnt as u8x16_popcnt;

/// Returns `true` if all lanes of the vector of 16 eight-bit integers are
/// non-zero, `false` otherwise (per the `i8x16.all_true` instruction).
#[inline]
#[cfg_attr(test, assert_instr(i8x16.all_true))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.all_true"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_all_true(a: v128) -> bool {
    unsafe { llvm_i8x16_all_true(a.as_i8x16()) != 0 }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i8x16_all_true as u8x16_all_true;

/// Extracts the high bit of each of the 16 eight-bit lanes, packing them into
/// a 16-bit scalar mask (per the `i8x16.bitmask` instruction).
#[inline]
#[cfg_attr(test, assert_instr(i8x16.bitmask))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.bitmask"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_bitmask(a: v128) -> u16 {
    unsafe { llvm_bitmask_i8x16(a.as_i8x16()) as u16 }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i8x16_bitmask as u8x16_bitmask;

/// Converts two vectors of 8 sixteen-bit signed integers into a single vector
/// of 16 eight-bit signed integers, saturating values that are out of range
/// (per the `i8x16.narrow_i16x8_s` instruction).
#[inline]
#[cfg_attr(test, assert_instr(i8x16.narrow_i16x8_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.narrow_i16x8_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_narrow_i16x8(a: v128, b: v128) -> v128 {
    unsafe { llvm_narrow_i8x16_s(a.as_i16x8(), b.as_i16x8()).v128() }
}

/// Converts two vectors of 8 sixteen-bit signed integers into a single vector
/// of 16 eight-bit unsigned integers, saturating values that are out of range
/// (per the `i8x16.narrow_i16x8_u` instruction).
#[inline]
#[cfg_attr(test, assert_instr(i8x16.narrow_i16x8_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.narrow_i16x8_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u8x16_narrow_i16x8(a: v128, b: v128) -> v128 {
    unsafe { llvm_narrow_i8x16_u(a.as_i16x8(), b.as_i16x8()).v128() }
}
2310
/// Shifts each lane of a vector of 16 eight-bit integers to the left by `amt`
/// bits (the shift amount is splat-cast to `i8`, so only the low bits of
/// `amt` take effect).
#[inline]
#[cfg_attr(test, assert_instr(i8x16.shl))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.shl"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_shl(a: v128, amt: u32) -> v128 {
    unsafe { simd_shl(a.as_i8x16(), simd::i8x16::splat(amt as i8)).v128() }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i8x16_shl as u8x16_shl;

/// Shifts each lane of a vector of 16 eight-bit signed integers to the right
/// by `amt` bits, sign-extending.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.shr_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.shr_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_shr(a: v128, amt: u32) -> v128 {
    unsafe { simd_shr(a.as_i8x16(), simd::i8x16::splat(amt as i8)).v128() }
}

/// Shifts each lane of a vector of 16 eight-bit unsigned integers to the
/// right by `amt` bits, shifting in zeros.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.shr_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.shr_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u8x16_shr(a: v128, amt: u32) -> v128 {
    unsafe { simd_shr(a.as_u8x16(), simd::u8x16::splat(amt as u8)).v128() }
}

/// Adds two 128-bit vectors as if they were two packed sixteen eight-bit
/// integers (wrapping addition).
#[inline]
#[cfg_attr(test, assert_instr(i8x16.add))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.add"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_add(a: v128, b: v128) -> v128 {
    unsafe { simd_add(a.as_i8x16(), b.as_i8x16()).v128() }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i8x16_add as u8x16_add;
2367
/// Adds two 128-bit vectors as if they were two packed sixteen eight-bit
/// signed integers, saturating on overflow to `i8::MAX` / `i8::MIN`.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.add_sat_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.add_sat_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_add_sat(a: v128, b: v128) -> v128 {
    unsafe { simd_saturating_add(a.as_i8x16(), b.as_i8x16()).v128() }
}

/// Adds two 128-bit vectors as if they were two packed sixteen eight-bit
/// unsigned integers, saturating on overflow to `u8::MAX`.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.add_sat_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.add_sat_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u8x16_add_sat(a: v128, b: v128) -> v128 {
    unsafe { simd_saturating_add(a.as_u8x16(), b.as_u8x16()).v128() }
}

/// Subtracts two 128-bit vectors as if they were two packed sixteen eight-bit
/// integers (wrapping subtraction).
#[inline]
#[cfg_attr(test, assert_instr(i8x16.sub))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.sub"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_sub(a: v128, b: v128) -> v128 {
    unsafe { simd_sub(a.as_i8x16(), b.as_i8x16()).v128() }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i8x16_sub as u8x16_sub;

/// Subtracts two 128-bit vectors as if they were two packed sixteen eight-bit
/// signed integers, saturating on overflow.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.sub_sat_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.sub_sat_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_sub_sat(a: v128, b: v128) -> v128 {
    unsafe { simd_saturating_sub(a.as_i8x16(), b.as_i8x16()).v128() }
}

/// Subtracts two 128-bit vectors as if they were two packed sixteen eight-bit
/// unsigned integers, saturating at zero.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.sub_sat_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.sub_sat_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u8x16_sub_sat(a: v128, b: v128) -> v128 {
    unsafe { simd_saturating_sub(a.as_u8x16(), b.as_u8x16()).v128() }
}

/// Compares lane-wise signed integers, and returns the minimum of each pair.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.min_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.min_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_min(a: v128, b: v128) -> v128 {
    let a = a.as_i8x16();
    let b = b.as_i8x16();
    unsafe { simd_select::<simd::i8x16, _>(simd_lt(a, b), a, b).v128() }
}

/// Compares lane-wise unsigned integers, and returns the minimum of each pair.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.min_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.min_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u8x16_min(a: v128, b: v128) -> v128 {
    let a = a.as_u8x16();
    let b = b.as_u8x16();
    unsafe { simd_select::<simd::i8x16, _>(simd_lt(a, b), a, b).v128() }
}

/// Compares lane-wise signed integers, and returns the maximum of each pair.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.max_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.max_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i8x16_max(a: v128, b: v128) -> v128 {
    let a = a.as_i8x16();
    let b = b.as_i8x16();
    unsafe { simd_select::<simd::i8x16, _>(simd_gt(a, b), a, b).v128() }
}

/// Compares lane-wise unsigned integers, and returns the maximum of each pair.
#[inline]
#[cfg_attr(test, assert_instr(i8x16.max_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.max_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u8x16_max(a: v128, b: v128) -> v128 {
    let a = a.as_u8x16();
    let b = b.as_u8x16();
    unsafe { simd_select::<simd::i8x16, _>(simd_gt(a, b), a, b).v128() }
}

/// Lane-wise rounding average of two vectors of 16 eight-bit unsigned
/// integers (per the `i8x16.avgr_u` instruction).
#[inline]
#[cfg_attr(test, assert_instr(i8x16.avgr_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i8x16.avgr_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u8x16_avgr(a: v128, b: v128) -> v128 {
    unsafe { llvm_avgr_u_i8x16(a.as_i8x16(), b.as_i8x16()).v128() }
}
2486
/// Integer extended pairwise addition producing extended results
/// (twice wider results than the inputs), treating the input as signed
/// eight-bit lanes (per the `i16x8.extadd_pairwise_i8x16_s` instruction).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.extadd_pairwise_i8x16_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.extadd_pairwise_i8x16_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_extadd_pairwise_i8x16(a: v128) -> v128 {
    unsafe { llvm_i16x8_extadd_pairwise_i8x16_s(a.as_i8x16()).v128() }
}

/// Integer extended pairwise addition producing extended results
/// (twice wider results than the inputs), treating the input as unsigned
/// eight-bit lanes (per the `i16x8.extadd_pairwise_i8x16_u` instruction).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.extadd_pairwise_i8x16_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.extadd_pairwise_i8x16_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_extadd_pairwise_u8x16(a: v128) -> v128 {
    unsafe { llvm_i16x8_extadd_pairwise_i8x16_u(a.as_i8x16()).v128() }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i16x8_extadd_pairwise_u8x16 as u16x8_extadd_pairwise_u8x16;
2511
/// Lane-wise wrapping absolute value of a vector of 8 sixteen-bit signed
/// integers (note that `i16::MIN` wraps to itself).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.abs))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.abs"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_abs(a: v128) -> v128 {
    let a = a.as_i16x8();
    let zero = simd::i16x8::ZERO;
    unsafe {
        // Select `0 - a` for negative lanes, `a` otherwise.
        simd_select::<simd::m16x8, simd::i16x8>(simd_lt(a, zero), simd_sub(zero, a), a).v128()
    }
}

/// Negates each lane of a vector of 8 sixteen-bit signed integers.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.neg))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.neg"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_neg(a: v128) -> v128 {
    // Multiplication by -1 is used so LLVM pattern-matches `i16x8.neg`.
    unsafe { simd_mul(a.as_i16x8(), simd::i16x8::splat(-1)).v128() }
}

/// Lane-wise saturating rounding multiplication in Q15 fixed-point format
/// (per the `i16x8.q15mulr_sat_s` instruction).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.q15mulr_sat_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.q15mulr_sat_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_q15mulr_sat(a: v128, b: v128) -> v128 {
    unsafe { llvm_q15mulr(a.as_i16x8(), b.as_i16x8()).v128() }
}

/// Returns `true` if all lanes of the vector of 8 sixteen-bit integers are
/// non-zero, `false` otherwise (per the `i16x8.all_true` instruction).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.all_true))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.all_true"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_all_true(a: v128) -> bool {
    unsafe { llvm_i16x8_all_true(a.as_i16x8()) != 0 }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i16x8_all_true as u16x8_all_true;

/// Extracts the high bit of each of the 8 sixteen-bit lanes, packing them
/// into an 8-bit scalar mask (per the `i16x8.bitmask` instruction).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.bitmask))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.bitmask"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_bitmask(a: v128) -> u8 {
    unsafe { llvm_bitmask_i16x8(a.as_i16x8()) as u8 }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i16x8_bitmask as u16x8_bitmask;
2572
2573#[inline]
2579#[cfg_attr(test, assert_instr(i16x8.narrow_i32x4_s))]
2580#[target_feature(enable = "simd128")]
2581#[doc(alias("i16x8.narrow_i32x4_s"))]
2582#[stable(feature = "wasm_simd", since = "1.54.0")]
2583pub fn i16x8_narrow_i32x4(a: v128, b: v128) -> v128 {
2584    unsafe { llvm_narrow_i16x8_s(a.as_i32x4(), b.as_i32x4()).v128() }
2585}
2586
2587#[inline]
2593#[cfg_attr(test, assert_instr(i16x8.narrow_i32x4_u))]
2594#[target_feature(enable = "simd128")]
2595#[doc(alias("i16x8.narrow_i32x4_u"))]
2596#[stable(feature = "wasm_simd", since = "1.54.0")]
2597pub fn u16x8_narrow_i32x4(a: v128, b: v128) -> v128 {
2598    unsafe { llvm_narrow_i16x8_u(a.as_i32x4(), b.as_i32x4()).v128() }
2599}
2600
2601#[inline]
2604#[cfg_attr(test, assert_instr(i16x8.extend_low_i8x16_s))]
2605#[target_feature(enable = "simd128")]
2606#[doc(alias("i16x8.extend_low_i8x16_s"))]
2607#[stable(feature = "wasm_simd", since = "1.54.0")]
2608pub fn i16x8_extend_low_i8x16(a: v128) -> v128 {
2609    unsafe {
2610        simd_cast::<simd::i8x8, simd::i16x8>(simd_shuffle!(
2611            a.as_i8x16(),
2612            a.as_i8x16(),
2613            [0, 1, 2, 3, 4, 5, 6, 7],
2614        ))
2615        .v128()
2616    }
2617}
2618
/// Converts the high half of the smaller lane vector to a larger lane vector,
/// sign extending each lane.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.extend_high_i8x16_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.extend_high_i8x16_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_extend_high_i8x16(a: v128) -> v128 {
    unsafe {
        // Select the high eight i8 lanes, then widen each to i16 with sign
        // extension.
        simd_cast::<simd::i8x8, simd::i16x8>(simd_shuffle!(
            a.as_i8x16(),
            a.as_i8x16(),
            [8, 9, 10, 11, 12, 13, 14, 15],
        ))
        .v128()
    }
}
2636
/// Converts the low half of the smaller lane vector to a larger lane vector,
/// zero extending each lane.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.extend_low_i8x16_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.extend_low_i8x16_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_extend_low_u8x16(a: v128) -> v128 {
    unsafe {
        // The cast zero-extends because the source lanes are unsigned (u8).
        simd_cast::<simd::u8x8, simd::u16x8>(simd_shuffle!(
            a.as_u8x16(),
            a.as_u8x16(),
            [0, 1, 2, 3, 4, 5, 6, 7],
        ))
        .v128()
    }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i16x8_extend_low_u8x16 as u16x8_extend_low_u8x16;
2657
/// Converts the high half of the smaller lane vector to a larger lane vector,
/// zero extending each lane.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.extend_high_i8x16_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.extend_high_i8x16_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_extend_high_u8x16(a: v128) -> v128 {
    unsafe {
        // The cast zero-extends because the source lanes are unsigned (u8).
        simd_cast::<simd::u8x8, simd::u16x8>(simd_shuffle!(
            a.as_u8x16(),
            a.as_u8x16(),
            [8, 9, 10, 11, 12, 13, 14, 15],
        ))
        .v128()
    }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i16x8_extend_high_u8x16 as u16x8_extend_high_u8x16;
2678
2679#[inline]
2684#[cfg_attr(test, assert_instr(i16x8.shl))]
2685#[target_feature(enable = "simd128")]
2686#[doc(alias("i16x8.shl"))]
2687#[stable(feature = "wasm_simd", since = "1.54.0")]
2688pub fn i16x8_shl(a: v128, amt: u32) -> v128 {
2689    unsafe { simd_shl(a.as_i16x8(), simd::i16x8::splat(amt as i16)).v128() }
2690}
2691
2692#[stable(feature = "wasm_simd", since = "1.54.0")]
2693pub use i16x8_shl as u16x8_shl;
2694
2695#[inline]
2701#[cfg_attr(test, assert_instr(i16x8.shr_s))]
2702#[target_feature(enable = "simd128")]
2703#[doc(alias("i16x8.shr_s"))]
2704#[stable(feature = "wasm_simd", since = "1.54.0")]
2705pub fn i16x8_shr(a: v128, amt: u32) -> v128 {
2706    unsafe { simd_shr(a.as_i16x8(), simd::i16x8::splat(amt as i16)).v128() }
2707}
2708
2709#[inline]
2715#[cfg_attr(test, assert_instr(i16x8.shr_u))]
2716#[target_feature(enable = "simd128")]
2717#[doc(alias("i16x8.shr_u"))]
2718#[stable(feature = "wasm_simd", since = "1.54.0")]
2719pub fn u16x8_shr(a: v128, amt: u32) -> v128 {
2720    unsafe { simd_shr(a.as_u16x8(), simd::u16x8::splat(amt as u16)).v128() }
2721}
2722
/// Adds two 128-bit vectors as if they were two packed eight 16-bit integers,
/// wrapping on overflow.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.add))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.add"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_add(a: v128, b: v128) -> v128 {
    unsafe { simd_add(a.as_i16x8(), b.as_i16x8()).v128() }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i16x8_add as u16x8_add;
2735
/// Adds two 128-bit vectors as if they were two packed eight 16-bit signed
/// integers, saturating on overflow to `i16::MAX` or `i16::MIN`.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.add_sat_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.add_sat_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_add_sat(a: v128, b: v128) -> v128 {
    unsafe { simd_saturating_add(a.as_i16x8(), b.as_i16x8()).v128() }
}
2746
/// Adds two 128-bit vectors as if they were two packed eight 16-bit unsigned
/// integers, saturating on overflow to `u16::MAX`.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.add_sat_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.add_sat_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u16x8_add_sat(a: v128, b: v128) -> v128 {
    unsafe { simd_saturating_add(a.as_u16x8(), b.as_u16x8()).v128() }
}
2757
/// Subtracts two 128-bit vectors as if they were two packed eight 16-bit
/// integers, wrapping on overflow.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.sub))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.sub"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_sub(a: v128, b: v128) -> v128 {
    unsafe { simd_sub(a.as_i16x8(), b.as_i16x8()).v128() }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i16x8_sub as u16x8_sub;
2770
/// Subtracts two 128-bit vectors as if they were two packed eight 16-bit
/// signed integers, saturating on overflow to `i16::MAX` or `i16::MIN`.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.sub_sat_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.sub_sat_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_sub_sat(a: v128, b: v128) -> v128 {
    unsafe { simd_saturating_sub(a.as_i16x8(), b.as_i16x8()).v128() }
}
2781
/// Subtracts two 128-bit vectors as if they were two packed eight 16-bit
/// unsigned integers, saturating on overflow to 0.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.sub_sat_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.sub_sat_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u16x8_sub_sat(a: v128, b: v128) -> v128 {
    unsafe { simd_saturating_sub(a.as_u16x8(), b.as_u16x8()).v128() }
}
2792
/// Multiplies two 128-bit vectors as if they were two packed eight 16-bit
/// integers, keeping the low 16 bits of each product (wrapping on overflow).
#[inline]
#[cfg_attr(test, assert_instr(i16x8.mul))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.mul"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_mul(a: v128, b: v128) -> v128 {
    unsafe { simd_mul(a.as_i16x8(), b.as_i16x8()).v128() }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i16x8_mul as u16x8_mul;
2806
/// Compares lane-wise signed integers, and returns the minimum of
/// each pair.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.min_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.min_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_min(a: v128, b: v128) -> v128 {
    let a = a.as_i16x8();
    let b = b.as_i16x8();
    // Per-lane `a < b ? a : b`; this select pattern lowers to `i16x8.min_s`.
    unsafe { simd_select::<simd::i16x8, _>(simd_lt(a, b), a, b).v128() }
}
2819
/// Compares lane-wise unsigned integers, and returns the minimum of
/// each pair.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.min_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.min_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u16x8_min(a: v128, b: v128) -> v128 {
    let a = a.as_u16x8();
    let b = b.as_u16x8();
    // Unsigned comparison because the operands are u16x8.
    unsafe { simd_select::<simd::i16x8, _>(simd_lt(a, b), a, b).v128() }
}
2832
/// Compares lane-wise signed integers, and returns the maximum of
/// each pair.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.max_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.max_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_max(a: v128, b: v128) -> v128 {
    let a = a.as_i16x8();
    let b = b.as_i16x8();
    // Per-lane `a > b ? a : b`; this select pattern lowers to `i16x8.max_s`.
    unsafe { simd_select::<simd::i16x8, _>(simd_gt(a, b), a, b).v128() }
}
2845
/// Compares lane-wise unsigned integers, and returns the maximum of
/// each pair.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.max_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.max_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u16x8_max(a: v128, b: v128) -> v128 {
    let a = a.as_u16x8();
    let b = b.as_u16x8();
    // Unsigned comparison because the operands are u16x8.
    unsafe { simd_select::<simd::i16x8, _>(simd_gt(a, b), a, b).v128() }
}
2858
/// Lane-wise rounding average of two 128-bit vectors interpreted as eight
/// 16-bit unsigned integers: `(a + b + 1) / 2` per lane.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.avgr_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.avgr_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u16x8_avgr(a: v128, b: v128) -> v128 {
    unsafe { llvm_avgr_u_i16x8(a.as_i16x8(), b.as_i16x8()).v128() }
}
2868
/// Lane-wise integer extended multiplication producing twice wider result than
/// the inputs: sign-extends the low eight i8 lanes of each input to i16, then
/// multiplies. Equivalent to `i16x8_mul(i16x8_extend_low_i8x16(a),
/// i16x8_extend_low_i8x16(b))`, but never overflows.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.extmul_low_i8x16_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.extmul_low_i8x16_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_extmul_low_i8x16(a: v128, b: v128) -> v128 {
    unsafe {
        let lhs = simd_cast::<simd::i8x8, simd::i16x8>(simd_shuffle!(
            a.as_i8x16(),
            a.as_i8x16(),
            [0, 1, 2, 3, 4, 5, 6, 7],
        ));
        let rhs = simd_cast::<simd::i8x8, simd::i16x8>(simd_shuffle!(
            b.as_i8x16(),
            b.as_i8x16(),
            [0, 1, 2, 3, 4, 5, 6, 7],
        ));
        simd_mul(lhs, rhs).v128()
    }
}
2893
/// Lane-wise integer extended multiplication producing twice wider result than
/// the inputs: sign-extends the high eight i8 lanes of each input to i16, then
/// multiplies. Equivalent to `i16x8_mul(i16x8_extend_high_i8x16(a),
/// i16x8_extend_high_i8x16(b))`, but never overflows.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.extmul_high_i8x16_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.extmul_high_i8x16_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_extmul_high_i8x16(a: v128, b: v128) -> v128 {
    unsafe {
        let lhs = simd_cast::<simd::i8x8, simd::i16x8>(simd_shuffle!(
            a.as_i8x16(),
            a.as_i8x16(),
            [8, 9, 10, 11, 12, 13, 14, 15],
        ));
        let rhs = simd_cast::<simd::i8x8, simd::i16x8>(simd_shuffle!(
            b.as_i8x16(),
            b.as_i8x16(),
            [8, 9, 10, 11, 12, 13, 14, 15],
        ));
        simd_mul(lhs, rhs).v128()
    }
}
2918
/// Lane-wise integer extended multiplication producing twice wider result than
/// the inputs: zero-extends the low eight u8 lanes of each input to u16, then
/// multiplies. Equivalent to `i16x8_mul(i16x8_extend_low_u8x16(a),
/// i16x8_extend_low_u8x16(b))`, but never overflows.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.extmul_low_i8x16_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.extmul_low_i8x16_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_extmul_low_u8x16(a: v128, b: v128) -> v128 {
    unsafe {
        let lhs = simd_cast::<simd::u8x8, simd::u16x8>(simd_shuffle!(
            a.as_u8x16(),
            a.as_u8x16(),
            [0, 1, 2, 3, 4, 5, 6, 7],
        ));
        let rhs = simd_cast::<simd::u8x8, simd::u16x8>(simd_shuffle!(
            b.as_u8x16(),
            b.as_u8x16(),
            [0, 1, 2, 3, 4, 5, 6, 7],
        ));
        simd_mul(lhs, rhs).v128()
    }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i16x8_extmul_low_u8x16 as u16x8_extmul_low_u8x16;
2946
/// Lane-wise integer extended multiplication producing twice wider result than
/// the inputs: zero-extends the high eight u8 lanes of each input to u16, then
/// multiplies. Equivalent to `i16x8_mul(i16x8_extend_high_u8x16(a),
/// i16x8_extend_high_u8x16(b))`, but never overflows.
#[inline]
#[cfg_attr(test, assert_instr(i16x8.extmul_high_i8x16_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i16x8.extmul_high_i8x16_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i16x8_extmul_high_u8x16(a: v128, b: v128) -> v128 {
    unsafe {
        let lhs = simd_cast::<simd::u8x8, simd::u16x8>(simd_shuffle!(
            a.as_u8x16(),
            a.as_u8x16(),
            [8, 9, 10, 11, 12, 13, 14, 15],
        ));
        let rhs = simd_cast::<simd::u8x8, simd::u16x8>(simd_shuffle!(
            b.as_u8x16(),
            b.as_u8x16(),
            [8, 9, 10, 11, 12, 13, 14, 15],
        ));
        simd_mul(lhs, rhs).v128()
    }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i16x8_extmul_high_u8x16 as u16x8_extmul_high_u8x16;
2974
/// Integer extended pairwise addition producing extended results: adds
/// adjacent pairs of sign-extended 16-bit lanes, yielding four 32-bit lanes.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.extadd_pairwise_i16x8_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.extadd_pairwise_i16x8_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_extadd_pairwise_i16x8(a: v128) -> v128 {
    unsafe { llvm_i32x4_extadd_pairwise_i16x8_s(a.as_i16x8()).v128() }
}
2985
/// Integer extended pairwise addition producing extended results: adds
/// adjacent pairs of zero-extended 16-bit lanes, yielding four 32-bit lanes.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.extadd_pairwise_i16x8_u))]
#[doc(alias("i32x4.extadd_pairwise_i16x8_u"))]
#[target_feature(enable = "simd128")]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_extadd_pairwise_u16x8(a: v128) -> v128 {
    // The `_u` LLVM intrinsic takes an i16x8 operand; the bit pattern is what
    // matters, so the unsigned reinterpretation happens inside the intrinsic.
    unsafe { llvm_i32x4_extadd_pairwise_i16x8_u(a.as_i16x8()).v128() }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i32x4_extadd_pairwise_u16x8 as u32x4_extadd_pairwise_u16x8;
2999
/// Lane-wise wrapping absolute value of four packed 32-bit signed integers.
/// `i32::MIN` wraps back to `i32::MIN`.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.abs))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.abs"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_abs(a: v128) -> v128 {
    let a = a.as_i32x4();
    let zero = simd::i32x4::ZERO;
    unsafe {
        // Per-lane `a < 0 ? 0 - a : a`; `simd_sub` wraps for `i32::MIN`.
        simd_select::<simd::m32x4, simd::i32x4>(simd_lt(a, zero), simd_sub(zero, a), a).v128()
    }
}
3013
/// Negates four packed 32-bit signed integers, wrapping on overflow
/// (`i32::MIN` stays `i32::MIN`).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.neg))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.neg"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_neg(a: v128) -> v128 {
    // Multiplying by -1 is a wrapping negation and lowers to `i32x4.neg`.
    unsafe { simd_mul(a.as_i32x4(), simd::i32x4::splat(-1)).v128() }
}
3023
/// Returns `true` if all 32-bit lanes are non-zero, `false` otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.all_true))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.all_true"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_all_true(a: v128) -> bool {
    unsafe { llvm_i32x4_all_true(a.as_i32x4()) != 0 }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i32x4_all_true as u32x4_all_true;
3036
/// Extracts the high (sign) bit of each of the four 32-bit lanes, producing a
/// scalar mask with bit `i` set from lane `i`. Only the low 4 bits can be set.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.bitmask))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.bitmask"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_bitmask(a: v128) -> u8 {
    unsafe { llvm_bitmask_i32x4(a.as_i32x4()) as u8 }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i32x4_bitmask as u32x4_bitmask;
3050
/// Converts the low half of the smaller lane vector to a larger lane vector,
/// sign extending each lane.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.extend_low_i16x8_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.extend_low_i16x8_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_extend_low_i16x8(a: v128) -> v128 {
    unsafe {
        // Select the low four i16 lanes, then sign-extend each to i32.
        simd_cast::<simd::i16x4, simd::i32x4>(simd_shuffle!(
            a.as_i16x8(),
            a.as_i16x8(),
            [0, 1, 2, 3]
        ))
        .v128()
    }
}
3068
/// Converts the high half of the smaller lane vector to a larger lane vector,
/// sign extending each lane.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.extend_high_i16x8_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.extend_high_i16x8_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_extend_high_i16x8(a: v128) -> v128 {
    unsafe {
        // Select the high four i16 lanes, then sign-extend each to i32.
        simd_cast::<simd::i16x4, simd::i32x4>(simd_shuffle!(
            a.as_i16x8(),
            a.as_i16x8(),
            [4, 5, 6, 7]
        ))
        .v128()
    }
}
3086
/// Converts the low half of the smaller lane vector to a larger lane vector,
/// zero extending each lane.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.extend_low_i16x8_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.extend_low_i16x8_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_extend_low_u16x8(a: v128) -> v128 {
    unsafe {
        // The cast zero-extends because the source lanes are unsigned (u16).
        simd_cast::<simd::u16x4, simd::u32x4>(simd_shuffle!(
            a.as_u16x8(),
            a.as_u16x8(),
            [0, 1, 2, 3]
        ))
        .v128()
    }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i32x4_extend_low_u16x8 as u32x4_extend_low_u16x8;
3107
/// Converts the high half of the smaller lane vector to a larger lane vector,
/// zero extending each lane.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.extend_high_i16x8_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.extend_high_i16x8_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_extend_high_u16x8(a: v128) -> v128 {
    unsafe {
        // The cast zero-extends because the source lanes are unsigned (u16).
        simd_cast::<simd::u16x4, simd::u32x4>(simd_shuffle!(
            a.as_u16x8(),
            a.as_u16x8(),
            [4, 5, 6, 7]
        ))
        .v128()
    }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i32x4_extend_high_u16x8 as u32x4_extend_high_u16x8;
3128
3129#[inline]
3134#[cfg_attr(test, assert_instr(i32x4.shl))]
3135#[target_feature(enable = "simd128")]
3136#[doc(alias("i32x4.shl"))]
3137#[stable(feature = "wasm_simd", since = "1.54.0")]
3138pub fn i32x4_shl(a: v128, amt: u32) -> v128 {
3139    unsafe { simd_shl(a.as_i32x4(), simd::i32x4::splat(amt as i32)).v128() }
3140}
3141
3142#[stable(feature = "wasm_simd", since = "1.54.0")]
3143pub use i32x4_shl as u32x4_shl;
3144
3145#[inline]
3151#[cfg_attr(test, assert_instr(i32x4.shr_s))]
3152#[target_feature(enable = "simd128")]
3153#[doc(alias("i32x4.shr_s"))]
3154#[stable(feature = "wasm_simd", since = "1.54.0")]
3155pub fn i32x4_shr(a: v128, amt: u32) -> v128 {
3156    unsafe { simd_shr(a.as_i32x4(), simd::i32x4::splat(amt as i32)).v128() }
3157}
3158
3159#[inline]
3165#[cfg_attr(test, assert_instr(i32x4.shr_u))]
3166#[target_feature(enable = "simd128")]
3167#[doc(alias("i32x4.shr_u"))]
3168#[stable(feature = "wasm_simd", since = "1.54.0")]
3169pub fn u32x4_shr(a: v128, amt: u32) -> v128 {
3170    unsafe { simd_shr(a.as_u32x4(), simd::u32x4::splat(amt)).v128() }
3171}
3172
/// Adds two 128-bit vectors as if they were two packed four 32-bit integers,
/// wrapping on overflow.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.add))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.add"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_add(a: v128, b: v128) -> v128 {
    unsafe { simd_add(a.as_i32x4(), b.as_i32x4()).v128() }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i32x4_add as u32x4_add;
3185
/// Subtracts two 128-bit vectors as if they were two packed four 32-bit
/// integers, wrapping on overflow.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.sub))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.sub"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_sub(a: v128, b: v128) -> v128 {
    unsafe { simd_sub(a.as_i32x4(), b.as_i32x4()).v128() }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i32x4_sub as u32x4_sub;
3198
/// Multiplies two 128-bit vectors as if they were two packed four 32-bit
/// integers, keeping the low 32 bits of each product (wrapping on overflow).
#[inline]
#[cfg_attr(test, assert_instr(i32x4.mul))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.mul"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_mul(a: v128, b: v128) -> v128 {
    unsafe { simd_mul(a.as_i32x4(), b.as_i32x4()).v128() }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i32x4_mul as u32x4_mul;
3212
/// Compares lane-wise signed integers, and returns the minimum of
/// each pair.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.min_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.min_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_min(a: v128, b: v128) -> v128 {
    let a = a.as_i32x4();
    let b = b.as_i32x4();
    // Per-lane `a < b ? a : b`; this select pattern lowers to `i32x4.min_s`.
    unsafe { simd_select::<simd::i32x4, _>(simd_lt(a, b), a, b).v128() }
}
3225
/// Compares lane-wise unsigned integers, and returns the minimum of
/// each pair.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.min_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.min_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u32x4_min(a: v128, b: v128) -> v128 {
    let a = a.as_u32x4();
    let b = b.as_u32x4();
    // Unsigned comparison because the operands are u32x4.
    unsafe { simd_select::<simd::i32x4, _>(simd_lt(a, b), a, b).v128() }
}
3238
/// Compares lane-wise signed integers, and returns the maximum of
/// each pair.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.max_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.max_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_max(a: v128, b: v128) -> v128 {
    let a = a.as_i32x4();
    let b = b.as_i32x4();
    // Per-lane `a > b ? a : b`; this select pattern lowers to `i32x4.max_s`.
    unsafe { simd_select::<simd::i32x4, _>(simd_gt(a, b), a, b).v128() }
}
3251
/// Compares lane-wise unsigned integers, and returns the maximum of
/// each pair.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.max_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.max_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u32x4_max(a: v128, b: v128) -> v128 {
    let a = a.as_u32x4();
    let b = b.as_u32x4();
    // Unsigned comparison because the operands are u32x4.
    unsafe { simd_select::<simd::i32x4, _>(simd_gt(a, b), a, b).v128() }
}
3264
/// Multiplies corresponding signed 16-bit lanes of the two inputs and adds
/// adjacent pairs of the 32-bit products, yielding four 32-bit lanes.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.dot_i16x8_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.dot_i16x8_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_dot_i16x8(a: v128, b: v128) -> v128 {
    unsafe { llvm_i32x4_dot_i16x8_s(a.as_i16x8(), b.as_i16x8()).v128() }
}
3275
/// Lane-wise integer extended multiplication producing twice wider result than
/// the inputs: sign-extends the low four i16 lanes of each input to i32, then
/// multiplies. Never overflows.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.extmul_low_i16x8_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.extmul_low_i16x8_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_extmul_low_i16x8(a: v128, b: v128) -> v128 {
    unsafe {
        let lhs = simd_cast::<simd::i16x4, simd::i32x4>(simd_shuffle!(
            a.as_i16x8(),
            a.as_i16x8(),
            [0, 1, 2, 3]
        ));
        let rhs = simd_cast::<simd::i16x4, simd::i32x4>(simd_shuffle!(
            b.as_i16x8(),
            b.as_i16x8(),
            [0, 1, 2, 3]
        ));
        simd_mul(lhs, rhs).v128()
    }
}
3300
/// Lane-wise integer extended multiplication producing twice wider result than
/// the inputs: sign-extends the high four i16 lanes of each input to i32, then
/// multiplies. Never overflows.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.extmul_high_i16x8_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.extmul_high_i16x8_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_extmul_high_i16x8(a: v128, b: v128) -> v128 {
    unsafe {
        let lhs = simd_cast::<simd::i16x4, simd::i32x4>(simd_shuffle!(
            a.as_i16x8(),
            a.as_i16x8(),
            [4, 5, 6, 7]
        ));
        let rhs = simd_cast::<simd::i16x4, simd::i32x4>(simd_shuffle!(
            b.as_i16x8(),
            b.as_i16x8(),
            [4, 5, 6, 7]
        ));
        simd_mul(lhs, rhs).v128()
    }
}
3325
/// Lane-wise integer extended multiplication producing twice wider result than
/// the inputs: zero-extends the low four u16 lanes of each input to u32, then
/// multiplies. Never overflows.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.extmul_low_i16x8_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.extmul_low_i16x8_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_extmul_low_u16x8(a: v128, b: v128) -> v128 {
    unsafe {
        let lhs = simd_cast::<simd::u16x4, simd::u32x4>(simd_shuffle!(
            a.as_u16x8(),
            a.as_u16x8(),
            [0, 1, 2, 3]
        ));
        let rhs = simd_cast::<simd::u16x4, simd::u32x4>(simd_shuffle!(
            b.as_u16x8(),
            b.as_u16x8(),
            [0, 1, 2, 3]
        ));
        simd_mul(lhs, rhs).v128()
    }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i32x4_extmul_low_u16x8 as u32x4_extmul_low_u16x8;
3353
/// Lane-wise integer extended multiplication producing twice wider result than
/// the inputs: zero-extends the high four u16 lanes of each input to u32, then
/// multiplies. Never overflows.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.extmul_high_i16x8_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.extmul_high_i16x8_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_extmul_high_u16x8(a: v128, b: v128) -> v128 {
    unsafe {
        let lhs = simd_cast::<simd::u16x4, simd::u32x4>(simd_shuffle!(
            a.as_u16x8(),
            a.as_u16x8(),
            [4, 5, 6, 7]
        ));
        let rhs = simd_cast::<simd::u16x4, simd::u32x4>(simd_shuffle!(
            b.as_u16x8(),
            b.as_u16x8(),
            [4, 5, 6, 7]
        ));
        simd_mul(lhs, rhs).v128()
    }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i32x4_extmul_high_u16x8 as u32x4_extmul_high_u16x8;
3381
/// Lane-wise wrapping absolute value of two packed 64-bit signed integers.
/// `i64::MIN` wraps back to `i64::MIN`.
#[inline]
#[cfg_attr(test, assert_instr(i64x2.abs))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.abs"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_abs(a: v128) -> v128 {
    let a = a.as_i64x2();
    let zero = simd::i64x2::ZERO;
    unsafe {
        // Per-lane `a < 0 ? 0 - a : a`; `simd_sub` wraps for `i64::MIN`.
        simd_select::<simd::m64x2, simd::i64x2>(simd_lt(a, zero), simd_sub(zero, a), a).v128()
    }
}
3395
/// Negates two packed 64-bit signed integers, wrapping on overflow
/// (`i64::MIN` stays `i64::MIN`).
#[inline]
#[cfg_attr(test, assert_instr(i64x2.neg))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.neg"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_neg(a: v128) -> v128 {
    // Multiplying by -1 is a wrapping negation and lowers to `i64x2.neg`.
    unsafe { simd_mul(a.as_i64x2(), simd::i64x2::splat(-1)).v128() }
}
3405
/// Returns `true` if both 64-bit lanes are non-zero, `false` otherwise.
#[inline]
#[cfg_attr(test, assert_instr(i64x2.all_true))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.all_true"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_all_true(a: v128) -> bool {
    unsafe { llvm_i64x2_all_true(a.as_i64x2()) != 0 }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i64x2_all_true as u64x2_all_true;
3418
/// Extracts the high (sign) bit of each of the two 64-bit lanes, producing a
/// scalar mask with bit `i` set from lane `i`. Only the low 2 bits can be set.
#[inline]
#[cfg_attr(test, assert_instr(i64x2.bitmask))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.bitmask"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_bitmask(a: v128) -> u8 {
    unsafe { llvm_bitmask_i64x2(a.as_i64x2()) as u8 }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i64x2_bitmask as u64x2_bitmask;
3432
/// Converts the low half of the smaller lane vector to a larger lane vector,
/// sign extending each lane.
#[inline]
#[cfg_attr(test, assert_instr(i64x2.extend_low_i32x4_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.extend_low_i32x4_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_extend_low_i32x4(a: v128) -> v128 {
    unsafe {
        // Select the low two i32 lanes, then sign-extend each to i64.
        simd_cast::<simd::i32x2, simd::i64x2>(simd_shuffle!(a.as_i32x4(), a.as_i32x4(), [0, 1]))
            .v128()
    }
}
3446
/// Converts the high half of the smaller lane vector to a larger lane vector,
/// sign extending each lane.
#[inline]
#[cfg_attr(test, assert_instr(i64x2.extend_high_i32x4_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.extend_high_i32x4_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_extend_high_i32x4(a: v128) -> v128 {
    unsafe {
        // Select the high two i32 lanes, then sign-extend each to i64.
        simd_cast::<simd::i32x2, simd::i64x2>(simd_shuffle!(a.as_i32x4(), a.as_i32x4(), [2, 3]))
            .v128()
    }
}
3460
/// Converts the low half of the smaller lane vector to a larger lane vector,
/// zero extending each lane.
#[inline]
#[cfg_attr(test, assert_instr(i64x2.extend_low_i32x4_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.extend_low_i32x4_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_extend_low_u32x4(a: v128) -> v128 {
    unsafe {
        // The cast zero-extends: extension is driven by the unsigned source
        // lane type (u32), not the signed destination (i64).
        simd_cast::<simd::u32x2, simd::i64x2>(simd_shuffle!(a.as_u32x4(), a.as_u32x4(), [0, 1]))
            .v128()
    }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i64x2_extend_low_u32x4 as u64x2_extend_low_u32x4;
3477
/// Converts the high half of the smaller lane vector to a larger lane vector,
/// zero extending each lane.
#[inline]
#[cfg_attr(test, assert_instr(i64x2.extend_high_i32x4_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.extend_high_i32x4_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_extend_high_u32x4(a: v128) -> v128 {
    unsafe {
        // The cast zero-extends: extension is driven by the unsigned source
        // lane type (u32), not the signed destination (i64).
        simd_cast::<simd::u32x2, simd::i64x2>(simd_shuffle!(a.as_u32x4(), a.as_u32x4(), [2, 3]))
            .v128()
    }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i64x2_extend_high_u32x4 as u64x2_extend_high_u32x4;
3494
3495#[inline]
3500#[cfg_attr(test, assert_instr(i64x2.shl))]
3501#[target_feature(enable = "simd128")]
3502#[doc(alias("i64x2.shl"))]
3503#[stable(feature = "wasm_simd", since = "1.54.0")]
3504pub fn i64x2_shl(a: v128, amt: u32) -> v128 {
3505    unsafe { simd_shl(a.as_i64x2(), simd::i64x2::splat(amt as i64)).v128() }
3506}
3507
3508#[stable(feature = "wasm_simd", since = "1.54.0")]
3509pub use i64x2_shl as u64x2_shl;
3510
3511#[inline]
3517#[cfg_attr(test, assert_instr(i64x2.shr_s))]
3518#[target_feature(enable = "simd128")]
3519#[doc(alias("i64x2.shr_s"))]
3520#[stable(feature = "wasm_simd", since = "1.54.0")]
3521pub fn i64x2_shr(a: v128, amt: u32) -> v128 {
3522    unsafe { simd_shr(a.as_i64x2(), simd::i64x2::splat(amt as i64)).v128() }
3523}
3524
3525#[inline]
3531#[cfg_attr(test, assert_instr(i64x2.shr_u))]
3532#[target_feature(enable = "simd128")]
3533#[doc(alias("i64x2.shr_u"))]
3534#[stable(feature = "wasm_simd", since = "1.54.0")]
3535pub fn u64x2_shr(a: v128, amt: u32) -> v128 {
3536    unsafe { simd_shr(a.as_u64x2(), simd::u64x2::splat(amt as u64)).v128() }
3537}
3538
/// Adds two 128-bit vectors as if they were two packed two 64-bit integers,
/// wrapping on overflow.
#[inline]
#[cfg_attr(test, assert_instr(i64x2.add))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.add"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_add(a: v128, b: v128) -> v128 {
    unsafe { simd_add(a.as_i64x2(), b.as_i64x2()).v128() }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i64x2_add as u64x2_add;
3551
/// Subtracts two 128-bit vectors as if they were two packed two 64-bit
/// integers, wrapping on overflow.
#[inline]
#[cfg_attr(test, assert_instr(i64x2.sub))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.sub"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_sub(a: v128, b: v128) -> v128 {
    unsafe { simd_sub(a.as_i64x2(), b.as_i64x2()).v128() }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i64x2_sub as u64x2_sub;
3564
/// Multiplies two 128-bit vectors as if they were two packed two 64-bit
/// integers, keeping the low 64 bits of each product (wrapping on overflow).
#[inline]
#[cfg_attr(test, assert_instr(i64x2.mul))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.mul"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_mul(a: v128, b: v128) -> v128 {
    unsafe { simd_mul(a.as_i64x2(), b.as_i64x2()).v128() }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i64x2_mul as u64x2_mul;
3577
/// Lane-wise integer extended multiplication producing twice wider result than
/// the inputs: sign-extends the low two i32 lanes of each input to i64, then
/// multiplies. Never overflows.
#[inline]
#[cfg_attr(test, assert_instr(i64x2.extmul_low_i32x4_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.extmul_low_i32x4_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_extmul_low_i32x4(a: v128, b: v128) -> v128 {
    unsafe {
        let lhs = simd_cast::<simd::i32x2, simd::i64x2>(simd_shuffle!(
            a.as_i32x4(),
            a.as_i32x4(),
            [0, 1]
        ));
        let rhs = simd_cast::<simd::i32x2, simd::i64x2>(simd_shuffle!(
            b.as_i32x4(),
            b.as_i32x4(),
            [0, 1]
        ));
        simd_mul(lhs, rhs).v128()
    }
}
3602
/// Lane-wise integer extended multiplication producing twice wider result than
/// the inputs: sign-extends the high two i32 lanes of each input to i64, then
/// multiplies. Never overflows.
#[inline]
#[cfg_attr(test, assert_instr(i64x2.extmul_high_i32x4_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.extmul_high_i32x4_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_extmul_high_i32x4(a: v128, b: v128) -> v128 {
    unsafe {
        let lhs = simd_cast::<simd::i32x2, simd::i64x2>(simd_shuffle!(
            a.as_i32x4(),
            a.as_i32x4(),
            [2, 3]
        ));
        let rhs = simd_cast::<simd::i32x2, simd::i64x2>(simd_shuffle!(
            b.as_i32x4(),
            b.as_i32x4(),
            [2, 3]
        ));
        simd_mul(lhs, rhs).v128()
    }
}
3627
/// Lane-wise integer extended multiplication producing twice wider result than
/// the inputs: zero-extends the low two u32 lanes of each input to u64, then
/// multiplies. Never overflows.
#[inline]
#[cfg_attr(test, assert_instr(i64x2.extmul_low_i32x4_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.extmul_low_i32x4_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_extmul_low_u32x4(a: v128, b: v128) -> v128 {
    unsafe {
        let lhs = simd_cast::<simd::u32x2, simd::u64x2>(simd_shuffle!(
            a.as_u32x4(),
            a.as_u32x4(),
            [0, 1]
        ));
        let rhs = simd_cast::<simd::u32x2, simd::u64x2>(simd_shuffle!(
            b.as_u32x4(),
            b.as_u32x4(),
            [0, 1]
        ));
        simd_mul(lhs, rhs).v128()
    }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i64x2_extmul_low_u32x4 as u64x2_extmul_low_u32x4;
3655
/// Lane-wise integer extended multiplication producing twice wider result than
/// the inputs: zero-extends the high two u32 lanes of each input to u64, then
/// multiplies. Never overflows.
#[inline]
#[cfg_attr(test, assert_instr(i64x2.extmul_high_i32x4_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i64x2.extmul_high_i32x4_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i64x2_extmul_high_u32x4(a: v128, b: v128) -> v128 {
    unsafe {
        let lhs = simd_cast::<simd::u32x2, simd::u64x2>(simd_shuffle!(
            a.as_u32x4(),
            a.as_u32x4(),
            [2, 3]
        ));
        let rhs = simd_cast::<simd::u32x2, simd::u64x2>(simd_shuffle!(
            b.as_u32x4(),
            b.as_u32x4(),
            [2, 3]
        ));
        simd_mul(lhs, rhs).v128()
    }
}

#[stable(feature = "wasm_simd", since = "1.54.0")]
pub use i64x2_extmul_high_u32x4 as u64x2_extmul_high_u32x4;
3683
/// Lane-wise rounding to the nearest integral value not smaller than the
/// input.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.ceil))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.ceil"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_ceil(a: v128) -> v128 {
    unsafe { simd_ceil(a.as_f32x4()).v128() }
}
3693
/// Lane-wise rounding to the nearest integral value not greater than the
/// input.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.floor))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.floor"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_floor(a: v128) -> v128 {
    unsafe { simd_floor(a.as_f32x4()).v128() }
}
3703
/// Lane-wise rounding to the nearest integral value with the magnitude not
/// larger than the input (rounding toward zero).
#[inline]
#[cfg_attr(test, assert_instr(f32x4.trunc))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.trunc"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_trunc(a: v128) -> v128 {
    unsafe { simd_trunc(a.as_f32x4()).v128() }
}
3714
/// Lane-wise rounding to the nearest integral value, per the wasm
/// `f32x4.nearest` instruction (ties round to even).
#[inline]
#[cfg_attr(test, assert_instr(f32x4.nearest))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.nearest"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_nearest(a: v128) -> v128 {
    // Goes through the LLVM wasm intrinsic to guarantee the exact instruction.
    unsafe { llvm_f32x4_nearest(a.as_f32x4()).v128() }
}
3725
/// Calculates the absolute value of each lane of a 128-bit vector interpreted
/// as four 32-bit floating point numbers.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.abs))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.abs"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_abs(a: v128) -> v128 {
    unsafe { simd_fabs(a.as_f32x4()).v128() }
}
3736
/// Negates each lane of a 128-bit vector interpreted as four 32-bit floating
/// point numbers.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.neg))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.neg"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_neg(a: v128) -> v128 {
    unsafe { simd_neg(a.as_f32x4()).v128() }
}
3747
/// Calculates the square root of each lane of a 128-bit vector interpreted as
/// four 32-bit floating point numbers.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.sqrt))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.sqrt"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_sqrt(a: v128) -> v128 {
    unsafe { simd_fsqrt(a.as_f32x4()).v128() }
}
3758
/// Lane-wise addition of two 128-bit vectors interpreted as four 32-bit
/// floating point numbers.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.add))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.add"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_add(a: v128, b: v128) -> v128 {
    unsafe { simd_add(a.as_f32x4(), b.as_f32x4()).v128() }
}
3769
/// Lane-wise subtraction of two 128-bit vectors interpreted as four 32-bit
/// floating point numbers.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.sub))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.sub"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_sub(a: v128, b: v128) -> v128 {
    unsafe { simd_sub(a.as_f32x4(), b.as_f32x4()).v128() }
}
3780
/// Lane-wise multiplication of two 128-bit vectors interpreted as four 32-bit
/// floating point numbers.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.mul))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.mul"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_mul(a: v128, b: v128) -> v128 {
    unsafe { simd_mul(a.as_f32x4(), b.as_f32x4()).v128() }
}
3791
/// Lane-wise division of two 128-bit vectors interpreted as four 32-bit
/// floating point numbers.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.div))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.div"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_div(a: v128, b: v128) -> v128 {
    unsafe { simd_div(a.as_f32x4(), b.as_f32x4()).v128() }
}
3802
/// Lane-wise minimum with the semantics of the wasm `f32x4.min` instruction;
/// delegates to the LLVM wasm intrinsic so codegen matches exactly.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.min))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.min"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_min(a: v128, b: v128) -> v128 {
    unsafe { llvm_f32x4_min(a.as_f32x4(), b.as_f32x4()).v128() }
}
3813
/// Lane-wise maximum with the semantics of the wasm `f32x4.max` instruction;
/// delegates to the LLVM wasm intrinsic so codegen matches exactly.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.max))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.max"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_max(a: v128, b: v128) -> v128 {
    unsafe { llvm_f32x4_max(a.as_f32x4(), b.as_f32x4()).v128() }
}
3824
/// Lane-wise pseudo-minimum: `if b < a { b } else { a }`.
///
/// Because an ordered comparison with NaN is false, a NaN in `b` (or in both
/// operands) yields `a` — unlike [`f32x4_min`].
#[inline]
#[cfg_attr(test, assert_instr(f32x4.pmin))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.pmin"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_pmin(a: v128, b: v128) -> v128 {
    unsafe {
        simd_select::<simd::m32x4, simd::f32x4>(
            simd_lt(b.as_f32x4(), a.as_f32x4()),
            b.as_f32x4(),
            a.as_f32x4(),
        )
        .v128()
    }
}
3841
/// Lane-wise pseudo-maximum: `if a < b { b } else { a }`.
///
/// Because an ordered comparison with NaN is false, a NaN in either operand
/// yields `a` — unlike [`f32x4_max`].
#[inline]
#[cfg_attr(test, assert_instr(f32x4.pmax))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.pmax"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_pmax(a: v128, b: v128) -> v128 {
    unsafe {
        simd_select::<simd::m32x4, simd::f32x4>(
            simd_lt(a.as_f32x4(), b.as_f32x4()),
            b.as_f32x4(),
            a.as_f32x4(),
        )
        .v128()
    }
}
3858
/// Lane-wise rounding to the smallest integral value not smaller than the input.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.ceil))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.ceil"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_ceil(a: v128) -> v128 {
    unsafe { simd_ceil(a.as_f64x2()).v128() }
}
3868
/// Lane-wise rounding to the largest integral value not greater than the input.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.floor))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.floor"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_floor(a: v128) -> v128 {
    unsafe { simd_floor(a.as_f64x2()).v128() }
}
3878
/// Lane-wise rounding toward zero to the nearest integral value whose magnitude
/// does not exceed the input.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.trunc))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.trunc"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_trunc(a: v128) -> v128 {
    unsafe { simd_trunc(a.as_f64x2()).v128() }
}
3889
/// Lane-wise rounding to the nearest integral value, per the wasm
/// `f64x2.nearest` instruction (ties round to even).
#[inline]
#[cfg_attr(test, assert_instr(f64x2.nearest))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.nearest"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_nearest(a: v128) -> v128 {
    // Goes through the LLVM wasm intrinsic to guarantee the exact instruction.
    unsafe { llvm_f64x2_nearest(a.as_f64x2()).v128() }
}
3900
/// Calculates the absolute value of each lane of a 128-bit vector interpreted
/// as two 64-bit floating point numbers.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.abs))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.abs"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_abs(a: v128) -> v128 {
    unsafe { simd_fabs(a.as_f64x2()).v128() }
}
3911
/// Negates each lane of a 128-bit vector interpreted as two 64-bit floating
/// point numbers.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.neg))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.neg"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_neg(a: v128) -> v128 {
    unsafe { simd_neg(a.as_f64x2()).v128() }
}
3922
/// Calculates the square root of each lane of a 128-bit vector interpreted as
/// two 64-bit floating point numbers.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.sqrt))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.sqrt"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_sqrt(a: v128) -> v128 {
    unsafe { simd_fsqrt(a.as_f64x2()).v128() }
}
3933
/// Lane-wise addition of two 128-bit vectors interpreted as two 64-bit
/// floating point numbers.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.add))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.add"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_add(a: v128, b: v128) -> v128 {
    unsafe { simd_add(a.as_f64x2(), b.as_f64x2()).v128() }
}
3944
/// Lane-wise subtraction of two 128-bit vectors interpreted as two 64-bit
/// floating point numbers.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.sub))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.sub"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_sub(a: v128, b: v128) -> v128 {
    unsafe { simd_sub(a.as_f64x2(), b.as_f64x2()).v128() }
}
3955
/// Lane-wise multiplication of two 128-bit vectors interpreted as two 64-bit
/// floating point numbers.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.mul))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.mul"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_mul(a: v128, b: v128) -> v128 {
    unsafe { simd_mul(a.as_f64x2(), b.as_f64x2()).v128() }
}
3966
/// Lane-wise division of two 128-bit vectors interpreted as two 64-bit
/// floating point numbers.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.div))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.div"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_div(a: v128, b: v128) -> v128 {
    unsafe { simd_div(a.as_f64x2(), b.as_f64x2()).v128() }
}
3977
/// Lane-wise minimum with the semantics of the wasm `f64x2.min` instruction;
/// delegates to the LLVM wasm intrinsic so codegen matches exactly.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.min))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.min"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_min(a: v128, b: v128) -> v128 {
    unsafe { llvm_f64x2_min(a.as_f64x2(), b.as_f64x2()).v128() }
}
3988
/// Lane-wise maximum with the semantics of the wasm `f64x2.max` instruction;
/// delegates to the LLVM wasm intrinsic so codegen matches exactly.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.max))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.max"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_max(a: v128, b: v128) -> v128 {
    unsafe { llvm_f64x2_max(a.as_f64x2(), b.as_f64x2()).v128() }
}
3999
/// Lane-wise pseudo-minimum: `if b < a { b } else { a }`.
///
/// Because an ordered comparison with NaN is false, a NaN in `b` (or in both
/// operands) yields `a` — unlike [`f64x2_min`].
#[inline]
#[cfg_attr(test, assert_instr(f64x2.pmin))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.pmin"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_pmin(a: v128, b: v128) -> v128 {
    unsafe {
        simd_select::<simd::m64x2, simd::f64x2>(
            simd_lt(b.as_f64x2(), a.as_f64x2()),
            b.as_f64x2(),
            a.as_f64x2(),
        )
        .v128()
    }
}
4016
/// Lane-wise pseudo-maximum: `if a < b { b } else { a }`.
///
/// Because an ordered comparison with NaN is false, a NaN in either operand
/// yields `a` — unlike [`f64x2_max`].
#[inline]
#[cfg_attr(test, assert_instr(f64x2.pmax))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.pmax"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_pmax(a: v128, b: v128) -> v128 {
    unsafe {
        simd_select::<simd::m64x2, simd::f64x2>(
            simd_lt(a.as_f64x2(), b.as_f64x2()),
            b.as_f64x2(),
            a.as_f64x2(),
        )
        .v128()
    }
}
4033
/// Saturating conversion of each 32-bit float lane to a signed 32-bit integer.
///
/// Uses Rust `as`-cast semantics via `simd_as`: NaN becomes 0 and
/// out-of-range values saturate to `i32::MIN`/`i32::MAX`.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.trunc_sat_f32x4_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.trunc_sat_f32x4_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_trunc_sat_f32x4(a: v128) -> v128 {
    unsafe { simd_as::<simd::f32x4, simd::i32x4>(a.as_f32x4()).v128() }
}
4047
/// Saturating conversion of each 32-bit float lane to an unsigned 32-bit
/// integer.
///
/// Uses Rust `as`-cast semantics via `simd_as`: NaN becomes 0 and
/// out-of-range values saturate to `0`/`u32::MAX`.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.trunc_sat_f32x4_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.trunc_sat_f32x4_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u32x4_trunc_sat_f32x4(a: v128) -> v128 {
    unsafe { simd_as::<simd::f32x4, simd::u32x4>(a.as_f32x4()).v128() }
}
4061
/// Lane-wise conversion of signed 32-bit integers to 32-bit floating point
/// numbers.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.convert_i32x4_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.convert_i32x4_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_convert_i32x4(a: v128) -> v128 {
    unsafe { simd_cast::<_, simd::f32x4>(a.as_i32x4()).v128() }
}
4072
/// Lane-wise conversion of unsigned 32-bit integers to 32-bit floating point
/// numbers.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.convert_i32x4_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.convert_i32x4_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_convert_u32x4(a: v128) -> v128 {
    unsafe { simd_cast::<_, simd::f32x4>(a.as_u32x4()).v128() }
}
4083
/// Saturating conversion of the two 64-bit float lanes to signed 32-bit
/// integers placed in the low two lanes of the result; the high two lanes are
/// zero.
///
/// Uses Rust `as`-cast semantics via `simd_as`: NaN becomes 0 and
/// out-of-range values saturate.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.trunc_sat_f64x2_s_zero))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.trunc_sat_f64x2_s_zero"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_trunc_sat_f64x2_zero(a: v128) -> v128 {
    let ret: simd::i32x4 = unsafe {
        // Convert the two lanes, then widen to 4 lanes by shuffling in zeros.
        simd_shuffle!(
            simd_as::<simd::f64x2, simd::i32x2>(a.as_f64x2()),
            simd::i32x2::ZERO,
            [0, 1, 2, 3],
        )
    };
    ret.v128()
}
4107
/// Saturating conversion of the two 64-bit float lanes to unsigned 32-bit
/// integers placed in the low two lanes of the result; the high two lanes are
/// zero.
///
/// Uses Rust `as`-cast semantics via `simd_as`: NaN becomes 0 and
/// out-of-range values saturate.
#[inline]
#[cfg_attr(test, assert_instr(i32x4.trunc_sat_f64x2_u_zero))]
#[target_feature(enable = "simd128")]
#[doc(alias("i32x4.trunc_sat_f64x2_u_zero"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u32x4_trunc_sat_f64x2_zero(a: v128) -> v128 {
    let ret: simd::u32x4 = unsafe {
        // Convert the two lanes, then widen to 4 lanes by shuffling in zeros.
        simd_shuffle!(
            simd_as::<simd::f64x2, simd::u32x2>(a.as_f64x2()),
            simd::u32x2::ZERO,
            [0, 1, 2, 3],
        )
    };
    ret.v128()
}
4131
/// Converts the low two signed 32-bit integer lanes to the two 64-bit floating
/// point lanes of the result.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.convert_low_i32x4_s))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.convert_low_i32x4_s"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_convert_low_i32x4(a: v128) -> v128 {
    unsafe {
        // Select lanes 0 and 1, then widen each i32 to f64.
        simd_cast::<simd::i32x2, simd::f64x2>(simd_shuffle!(a.as_i32x4(), a.as_i32x4(), [0, 1],))
            .v128()
    }
}
4144
/// Converts the low two unsigned 32-bit integer lanes to the two 64-bit
/// floating point lanes of the result.
#[inline]
#[cfg_attr(test, assert_instr(f64x2.convert_low_i32x4_u))]
#[target_feature(enable = "simd128")]
#[doc(alias("f64x2.convert_low_i32x4_u"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_convert_low_u32x4(a: v128) -> v128 {
    unsafe {
        // Select lanes 0 and 1, then widen each u32 to f64.
        simd_cast::<simd::u32x2, simd::f64x2>(simd_shuffle!(a.as_u32x4(), a.as_u32x4(), [0, 1],))
            .v128()
    }
}
4157
/// Demotes the two 64-bit float lanes to 32-bit floats in the low two lanes of
/// the result; the upper two lanes are zero.
#[inline]
#[cfg_attr(test, assert_instr(f32x4.demote_f64x2_zero))]
#[target_feature(enable = "simd128")]
#[doc(alias("f32x4.demote_f64x2_zero"))]
#[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_demote_f64x2_zero(a: v128) -> v128 {
    unsafe {
        // Pad with zero lanes to f64x4 first, then narrow every lane to f32.
        simd_cast::<simd::f64x4, simd::f32x4>(simd_shuffle!(
            a.as_f64x2(),
            simd::f64x2::ZERO,
            [0, 1, 2, 3]
        ))
        .v128()
    }
}
4178
4179#[inline]
4182#[cfg_attr(test, assert_instr(f64x2.promote_low_f32x4))]
4183#[target_feature(enable = "simd128")]
4184#[doc(alias("f32x4.promote_low_f32x4"))]
4185#[stable(feature = "wasm_simd", since = "1.54.0")]
4186pub fn f64x2_promote_low_f32x4(a: v128) -> v128 {
4187    unsafe {
4188        simd_cast::<simd::f32x2, simd::f64x2>(simd_shuffle!(a.as_f32x4(), a.as_f32x4(), [0, 1]))
4189            .v128()
4190    }
4191}
4192
4193#[cfg(test)]
4194mod tests {
4195    use super::*;
4196    use core::ops::{Add, Div, Mul, Neg, Sub};
4197
4198    use std::fmt::Debug;
4199    use std::mem::transmute;
4200    use std::num::Wrapping;
4201    use std::prelude::v1::*;
4202
    // Compile-time smoke checks that every lane constructor is usable in a
    // `const` context.
    const _C1: v128 = i8x16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
    const _C2: v128 = u8x16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
    const _C3: v128 = i16x8(0, 1, 2, 3, 4, 5, 6, 7);
    const _C4: v128 = u16x8(0, 1, 2, 3, 4, 5, 6, 7);
    const _C5: v128 = i32x4(0, 1, 2, 3);
    const _C6: v128 = u32x4(0, 1, 2, 3);
    const _C7: v128 = i64x2(0, 1);
    const _C8: v128 = u64x2(0, 1);
    const _C9: v128 = f32x4(0.0, 1.0, 2.0, 3.0);
    const _C10: v128 = f64x2(0.0, 1.0);
4213
4214    fn compare_bytes(a: v128, b: v128) {
4215        let a: [u8; 16] = unsafe { transmute(a) };
4216        let b: [u8; 16] = unsafe { transmute(b) };
4217        assert_eq!(a, b);
4218    }
4219
4220    #[test]
4221    fn test_load() {
4222        unsafe {
4223            let arr: [i32; 4] = [0, 1, 2, 3];
4224            let vec = v128_load(arr.as_ptr() as *const v128);
4225            compare_bytes(vec, i32x4(0, 1, 2, 3));
4226        }
4227    }
4228
    // Checks the sign- and zero-extending loads: 8 bytes of memory are widened
    // to a full vector of twice-wider lanes (i8->i16, i16->i32, i32->i64).
    #[test]
    fn test_load_extend() {
        unsafe {
            let arr: [i8; 8] = [-3, -2, -1, 0, 1, 2, 3, 4];
            let vec = i16x8_load_extend_i8x8(arr.as_ptr());
            compare_bytes(vec, i16x8(-3, -2, -1, 0, 1, 2, 3, 4));
            // Zero-extension reinterprets -3i8 as 253u8, etc.
            let vec = i16x8_load_extend_u8x8(arr.as_ptr() as *const u8);
            compare_bytes(vec, i16x8(253, 254, 255, 0, 1, 2, 3, 4));

            let arr: [i16; 4] = [-1, 0, 1, 2];
            let vec = i32x4_load_extend_i16x4(arr.as_ptr());
            compare_bytes(vec, i32x4(-1, 0, 1, 2));
            let vec = i32x4_load_extend_u16x4(arr.as_ptr() as *const u16);
            compare_bytes(vec, i32x4(65535, 0, 1, 2));

            let arr: [i32; 2] = [-1, 1];
            let vec = i64x2_load_extend_i32x2(arr.as_ptr());
            compare_bytes(vec, i64x2(-1, 1));
            let vec = i64x2_load_extend_u32x2(arr.as_ptr() as *const u32);
            compare_bytes(vec, i64x2(u32::max_value().into(), 1));
        }
    }
4251
    // A splat-load must match splatting the same scalar directly.
    #[test]
    fn test_load_splat() {
        unsafe {
            compare_bytes(v128_load8_splat(&8), i8x16_splat(8));
            compare_bytes(v128_load16_splat(&9), i16x8_splat(9));
            compare_bytes(v128_load32_splat(&10), i32x4_splat(10));
            compare_bytes(v128_load64_splat(&11), i64x2_splat(11));
        }
    }
4261
    // A zero-extending load fills lane 0 and zeroes the remaining lanes.
    #[test]
    fn test_load_zero() {
        unsafe {
            compare_bytes(v128_load32_zero(&10), i32x4(10, 0, 0, 0));
            compare_bytes(v128_load64_zero(&11), i64x2(11, 0));
        }
    }
4269
4270    #[test]
4271    fn test_store() {
4272        unsafe {
4273            let mut spot = i8x16_splat(0);
4274            v128_store(&mut spot, i8x16_splat(1));
4275            compare_bytes(spot, i8x16_splat(1));
4276        }
4277    }
4278
    // Loading one scalar into lane N must be equivalent to replacing lane N.
    #[test]
    fn test_load_lane() {
        unsafe {
            let zero = i8x16_splat(0);
            compare_bytes(
                v128_load8_lane::<2>(zero, &1),
                i8x16_replace_lane::<2>(zero, 1),
            );

            compare_bytes(
                v128_load16_lane::<2>(zero, &1),
                i16x8_replace_lane::<2>(zero, 1),
            );

            compare_bytes(
                v128_load32_lane::<2>(zero, &1),
                i32x4_replace_lane::<2>(zero, 1),
            );

            compare_bytes(
                v128_load64_lane::<1>(zero, &1),
                i64x2_replace_lane::<1>(zero, 1),
            );
        }
    }
4304
    // Storing lane N of a vector must write exactly that lane's scalar value.
    #[test]
    fn test_store_lane() {
        unsafe {
            let mut spot = 0;
            let zero = i8x16_splat(0);
            v128_store8_lane::<5>(i8x16_replace_lane::<5>(zero, 7), &mut spot);
            assert_eq!(spot, 7);

            let mut spot = 0;
            v128_store16_lane::<5>(i16x8_replace_lane::<5>(zero, 7), &mut spot);
            assert_eq!(spot, 7);

            let mut spot = 0;
            v128_store32_lane::<3>(i32x4_replace_lane::<3>(zero, 7), &mut spot);
            assert_eq!(spot, 7);

            let mut spot = 0;
            v128_store64_lane::<0>(i64x2_replace_lane::<0>(zero, 7), &mut spot);
            assert_eq!(spot, 7);
        }
    }
4326
    // Round-trips lane constructors through `transmute` to confirm lane order
    // and const-usability of the constructors.
    #[test]
    fn test_i8x16() {
        const A: v128 = super::i8x16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
        compare_bytes(A, A);

        const _: v128 = i16x8(0, 1, 2, 3, 4, 5, 6, 7);
        const _: v128 = i32x4(0, 1, 2, 3);
        const _: v128 = i64x2(0, 1);
        const _: v128 = f32x4(0., 1., 2., 3.);
        const _: v128 = f64x2(0., 1.);

        let bytes: [i16; 8] = unsafe { mem::transmute(i16x8(-1, -2, -3, -4, -5, -6, -7, -8)) };
        assert_eq!(bytes, [-1, -2, -3, -4, -5, -6, -7, -8]);
        let bytes: [i8; 16] = unsafe {
            mem::transmute(i8x16(
                -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16,
            ))
        };
        assert_eq!(
            bytes,
            [
                -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16
            ]
        );
    }
4352
    // Interleaves lanes from two vectors at every lane width and checks the
    // selected indices land where expected.
    #[test]
    fn test_shuffle() {
        let vec_a = i8x16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
        let vec_b = i8x16(
            16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
        );

        let vec_r = i8x16_shuffle::<0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30>(
            vec_a, vec_b,
        );
        let vec_e = i8x16(0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
        compare_bytes(vec_r, vec_e);

        let vec_a = i16x8(0, 1, 2, 3, 4, 5, 6, 7);
        let vec_b = i16x8(8, 9, 10, 11, 12, 13, 14, 15);
        let vec_r = i16x8_shuffle::<0, 8, 2, 10, 4, 12, 6, 14>(vec_a, vec_b);
        let vec_e = i16x8(0, 8, 2, 10, 4, 12, 6, 14);
        compare_bytes(vec_r, vec_e);

        let vec_a = i32x4(0, 1, 2, 3);
        let vec_b = i32x4(4, 5, 6, 7);
        let vec_r = i32x4_shuffle::<0, 4, 2, 6>(vec_a, vec_b);
        let vec_e = i32x4(0, 4, 2, 6);
        compare_bytes(vec_r, vec_e);

        let vec_a = i64x2(0, 1);
        let vec_b = i64x2(2, 3);
        let vec_r = i64x2_shuffle::<0, 2>(vec_a, vec_b);
        let vec_e = i64x2(0, 2);
        compare_bytes(vec_r, vec_e);
    }
4384
    // Generates a test exercising the `extract`/`replace` lane intrinsics for
    // every lane index of one lane type.
    macro_rules! test_extract {
        (
            name: $test_id:ident,
            extract: $extract:ident,
            replace: $replace:ident,
            elem: $elem:ty,
            count: $count:expr,
            indices: [$($idx:expr),*],
        ) => {
            #[test]
            fn $test_id() {
                unsafe {
                    // Every lane of a constant-filled vector extracts the fill.
                    let arr: [$elem; $count] = [123 as $elem; $count];
                    let vec: v128 = transmute(arr);
                    $(
                        assert_eq!($extract::<$idx>(vec), 123 as $elem);
                    )*

                    // Distinct per-lane values confirm lane ordering, and
                    // replace-then-extract round-trips on each index.
                    let arr: [$elem; $count] = [$($idx as $elem),*];
                    let vec: v128 = transmute(arr);
                    $(
                        assert_eq!($extract::<$idx>(vec), $idx as $elem);

                        let tmp = $replace::<$idx>(vec, 124 as $elem);
                        assert_eq!($extract::<$idx>(tmp), 124 as $elem);
                    )*
                }
            }
        }
    }
4418
    // Instantiate the extract/replace test for every lane type and index.
    test_extract! {
        name: test_i8x16_extract_replace,
        extract: i8x16_extract_lane,
        replace: i8x16_replace_lane,
        elem: i8,
        count: 16,
        indices: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
    }
    test_extract! {
        name: test_i16x8_extract_replace,
        extract: i16x8_extract_lane,
        replace: i16x8_replace_lane,
        elem: i16,
        count: 8,
        indices: [0, 1, 2, 3, 4, 5, 6, 7],
    }
    test_extract! {
        name: test_i32x4_extract_replace,
        extract: i32x4_extract_lane,
        replace: i32x4_replace_lane,
        elem: i32,
        count: 4,
        indices: [0, 1, 2, 3],
    }
    test_extract! {
        name: test_i64x2_extract_replace,
        extract: i64x2_extract_lane,
        replace: i64x2_replace_lane,
        elem: i64,
        count: 2,
        indices: [0, 1],
    }
    test_extract! {
        name: test_f32x4_extract_replace,
        extract: f32x4_extract_lane,
        replace: f32x4_replace_lane,
        elem: f32,
        count: 4,
        indices: [0, 1, 2, 3],
    }
    test_extract! {
        name: test_f64x2_extract_replace,
        extract: f64x2_extract_lane,
        replace: f64x2_replace_lane,
        elem: f64,
        count: 2,
        indices: [0, 1],
    }
4467
    // Swizzle selects bytes by index from the first operand; out-of-range
    // indices (>= 16, e.g. 32..29 below) produce zero bytes.
    #[test]
    #[rustfmt::skip]
    fn test_swizzle() {
        compare_bytes(
            i8x16_swizzle(
                i32x4(1, 2, 3, 4),
                i8x16(
                    32, 31, 30, 29,
                    0, 1, 2, 3,
                    12, 13, 14, 15,
                    0, 4, 8, 12),
            ),
            i32x4(0, 1, 4, 0x04030201),
        );
    }
4483
    // Generates a test comparing a splat intrinsic (named by `$test_id` under
    // `super::`) against its expected little-endian byte pattern.
    macro_rules! test_splat {
        ($test_id:ident: $val:expr => $($vals:expr),*) => {
            #[test]
            fn $test_id() {
                let a = super::$test_id($val);
                let b = u8x16($($vals as u8),*);
                compare_bytes(a, b);
            }
        }
    }
4494
    // Splat tests; expected bytes are the little-endian encoding of each lane
    // (e.g. 42.0f32 is [0, 0, 40, 66], 42.0f64 is [0, 0, 0, 0, 0, 0, 69, 64]).
    mod splats {
        use super::*;
        test_splat!(i8x16_splat: 42 => 42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42);
        test_splat!(i16x8_splat: 42 => 42, 0, 42, 0, 42, 0, 42, 0, 42, 0, 42, 0, 42, 0, 42, 0);
        test_splat!(i32x4_splat: 42 => 42, 0, 0, 0, 42, 0, 0, 0, 42, 0, 0, 0, 42, 0, 0, 0);
        test_splat!(i64x2_splat: 42 => 42, 0, 0, 0, 0, 0, 0, 0, 42, 0, 0, 0, 0, 0, 0, 0);
        test_splat!(f32x4_splat: 42. => 0, 0, 40, 66, 0, 0, 40, 66, 0, 0, 40, 66, 0, 0, 40, 66);
        test_splat!(f64x2_splat: 42. => 0, 0, 0, 0, 0, 0, 69, 64, 0, 0, 0, 0, 0, 0, 69, 64);
    }
4504
    // `bitmask` collects the top (sign) bit of every lane into an integer, so
    // MAX values (sign bit clear) give 0 and MIN/-1 values give all-ones masks.
    #[test]
    fn test_bitmasks() {
        let zero = i8x16_splat(0);
        let ones = i8x16_splat(!0);

        assert_eq!(i8x16_bitmask(zero), 0);
        assert_eq!(i8x16_bitmask(ones), 0xffff);
        assert_eq!(i8x16_bitmask(i8x16_splat(i8::MAX)), 0);
        assert_eq!(i8x16_bitmask(i8x16_splat(i8::MIN)), 0xffff);
        assert_eq!(i8x16_bitmask(i8x16_replace_lane::<1>(zero, -1)), 0b10);

        assert_eq!(i16x8_bitmask(zero), 0);
        assert_eq!(i16x8_bitmask(ones), 0xff);
        assert_eq!(i16x8_bitmask(i16x8_splat(i16::MAX)), 0);
        assert_eq!(i16x8_bitmask(i16x8_splat(i16::MIN)), 0xff);
        assert_eq!(i16x8_bitmask(i16x8_replace_lane::<1>(zero, -1)), 0b10);

        assert_eq!(i32x4_bitmask(zero), 0);
        assert_eq!(i32x4_bitmask(ones), 0b1111);
        assert_eq!(i32x4_bitmask(i32x4_splat(i32::MAX)), 0);
        assert_eq!(i32x4_bitmask(i32x4_splat(i32::MIN)), 0b1111);
        assert_eq!(i32x4_bitmask(i32x4_replace_lane::<1>(zero, -1)), 0b10);

        assert_eq!(i64x2_bitmask(zero), 0);
        assert_eq!(i64x2_bitmask(ones), 0b11);
        assert_eq!(i64x2_bitmask(i64x2_splat(i64::MAX)), 0);
        assert_eq!(i64x2_bitmask(i64x2_splat(i64::MIN)), 0b11);
        assert_eq!(i64x2_bitmask(i64x2_replace_lane::<1>(zero, -1)), 0b10);
    }
4534
    // Narrowing packs two wider vectors into one, saturating each lane to the
    // narrower signed or unsigned range (e.g. u8 narrowing clamps negatives to
    // 0 and values above 255 to 255, which reads back as -1 via i8x16).
    #[test]
    fn test_narrow() {
        let zero = i8x16_splat(0);
        let ones = i8x16_splat(!0);

        compare_bytes(i8x16_narrow_i16x8(zero, zero), zero);
        compare_bytes(u8x16_narrow_i16x8(zero, zero), zero);
        compare_bytes(i8x16_narrow_i16x8(ones, ones), ones);
        // -1 saturates to 0 in the unsigned narrowing.
        compare_bytes(u8x16_narrow_i16x8(ones, ones), zero);

        compare_bytes(
            i8x16_narrow_i16x8(
                i16x8(
                    0,
                    1,
                    2,
                    -1,
                    i8::MIN.into(),
                    i8::MAX.into(),
                    u8::MIN.into(),
                    u8::MAX.into(),
                ),
                i16x8(
                    i16::MIN,
                    i16::MAX,
                    u16::MIN as i16,
                    u16::MAX as i16,
                    0,
                    0,
                    0,
                    0,
                ),
            ),
            i8x16(0, 1, 2, -1, -128, 127, 0, 127, -128, 127, 0, -1, 0, 0, 0, 0),
        );

        compare_bytes(
            u8x16_narrow_i16x8(
                i16x8(
                    0,
                    1,
                    2,
                    -1,
                    i8::MIN.into(),
                    i8::MAX.into(),
                    u8::MIN.into(),
                    u8::MAX.into(),
                ),
                i16x8(
                    i16::MIN,
                    i16::MAX,
                    u16::MIN as i16,
                    u16::MAX as i16,
                    0,
                    0,
                    0,
                    0,
                ),
            ),
            i8x16(0, 1, 2, 0, 0, 127, 0, -1, 0, -1, 0, 0, 0, 0, 0, 0),
        );

        compare_bytes(i16x8_narrow_i32x4(zero, zero), zero);
        compare_bytes(u16x8_narrow_i32x4(zero, zero), zero);
        compare_bytes(i16x8_narrow_i32x4(ones, ones), ones);
        compare_bytes(u16x8_narrow_i32x4(ones, ones), zero);

        compare_bytes(
            i16x8_narrow_i32x4(
                i32x4(0, -1, i16::MIN.into(), i16::MAX.into()),
                i32x4(i32::MIN, i32::MAX, u32::MIN as i32, u32::MAX as i32),
            ),
            i16x8(0, -1, i16::MIN, i16::MAX, i16::MIN, i16::MAX, 0, -1),
        );

        compare_bytes(
            u16x8_narrow_i32x4(
                i32x4(u16::MAX.into(), -1, i16::MIN.into(), i16::MAX.into()),
                i32x4(i32::MIN, i32::MAX, u32::MIN as i32, u32::MAX as i32),
            ),
            i16x8(-1, 0, 0, i16::MAX, 0, -1, 0, 0),
        );
    }
4618
    // Extending doubles each lane's width: sign-extension preserves -1 as -1,
    // zero-extension turns all-ones into the narrower type's MAX ("halves").
    #[test]
    fn test_extend() {
        let zero = i8x16_splat(0);
        let ones = i8x16_splat(!0);

        compare_bytes(i16x8_extend_low_i8x16(zero), zero);
        compare_bytes(i16x8_extend_high_i8x16(zero), zero);
        compare_bytes(i16x8_extend_low_u8x16(zero), zero);
        compare_bytes(i16x8_extend_high_u8x16(zero), zero);
        compare_bytes(i16x8_extend_low_i8x16(ones), ones);
        compare_bytes(i16x8_extend_high_i8x16(ones), ones);
        let halves = u16x8_splat(u8::MAX.into());
        compare_bytes(i16x8_extend_low_u8x16(ones), halves);
        compare_bytes(i16x8_extend_high_u8x16(ones), halves);

        compare_bytes(i32x4_extend_low_i16x8(zero), zero);
        compare_bytes(i32x4_extend_high_i16x8(zero), zero);
        compare_bytes(i32x4_extend_low_u16x8(zero), zero);
        compare_bytes(i32x4_extend_high_u16x8(zero), zero);
        compare_bytes(i32x4_extend_low_i16x8(ones), ones);
        compare_bytes(i32x4_extend_high_i16x8(ones), ones);
        let halves = u32x4_splat(u16::MAX.into());
        compare_bytes(i32x4_extend_low_u16x8(ones), halves);
        compare_bytes(i32x4_extend_high_u16x8(ones), halves);

        compare_bytes(i64x2_extend_low_i32x4(zero), zero);
        compare_bytes(i64x2_extend_high_i32x4(zero), zero);
        compare_bytes(i64x2_extend_low_u32x4(zero), zero);
        compare_bytes(i64x2_extend_high_u32x4(zero), zero);
        compare_bytes(i64x2_extend_low_i32x4(ones), ones);
        compare_bytes(i64x2_extend_high_i32x4(ones), ones);
        let halves = i64x2_splat(u32::MAX.into());
        compare_bytes(u64x2_extend_low_u32x4(ones), halves);
        compare_bytes(u64x2_extend_high_u32x4(ones), halves);
    }
4654
4655    #[test]
4656    fn test_dot() {
4657        let zero = i8x16_splat(0);
4658        let ones = i8x16_splat(!0);
4659        let two = i32x4_splat(2);
4660        compare_bytes(i32x4_dot_i16x8(zero, zero), zero);
4661        compare_bytes(i32x4_dot_i16x8(ones, ones), two);
4662    }
4663
    // Generates a test that applies a SIMD binary intrinsic `$f` and checks
    // every lane against the scalar operation `$op` applied element-wise.
    macro_rules! test_binop {
        (
            $($name:ident => {
                $([$($vec1:tt)*] ($op:ident | $f:ident) [$($vec2:tt)*],)*
            })*
        ) => ($(
            #[test]
            fn $name() {
                unsafe {
                    $(
                        let v1 = [$($vec1)*];
                        let v2 = [$($vec2)*];
                        let v1_v128: v128 = mem::transmute(v1);
                        let v2_v128: v128 = mem::transmute(v2);
                        let v3_v128 = super::$f(v1_v128, v2_v128);
                        // Initialize v3 with v1's type/shape, then overwrite it
                        // with the intrinsic's result (the dummy read silences
                        // the unused-assignment lint).
                        let mut v3 = [$($vec1)*];
                        let _ignore = v3;
                        v3 = mem::transmute(v3_v128);

                        for (i, actual) in v3.iter().enumerate() {
                            let expected = v1[i].$op(v2[i]);
                            assert_eq!(*actual, expected);
                        }
                    )*
                }
            }
        )*)
    }
4692
4693    macro_rules! test_unop {
4694        (
4695            $($name:ident => {
4696                $(($op:ident | $f:ident) [$($vec1:tt)*],)*
4697            })*
4698        ) => ($(
4699            #[test]
4700            fn $name() {
4701                unsafe {
4702                    $(
4703                        let v1 = [$($vec1)*];
4704                        let v1_v128: v128 = mem::transmute(v1);
4705                        let v2_v128 = super::$f(v1_v128);
4706                        let mut v2 = [$($vec1)*];
4707                        let _ignore = v2;
4708                        v2 = mem::transmute(v2_v128);
4709
4710                        for (i, actual) in v2.iter().enumerate() {
4711                            let expected = v1[i].$op();
4712                            assert_eq!(*actual, expected);
4713                        }
4714                    )*
4715                }
4716            }
4717        )*)
4718    }
4719
4720    trait Avgr: Sized {
4721        fn avgr(self, other: Self) -> Self;
4722    }
4723
4724    macro_rules! impl_avgr {
4725        ($($i:ident)*) => ($(impl Avgr for $i {
4726            fn avgr(self, other: Self) -> Self {
4727                ((self as u64 + other as u64 + 1) / 2) as $i
4728            }
4729        })*)
4730    }
4731
4732    impl_avgr!(u8 u16);
4733
    // Lane-wise binary-op tests: each case pairs a vector intrinsic with the
    // scalar method defining its per-lane semantics (see `test_binop!`).
    test_binop! {
        // ---- i8x16 / u8x16 lane operations ----
        test_i8x16_add => {
            [0i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                (wrapping_add | i8x16_add)
            [1i8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],

            [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
                (wrapping_add | i8x16_add)
            [-2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -18],

            [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
                (wrapping_add | i8x16_add)
            [127, -44, 43, 126, 4, 2, 9, -3, -59, -43, 39, -69, 79, -3, 9, -24],
        }

        test_i8x16_add_sat_s => {
            [0i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                (saturating_add | i8x16_add_sat)
            [1i8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],

            [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
                (saturating_add | i8x16_add_sat)
            [-2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -18],

            [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
                (saturating_add | i8x16_add_sat)
            [127, -44, 43, 126, 4, 2, 9, -3, -59, -43, 39, -69, 79, -3, 9, -24],
        }

        test_i8x16_add_sat_u => {
            [0u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                (saturating_add | u8x16_add_sat)
            [1u8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],

            [1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
                (saturating_add | u8x16_add_sat)
            [255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, 240],

            [1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
                (saturating_add | u8x16_add_sat)
            [127, -44i8 as u8, 43, 126, 4, 2, 9, -3i8 as u8, -59i8 as u8, -43i8 as u8, 39, -69i8 as u8, 79, -3i8 as u8, 9, -24i8 as u8],
        }

        test_i8x16_sub => {
            [0i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                (wrapping_sub | i8x16_sub)
            [1i8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],

            [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
                (wrapping_sub | i8x16_sub)
            [-2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -18],

            [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
                (wrapping_sub | i8x16_sub)
            [-127, -44, 43, 126, 4, 2, 9, -3, -59, -43, 39, -69, 79, -3, 4, 8],
        }

        test_i8x16_sub_sat_s => {
            [0i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                (saturating_sub | i8x16_sub_sat)
            [1i8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],

            [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
                (saturating_sub | i8x16_sub_sat)
            [-2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -18],

            [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
                (saturating_sub | i8x16_sub_sat)
            [-127, -44, 43, 126, 4, 2, 9, -3, -59, -43, 39, -69, 79, -3, 4, 8],
        }

        test_i8x16_sub_sat_u => {
            [0u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                (saturating_sub | u8x16_sub_sat)
            [1u8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],

            [1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
                (saturating_sub | u8x16_sub_sat)
            [255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, 240],

            [1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
                (saturating_sub | u8x16_sub_sat)
            [127, -44i8 as u8, 43, 126, 4, 2, 9, -3i8 as u8, -59i8 as u8, -43i8 as u8, 39, -69i8 as u8, 79, -3i8 as u8, 9, -24i8 as u8],
        }

        test_i8x16_min_s => {
            [0i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                (min | i8x16_min)
            [1i8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],

            [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
                (min | i8x16_min)
            [-2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -18],

            [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
                (min | i8x16_min)
            [-127, -44, 43, 126, 4, 2, 9, -3, -59, -43, 39, -69, 79, -3, 4, 8],
        }

        test_i8x16_min_u => {
            [0u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                (min | u8x16_min)
            [1u8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],

            [1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
                (min | u8x16_min)
            [255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, 240],

            [1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
                (min | u8x16_min)
            [127, -44i8 as u8, 43, 126, 4, 2, 9, -3i8 as u8, -59i8 as u8, -43i8 as u8, 39, -69i8 as u8, 79, -3i8 as u8, 9, -24i8 as u8],
        }

        test_i8x16_max_s => {
            [0i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                (max | i8x16_max)
            [1i8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],

            [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
                (max | i8x16_max)
            [-2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -18],

            [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
                (max | i8x16_max)
            [-127, -44, 43, 126, 4, 2, 9, -3, -59, -43, 39, -69, 79, -3, 4, 8],
        }

        test_i8x16_max_u => {
            [0u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                (max | u8x16_max)
            [1u8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],

            [1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
                (max | u8x16_max)
            [255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, 240],

            [1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
                (max | u8x16_max)
            [127, -44i8 as u8, 43, 126, 4, 2, 9, -3i8 as u8, -59i8 as u8, -43i8 as u8, 39, -69i8 as u8, 79, -3i8 as u8, 9, -24i8 as u8],
        }

        test_i8x16_avgr_u => {
            [0u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                (avgr | u8x16_avgr)
            [1u8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],

            [1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
                (avgr | u8x16_avgr)
            [255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, 240],

            [1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
                (avgr | u8x16_avgr)
            [127, -44i8 as u8, 43, 126, 4, 2, 9, -3i8 as u8, -59i8 as u8, -43i8 as u8, 39, -69i8 as u8, 79, -3i8 as u8, 9, -24i8 as u8],
        }

        // ---- i16x8 / u16x8 lane operations ----
        test_i16x8_add => {
            [0i16, 0, 0, 0, 0, 0, 0, 0]
                (wrapping_add | i16x8_add)
            [1i16, 1, 1, 1, 1, 1, 1, 1],

            [1i16, 2, 3, 4, 5, 6, 7, 8]
                (wrapping_add | i16x8_add)
            [32767, 8, -2494,-4, 4882, -4, 848, 3830],
        }

        test_i16x8_add_sat_s => {
            [0i16, 0, 0, 0, 0, 0, 0, 0]
                (saturating_add | i16x8_add_sat)
            [1i16, 1, 1, 1, 1, 1, 1, 1],

            [1i16, 2, 3, 4, 5, 6, 7, 8]
                (saturating_add | i16x8_add_sat)
            [32767, 8, -2494,-4, 4882, -4, 848, 3830],
        }

        test_i16x8_add_sat_u => {
            [0u16, 0, 0, 0, 0, 0, 0, 0]
                (saturating_add | u16x8_add_sat)
            [1u16, 1, 1, 1, 1, 1, 1, 1],

            [1u16, 2, 3, 4, 5, 6, 7, 8]
                (saturating_add | u16x8_add_sat)
            [32767, 8, -2494i16 as u16,-4i16 as u16, 4882, -4i16 as u16, 848, 3830],
        }

        test_i16x8_sub => {
            [0i16, 0, 0, 0, 0, 0, 0, 0]
                (wrapping_sub | i16x8_sub)
            [1i16, 1, 1, 1, 1, 1, 1, 1],

            [1i16, 2, 3, 4, 5, 6, 7, 8]
                (wrapping_sub | i16x8_sub)
            [32767, 8, -2494,-4, 4882, -4, 848, 3830],
        }

        test_i16x8_sub_sat_s => {
            [0i16, 0, 0, 0, 0, 0, 0, 0]
                (saturating_sub | i16x8_sub_sat)
            [1i16, 1, 1, 1, 1, 1, 1, 1],

            [1i16, 2, 3, 4, 5, 6, 7, 8]
                (saturating_sub | i16x8_sub_sat)
            [32767, 8, -2494,-4, 4882, -4, 848, 3830],
        }

        test_i16x8_sub_sat_u => {
            [0u16, 0, 0, 0, 0, 0, 0, 0]
                (saturating_sub | u16x8_sub_sat)
            [1u16, 1, 1, 1, 1, 1, 1, 1],

            [1u16, 2, 3, 4, 5, 6, 7, 8]
                (saturating_sub | u16x8_sub_sat)
            [32767, 8, -2494i16 as u16,-4i16 as u16, 4882, -4i16 as u16, 848, 3830],
        }

        test_i16x8_mul => {
            [0i16, 0, 0, 0, 0, 0, 0, 0]
                (wrapping_mul | i16x8_mul)
            [1i16, 1, 1, 1, 1, 1, 1, 1],

            [1i16, 2, 3, 4, 5, 6, 7, 8]
                (wrapping_mul | i16x8_mul)
            [32767, 8, -2494,-4, 4882, -4, 848, 3830],
        }

        test_i16x8_min_s => {
            [0i16, 0, 0, 0, 0, 0, 0, 0]
                (min | i16x8_min)
            [1i16, 1, 1, 1, 1, 1, 1, 1],

            [1i16, 2, 3, 4, 5, 6, 7, 8]
                (min | i16x8_min)
            [32767, 8, -2494,-4, 4882, -4, 848, 3830],
        }

        test_i16x8_min_u => {
            [0u16, 0, 0, 0, 0, 0, 0, 0]
                (min | u16x8_min)
            [1u16, 1, 1, 1, 1, 1, 1, 1],

            [1u16, 2, 3, 4, 5, 6, 7, 8]
                (min | u16x8_min)
            [32767, 8, -2494i16 as u16,-4i16 as u16, 4882, -4i16 as u16, 848, 3830],
        }

        test_i16x8_max_s => {
            [0i16, 0, 0, 0, 0, 0, 0, 0]
                (max | i16x8_max)
            [1i16, 1, 1, 1, 1, 1, 1, 1],

            [1i16, 2, 3, 4, 5, 6, 7, 8]
                (max | i16x8_max)
            [32767, 8, -2494,-4, 4882, -4, 848, 3830],
        }

        test_i16x8_max_u => {
            [0u16, 0, 0, 0, 0, 0, 0, 0]
                (max | u16x8_max)
            [1u16, 1, 1, 1, 1, 1, 1, 1],

            [1u16, 2, 3, 4, 5, 6, 7, 8]
                (max | u16x8_max)
            [32767, 8, -2494i16 as u16,-4i16 as u16, 4882, -4i16 as u16, 848, 3830],
        }

        test_i16x8_avgr_u => {
            [0u16, 0, 0, 0, 0, 0, 0, 0]
                (avgr | u16x8_avgr)
            [1u16, 1, 1, 1, 1, 1, 1, 1],

            [1u16, 2, 3, 4, 5, 6, 7, 8]
                (avgr | u16x8_avgr)
            [32767, 8, -2494i16 as u16,-4i16 as u16, 4882, -4i16 as u16, 848, 3830],
        }

        // ---- i32x4 / u32x4 lane operations ----
        test_i32x4_add => {
            [0i32, 0, 0, 0] (wrapping_add | i32x4_add) [1, 2, 3, 4],
            [1i32, 1283, i32::MAX, i32::MIN]
                (wrapping_add | i32x4_add)
            [i32::MAX; 4],
        }

        test_i32x4_sub => {
            [0i32, 0, 0, 0] (wrapping_sub | i32x4_sub) [1, 2, 3, 4],
            [1i32, 1283, i32::MAX, i32::MIN]
                (wrapping_sub | i32x4_sub)
            [i32::MAX; 4],
        }

        test_i32x4_mul => {
            [0i32, 0, 0, 0] (wrapping_mul | i32x4_mul) [1, 2, 3, 4],
            [1i32, 1283, i32::MAX, i32::MIN]
                (wrapping_mul | i32x4_mul)
            [i32::MAX; 4],
        }

        test_i32x4_min_s => {
            [0i32, 0, 0, 0] (min | i32x4_min) [1, 2, 3, 4],
            [1i32, 1283, i32::MAX, i32::MIN]
                (min | i32x4_min)
            [i32::MAX; 4],
        }

        test_i32x4_min_u => {
            [0u32, 0, 0, 0] (min | u32x4_min) [1, 2, 3, 4],
            [1u32, 1283, i32::MAX as u32, i32::MIN as u32]
                (min | u32x4_min)
            [i32::MAX as u32; 4],
        }

        test_i32x4_max_s => {
            [0i32, 0, 0, 0] (max | i32x4_max) [1, 2, 3, 4],
            [1i32, 1283, i32::MAX, i32::MIN]
                (max | i32x4_max)
            [i32::MAX; 4],
        }

        test_i32x4_max_u => {
            [0u32, 0, 0, 0] (max | u32x4_max) [1, 2, 3, 4],
            [1u32, 1283, i32::MAX as u32, i32::MIN as u32]
                (max | u32x4_max)
            [i32::MAX as u32; 4],
        }

        // ---- i64x2 lane operations ----
        test_i64x2_add => {
            [0i64, 0] (wrapping_add | i64x2_add) [1, 2],
            [i64::MIN, i64::MAX] (wrapping_add | i64x2_add) [i64::MAX, i64::MIN],
            [i64::MAX; 2] (wrapping_add | i64x2_add) [i64::MAX; 2],
            [-4i64, -4] (wrapping_add | i64x2_add) [800, 939],
        }

        test_i64x2_sub => {
            [0i64, 0] (wrapping_sub | i64x2_sub) [1, 2],
            [i64::MIN, i64::MAX] (wrapping_sub | i64x2_sub) [i64::MAX, i64::MIN],
            [i64::MAX; 2] (wrapping_sub | i64x2_sub) [i64::MAX; 2],
            [-4i64, -4] (wrapping_sub | i64x2_sub) [800, 939],
        }

        test_i64x2_mul => {
            [0i64, 0] (wrapping_mul | i64x2_mul) [1, 2],
            [i64::MIN, i64::MAX] (wrapping_mul | i64x2_mul) [i64::MAX, i64::MIN],
            [i64::MAX; 2] (wrapping_mul | i64x2_mul) [i64::MAX; 2],
            [-4i64, -4] (wrapping_mul | i64x2_mul) [800, 939],
        }

        // ---- f32x4 lane operations ----
        test_f32x4_add => {
            [-1.0f32, 2.0, 3.0, 4.0] (add | f32x4_add) [1., 2., 0., 0.],
            [f32::INFINITY, -0.0, f32::NEG_INFINITY, 3.0]
                (add | f32x4_add)
            [1., 2., 0., 0.],
        }

        test_f32x4_sub => {
            [-1.0f32, 2.0, 3.0, 4.0] (sub | f32x4_sub) [1., 2., 0., 0.],
            [f32::INFINITY, -0.0, f32::NEG_INFINITY, 3.0]
                (sub | f32x4_sub)
            [1., 2., 0., 0.],
        }

        test_f32x4_mul => {
            [-1.0f32, 2.0, 3.0, 4.0] (mul | f32x4_mul) [1., 2., 0., 0.],
            [f32::INFINITY, -0.0, f32::NEG_INFINITY, 3.0]
                (mul | f32x4_mul)
            // Lane 2 multiplier is 1. (not 0.): -inf * 0 would be NaN and
            // `test_binop!` compares lanes with `assert_eq!`, which fails
            // on NaN == NaN.
            [1., 2., 1., 0.],
        }

        test_f32x4_div => {
            [-1.0f32, 2.0, 3.0, 4.0] (div | f32x4_div) [1., 2., 0., 0.],
            [f32::INFINITY, -0.0, f32::NEG_INFINITY, 3.0]
                (div | f32x4_div)
            [1., 2., 0., 0.],
        }

        test_f32x4_min => {
            [-1.0f32, 2.0, 3.0, 4.0] (min | f32x4_min) [1., 2., 0., 0.],
            [f32::INFINITY, -0.0, f32::NEG_INFINITY, 3.0]
                (min | f32x4_min)
            [1., 2., 0., 0.],
        }

        test_f32x4_max => {
            [-1.0f32, 2.0, 3.0, 4.0] (max | f32x4_max) [1., 2., 0., 0.],
            [f32::INFINITY, -0.0, f32::NEG_INFINITY, 3.0]
                (max | f32x4_max)
            [1., 2., 0., 0.],
        }

        test_f32x4_pmin => {
            [-1.0f32, 2.0, 3.0, 4.0] (min | f32x4_pmin) [1., 2., 0., 0.],
            [f32::INFINITY, -0.0, f32::NEG_INFINITY, 3.0]
                (min | f32x4_pmin)
            [1., 2., 0., 0.],
        }

        test_f32x4_pmax => {
            [-1.0f32, 2.0, 3.0, 4.0] (max | f32x4_pmax) [1., 2., 0., 0.],
            [f32::INFINITY, -0.0, f32::NEG_INFINITY, 3.0]
                (max | f32x4_pmax)
            [1., 2., 0., 0.],
        }

        // ---- f64x2 lane operations ----
        test_f64x2_add => {
            [-1.0f64, 2.0] (add | f64x2_add) [1., 2.],
            [f64::INFINITY, f64::NEG_INFINITY] (add | f64x2_add) [1., 2.],
        }

        test_f64x2_sub => {
            [-1.0f64, 2.0] (sub | f64x2_sub) [1., 2.],
            [f64::INFINITY, f64::NEG_INFINITY] (sub | f64x2_sub) [1., 2.],
        }

        test_f64x2_mul => {
            [-1.0f64, 2.0] (mul | f64x2_mul) [1., 2.],
            [f64::INFINITY, f64::NEG_INFINITY] (mul | f64x2_mul) [1., 2.],
        }

        test_f64x2_div => {
            [-1.0f64, 2.0] (div | f64x2_div) [1., 2.],
            [f64::INFINITY, f64::NEG_INFINITY] (div | f64x2_div) [1., 2.],
        }

        test_f64x2_min => {
            [-1.0f64, 2.0] (min | f64x2_min) [1., 2.],
            [f64::INFINITY, f64::NEG_INFINITY] (min | f64x2_min) [1., 2.],
        }

        test_f64x2_max => {
            [-1.0f64, 2.0] (max | f64x2_max) [1., 2.],
            [f64::INFINITY, f64::NEG_INFINITY] (max | f64x2_max) [1., 2.],
        }

        test_f64x2_pmin => {
            [-1.0f64, 2.0] (min | f64x2_pmin) [1., 2.],
            [f64::INFINITY, f64::NEG_INFINITY] (min | f64x2_pmin) [1., 2.],
        }

        test_f64x2_pmax => {
            [-1.0f64, 2.0] (max | f64x2_pmax) [1., 2.],
            [f64::INFINITY, f64::NEG_INFINITY] (max | f64x2_pmax) [1., 2.],
        }
    }
5175
    // Lane-wise unary-op tests: each case pairs a vector intrinsic with the
    // scalar method defining its per-lane semantics (see `test_unop!`).
    test_unop! {
        // ---- integer abs / neg (wrapping: MIN stays MIN) ----
        test_i8x16_abs => {
            (wrapping_abs | i8x16_abs)
            [1i8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],

            (wrapping_abs | i8x16_abs)
            [-2i8, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -18],

            // Includes -128 (i8::MIN), whose wrapping_abs is itself.
            (wrapping_abs | i8x16_abs)
            [-127i8, -44, 43, 126, 4, -128, 127, -59, -43, 39, -69, 79, -3, 35, 83, 13],
        }

        test_i8x16_neg => {
            (wrapping_neg | i8x16_neg)
            [1i8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],

            (wrapping_neg | i8x16_neg)
            [-2i8, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -18],

            (wrapping_neg | i8x16_neg)
            [-127i8, -44, 43, 126, 4, -128, 127, -59, -43, 39, -69, 79, -3, 35, 83, 13],
        }

        test_i16x8_abs => {
            (wrapping_abs | i16x8_abs) [1i16, 1, 1, 1, 1, 1, 1, 1],
            (wrapping_abs | i16x8_abs) [2i16, 0x7fff, !0, 4, 42, -5, 33, -4847],
        }

        test_i16x8_neg => {
            (wrapping_neg | i16x8_neg) [1i16, 1, 1, 1, 1, 1, 1, 1],
            (wrapping_neg | i16x8_neg) [2i16, 0x7fff, !0, 4, 42, -5, 33, -4847],
        }

        test_i32x4_abs => {
            (wrapping_abs | i32x4_abs) [1i32, 2, 3, 4],
            (wrapping_abs | i32x4_abs) [i32::MIN, i32::MAX, 0, 4],
        }

        test_i32x4_neg => {
            (wrapping_neg | i32x4_neg) [1i32, 2, 3, 4],
            (wrapping_neg | i32x4_neg) [i32::MIN, i32::MAX, 0, 4],
        }

        test_i64x2_abs => {
            (wrapping_abs | i64x2_abs) [1i64, 2],
            (wrapping_abs | i64x2_abs) [i64::MIN, i64::MAX],
        }

        test_i64x2_neg => {
            (wrapping_neg | i64x2_neg) [1i64, 2],
            (wrapping_neg | i64x2_neg) [i64::MIN, i64::MAX],
        }

        // ---- f32x4 rounding / sign / sqrt ----
        test_f32x4_ceil => {
            (ceil | f32x4_ceil) [1.0f32, 2., 2.5, 3.3],
            (ceil | f32x4_ceil) [0.0, -0.3, f32::INFINITY, -0.0],
        }

        test_f32x4_floor => {
            (floor | f32x4_floor) [1.0f32, 2., 2.5, 3.3],
            (floor | f32x4_floor) [0.0, -0.3, f32::INFINITY, -0.0],
        }

        test_f32x4_trunc => {
            (trunc | f32x4_trunc) [1.0f32, 2., 2.5, 3.3],
            (trunc | f32x4_trunc) [0.0, -0.3, f32::INFINITY, -0.0],
        }

        // Scalar `round` (ties away from zero) stands in for the vector
        // round-to-nearest; the inputs avoid exact .5 ties where the two
        // rounding modes could differ — confirm if new cases are added.
        test_f32x4_nearest => {
            (round | f32x4_nearest) [1.0f32, 2., 2.6, 3.3],
            (round | f32x4_nearest) [0.0, -0.3, f32::INFINITY, -0.0],
        }

        test_f32x4_abs => {
            (abs | f32x4_abs) [1.0f32, 2., 2.6, 3.3],
            (abs | f32x4_abs) [0.0, -0.3, f32::INFINITY, -0.0],
        }

        test_f32x4_neg => {
            (neg | f32x4_neg) [1.0f32, 2., 2.6, 3.3],
            (neg | f32x4_neg) [0.0, -0.3, f32::INFINITY, -0.0],
        }

        // sqrt inputs are non-negative so no NaN results are produced.
        test_f32x4_sqrt => {
            (sqrt | f32x4_sqrt) [1.0f32, 2., 2.6, 3.3],
            (sqrt | f32x4_sqrt) [0.0, 0.3, f32::INFINITY, 0.1],
        }

        // ---- f64x2 rounding / sign / sqrt ----
        test_f64x2_ceil => {
            (ceil | f64x2_ceil) [1.0f64, 2.3],
            (ceil | f64x2_ceil) [f64::INFINITY, -0.1],
        }

        test_f64x2_floor => {
            (floor | f64x2_floor) [1.0f64, 2.3],
            (floor | f64x2_floor) [f64::INFINITY, -0.1],
        }

        test_f64x2_trunc => {
            (trunc | f64x2_trunc) [1.0f64, 2.3],
            (trunc | f64x2_trunc) [f64::INFINITY, -0.1],
        }

        test_f64x2_nearest => {
            (round | f64x2_nearest) [1.0f64, 2.3],
            (round | f64x2_nearest) [f64::INFINITY, -0.1],
        }

        test_f64x2_abs => {
            (abs | f64x2_abs) [1.0f64, 2.3],
            (abs | f64x2_abs) [f64::INFINITY, -0.1],
        }

        test_f64x2_neg => {
            (neg | f64x2_neg) [1.0f64, 2.3],
            (neg | f64x2_neg) [f64::INFINITY, -0.1],
        }

        test_f64x2_sqrt => {
            (sqrt | f64x2_sqrt) [1.0f64, 2.3],
            (sqrt | f64x2_sqrt) [f64::INFINITY, 0.1],
        }
    }
5299
5300    macro_rules! floating_point {
5301        (f32) => {
5302            true
5303        };
5304        (f64) => {
5305            true
5306        };
5307        ($id:ident) => {
5308            false
5309        };
5310    }
5311
5312    trait IsNan: Sized {
5313        fn is_nan(self) -> bool {
5314            false
5315        }
5316    }
5317    impl IsNan for i8 {}
5318    impl IsNan for i16 {}
5319    impl IsNan for i32 {}
5320    impl IsNan for i64 {}
5321
    // Generates a `#[test]` comparing a lane-wise binary vector op against a
    // hard-coded expected output array. Two forms:
    //   `$id[$ety; N] | op [test_id]: (a, b) => out`       — in/out lanes share
    //       one element type, and
    //   `$id[$ety; N] => $oty | op [test_id]: ...`          — output lanes have
    //       a different type `$oty` (e.g. float inputs, integer mask output).
    macro_rules! test_bop {
         ($id:ident[$ety:ident; $ecount:expr] |
          $binary_op:ident [$op_test_id:ident] :
          ([$($in_a:expr),*], [$($in_b:expr),*]) => [$($out:expr),*]) => {
             // Same-type form: forward to the two-type form with $oty = $ety.
             test_bop!(
                 $id[$ety; $ecount] => $ety | $binary_op [ $op_test_id ]:
                 ([$($in_a),*], [$($in_b),*]) => [$($out),*]
             );

         };
         ($id:ident[$ety:ident; $ecount:expr] => $oty:ident |
          $binary_op:ident [$op_test_id:ident] :
          ([$($in_a:expr),*], [$($in_b:expr),*]) => [$($out:expr),*]) => {
             #[test]
             fn $op_test_id() {
                 unsafe {
                     let a_input: [$ety; $ecount] = [$($in_a),*];
                     let b_input: [$ety; $ecount] = [$($in_b),*];
                     let output: [$oty; $ecount] = [$($out),*];

                     let a_vec_in: v128 = transmute(a_input);
                     let b_vec_in: v128 = transmute(b_input);
                     let vec_res: v128 = $binary_op(a_vec_in, b_vec_in);

                     let res: [$oty; $ecount] = transmute(vec_res);

                     if !floating_point!($ety) {
                         assert_eq!(res, output);
                     } else {
                         // Float lanes: `assert_eq!` fails on NaN == NaN, so
                         // compare NaN-ness first and values only for the
                         // non-NaN lanes.
                         for i in 0..$ecount {
                             let r = res[i];
                             let o = output[i];
                             assert_eq!(r.is_nan(), o.is_nan());
                             if !r.is_nan() {
                                 assert_eq!(r, o);
                             }
                         }
                     }
                 }
             }
         }
     }
5364
5365    macro_rules! test_bops {
5366         ($id:ident[$ety:ident; $ecount:expr] |
5367          $binary_op:ident [$op_test_id:ident]:
5368          ([$($in_a:expr),*], $in_b:expr) => [$($out:expr),*]) => {
5369             #[test]
5370             fn $op_test_id() {
5371                 unsafe {
5372                     let a_input: [$ety; $ecount] = [$($in_a),*];
5373                     let output: [$ety; $ecount] = [$($out),*];
5374
5375                     let a_vec_in: v128 = transmute(a_input);
5376                     let vec_res: v128 = $binary_op(a_vec_in, $in_b);
5377
5378                     let res: [$ety; $ecount] = transmute(vec_res);
5379                     assert_eq!(res, output);
5380                 }
5381             }
5382         }
5383     }
5384
5385    macro_rules! test_uop {
5386         ($id:ident[$ety:ident; $ecount:expr] |
5387          $unary_op:ident [$op_test_id:ident]: [$($in_a:expr),*] => [$($out:expr),*]) => {
5388             #[test]
5389             fn $op_test_id() {
5390                 unsafe {
5391                     let a_input: [$ety; $ecount] = [$($in_a),*];
5392                     let output: [$ety; $ecount] = [$($out),*];
5393
5394                     let a_vec_in: v128 = transmute(a_input);
5395                     let vec_res: v128 = $unary_op(a_vec_in);
5396
5397                     let res: [$ety; $ecount] = transmute(vec_res);
5398                     assert_eq!(res, output);
5399                 }
5400             }
5401         }
5402     }
5403
    // Left shifts: lanes shift as raw bits, so e.g. i8::MAX << 1 wraps to -2.
    test_bops!(i8x16[i8; 16] | i8x16_shl[i8x16_shl_test]:
               ([0, -1, 2, 3, 4, 5, 6, i8::MAX, 1, 1, 1, 1, 1, 1, 1, 1], 1) =>
               [0, -2, 4, 6, 8, 10, 12, -2, 2, 2, 2, 2, 2, 2, 2, 2]);
    test_bops!(i16x8[i16; 8] | i16x8_shl[i16x8_shl_test]:
                ([0, -1, 2, 3, 4, 5, 6, i16::MAX], 1) =>
                [0, -2, 4, 6, 8, 10, 12, -2]);
    test_bops!(i32x4[i32; 4] | i32x4_shl[i32x4_shl_test]:
                ([0, -1, 2, 3], 1) => [0, -2, 4, 6]);
    test_bops!(i64x2[i64; 2] | i64x2_shl[i64x2_shl_test]:
                ([0, -1], 1) => [0, -2]);
5414
    // Arithmetic (signed) right shifts: the sign bit propagates, so -1 >> 1
    // stays -1.
    test_bops!(i8x16[i8; 16] | i8x16_shr[i8x16_shr_s_test]:
               ([0, -1, 2, 3, 4, 5, 6, i8::MAX, 1, 1, 1, 1, 1, 1, 1, 1], 1) =>
               [0, -1, 1, 1, 2, 2, 3, 63, 0, 0, 0, 0, 0, 0, 0, 0]);
    test_bops!(i16x8[i16; 8] | i16x8_shr[i16x8_shr_s_test]:
               ([0, -1, 2, 3, 4, 5, 6, i16::MAX], 1) =>
               [0, -1, 1, 1, 2, 2, 3, i16::MAX / 2]);
    test_bops!(i32x4[i32; 4] | i32x4_shr[i32x4_shr_s_test]:
               ([0, -1, 2, 3], 1) => [0, -1, 1, 1]);
    test_bops!(i64x2[i64; 2] | i64x2_shr[i64x2_shr_s_test]:
               ([0, -1], 1) => [0, -1]);
5425
5426    test_bops!(i8x16[i8; 16] | u8x16_shr[i8x16_uhr_u_test]:
5427                ([0, -1, 2, 3, 4, 5, 6, i8::MAX, 1, 1, 1, 1, 1, 1, 1, 1], 1) =>
5428                [0, i8::MAX, 1, 1, 2, 2, 3, 63, 0, 0, 0, 0, 0, 0, 0, 0]);
5429    test_bops!(i16x8[i16; 8] | u16x8_shr[i16x8_uhr_u_test]:
5430                ([0, -1, 2, 3, 4, 5, 6, i16::MAX], 1) =>
5431                [0, i16::MAX, 1, 1, 2, 2, 3, i16::MAX / 2]);
5432    test_bops!(i32x4[i32; 4] | u32x4_shr[i32x4_uhr_u_test]:
5433                ([0, -1, 2, 3], 1) => [0, i32::MAX, 1, 1]);
5434    test_bops!(i64x2[i64; 2] | u64x2_shr[i64x2_uhr_u_test]:
5435                ([0, -1], 1) => [0, i64::MAX]);
5436
5437    #[test]
5438    fn v128_bitwise_logical_ops() {
5439        unsafe {
5440            let a: [u32; 4] = [u32::MAX, 0, u32::MAX, 0];
5441            let b: [u32; 4] = [u32::MAX; 4];
5442            let c: [u32; 4] = [0; 4];
5443
5444            let vec_a: v128 = transmute(a);
5445            let vec_b: v128 = transmute(b);
5446            let vec_c: v128 = transmute(c);
5447
5448            let r: v128 = v128_and(vec_a, vec_a);
5449            compare_bytes(r, vec_a);
5450            let r: v128 = v128_and(vec_a, vec_b);
5451            compare_bytes(r, vec_a);
5452            let r: v128 = v128_andnot(vec_a, vec_b);
5453            compare_bytes(r, vec_c);
5454            let r: v128 = v128_andnot(vec_a, vec_a);
5455            compare_bytes(r, vec_c);
5456            let r: v128 = v128_andnot(vec_a, vec_c);
5457            compare_bytes(r, vec_a);
5458            let r: v128 = v128_or(vec_a, vec_b);
5459            compare_bytes(r, vec_b);
5460            let r: v128 = v128_not(vec_b);
5461            compare_bytes(r, vec_c);
5462            let r: v128 = v128_xor(vec_a, vec_c);
5463            compare_bytes(r, vec_a);
5464
5465            let r: v128 = v128_bitselect(vec_b, vec_c, vec_b);
5466            compare_bytes(r, vec_b);
5467            let r: v128 = v128_bitselect(vec_b, vec_c, vec_c);
5468            compare_bytes(r, vec_c);
5469            let r: v128 = v128_bitselect(vec_b, vec_c, vec_a);
5470            compare_bytes(r, vec_a);
5471        }
5472    }
5473
5474    macro_rules! test_bool_red {
5475         ([$test_id:ident, $any:ident, $all:ident] | [$($true:expr),*] | [$($false:expr),*] | [$($alt:expr),*]) => {
5476             #[test]
5477             fn $test_id() {
5478                 unsafe {
5479                     let vec_a: v128 = transmute([$($true),*]); let vec_b: v128 = transmute([$($false),*]); let vec_c: v128 = transmute([$($alt),*]); assert_eq!($all(vec_a), true);
5489                     assert_eq!($all(vec_b), false);
5490                     assert_eq!($all(vec_c), false);
5491                 }
5492             }
5493         }
5494     }
5495
    // Boolean reductions for each lane width: an all-true vector, an
    // all-false vector, and an alternating vector (nonzero lanes are true).
    test_bool_red!(
        [i8x16_boolean_reductions, v128_any_true, i8x16_all_true]
            | [1_i8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
            | [0_i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
            | [1_i8, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0]
    );
    test_bool_red!(
        [i16x8_boolean_reductions, v128_any_true, i16x8_all_true]
            | [1_i16, 1, 1, 1, 1, 1, 1, 1]
            | [0_i16, 0, 0, 0, 0, 0, 0, 0]
            | [1_i16, 0, 1, 0, 1, 0, 1, 0]
    );
    test_bool_red!(
        [i32x4_boolean_reductions, v128_any_true, i32x4_all_true]
            | [1_i32, 1, 1, 1]
            | [0_i32, 0, 0, 0]
            | [1_i32, 0, 1, 0]
    );
    test_bool_red!(
        [i64x2_boolean_reductions, v128_any_true, i64x2_all_true]
            | [1_i64, 1]
            | [0_i64, 0]
            | [1_i64, 0]
    );
5520
    // Lane-wise equality: lanes that compare equal produce an all-ones mask
    // (-1), unequal lanes produce 0.
    test_bop!(i8x16[i8; 16] | i8x16_eq[i8x16_eq_test]:
              ([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
               [0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, 15]) =>
              [-1, 0, -1, 0 ,-1, 0, -1, -1, -1, 0, -1, 0 ,-1, 0, -1, -1]);
    test_bop!(i16x8[i16; 8] | i16x8_eq[i16x8_eq_test]:
               ([0, 1, 2, 3, 4, 5, 6, 7], [0, 2, 2, 4, 4, 6, 6, 7]) =>
               [-1, 0, -1, 0 ,-1, 0, -1, -1]);
    test_bop!(i32x4[i32; 4] | i32x4_eq[i32x4_eq_test]:
               ([0, 1, 2, 3], [0, 2, 2, 4]) => [-1, 0, -1, 0]);
    test_bop!(i64x2[i64; 2] | i64x2_eq[i64x2_eq_test]:
               ([0, 1], [0, 2]) => [-1, 0]);
    test_bop!(f32x4[f32; 4] => i32 | f32x4_eq[f32x4_eq_test]:
               ([0., 1., 2., 3.], [0., 2., 2., 4.]) => [-1, 0, -1, 0]);
    test_bop!(f64x2[f64; 2] => i64 | f64x2_eq[f64x2_eq_test]: ([0., 1.], [0., 2.]) => [-1, 0]);

    // Lane-wise inequality: exact complement of the masks above for the same
    // inputs.
    test_bop!(i8x16[i8; 16] | i8x16_ne[i8x16_ne_test]:
               ([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
                [0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, 15]) =>
               [0, -1, 0, -1 ,0, -1, 0, 0, 0, -1, 0, -1 ,0, -1, 0, 0]);
    test_bop!(i16x8[i16; 8] | i16x8_ne[i16x8_ne_test]:
               ([0, 1, 2, 3, 4, 5, 6, 7], [0, 2, 2, 4, 4, 6, 6, 7]) =>
               [0, -1, 0, -1 ,0, -1, 0, 0]);
    test_bop!(i32x4[i32; 4] | i32x4_ne[i32x4_ne_test]:
               ([0, 1, 2, 3], [0, 2, 2, 4]) => [0, -1, 0, -1]);
    test_bop!(i64x2[i64; 2] | i64x2_ne[i64x2_ne_test]:
               ([0, 1], [0, 2]) => [0, -1]);
    test_bop!(f32x4[f32; 4] => i32 | f32x4_ne[f32x4_ne_test]:
               ([0., 1., 2., 3.], [0., 2., 2., 4.]) => [0, -1, 0, -1]);
    test_bop!(f64x2[f64; 2] => i64 | f64x2_ne[f64x2_ne_test]: ([0., 1.], [0., 2.]) => [0, -1]);
5550
    // Lane-wise `<`. The signed (`_s`) and unsigned (`_u`) variants are fed
    // the same bytes but expect different masks on lanes holding negative
    // values (e.g. -12 reinterpreted as u8 is 244, so it is not less than 12
    // unsigned).
    test_bop!(i8x16[i8; 16] | i8x16_lt[i8x16_lt_s_test]:
               ([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, -12, 13, 14, 15],
                [0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, 15]) =>
               [0, -1, 0, -1 ,0, -1, 0, 0, 0, -1, 0, -1, -1, -1, 0, 0]);
    test_bop!(i8x16[i8; 16] | u8x16_lt[i8x16_lt_u_test]:
               ([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, -12, 13, 14, 15],
                [0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, 15]) =>
               [0, -1, 0, -1 ,0, -1, 0, 0, 0, -1, 0, -1 ,0, -1, 0, 0]);
    test_bop!(i16x8[i16; 8] | i16x8_lt[i16x8_lt_s_test]:
               ([0, 1, 2, 3, 4, 5, 6, -7], [0, 2, 2, 4, 4, 6, 6, 7]) =>
               [0, -1, 0, -1 ,0, -1, 0, -1]);
    test_bop!(i16x8[i16; 8] | u16x8_lt[i16x8_lt_u_test]:
               ([0, 1, 2, 3, 4, 5, 6, -7], [0, 2, 2, 4, 4, 6, 6, 7]) =>
               [0, -1, 0, -1 ,0, -1, 0, 0]);
    test_bop!(i32x4[i32; 4] | i32x4_lt[i32x4_lt_s_test]:
               ([-1, 1, 2, 3], [0, 2, 2, 4]) => [-1, -1, 0, -1]);
    test_bop!(i32x4[i32; 4] | u32x4_lt[i32x4_lt_u_test]:
               ([-1, 1, 2, 3], [0, 2, 2, 4]) => [0, -1, 0, -1]);
    test_bop!(i64x2[i64; 2] | i64x2_lt[i64x2_lt_s_test]:
               ([-1, 3], [0, 2]) => [-1, 0]);
    test_bop!(f32x4[f32; 4] => i32 | f32x4_lt[f32x4_lt_test]:
               ([0., 1., 2., 3.], [0., 2., 2., 4.]) => [0, -1, 0, -1]);
    test_bop!(f64x2[f64; 2] => i64 | f64x2_lt[f64x2_lt_test]: ([0., 1.], [0., 2.]) => [0, -1]);
5574
    // Lane-wise `>`; operand order is swapped relative to the `<` tests, and
    // again signed vs unsigned variants disagree on negative-valued lanes.
    test_bop!(i8x16[i8; 16] | i8x16_gt[i8x16_gt_s_test]:
           ([0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, -15],
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) =>
               [0, -1, 0, -1 ,0, -1, 0, 0, 0, -1, 0, -1 ,0, -1, 0, 0]);
    test_bop!(i8x16[i8; 16] | u8x16_gt[i8x16_gt_u_test]:
           ([0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, -15],
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) =>
               [0, -1, 0, -1 ,0, -1, 0, 0, 0, -1, 0, -1 ,0, -1, 0, -1]);
    test_bop!(i16x8[i16; 8] | i16x8_gt[i16x8_gt_s_test]:
               ([0, 2, 2, 4, 4, 6, 6, -7], [0, 1, 2, 3, 4, 5, 6, 7]) =>
               [0, -1, 0, -1 ,0, -1, 0, 0]);
    test_bop!(i16x8[i16; 8] | u16x8_gt[i16x8_gt_u_test]:
               ([0, 2, 2, 4, 4, 6, 6, -7], [0, 1, 2, 3, 4, 5, 6, 7]) =>
               [0, -1, 0, -1 ,0, -1, 0, -1]);
    test_bop!(i32x4[i32; 4] | i32x4_gt[i32x4_gt_s_test]:
               ([0, 2, 2, -4], [0, 1, 2, 3]) => [0, -1, 0, 0]);
    test_bop!(i32x4[i32; 4] | u32x4_gt[i32x4_gt_u_test]:
               ([0, 2, 2, -4], [0, 1, 2, 3]) => [0, -1, 0, -1]);
    test_bop!(i64x2[i64; 2] | i64x2_gt[i64x2_gt_s_test]:
               ([-1, 2], [0, 1]) => [0, -1]);
    test_bop!(f32x4[f32; 4] => i32 | f32x4_gt[f32x4_gt_test]:
               ([0., 2., 2., 4.], [0., 1., 2., 3.]) => [0, -1, 0, -1]);
    test_bop!(f64x2[f64; 2] => i64 | f64x2_gt[f64x2_gt_test]: ([0., 2.], [0., 1.]) => [0, -1]);
5598
    // Lane-wise `>=`: complement of the strict `<` masks for the same
    // operands; signed/unsigned variants differ on negative-valued lanes.
    test_bop!(i8x16[i8; 16] | i8x16_ge[i8x16_ge_s_test]:
               ([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, -15],
                [0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, 15]) =>
               [-1, 0, -1, 0 ,-1, 0, -1, -1, -1, 0, -1, 0 ,-1, 0, -1, 0]);
    test_bop!(i8x16[i8; 16] | u8x16_ge[i8x16_ge_u_test]:
               ([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, -15],
                [0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, 15]) =>
               [-1, 0, -1, 0 ,-1, 0, -1, -1, -1, 0, -1, 0 ,-1, 0, -1, -1]);
    test_bop!(i16x8[i16; 8] | i16x8_ge[i16x8_ge_s_test]:
               ([0, 1, 2, 3, 4, 5, 6, -7], [0, 2, 2, 4, 4, 6, 6, 7]) =>
               [-1, 0, -1, 0 ,-1, 0, -1, 0]);
    test_bop!(i16x8[i16; 8] | u16x8_ge[i16x8_ge_u_test]:
               ([0, 1, 2, 3, 4, 5, 6, -7], [0, 2, 2, 4, 4, 6, 6, 7]) =>
               [-1, 0, -1, 0 ,-1, 0, -1, -1]);
    test_bop!(i32x4[i32; 4] | i32x4_ge[i32x4_ge_s_test]:
               ([0, 1, 2, -3], [0, 2, 2, 4]) => [-1, 0, -1, 0]);
    test_bop!(i32x4[i32; 4] | u32x4_ge[i32x4_ge_u_test]:
               ([0, 1, 2, -3], [0, 2, 2, 4]) => [-1, 0, -1, -1]);
    test_bop!(i64x2[i64; 2] | i64x2_ge[i64x2_ge_s_test]:
               ([0, 1], [-1, 2]) => [-1, 0]);
    test_bop!(f32x4[f32; 4] => i32 | f32x4_ge[f32x4_ge_test]:
               ([0., 1., 2., 3.], [0., 2., 2., 4.]) => [-1, 0, -1, 0]);
    test_bop!(f64x2[f64; 2] => i64 | f64x2_ge[f64x2_ge_test]: ([0., 1.], [0., 2.]) => [-1, 0]);
5622
    // Lane-wise `<=` for the integer lane widths; signed/unsigned variants
    // differ only where a lane holds a negative value.
    test_bop!(i8x16[i8; 16] | i8x16_le[i8x16_le_s_test]:
               ([0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, -15],
                [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
               ) =>
               [-1, 0, -1, 0 ,-1, 0, -1, -1, -1, 0, -1, 0 ,-1, 0, -1, -1]);
    test_bop!(i8x16[i8; 16] | u8x16_le[i8x16_le_u_test]:
               ([0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, -15],
                [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
               ) =>
               [-1, 0, -1, 0 ,-1, 0, -1, -1, -1, 0, -1, 0 ,-1, 0, -1, 0]);
    test_bop!(i16x8[i16; 8] | i16x8_le[i16x8_le_s_test]:
               ([0, 2, 2, 4, 4, 6, 6, -7], [0, 1, 2, 3, 4, 5, 6, 7]) =>
               [-1, 0, -1, 0 ,-1, 0, -1, -1]);
    test_bop!(i16x8[i16; 8] | u16x8_le[i16x8_le_u_test]:
               ([0, 2, 2, 4, 4, 6, 6, -7], [0, 1, 2, 3, 4, 5, 6, 7]) =>
               [-1, 0, -1, 0 ,-1, 0, -1, 0]);
    test_bop!(i32x4[i32; 4] | i32x4_le[i32x4_le_s_test]:
               ([0, 2, 2, -4], [0, 1, 2, 3]) => [-1, 0, -1, -1]);
    test_bop!(i32x4[i32; 4] | u32x4_le[i32x4_le_u_test]:
               ([0, 2, 2, -4], [0, 1, 2, 3]) => [-1, 0, -1, 0]);
    test_bop!(i64x2[i64; 2] | i64x2_le[i64x2_le_s_test]:
               ([0, 2], [0, 1]) => [-1, 0]);
5645    test_bop!(f32x4[f32; 4] => i32 | f32x4_le[f32x4_le_test]:
5646               ([0., 2., 2., 4.], [0., 1., 2., 3.]) => [-1, 0, -1, -0]);
5647    test_bop!(f64x2[f64; 2] => i64 | f64x2_le[f64x2_le_test]: ([0., 2.], [0., 1.]) => [-1, 0]);
5648
    // f32x4 unary and binary arithmetic. The `_nan` variants place a NaN in
    // one operand lane and expect NaN in the corresponding result lane (the
    // expected arrays below encode that contract; `compare_bytes` checks the
    // raw lane bytes).
    test_uop!(f32x4[f32; 4] | f32x4_neg[f32x4_neg_test]: [0., 1., 2., 3.] => [ 0., -1., -2., -3.]);
    test_uop!(f32x4[f32; 4] | f32x4_abs[f32x4_abs_test]: [0., -1., 2., -3.] => [ 0., 1., 2., 3.]);
    test_bop!(f32x4[f32; 4] | f32x4_min[f32x4_min_test]:
              ([0., -1., 7., 8.], [1., -3., -4., 10.]) => [0., -3., -4., 8.]);
    test_bop!(f32x4[f32; 4] | f32x4_min[f32x4_min_test_nan]:
              ([0., -1., 7., 8.], [1., -3., -4., f32::NAN])
              => [0., -3., -4., f32::NAN]);
    test_bop!(f32x4[f32; 4] | f32x4_max[f32x4_max_test]:
              ([0., -1., 7., 8.], [1., -3., -4., 10.]) => [1., -1., 7., 10.]);
    test_bop!(f32x4[f32; 4] | f32x4_max[f32x4_max_test_nan]:
              ([0., -1., 7., 8.], [1., -3., -4., f32::NAN])
              => [1., -1., 7., f32::NAN]);
    test_bop!(f32x4[f32; 4] | f32x4_add[f32x4_add_test]:
              ([0., -1., 7., 8.], [1., -3., -4., 10.]) => [1., -4., 3., 18.]);
    test_bop!(f32x4[f32; 4] | f32x4_sub[f32x4_sub_test]:
              ([0., -1., 7., 8.], [1., -3., -4., 10.]) => [-1., 2., 11., -2.]);
    test_bop!(f32x4[f32; 4] | f32x4_mul[f32x4_mul_test]:
              ([0., -1., 7., 8.], [1., -3., -4., 10.]) => [0., 3., -28., 80.]);
    test_bop!(f32x4[f32; 4] | f32x4_div[f32x4_div_test]:
              ([0., -8., 70., 8.], [1., 4., 10., 2.]) => [0., -2., 7., 4.]);
5669
    // f64x2 unary and binary arithmetic — mirrors the f32x4 tests above,
    // including the NaN-propagating `_nan` variants for min/max.
    test_uop!(f64x2[f64; 2] | f64x2_neg[f64x2_neg_test]: [0., 1.] => [ 0., -1.]);
    test_uop!(f64x2[f64; 2] | f64x2_abs[f64x2_abs_test]: [0., -1.] => [ 0., 1.]);
    test_bop!(f64x2[f64; 2] | f64x2_min[f64x2_min_test]:
               ([0., -1.], [1., -3.]) => [0., -3.]);
    test_bop!(f64x2[f64; 2] | f64x2_min[f64x2_min_test_nan]:
               ([7., 8.], [-4., f64::NAN])
               => [ -4., f64::NAN]);
    test_bop!(f64x2[f64; 2] | f64x2_max[f64x2_max_test]:
               ([0., -1.], [1., -3.]) => [1., -1.]);
    test_bop!(f64x2[f64; 2] | f64x2_max[f64x2_max_test_nan]:
               ([7., 8.], [ -4., f64::NAN])
               => [7., f64::NAN]);
    test_bop!(f64x2[f64; 2] | f64x2_add[f64x2_add_test]:
               ([0., -1.], [1., -3.]) => [1., -4.]);
    test_bop!(f64x2[f64; 2] | f64x2_sub[f64x2_sub_test]:
               ([0., -1.], [1., -3.]) => [-1., 2.]);
    test_bop!(f64x2[f64; 2] | f64x2_mul[f64x2_mul_test]:
               ([0., -1.], [1., -3.]) => [0., 3.]);
    test_bop!(f64x2[f64; 2] | f64x2_div[f64x2_div_test]:
               ([0., -8.], [1., 4.]) => [0., -2.]);
5690
5691    macro_rules! test_conv {
5692        ($test_id:ident | $conv_id:ident | $to_ty:ident | $from:expr,  $to:expr) => {
5693            #[test]
5694            fn $test_id() {
5695                unsafe {
5696                    let from: v128 = transmute($from);
5697                    let to: v128 = transmute($to);
5698
5699                    let r: v128 = $conv_id(from);
5700
5701                    compare_bytes(r, to);
5702                }
5703            }
5704        };
5705    }
5706
    // Integer-to-float lane conversions: signed i32 lanes, then unsigned u32
    // lanes (u32::MAX is compared against the value of `u32::MAX as f32`).
    test_conv!(
        f32x4_convert_s_i32x4 | f32x4_convert_i32x4 | f32x4 | [1_i32, 2, 3, 4],
        [1_f32, 2., 3., 4.]
    );
    test_conv!(
        f32x4_convert_u_i32x4 | f32x4_convert_u32x4 | f32x4 | [u32::MAX, 2, 3, 4],
        [u32::MAX as f32, 2., 3., 4.]
    );
5715
5716    #[test]
5717    fn test_conversions() {
5718        compare_bytes(
5719            i32x4_trunc_sat_f32x4(f32x4(1., f32::NEG_INFINITY, f32::INFINITY, f32::NAN)),
5720            i32x4(1, i32::MIN, i32::MAX, 0),
5721        );
5722        compare_bytes(
5723            u32x4_trunc_sat_f32x4(f32x4(1., f32::NEG_INFINITY, f32::INFINITY, f32::NAN)),
5724            u32x4(1, 0, u32::MAX, 0),
5725        );
5726        compare_bytes(f64x2_convert_low_i32x4(i32x4(1, 2, 3, 4)), f64x2(1., 2.));
5727        compare_bytes(
5728            f64x2_convert_low_i32x4(i32x4(i32::MIN, i32::MAX, 3, 4)),
5729            f64x2(f64::from(i32::MIN), f64::from(i32::MAX)),
5730        );
5731        compare_bytes(f64x2_convert_low_u32x4(u32x4(1, 2, 3, 4)), f64x2(1., 2.));
5732        compare_bytes(
5733            f64x2_convert_low_u32x4(u32x4(u32::MIN, u32::MAX, 3, 4)),
5734            f64x2(f64::from(u32::MIN), f64::from(u32::MAX)),
5735        );
5736
5737        compare_bytes(
5738            i32x4_trunc_sat_f64x2_zero(f64x2(1., f64::NEG_INFINITY)),
5739            i32x4(1, i32::MIN, 0, 0),
5740        );
5741        compare_bytes(
5742            i32x4_trunc_sat_f64x2_zero(f64x2(f64::NAN, f64::INFINITY)),
5743            i32x4(0, i32::MAX, 0, 0),
5744        );
5745        compare_bytes(
5746            u32x4_trunc_sat_f64x2_zero(f64x2(1., f64::NEG_INFINITY)),
5747            u32x4(1, 0, 0, 0),
5748        );
5749        compare_bytes(
5750            u32x4_trunc_sat_f64x2_zero(f64x2(f64::NAN, f64::INFINITY)),
5751            u32x4(0, u32::MAX, 0, 0),
5752        );
5753    }
5754
5755    #[test]
5756    fn test_popcnt() {
5757        unsafe {
5758            for i in 0..=255 {
5759                compare_bytes(
5760                    i8x16_popcnt(u8x16_splat(i)),
5761                    u8x16_splat(i.count_ones() as u8),
5762                )
5763            }
5764
5765            let vectors = [
5766                [0u8, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
5767                [
5768                    100, 200, 50, 0, 10, 7, 38, 185, 192, 3, 34, 85, 93, 7, 31, 99,
5769                ],
5770            ];
5771
5772            for vector in vectors.iter() {
5773                compare_bytes(
5774                    i8x16_popcnt(transmute(*vector)),
5775                    i8x16(
5776                        vector[0].count_ones() as i8,
5777                        vector[1].count_ones() as i8,
5778                        vector[2].count_ones() as i8,
5779                        vector[3].count_ones() as i8,
5780                        vector[4].count_ones() as i8,
5781                        vector[5].count_ones() as i8,
5782                        vector[6].count_ones() as i8,
5783                        vector[7].count_ones() as i8,
5784                        vector[8].count_ones() as i8,
5785                        vector[9].count_ones() as i8,
5786                        vector[10].count_ones() as i8,
5787                        vector[11].count_ones() as i8,
5788                        vector[12].count_ones() as i8,
5789                        vector[13].count_ones() as i8,
5790                        vector[14].count_ones() as i8,
5791                        vector[15].count_ones() as i8,
5792                    ),
5793                )
5794            }
5795        }
5796    }
5797
5798    #[test]
5799    fn test_promote_demote() {
5800        let tests = [
5801            [1., 2.],
5802            [f64::NAN, f64::INFINITY],
5803            [100., 201.],
5804            [0., -0.],
5805            [f64::NEG_INFINITY, 0.],
5806        ];
5807
5808        for [a, b] in tests {
5809            compare_bytes(
5810                f32x4_demote_f64x2_zero(f64x2(a, b)),
5811                f32x4(a as f32, b as f32, 0., 0.),
5812            );
5813            compare_bytes(
5814                f64x2_promote_low_f32x4(f32x4(a as f32, b as f32, 0., 0.)),
5815                f64x2(a, b),
5816            );
5817        }
5818    }
5819
    #[test]
    fn test_extmul() {
        // For each `$ctor { .. } => { ([a..] * [b..]) .. }` group, the macro
        // widens each input lane from `$from` to `$to`, multiplies lane-wise,
        // and checks the result of `$low` against the low half of the lanes
        // and `$high` against the high half, using scalar `wrapping_mul` on
        // the widened values as the reference.
        macro_rules! test {
            ($(
                $ctor:ident {
                    from: $from:ident,
                    to: $to:ident,
                    low: $low:ident,
                    high: $high:ident,
                } => {
                    $(([$($a:tt)*] * [$($b:tt)*]))*
                }
            )*) => ($(
                $(unsafe {
                    // The token lists `$a`/`$b` serve double duty: as array
                    // literals for the scalar reference and as argument lists
                    // for the `$ctor` vector constructor.
                    let a: [$from; 16 / mem::size_of::<$from>()] = [$($a)*];
                    let b: [$from; 16 / mem::size_of::<$from>()] = [$($b)*];
                    let low = mem::transmute::<_, [$to; 16 / mem::size_of::<$to>()]>($low($ctor($($a)*), $ctor($($b)*)));
                    let high = mem::transmute::<_, [$to; 16 / mem::size_of::<$to>()]>($high($ctor($($a)*), $ctor($($b)*)));

                    let half = a.len() / 2;
                    for i in 0..half {
                        assert_eq!(
                            (a[i] as $to).wrapping_mul((b[i] as $to)),
                            low[i],
                            "expected {} * {}", a[i] as $to, b[i] as $to,
                        );
                        assert_eq!(
                            (a[half + i] as $to).wrapping_mul((b[half + i] as $to)),
                            high[i],
                            "expected {} * {}", a[half + i] as $to, b[half + i] as $to,
                        );
                    }
                })*
            )*)
        }
        // Each width gets an all-zero case plus mixed-sign / boundary-value
        // cases (MIN/MAX lanes where applicable).
        test! {
            i8x16 {
                from: i8,
                to: i16,
                low: i16x8_extmul_low_i8x16,
                high: i16x8_extmul_high_i8x16,
            } => {
                (
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                        *
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                )
                (
                    [-1, -2, 3, 100, 124, -38, 33, 87, 92, 108, 22, 8, -43, -128, 22, 0]
                        *
                    [-5, -2, 6, 10, 45, -4, 4, -2, 0, 88, 92, -102, -98, 83, 73, 54]
                )
            }
            u8x16 {
                from: u8,
                to: u16,
                low: u16x8_extmul_low_u8x16,
                high: u16x8_extmul_high_u8x16,
            } => {
                (
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                        *
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                )
                (
                    [1, 2, 3, 100, 124, 38, 33, 87, 92, 198, 22, 8, 43, 128, 22, 0]
                        *
                    [5, 200, 6, 10, 45, 248, 4, 2, 0, 2, 92, 102, 234, 83, 73, 54]
                )
            }
            i16x8 {
                from: i16,
                to: i32,
                low: i32x4_extmul_low_i16x8,
                high: i32x4_extmul_high_i16x8,
            } => {
                (
                    [0, 0, 0, 0, 0, 0, 0, 0]
                        *
                    [0, 0, 0, 0, 0, 0, 0, 0]
                )
                (
                    [-1, 0, i16::MAX, 19931, -2259, 64, 200, 87]
                        *
                    [1, 1, i16::MIN, 29391, 105, 2, 100, -2]
                )
            }
            u16x8 {
                from: u16,
                to: u32,
                low: u32x4_extmul_low_u16x8,
                high: u32x4_extmul_high_u16x8,
            } => {
                (
                    [0, 0, 0, 0, 0, 0, 0, 0]
                        *
                    [0, 0, 0, 0, 0, 0, 0, 0]
                )
                (
                    [1, 0, u16::MAX, 19931, 2259, 64, 200, 87]
                        *
                    [1, 1, 3, 29391, 105, 2, 100, 2]
                )
            }
            i32x4 {
                from: i32,
                to: i64,
                low: i64x2_extmul_low_i32x4,
                high: i64x2_extmul_high_i32x4,
            } => {
                (
                    [0, 0, 0, 0]
                        *
                    [0, 0, 0, 0]
                )
                (
                    [-1, 0, i32::MAX, 19931]
                        *
                    [1, 1, i32::MIN, 29391]
                )
                (
                    [i32::MAX, 3003183, 3 << 20, 0xffffff]
                        *
                    [i32::MAX, i32::MIN, -40042, 300]
                )
            }
            u32x4 {
                from: u32,
                to: u64,
                low: u64x2_extmul_low_u32x4,
                high: u64x2_extmul_high_u32x4,
            } => {
                (
                    [0, 0, 0, 0]
                        *
                    [0, 0, 0, 0]
                )
                (
                    [1, 0, u32::MAX, 19931]
                        *
                    [1, 1, 3, 29391]
                )
                (
                    [u32::MAX, 3003183, 3 << 20, 0xffffff]
                        *
                    [u32::MAX, 3000, 40042, 300]
                )
            }
        }
    }
5970
5971    #[test]
5972    fn test_q15mulr_sat_s() {
5973        fn test(a: [i16; 8], b: [i16; 8]) {
5974            let a_v = i16x8(a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7]);
5975            let b_v = i16x8(b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]);
5976            let result = i16x8_q15mulr_sat(a_v, b_v);
5977            let result = unsafe { mem::transmute::<v128, [i16; 8]>(result) };
5978
5979            for (i, (a, b)) in a.iter().zip(&b).enumerate() {
5980                assert_eq!(
5981                    result[i],
5982                    (((*a as i32) * (*b as i32) + 0x4000) >> 15) as i16
5983                );
5984            }
5985        }
5986
5987        test([0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]);
5988        test([1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1]);
5989        test(
5990            [-1, 100, 2003, -29494, 12, 128, 994, 1],
5991            [-4049, 8494, -10483, 0, 5, 2222, 883, -9],
5992        );
5993    }
5994
    #[test]
    fn test_extadd() {
        // For each `$func { from, to } => { [lanes..] .. }` group, the macro
        // widens adjacent lane pairs of the input array from `$from` to `$to`
        // and checks that the intrinsic's result lanes equal the scalar
        // pairwise sums (`a[2i] + a[2i+1]` widened, via `wrapping_add`).
        macro_rules! test {
            ($(
                $func:ident {
                    from: $from:ident,
                    to: $to:ident,
                } => {
                    $([$($a:tt)*])*
                }
            )*) => ($(
                $(unsafe {
                    let a: [$from; 16 / mem::size_of::<$from>()] = [$($a)*];
                    let a_v = mem::transmute::<_, v128>(a);
                    let r = mem::transmute::<v128, [$to; 16 / mem::size_of::<$to>()]>($func(a_v));

                    let half = a.len() / 2;
                    for i in 0..half {
                        assert_eq!(
                            (a[2 * i] as $to).wrapping_add((a[2 * i + 1] as $to)),
                            r[i],
                            "failed {} + {} != {}",
                            a[2 * i] as $to,
                            a[2 * i + 1] as $to,
                            r[i],
                        );
                    }
                })*
            )*)
        }
        // Each intrinsic gets an all-zero vector plus mixed-sign /
        // boundary-value vectors (MIN/MAX lanes where applicable).
        test! {
            i16x8_extadd_pairwise_i8x16 {
                from: i8,
                to: i16,
            } => {
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                [-1, -2, 3, 100, 124, -38, 33, 87, 92, 108, 22, 8, -43, -128, 22, 0]
                [-5, -2, 6, 10, 45, -4, 4, -2, 0, 88, 92, -102, -98, 83, 73, 54]
            }
            i16x8_extadd_pairwise_u8x16 {
                from: u8,
                to: i16,
            } => {
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                [1, 2, 3, 100, 124, 38, 33, 87, 92, 198, 22, 8, 43, 128, 22, 0]
                [5, 200, 6, 10, 45, 248, 4, 2, 0, 2, 92, 102, 234, 83, 73, 54]
            }
            i32x4_extadd_pairwise_i16x8 {
                from: i16,
                to: i32,
            } => {
                [0, 0, 0, 0, 0, 0, 0, 0]
                [-1, 0, i16::MAX, 19931, -2259, 64, 200, 87]
                [1, 1, i16::MIN, 29391, 105, 2, 100, -2]
            }
            i32x4_extadd_pairwise_u16x8 {
                from: u16,
                to: i32,
            } => {
                [0, 0, 0, 0, 0, 0, 0, 0]
                [1, 0, u16::MAX, 19931, 2259, 64, 200, 87]
                [1, 1, 3, 29391, 105, 2, 100, 2]
            }
        }
    }
6060}