Skip to main content

core/stdarch/crates/core_arch/src/aarch64/neon/
generated.rs

1// This code is automatically generated. DO NOT MODIFY.
2//
3// Instead, modify `crates/stdarch-gen-arm/spec/` and run the following command to re-generate this file:
4//
5// ```
6// cargo run --bin=stdarch-gen-arm -- crates/stdarch-gen-arm/spec
7// ```
8#![allow(improper_ctypes)]
9
10#[cfg(test)]
11use stdarch_test::assert_instr;
12
13use super::*;
14
#[doc = "CRC32-C single round checksum for quad words (64 bits)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32cd)"]
#[inline(always)]
#[target_feature(enable = "crc")]
#[cfg_attr(test, assert_instr(crc32cx))]
#[stable(feature = "stdarch_aarch64_crc32", since = "1.80.0")]
pub fn __crc32cd(crc: u32, data: u64) -> u32 {
    // Declaration of the LLVM intrinsic that lowers to CRC32CX.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crc32cx"
        )]
        fn ___crc32cd(crc: u32, data: u64) -> u32;
    }
    // SAFETY: the intrinsic has no preconditions beyond the `crc` target
    // feature, which `#[target_feature]` above guarantees.
    unsafe { ___crc32cd(crc, data) }
}
#[doc = "CRC32 single round checksum for quad words (64 bits)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32d)"]
#[inline(always)]
#[target_feature(enable = "crc")]
#[cfg_attr(test, assert_instr(crc32x))]
#[stable(feature = "stdarch_aarch64_crc32", since = "1.80.0")]
pub fn __crc32d(crc: u32, data: u64) -> u32 {
    // Declaration of the LLVM intrinsic that lowers to CRC32X.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crc32x"
        )]
        fn ___crc32d(crc: u32, data: u64) -> u32;
    }
    // SAFETY: no preconditions beyond the `crc` target feature enabled above.
    unsafe { ___crc32d(crc, data) }
}
#[doc = "Floating-point JavaScript convert to signed fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__jcvt)"]
#[inline(always)]
#[target_feature(enable = "jsconv")]
#[cfg_attr(test, assert_instr(fjcvtzs))]
#[stable(feature = "stdarch_aarch64_jscvt", since = "CURRENT_RUSTC_VERSION")]
pub fn __jcvt(a: f64) -> i32 {
    // Declaration of the LLVM intrinsic that lowers to FJCVTZS.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.fjcvtzs"
        )]
        fn ___jcvt(a: f64) -> i32;
    }
    // SAFETY: no preconditions beyond the `jsconv` target feature enabled above.
    unsafe { ___jcvt(a) }
}
#[doc = "Signed Absolute difference and Accumulate Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sabal2))]
pub fn vabal_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t {
    unsafe {
        // Select the upper halves (lanes 8..15) of `b` and `c`.
        let d: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let e: int8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
        let f: int8x8_t = vabd_s8(d, e);
        // The absolute difference is non-negative, so routing the widening
        // cast through an unsigned type makes it a zero-extension.
        let f: uint8x8_t = simd_cast(f);
        simd_add(a, simd_cast(f))
    }
}
#[doc = "Signed Absolute difference and Accumulate Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sabal2))]
pub fn vabal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    unsafe {
        // Select the upper halves (lanes 4..7) of `b` and `c`.
        let d: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let e: int16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
        let f: int16x4_t = vabd_s16(d, e);
        // Non-negative difference: unsigned cast makes the widening a
        // zero-extension before accumulating into `a`.
        let f: uint16x4_t = simd_cast(f);
        simd_add(a, simd_cast(f))
    }
}
#[doc = "Signed Absolute difference and Accumulate Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sabal2))]
pub fn vabal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    unsafe {
        // Select the upper halves (lanes 2..3) of `b` and `c`.
        let d: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        let e: int32x2_t = simd_shuffle!(c, c, [2, 3]);
        let f: int32x2_t = vabd_s32(d, e);
        // Non-negative difference: unsigned cast makes the widening a
        // zero-extension before accumulating into `a`.
        let f: uint32x2_t = simd_cast(f);
        simd_add(a, simd_cast(f))
    }
}
#[doc = "Unsigned Absolute difference and Accumulate Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uabal2))]
pub fn vabal_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t {
    unsafe {
        // Select the upper halves (lanes 8..15) of `b` and `c`.
        let d: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let e: uint8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
        let f: uint8x8_t = vabd_u8(d, e);
        // Widen the unsigned difference and accumulate into `a`.
        simd_add(a, simd_cast(f))
    }
}
#[doc = "Unsigned Absolute difference and Accumulate Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uabal2))]
pub fn vabal_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t {
    unsafe {
        // Select the upper halves (lanes 4..7) of `b` and `c`.
        let d: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let e: uint16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
        let f: uint16x4_t = vabd_u16(d, e);
        // Widen the unsigned difference and accumulate into `a`.
        simd_add(a, simd_cast(f))
    }
}
#[doc = "Unsigned Absolute difference and Accumulate Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uabal2))]
pub fn vabal_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t {
    unsafe {
        // Select the upper halves (lanes 2..3) of `b` and `c`.
        let d: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
        let e: uint32x2_t = simd_shuffle!(c, c, [2, 3]);
        let f: uint32x2_t = vabd_u32(d, e);
        // Widen the unsigned difference and accumulate into `a`.
        simd_add(a, simd_cast(f))
    }
}
#[doc = "Absolute difference between the arguments of Floating"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fabd))]
pub fn vabd_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // Declaration of the LLVM intrinsic that lowers to FABD on a 1-lane f64 vector.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fabd.v1f64"
        )]
        fn _vabd_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    // SAFETY: no preconditions beyond the `neon` target feature enabled above.
    unsafe { _vabd_f64(a, b) }
}
#[doc = "Absolute difference between the arguments of Floating"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fabd))]
pub fn vabdq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Declaration of the LLVM intrinsic that lowers to FABD on a 2-lane f64 vector.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fabd.v2f64"
        )]
        fn _vabdq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: no preconditions beyond the `neon` target feature enabled above.
    unsafe { _vabdq_f64(a, b) }
}
#[doc = "Floating-point absolute difference"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fabd))]
pub fn vabdd_f64(a: f64, b: f64) -> f64 {
    // Scalar form: splat both operands into 1-lane vectors, reuse the vector
    // absolute-difference intrinsic, and extract lane 0.
    unsafe { simd_extract!(vabd_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
}
#[doc = "Floating-point absolute difference"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabds_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fabd))]
pub fn vabds_f32(a: f32, b: f32) -> f32 {
    // Scalar form: splat both operands, reuse the vector absolute-difference
    // intrinsic, and extract lane 0.
    unsafe { simd_extract!(vabd_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
}
#[doc = "Floating-point absolute difference"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fabd))]
pub fn vabdh_f16(a: f16, b: f16) -> f16 {
    // Scalar f16 form: splat both operands, reuse the vector
    // absolute-difference intrinsic, and extract lane 0.
    unsafe { simd_extract!(vabd_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
}
#[doc = "Signed Absolute difference Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sabdl2))]
pub fn vabdl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
    unsafe {
        // Select the upper halves (lanes 4..7) of both inputs.
        let c: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let d: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        // The absolute difference is non-negative; casting through unsigned
        // makes the widening below a zero-extension.
        let e: uint16x4_t = simd_cast(vabd_s16(c, d));
        simd_cast(e)
    }
}
#[doc = "Signed Absolute difference Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sabdl2))]
pub fn vabdl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
    unsafe {
        // Select the upper halves (lanes 2..3) of both inputs.
        let c: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        let d: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        // Non-negative difference; unsigned cast makes the widening a
        // zero-extension.
        let e: uint32x2_t = simd_cast(vabd_s32(c, d));
        simd_cast(e)
    }
}
#[doc = "Signed Absolute difference Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sabdl2))]
pub fn vabdl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t {
    unsafe {
        // Select the upper halves (lanes 8..15) of both inputs.
        let c: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        let d: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        // Non-negative difference; unsigned cast makes the widening a
        // zero-extension.
        let e: uint8x8_t = simd_cast(vabd_s8(c, d));
        simd_cast(e)
    }
}
#[doc = "Unsigned Absolute difference Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uabdl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vabdl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t {
    unsafe {
        // Select the upper halves (lanes 8..15) of both inputs, then widen
        // the unsigned absolute difference.
        let c: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        let d: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        simd_cast(vabd_u8(c, d))
    }
}
#[doc = "Unsigned Absolute difference Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uabdl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vabdl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
    unsafe {
        // Select the upper halves (lanes 4..7) of both inputs, then widen
        // the unsigned absolute difference.
        let c: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let d: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        simd_cast(vabd_u16(c, d))
    }
}
#[doc = "Unsigned Absolute difference Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uabdl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vabdl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
    unsafe {
        // Select the upper halves (lanes 2..3) of both inputs, then widen
        // the unsigned absolute difference.
        let c: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
        let d: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
        simd_cast(vabd_u32(c, d))
    }
}
#[doc = "Floating-point absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fabs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vabs_f64(a: float64x1_t) -> float64x1_t {
    // Lane-wise |x| via the portable SIMD fabs primitive.
    unsafe { simd_fabs(a) }
}
#[doc = "Floating-point absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fabs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vabsq_f64(a: float64x2_t) -> float64x2_t {
    // Lane-wise |x| via the portable SIMD fabs primitive.
    unsafe { simd_fabs(a) }
}
#[doc = "Absolute Value (wrapping)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(abs))]
pub fn vabs_s64(a: int64x1_t) -> int64x1_t {
    unsafe {
        // Branch-free abs: pick max(a, -a) per lane. For i64::MIN the
        // negation wraps back to i64::MIN, matching the "wrapping" contract.
        let neg: int64x1_t = simd_neg(a);
        let mask: int64x1_t = simd_ge(a, neg);
        simd_select(mask, a, neg)
    }
}
#[doc = "Absolute Value (wrapping)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(abs))]
pub fn vabsq_s64(a: int64x2_t) -> int64x2_t {
    unsafe {
        // Branch-free abs: pick max(a, -a) per lane. For i64::MIN the
        // negation wraps back to i64::MIN, matching the "wrapping" contract.
        let neg: int64x2_t = simd_neg(a);
        let mask: int64x2_t = simd_ge(a, neg);
        simd_select(mask, a, neg)
    }
}
#[doc = "Absolute Value (wrapping)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(abs))]
pub fn vabsd_s64(a: i64) -> i64 {
    // Declaration of the LLVM intrinsic that lowers to the scalar ABS instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.abs.i64"
        )]
        fn _vabsd_s64(a: i64) -> i64;
    }
    // SAFETY: no preconditions beyond the `neon` target feature enabled above.
    unsafe { _vabsd_s64(a) }
}
#[doc = "Add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vaddd_s64(a: i64, b: i64) -> i64 {
    // Plain wrapping scalar add; no dedicated instruction is required
    // (hence the `nop` assertion above).
    a.wrapping_add(b)
}
#[doc = "Add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddd_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vaddd_u64(a: u64, b: u64) -> u64 {
    // Plain wrapping scalar add; no dedicated instruction is required
    // (hence the `nop` assertion above).
    a.wrapping_add(b)
}
#[doc = "Signed Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(saddlv))]
pub fn vaddlv_s16(a: int16x4_t) -> i32 {
    // Declaration of the LLVM intrinsic for the widening horizontal sum.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddlv.i32.v4i16"
        )]
        fn _vaddlv_s16(a: int16x4_t) -> i32;
    }
    // SAFETY: no preconditions beyond the `neon` target feature enabled above.
    unsafe { _vaddlv_s16(a) }
}
#[doc = "Signed Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(saddlv))]
pub fn vaddlvq_s16(a: int16x8_t) -> i32 {
    // Declaration of the LLVM intrinsic for the widening horizontal sum.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddlv.i32.v8i16"
        )]
        fn _vaddlvq_s16(a: int16x8_t) -> i32;
    }
    // SAFETY: no preconditions beyond the `neon` target feature enabled above.
    unsafe { _vaddlvq_s16(a) }
}
#[doc = "Signed Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(saddlv))]
pub fn vaddlvq_s32(a: int32x4_t) -> i64 {
    // Declaration of the LLVM intrinsic for the widening horizontal sum.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddlv.i64.v4i32"
        )]
        fn _vaddlvq_s32(a: int32x4_t) -> i64;
    }
    // SAFETY: no preconditions beyond the `neon` target feature enabled above.
    unsafe { _vaddlvq_s32(a) }
}
#[doc = "Signed Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(saddlp))]
pub fn vaddlv_s32(a: int32x2_t) -> i64 {
    // Declaration of the LLVM intrinsic for the widening horizontal sum.
    // Note: for a 2-lane input this is asserted to compile to SADDLP (a
    // single pairwise add) rather than SADDLV.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddlv.i64.v2i32"
        )]
        fn _vaddlv_s32(a: int32x2_t) -> i64;
    }
    // SAFETY: no preconditions beyond the `neon` target feature enabled above.
    unsafe { _vaddlv_s32(a) }
}
#[doc = "Signed Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(saddlv))]
pub fn vaddlv_s8(a: int8x8_t) -> i16 {
    // The LLVM intrinsic only exists with an i32 result; the sum of eight
    // i8 lanes always fits in i16, so the final cast below cannot lose data.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddlv.i32.v8i8"
        )]
        fn _vaddlv_s8(a: int8x8_t) -> i32;
    }
    // SAFETY: no preconditions beyond the `neon` target feature enabled above.
    unsafe { _vaddlv_s8(a) as i16 }
}
#[doc = "Signed Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(saddlv))]
pub fn vaddlvq_s8(a: int8x16_t) -> i16 {
    // The LLVM intrinsic only exists with an i32 result; the sum of sixteen
    // i8 lanes always fits in i16, so the final cast below cannot lose data.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.saddlv.i32.v16i8"
        )]
        fn _vaddlvq_s8(a: int8x16_t) -> i32;
    }
    // SAFETY: no preconditions beyond the `neon` target feature enabled above.
    unsafe { _vaddlvq_s8(a) as i16 }
}
#[doc = "Unsigned Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uaddlv))]
pub fn vaddlv_u16(a: uint16x4_t) -> u32 {
    // Declaration of the LLVM intrinsic for the widening horizontal sum.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddlv.i32.v4i16"
        )]
        fn _vaddlv_u16(a: uint16x4_t) -> u32;
    }
    // SAFETY: no preconditions beyond the `neon` target feature enabled above.
    unsafe { _vaddlv_u16(a) }
}
#[doc = "Unsigned Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uaddlv))]
pub fn vaddlvq_u16(a: uint16x8_t) -> u32 {
    // Declaration of the LLVM intrinsic for the widening horizontal sum.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddlv.i32.v8i16"
        )]
        fn _vaddlvq_u16(a: uint16x8_t) -> u32;
    }
    // SAFETY: no preconditions beyond the `neon` target feature enabled above.
    unsafe { _vaddlvq_u16(a) }
}
#[doc = "Unsigned Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uaddlv))]
pub fn vaddlvq_u32(a: uint32x4_t) -> u64 {
    // Declaration of the LLVM intrinsic for the widening horizontal sum.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddlv.i64.v4i32"
        )]
        fn _vaddlvq_u32(a: uint32x4_t) -> u64;
    }
    // SAFETY: no preconditions beyond the `neon` target feature enabled above.
    unsafe { _vaddlvq_u32(a) }
}
#[doc = "Unsigned Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uaddlp))]
pub fn vaddlv_u32(a: uint32x2_t) -> u64 {
    // Declaration of the LLVM intrinsic for the widening horizontal sum.
    // Note: for a 2-lane input this is asserted to compile to UADDLP (a
    // single pairwise add) rather than UADDLV.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddlv.i64.v2i32"
        )]
        fn _vaddlv_u32(a: uint32x2_t) -> u64;
    }
    // SAFETY: no preconditions beyond the `neon` target feature enabled above.
    unsafe { _vaddlv_u32(a) }
}
#[doc = "Unsigned Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uaddlv))]
pub fn vaddlv_u8(a: uint8x8_t) -> u16 {
    // The LLVM intrinsic only exists with an i32 result; the sum of eight
    // u8 lanes always fits in u16, so the final cast below cannot lose data.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddlv.i32.v8i8"
        )]
        fn _vaddlv_u8(a: uint8x8_t) -> i32;
    }
    // SAFETY: no preconditions beyond the `neon` target feature enabled above.
    unsafe { _vaddlv_u8(a) as u16 }
}
#[doc = "Unsigned Add Long across Vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uaddlv))]
pub fn vaddlvq_u8(a: uint8x16_t) -> u16 {
    // The LLVM intrinsic only exists with an i32 result; the sum of sixteen
    // u8 lanes always fits in u16, so the final cast below cannot lose data.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uaddlv.i32.v16i8"
        )]
        fn _vaddlvq_u8(a: uint8x16_t) -> i32;
    }
    // SAFETY: no preconditions beyond the `neon` target feature enabled above.
    unsafe { _vaddlvq_u8(a) as u16 }
}
#[doc = "Floating-point add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(faddp))]
pub fn vaddv_f32(a: float32x2_t) -> f32 {
    // Declaration of the LLVM intrinsic for the floating-point horizontal sum.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.faddv.f32.v2f32"
        )]
        fn _vaddv_f32(a: float32x2_t) -> f32;
    }
    // SAFETY: no preconditions beyond the `neon` target feature enabled above.
    unsafe { _vaddv_f32(a) }
}
#[doc = "Floating-point add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(faddp))]
pub fn vaddvq_f32(a: float32x4_t) -> f32 {
    // Declaration of the LLVM intrinsic for the floating-point horizontal sum.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.faddv.f32.v4f32"
        )]
        fn _vaddvq_f32(a: float32x4_t) -> f32;
    }
    // SAFETY: no preconditions beyond the `neon` target feature enabled above.
    unsafe { _vaddvq_f32(a) }
}
#[doc = "Floating-point add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(faddp))]
pub fn vaddvq_f64(a: float64x2_t) -> f64 {
    // Declaration of the LLVM intrinsic for the floating-point horizontal sum.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.faddv.f64.v2f64"
        )]
        fn _vaddvq_f64(a: float64x2_t) -> f64;
    }
    // SAFETY: no preconditions beyond the `neon` target feature enabled above.
    unsafe { _vaddvq_f64(a) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vaddv_s32(a: int32x2_t) -> i32 {
    // Ordered horizontal sum of all lanes, starting from 0.
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddv_s8(a: int8x8_t) -> i8 {
    // Ordered horizontal sum of all lanes, starting from 0.
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddvq_s8(a: int8x16_t) -> i8 {
    // Ordered horizontal sum of all lanes, starting from 0.
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddv_s16(a: int16x4_t) -> i16 {
    // Ordered horizontal sum of all lanes, starting from 0.
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddvq_s16(a: int16x8_t) -> i16 {
    // Ordered horizontal sum of all lanes, starting from 0.
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddvq_s32(a: int32x4_t) -> i32 {
    // Ordered horizontal sum of all lanes, starting from 0.
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vaddv_u32(a: uint32x2_t) -> u32 {
    // Ordered horizontal sum of all lanes, starting from 0.
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddv_u8(a: uint8x8_t) -> u8 {
    // Ordered horizontal sum of all lanes, starting from 0.
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddvq_u8(a: uint8x16_t) -> u8 {
    // Ordered horizontal sum of all lanes, starting from 0.
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddv_u16(a: uint16x4_t) -> u16 {
    // Ordered horizontal sum of all lanes, starting from 0.
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddvq_u16(a: uint16x8_t) -> u16 {
    // Ordered horizontal sum of all lanes, starting from 0.
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddvq_u32(a: uint32x4_t) -> u32 {
    // Ordered horizontal sum of all lanes, starting from 0.
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vaddvq_s64(a: int64x2_t) -> i64 {
    // Ordered horizontal sum of both lanes, starting from 0 (asserted to
    // compile to a single pairwise ADDP).
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Per the assertion below, the 64-bit-element reduction lowers to ADDP
// (pairwise add) rather than ADDV.
#[cfg_attr(test, assert_instr(addp))]
pub fn vaddvq_u64(a: uint64x2_t) -> u64 {
    // SAFETY: the `neon` target feature required for this vector reduction is
    // enabled on this function. Sums both lanes in order, seeded with 0.
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Multi-vector floating-point absolute maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamax_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(famax))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vamax_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // Raw binding to the LLVM intrinsic backing the FAMAX instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famax.v4f16"
        )]
        fn _vamax_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
    }
    // SAFETY: the `neon` and `faminmax` target features required by this LLVM
    // intrinsic are enabled on this function via #[target_feature].
    unsafe { _vamax_f16(a, b) }
}
#[doc = "Multi-vector floating-point absolute maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamaxq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(famax))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vamaxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // Raw binding to the LLVM intrinsic backing the FAMAX instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famax.v8f16"
        )]
        fn _vamaxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    // SAFETY: the `neon` and `faminmax` target features required by this LLVM
    // intrinsic are enabled on this function via #[target_feature].
    unsafe { _vamaxq_f16(a, b) }
}
#[doc = "Multi-vector floating-point absolute maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamax_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(famax))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vamax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // Raw binding to the LLVM intrinsic backing the FAMAX instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famax.v2f32"
        )]
        fn _vamax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    // SAFETY: the `neon` and `faminmax` target features required by this LLVM
    // intrinsic are enabled on this function via #[target_feature].
    unsafe { _vamax_f32(a, b) }
}
#[doc = "Multi-vector floating-point absolute maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamaxq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(famax))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vamaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // Raw binding to the LLVM intrinsic backing the FAMAX instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famax.v4f32"
        )]
        fn _vamaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    // SAFETY: the `neon` and `faminmax` target features required by this LLVM
    // intrinsic are enabled on this function via #[target_feature].
    unsafe { _vamaxq_f32(a, b) }
}
#[doc = "Multi-vector floating-point absolute maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamaxq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(famax))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vamaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Raw binding to the LLVM intrinsic backing the FAMAX instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famax.v2f64"
        )]
        fn _vamaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: the `neon` and `faminmax` target features required by this LLVM
    // intrinsic are enabled on this function via #[target_feature].
    unsafe { _vamaxq_f64(a, b) }
}
#[doc = "Multi-vector floating-point absolute minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamin_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(famin))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vamin_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // Raw binding to the LLVM intrinsic backing the FAMIN instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famin.v4f16"
        )]
        fn _vamin_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
    }
    // SAFETY: the `neon` and `faminmax` target features required by this LLVM
    // intrinsic are enabled on this function via #[target_feature].
    unsafe { _vamin_f16(a, b) }
}
#[doc = "Multi-vector floating-point absolute minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaminq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(famin))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vaminq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // Raw binding to the LLVM intrinsic backing the FAMIN instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famin.v8f16"
        )]
        fn _vaminq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    // SAFETY: the `neon` and `faminmax` target features required by this LLVM
    // intrinsic are enabled on this function via #[target_feature].
    unsafe { _vaminq_f16(a, b) }
}
#[doc = "Multi-vector floating-point absolute minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamin_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(famin))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vamin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // Raw binding to the LLVM intrinsic backing the FAMIN instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famin.v2f32"
        )]
        fn _vamin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    // SAFETY: the `neon` and `faminmax` target features required by this LLVM
    // intrinsic are enabled on this function via #[target_feature].
    unsafe { _vamin_f32(a, b) }
}
#[doc = "Multi-vector floating-point absolute minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaminq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(famin))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vaminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // Raw binding to the LLVM intrinsic backing the FAMIN instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famin.v4f32"
        )]
        fn _vaminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    // SAFETY: the `neon` and `faminmax` target features required by this LLVM
    // intrinsic are enabled on this function via #[target_feature].
    unsafe { _vaminq_f32(a, b) }
}
#[doc = "Multi-vector floating-point absolute minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaminq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,faminmax")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(famin))]
#[unstable(feature = "faminmax", issue = "137933")]
pub fn vaminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Raw binding to the LLVM intrinsic backing the FAMIN instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.famin.v2f64"
        )]
        fn _vaminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: the `neon` and `faminmax` target features required by this LLVM
    // intrinsic are enabled on this function via #[target_feature].
    unsafe { _vaminq_f64(a, b) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t {
    // Raw binding to the LLVM intrinsic backing the BCAX instruction
    // (signed-element variant, per the `bcaxs` link name).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxs.v16i8"
        )]
        fn _vbcaxq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t;
    }
    // SAFETY: the `neon` and `sha3` target features required by this LLVM
    // intrinsic are enabled on this function via #[target_feature].
    unsafe { _vbcaxq_s8(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    // Raw binding to the LLVM intrinsic backing the BCAX instruction
    // (signed-element variant, per the `bcaxs` link name).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxs.v8i16"
        )]
        fn _vbcaxq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t;
    }
    // SAFETY: the `neon` and `sha3` target features required by this LLVM
    // intrinsic are enabled on this function via #[target_feature].
    unsafe { _vbcaxq_s16(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    // Raw binding to the LLVM intrinsic backing the BCAX instruction
    // (signed-element variant, per the `bcaxs` link name).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxs.v4i32"
        )]
        fn _vbcaxq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t;
    }
    // SAFETY: the `neon` and `sha3` target features required by this LLVM
    // intrinsic are enabled on this function via #[target_feature].
    unsafe { _vbcaxq_s32(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t {
    // Raw binding to the LLVM intrinsic backing the BCAX instruction
    // (signed-element variant, per the `bcaxs` link name).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxs.v2i64"
        )]
        fn _vbcaxq_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t;
    }
    // SAFETY: the `neon` and `sha3` target features required by this LLVM
    // intrinsic are enabled on this function via #[target_feature].
    unsafe { _vbcaxq_s64(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u8)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t {
    // Raw binding to the LLVM intrinsic backing the BCAX instruction
    // (unsigned-element variant, per the `bcaxu` link name).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxu.v16i8"
        )]
        fn _vbcaxq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t;
    }
    // SAFETY: the `neon` and `sha3` target features required by this LLVM
    // intrinsic are enabled on this function via #[target_feature].
    unsafe { _vbcaxq_u8(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t {
    // Raw binding to the LLVM intrinsic backing the BCAX instruction
    // (unsigned-element variant, per the `bcaxu` link name).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxu.v8i16"
        )]
        fn _vbcaxq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t;
    }
    // SAFETY: the `neon` and `sha3` target features required by this LLVM
    // intrinsic are enabled on this function via #[target_feature].
    unsafe { _vbcaxq_u16(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    // Raw binding to the LLVM intrinsic backing the BCAX instruction
    // (unsigned-element variant, per the `bcaxu` link name).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxu.v4i32"
        )]
        fn _vbcaxq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
    }
    // SAFETY: the `neon` and `sha3` target features required by this LLVM
    // intrinsic are enabled on this function via #[target_feature].
    unsafe { _vbcaxq_u32(a, b, c) }
}
#[doc = "Bit clear and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(bcax))]
pub fn vbcaxq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
    // Raw binding to the LLVM intrinsic backing the BCAX instruction
    // (unsigned-element variant, per the `bcaxu` link name).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.bcaxu.v2i64"
        )]
        fn _vbcaxq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
    }
    // SAFETY: the `neon` and `sha3` target features required by this LLVM
    // intrinsic are enabled on this function via #[target_feature].
    unsafe { _vbcaxq_u64(a, b, c) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot270_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fcma"))]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcadd_rot270_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // Raw binding to the LLVM intrinsic backing the FCADD instruction
    // with a 270-degree rotation.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot270.v4f16"
        )]
        fn _vcadd_rot270_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
    }
    // SAFETY: the `neon`, `fp16`, and `fcma` target features required by this
    // LLVM intrinsic are enabled on this function via #[target_feature].
    unsafe { _vcadd_rot270_f16(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot270_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fcma"))]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcaddq_rot270_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // Raw binding to the LLVM intrinsic backing the FCADD instruction
    // with a 270-degree rotation.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot270.v8f16"
        )]
        fn _vcaddq_rot270_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    // SAFETY: the `neon`, `fp16`, and `fcma` target features required by this
    // LLVM intrinsic are enabled on this function via #[target_feature].
    unsafe { _vcaddq_rot270_f16(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot270_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcadd_rot270_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // Raw binding to the LLVM intrinsic backing the FCADD instruction
    // with a 270-degree rotation.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot270.v2f32"
        )]
        fn _vcadd_rot270_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    // SAFETY: the `neon` and `fcma` target features required by this LLVM
    // intrinsic are enabled on this function via #[target_feature].
    unsafe { _vcadd_rot270_f32(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot270_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcaddq_rot270_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // Raw binding to the LLVM intrinsic backing the FCADD instruction
    // with a 270-degree rotation.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot270.v4f32"
        )]
        fn _vcaddq_rot270_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    // SAFETY: the `neon` and `fcma` target features required by this LLVM
    // intrinsic are enabled on this function via #[target_feature].
    unsafe { _vcaddq_rot270_f32(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot270_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcaddq_rot270_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Raw binding to the LLVM intrinsic backing the FCADD instruction
    // with a 270-degree rotation.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot270.v2f64"
        )]
        fn _vcaddq_rot270_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: the `neon` and `fcma` target features required by this LLVM
    // intrinsic are enabled on this function via #[target_feature].
    unsafe { _vcaddq_rot270_f64(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot90_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fcma"))]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcadd_rot90_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // Raw binding to the LLVM intrinsic backing the FCADD instruction
    // with a 90-degree rotation.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot90.v4f16"
        )]
        fn _vcadd_rot90_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
    }
    // SAFETY: the `neon`, `fp16`, and `fcma` target features required by this
    // LLVM intrinsic are enabled on this function via #[target_feature].
    unsafe { _vcadd_rot90_f16(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot90_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fcma"))]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcaddq_rot90_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // Raw binding to the LLVM intrinsic backing the FCADD instruction
    // with a 90-degree rotation.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot90.v8f16"
        )]
        fn _vcaddq_rot90_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    // SAFETY: the `neon`, `fp16`, and `fcma` target features required by this
    // LLVM intrinsic are enabled on this function via #[target_feature].
    unsafe { _vcaddq_rot90_f16(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot90_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcadd_rot90_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // Raw binding to the LLVM intrinsic backing the FCADD instruction
    // with a 90-degree rotation.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot90.v2f32"
        )]
        fn _vcadd_rot90_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    // SAFETY: the `neon` and `fcma` target features required by this LLVM
    // intrinsic are enabled on this function via #[target_feature].
    unsafe { _vcadd_rot90_f32(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot90_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcaddq_rot90_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // Raw binding to the LLVM intrinsic backing the FCADD instruction
    // with a 90-degree rotation.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot90.v4f32"
        )]
        fn _vcaddq_rot90_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    // SAFETY: the `neon` and `fcma` target features required by this LLVM
    // intrinsic are enabled on this function via #[target_feature].
    unsafe { _vcaddq_rot90_f32(a, b) }
}
#[doc = "Floating-point complex add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot90_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcadd))]
pub fn vcaddq_rot90_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Raw binding to the LLVM intrinsic backing the FCADD instruction
    // with a 90-degree rotation.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcadd.rot90.v2f64"
        )]
        fn _vcaddq_rot90_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: the `neon` and `fcma` target features required by this LLVM
    // intrinsic are enabled on this function via #[target_feature].
    unsafe { _vcaddq_rot90_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcage_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcage_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    // Raw binding to the LLVM intrinsic backing the FACGE instruction;
    // the result is an unsigned per-lane comparison mask.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facge.v1i64.v1f64"
        )]
        fn _vcage_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t;
    }
    // SAFETY: the `neon` target feature required by this LLVM intrinsic is
    // enabled on this function via #[target_feature].
    unsafe { _vcage_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcageq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcageq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    // Raw binding to the LLVM intrinsic backing the FACGE instruction;
    // the result is an unsigned per-lane comparison mask.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facge.v2i64.v2f64"
        )]
        fn _vcageq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t;
    }
    // SAFETY: the `neon` target feature required by this LLVM intrinsic is
    // enabled on this function via #[target_feature].
    unsafe { _vcageq_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaged_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcaged_f64(a: f64, b: f64) -> u64 {
    // Scalar (doubleword) form of the FACGE comparison; the result is a
    // 64-bit comparison mask.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facge.i64.f64"
        )]
        fn _vcaged_f64(a: f64, b: f64) -> u64;
    }
    // SAFETY: the `neon` target feature required by this LLVM intrinsic is
    // enabled on this function via #[target_feature].
    unsafe { _vcaged_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcages_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcages_f32(a: f32, b: f32) -> u32 {
    // Scalar (single-precision) form of the FACGE comparison; the result is a
    // 32-bit comparison mask.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facge.i32.f32"
        )]
        fn _vcages_f32(a: f32, b: f32) -> u32;
    }
    // SAFETY: the `neon` target feature required by this LLVM intrinsic is
    // enabled on this function via #[target_feature].
    unsafe { _vcages_f32(a, b) }
}
#[doc = "Floating-point absolute compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcageh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(facge))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcageh_f16(a: f16, b: f16) -> u16 {
    // Scalar (half-precision) form of the FACGE comparison. The LLVM
    // intrinsic is declared to return an i32, so it is truncated to the
    // 16-bit mask below.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facge.i32.f16"
        )]
        fn _vcageh_f16(a: f16, b: f16) -> i32;
    }
    // SAFETY: the `neon` and `fp16` target features required by this LLVM
    // intrinsic are enabled on this function via #[target_feature].
    unsafe { _vcageh_f16(a, b) as u16 }
}
#[doc = "Floating-point absolute compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagt_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcagt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    // Raw binding to the LLVM intrinsic backing the FACGT instruction;
    // the result is an unsigned per-lane comparison mask.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facgt.v1i64.v1f64"
        )]
        fn _vcagt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t;
    }
    // SAFETY: the `neon` target feature required by this LLVM intrinsic is
    // enabled on this function via #[target_feature].
    unsafe { _vcagt_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagtq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcagtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    // Raw binding to the LLVM intrinsic backing the FACGT instruction;
    // the result is an unsigned per-lane comparison mask.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facgt.v2i64.v2f64"
        )]
        fn _vcagtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t;
    }
    // SAFETY: the `neon` target feature required by this LLVM intrinsic is
    // enabled on this function via #[target_feature].
    unsafe { _vcagtq_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagtd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcagtd_f64(a: f64, b: f64) -> u64 {
    // Scalar (doubleword) form of the FACGT comparison; the result is a
    // 64-bit comparison mask.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facgt.i64.f64"
        )]
        fn _vcagtd_f64(a: f64, b: f64) -> u64;
    }
    // SAFETY: the `neon` target feature required by this LLVM intrinsic is
    // enabled on this function via #[target_feature].
    unsafe { _vcagtd_f64(a, b) }
}
#[doc = "Floating-point absolute compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagts_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcagts_f32(a: f32, b: f32) -> u32 {
    // Scalar (single-precision) form of the FACGT comparison; the result is a
    // 32-bit comparison mask.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facgt.i32.f32"
        )]
        fn _vcagts_f32(a: f32, b: f32) -> u32;
    }
    // SAFETY: the `neon` target feature required by this LLVM intrinsic is
    // enabled on this function via #[target_feature].
    unsafe { _vcagts_f32(a, b) }
}
#[doc = "Floating-point absolute compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagth_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(facgt))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcagth_f16(a: f16, b: f16) -> u16 {
    // Scalar (half-precision) form of the FACGT comparison. The LLVM
    // intrinsic is declared to return an i32, so it is truncated to the
    // 16-bit mask below.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.facgt.i32.f16"
        )]
        fn _vcagth_f16(a: f16, b: f16) -> i32;
    }
    // SAFETY: the `neon` and `fp16` target features required by this LLVM
    // intrinsic are enabled on this function via #[target_feature].
    unsafe { _vcagth_f16(a, b) as u16 }
}
#[doc = "Floating-point absolute compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcale_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcale_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    // |a| <= |b| is evaluated as |b| >= |a| by swapping the operands of
    // vcage_f64, which is why this still asserts the FACGE instruction.
    vcage_f64(b, a)
}
#[doc = "Floating-point absolute compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaleq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcaleq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    // |a| <= |b| is evaluated as |b| >= |a| by swapping the operands of
    // vcageq_f64, which is why this still asserts the FACGE instruction.
    vcageq_f64(b, a)
}
#[doc = "Floating-point absolute compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaled_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcaled_f64(a: f64, b: f64) -> u64 {
    // |a| <= |b| is evaluated as |b| >= |a| by swapping the operands of
    // vcaged_f64, which is why this still asserts the FACGE instruction.
    vcaged_f64(b, a)
}
#[doc = "Floating-point absolute compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcales_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcales_f32(a: f32, b: f32) -> u32 {
    // |a| <= |b| is evaluated as |b| >= |a| by swapping the operands of
    // vcages_f32, which is why this still asserts the FACGE instruction.
    vcages_f32(b, a)
}
#[doc = "Floating-point absolute compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaleh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(facge))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcaleh_f16(a: f16, b: f16) -> u16 {
    // |a| <= |b| is evaluated as |b| >= |a| by swapping the operands of
    // vcageh_f16, which is why this still asserts the FACGE instruction.
    vcageh_f16(b, a)
}
#[doc = "Floating-point absolute compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcalt_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(facgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcalt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    // |a| < |b| is evaluated as |b| > |a| by swapping the operands of
    // vcagt_f64, which is why this still asserts the FACGT instruction.
    vcagt_f64(b, a)
}
1408#[doc = "Floating-point absolute compare less than"]
1409#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaltq_f64)"]
1410#[inline(always)]
1411#[target_feature(enable = "neon")]
1412#[cfg_attr(test, assert_instr(facgt))]
1413#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1414pub fn vcaltq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
1415    vcagtq_f64(b, a)
1416}
1417#[doc = "Floating-point absolute compare less than"]
1418#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaltd_f64)"]
1419#[inline(always)]
1420#[target_feature(enable = "neon")]
1421#[cfg_attr(test, assert_instr(facgt))]
1422#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1423pub fn vcaltd_f64(a: f64, b: f64) -> u64 {
1424    vcagtd_f64(b, a)
1425}
1426#[doc = "Floating-point absolute compare less than"]
1427#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcalts_f32)"]
1428#[inline(always)]
1429#[target_feature(enable = "neon")]
1430#[cfg_attr(test, assert_instr(facgt))]
1431#[stable(feature = "neon_intrinsics", since = "1.59.0")]
1432pub fn vcalts_f32(a: f32, b: f32) -> u32 {
1433    vcagts_f32(b, a)
1434}
1435#[doc = "Floating-point absolute compare less than"]
1436#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcalth_f16)"]
1437#[inline(always)]
1438#[cfg_attr(test, assert_instr(facgt))]
1439#[target_feature(enable = "neon,fp16")]
1440#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
1441#[cfg(not(target_arch = "arm64ec"))]
1442pub fn vcalth_f16(a: f16, b: f16) -> u16 {
1443    vcagth_f16(b, a)
1444}
// Lane-wise equality comparisons for the 64-bit element types. Each lane of
// the result is all-ones when the corresponding lanes compare equal and
// all-zeros otherwise, produced by the generic `simd_eq` intrinsic; the
// `assert_instr` attributes pin the expected lowering (FCMEQ for floats,
// CMEQ for integer/polynomial types).
#[doc = "Floating-point compare equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceq_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    unsafe { simd_eq(a, b) }
}
#[doc = "Floating-point compare equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    unsafe { simd_eq(a, b) }
}
#[doc = "Compare bitwise Equal (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceq_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
    unsafe { simd_eq(a, b) }
}
#[doc = "Compare bitwise Equal (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
    unsafe { simd_eq(a, b) }
}
#[doc = "Compare bitwise Equal (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceq_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    unsafe { simd_eq(a, b) }
}
#[doc = "Compare bitwise Equal (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe { simd_eq(a, b) }
}
#[doc = "Compare bitwise Equal (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceq_p64(a: poly64x1_t, b: poly64x1_t) -> uint64x1_t {
    unsafe { simd_eq(a, b) }
}
#[doc = "Compare bitwise Equal (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqq_p64(a: poly64x2_t, b: poly64x2_t) -> uint64x2_t {
    unsafe { simd_eq(a, b) }
}
// Scalar equality comparisons, returning an all-ones/all-zeros mask value.
// The floating-point variants splat each scalar into a vector, reuse the
// lane-wise compare, and extract lane 0; the integer variants reinterpret
// the scalar as a one-lane vector via `transmute`, compare, and reinterpret
// the mask back.
#[doc = "Floating-point compare equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqd_f64(a: f64, b: f64) -> u64 {
    // Splat, compare lane-wise, then take lane 0 of the mask.
    unsafe { simd_extract!(vceq_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
}
#[doc = "Floating-point compare equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqs_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqs_f32(a: f32, b: f32) -> u32 {
    unsafe { simd_extract!(vceq_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
}
#[doc = "Compare bitwise equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqd_s64(a: i64, b: i64) -> u64 {
    // i64 <-> int64x1_t and uint64x1_t <-> u64 are same-size reinterprets.
    unsafe { transmute(vceq_s64(transmute(a), transmute(b))) }
}
#[doc = "Compare bitwise equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqd_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqd_u64(a: u64, b: u64) -> u64 {
    unsafe { transmute(vceq_u64(transmute(a), transmute(b))) }
}
#[doc = "Floating-point compare equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vceqh_f16(a: f16, b: f16) -> u16 {
    // Half-precision scalar form; fp16 feature required, excluded on arm64ec.
    unsafe { simd_extract!(vceq_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
}
// Floating-point lane-wise compare-against-zero. Each function builds an
// all-zeros constant of the matching element layout, reinterprets it as the
// NEON vector type with `transmute`, and compares lane-wise with `simd_eq`
// (lowered to FCMEQ against zero, per `assert_instr`).
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcmeq))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vceqz_f16(a: float16x4_t) -> uint16x4_t {
    let b: f16x4 = f16x4::new(0.0, 0.0, 0.0, 0.0);
    // transmute reinterprets the zero tuple type as float16x4_t.
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcmeq))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vceqzq_f16(a: float16x8_t) -> uint16x8_t {
    let b: f16x8 = f16x8::new(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_f32(a: float32x2_t) -> uint32x2_t {
    let b: f32x2 = f32x2::new(0.0, 0.0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_f32(a: float32x4_t) -> uint32x4_t {
    let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_f64(a: float64x1_t) -> uint64x1_t {
    // Single-lane case: a bare f64 zero is reinterpreted as float64x1_t.
    let b: f64 = 0.0;
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_f64(a: float64x2_t) -> uint64x2_t {
    let b: f64x2 = f64x2::new(0.0, 0.0);
    unsafe { simd_eq(a, transmute(b)) }
}
// Signed-integer and polynomial lane-wise compare-against-zero. Same scheme
// as the float variants: build an all-zeros constant, `transmute` it into
// the NEON vector type, then `simd_eq` (lowered to CMEQ against zero).
// NOTE(review): the polynomial (p8/p64) variants reuse the "Signed compare"
// doc string from the generator even though their inputs are poly types.
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_s8(a: int8x8_t) -> uint8x8_t {
    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_s8(a: int8x16_t) -> uint8x16_t {
    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_s16(a: int16x4_t) -> uint16x4_t {
    let b: i16x4 = i16x4::new(0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_s16(a: int16x8_t) -> uint16x8_t {
    let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_s32(a: int32x2_t) -> uint32x2_t {
    let b: i32x2 = i32x2::new(0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_s32(a: int32x4_t) -> uint32x4_t {
    let b: i32x4 = i32x4::new(0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_s64(a: int64x1_t) -> uint64x1_t {
    let b: i64x1 = i64x1::new(0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_s64(a: int64x2_t) -> uint64x2_t {
    let b: i64x2 = i64x2::new(0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_p8(a: poly8x8_t) -> uint8x8_t {
    // Polynomial input, but bit-equality against zero is the same operation.
    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_p8(a: poly8x16_t) -> uint8x16_t {
    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_p64(a: poly64x1_t) -> uint64x1_t {
    let b: i64x1 = i64x1::new(0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Signed compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_p64(a: poly64x2_t) -> uint64x2_t {
    let b: i64x2 = i64x2::new(0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
// Unsigned-integer lane-wise compare-against-zero: zero constant,
// `transmute` into the NEON vector type, `simd_eq` (lowered to CMEQ).
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_u8(a: uint8x8_t) -> uint8x8_t {
    let b: u8x8 = u8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_u8(a: uint8x16_t) -> uint8x16_t {
    let b: u8x16 = u8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_u16(a: uint16x4_t) -> uint16x4_t {
    let b: u16x4 = u16x4::new(0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_u16(a: uint16x8_t) -> uint16x8_t {
    let b: u16x8 = u16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_u32(a: uint32x2_t) -> uint32x2_t {
    let b: u32x2 = u32x2::new(0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_u32(a: uint32x4_t) -> uint32x4_t {
    let b: u32x4 = u32x4::new(0, 0, 0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqz_u64(a: uint64x1_t) -> uint64x1_t {
    let b: u64x1 = u64x1::new(0);
    unsafe { simd_eq(a, transmute(b)) }
}
#[doc = "Unsigned compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmeq))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzq_u64(a: uint64x2_t) -> uint64x2_t {
    let b: u64x2 = u64x2::new(0, 0);
    unsafe { simd_eq(a, transmute(b)) }
}
// Scalar compare-against-zero, delegating to the one-lane vector
// compare-to-zero intrinsics. Integer variants reinterpret scalar <->
// one-lane vector with `transmute`; float variants splat and extract lane 0.
#[doc = "Compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzd_s64(a: i64) -> u64 {
    unsafe { transmute(vceqz_s64(transmute(a))) }
}
#[doc = "Compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzd_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzd_u64(a: u64) -> u64 {
    unsafe { transmute(vceqz_u64(transmute(a))) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vceqzh_f16(a: f16) -> u16 {
    // Splat to a 4-lane vector, compare against zero, take lane 0.
    unsafe { simd_extract!(vceqz_f16(vdup_n_f16(a)), 0) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzs_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzs_f32(a: f32) -> u32 {
    unsafe { simd_extract!(vceqz_f32(vdup_n_f32(a)), 0) }
}
#[doc = "Floating-point compare bitwise equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vceqzd_f64(a: f64) -> u64 {
    unsafe { simd_extract!(vceqz_f64(vdup_n_f64(a)), 0) }
}
// Lane-wise "greater than or equal" comparisons for 64-bit element types,
// via the generic `simd_ge` intrinsic. Expected lowering (per
// `assert_instr`): FCMGE for floats, CMGE for signed, CMHS for unsigned.
#[doc = "Floating-point compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcge_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    unsafe { simd_ge(a, b) }
}
#[doc = "Floating-point compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgeq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    unsafe { simd_ge(a, b) }
}
#[doc = "Compare signed greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcge_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
    unsafe { simd_ge(a, b) }
}
#[doc = "Compare signed greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgeq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
    unsafe { simd_ge(a, b) }
}
#[doc = "Compare unsigned greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcge_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    unsafe { simd_ge(a, b) }
}
#[doc = "Compare unsigned greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgeq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe { simd_ge(a, b) }
}
// Scalar "greater than or equal" comparisons. Float variants splat,
// compare lane-wise, and extract lane 0; integer variants reinterpret
// scalar <-> one-lane vector with `transmute`.
#[doc = "Floating-point compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcged_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcged_f64(a: f64, b: f64) -> u64 {
    unsafe { simd_extract!(vcge_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
}
#[doc = "Floating-point compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcges_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcges_f32(a: f32, b: f32) -> u32 {
    unsafe { simd_extract!(vcge_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
}
#[doc = "Compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcged_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcged_s64(a: i64, b: i64) -> u64 {
    unsafe { transmute(vcge_s64(transmute(a), transmute(b))) }
}
#[doc = "Compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcged_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcged_u64(a: u64, b: u64) -> u64 {
    unsafe { transmute(vcge_u64(transmute(a), transmute(b))) }
}
#[doc = "Floating-point compare greater than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcgeh_f16(a: f16, b: f16) -> u16 {
    // Half-precision scalar form; fp16 feature required, excluded on arm64ec.
    unsafe { simd_extract!(vcge_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
}
// Lane-wise "greater than or equal to zero" comparisons: build an all-zeros
// constant, `transmute` it into the NEON vector type, then `simd_ge`
// (lowered to FCMGE / CMGE against zero, per `assert_instr`).
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgez_f32(a: float32x2_t) -> uint32x2_t {
    let b: f32x2 = f32x2::new(0.0, 0.0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezq_f32(a: float32x4_t) -> uint32x4_t {
    let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgez_f64(a: float64x1_t) -> uint64x1_t {
    // Single-lane case: a bare f64 zero is reinterpreted as float64x1_t.
    let b: f64 = 0.0;
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezq_f64(a: float64x2_t) -> uint64x2_t {
    let b: f64x2 = f64x2::new(0.0, 0.0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgez_s8(a: int8x8_t) -> uint8x8_t {
    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezq_s8(a: int8x16_t) -> uint8x16_t {
    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgez_s16(a: int16x4_t) -> uint16x4_t {
    let b: i16x4 = i16x4::new(0, 0, 0, 0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezq_s16(a: int16x8_t) -> uint16x8_t {
    let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgez_s32(a: int32x2_t) -> uint32x2_t {
    let b: i32x2 = i32x2::new(0, 0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezq_s32(a: int32x4_t) -> uint32x4_t {
    let b: i32x4 = i32x4::new(0, 0, 0, 0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgez_s64(a: int64x1_t) -> uint64x1_t {
    let b: i64x1 = i64x1::new(0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezq_s64(a: int64x2_t) -> uint64x2_t {
    let b: i64x2 = i64x2::new(0, 0);
    unsafe { simd_ge(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezd_f64(a: f64) -> u64 {
    // SAFETY: splats the scalar to a 1-lane vector, does the vector compare,
    // then extracts lane 0, which always exists.
    unsafe { simd_extract!(vcgez_f64(vdup_n_f64(a)), 0) }
}
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezs_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezs_f32(a: f32) -> u32 {
    // SAFETY: splat -> vector compare -> extract lane 0 (always in bounds).
    unsafe { simd_extract!(vcgez_f32(vdup_n_f32(a)), 0) }
}
#[doc = "Compare signed greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgezd_s64(a: i64) -> u64 {
    // SAFETY: `i64`/`u64` and the single-lane NEON vectors `int64x1_t`/
    // `uint64x1_t` are layout-identical, so both transmutes are sound.
    unsafe { transmute(vcgez_s64(transmute(a))) }
}
#[doc = "Floating-point compare greater than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcgezh_f16(a: f16) -> u16 {
    // SAFETY: splat -> vector compare -> extract lane 0 (always in bounds).
    unsafe { simd_extract!(vcgez_f16(vdup_n_f16(a)), 0) }
}
#[doc = "Floating-point compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    // SAFETY: lane-wise compare of identically-shaped vectors; true lanes are all-ones.
    unsafe { simd_gt(a, b) }
}
#[doc = "Floating-point compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    // SAFETY: lane-wise compare of identically-shaped vectors.
    unsafe { simd_gt(a, b) }
}
#[doc = "Compare signed greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgt_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
    // SAFETY: lane-wise signed compare of identically-shaped vectors.
    unsafe { simd_gt(a, b) }
}
#[doc = "Compare signed greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
    // SAFETY: lane-wise signed compare of identically-shaped vectors.
    unsafe { simd_gt(a, b) }
}
#[doc = "Compare unsigned greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhi))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgt_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    // SAFETY: lane-wise unsigned compare of identically-shaped vectors.
    unsafe { simd_gt(a, b) }
}
#[doc = "Compare unsigned greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhi))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // SAFETY: lane-wise unsigned compare of identically-shaped vectors.
    unsafe { simd_gt(a, b) }
}
#[doc = "Floating-point compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtd_f64(a: f64, b: f64) -> u64 {
    // SAFETY: splats both scalars, does the vector compare, extracts lane 0
    // (always in bounds).
    unsafe { simd_extract!(vcgt_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
}
#[doc = "Floating-point compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgts_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgts_f32(a: f32, b: f32) -> u32 {
    // SAFETY: splat both -> vector compare -> extract lane 0 (always in bounds).
    unsafe { simd_extract!(vcgt_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
}
#[doc = "Compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtd_s64(a: i64, b: i64) -> u64 {
    // SAFETY: `i64`/`u64` and the single-lane NEON vectors are layout-identical.
    unsafe { transmute(vcgt_s64(transmute(a), transmute(b))) }
}
#[doc = "Compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtd_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtd_u64(a: u64, b: u64) -> u64 {
    // SAFETY: `u64` and the single-lane NEON vector `uint64x1_t` are layout-identical.
    unsafe { transmute(vcgt_u64(transmute(a), transmute(b))) }
}
#[doc = "Floating-point compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgth_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcgth_f16(a: f16, b: f16) -> u16 {
    // SAFETY: splat both -> vector compare -> extract lane 0 (always in bounds).
    unsafe { simd_extract!(vcgt_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
}
#[doc = "Floating-point compare greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtz_f32(a: float32x2_t) -> uint32x2_t {
    let b: f32x2 = f32x2::new(0.0, 0.0);
    // SAFETY: the all-zero constant transmutes to the layout-identical NEON
    // type; `simd_gt` is a lane-wise `>` yielding all-ones/all-zeros lanes.
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzq_f32(a: float32x4_t) -> uint32x4_t {
    let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
    // SAFETY: zero constant is layout-identical to the NEON type; lane-wise `>`.
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtz_f64(a: float64x1_t) -> uint64x1_t {
    let b: f64 = 0.0;
    // SAFETY: a plain `f64` zero is layout-identical to the 1-lane `float64x1_t`.
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzq_f64(a: float64x2_t) -> uint64x2_t {
    let b: f64x2 = f64x2::new(0.0, 0.0);
    // SAFETY: zero constant is layout-identical to the NEON type; lane-wise `>`.
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtz_s8(a: int8x8_t) -> uint8x8_t {
    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    // SAFETY: the all-zero constant transmutes to the layout-identical NEON
    // type; `simd_gt` is a lane-wise signed `>`.
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzq_s8(a: int8x16_t) -> uint8x16_t {
    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    // SAFETY: zero constant is layout-identical to the NEON type; lane-wise `>`.
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtz_s16(a: int16x4_t) -> uint16x4_t {
    let b: i16x4 = i16x4::new(0, 0, 0, 0);
    // SAFETY: zero constant is layout-identical to the NEON type; lane-wise `>`.
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzq_s16(a: int16x8_t) -> uint16x8_t {
    let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    // SAFETY: zero constant is layout-identical to the NEON type; lane-wise `>`.
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtz_s32(a: int32x2_t) -> uint32x2_t {
    let b: i32x2 = i32x2::new(0, 0);
    // SAFETY: zero constant is layout-identical to the NEON type; lane-wise `>`.
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzq_s32(a: int32x4_t) -> uint32x4_t {
    let b: i32x4 = i32x4::new(0, 0, 0, 0);
    // SAFETY: zero constant is layout-identical to the NEON type; lane-wise `>`.
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtz_s64(a: int64x1_t) -> uint64x1_t {
    let b: i64x1 = i64x1::new(0);
    // SAFETY: zero constant is layout-identical to the NEON type; lane-wise `>`.
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzq_s64(a: int64x2_t) -> uint64x2_t {
    let b: i64x2 = i64x2::new(0, 0);
    // SAFETY: zero constant is layout-identical to the NEON type; lane-wise `>`.
    unsafe { simd_gt(a, transmute(b)) }
}
#[doc = "Floating-point compare greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzd_f64(a: f64) -> u64 {
    // SAFETY: splat -> vector compare-against-zero -> extract lane 0 (always in bounds).
    unsafe { simd_extract!(vcgtz_f64(vdup_n_f64(a)), 0) }
}
#[doc = "Floating-point compare greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzs_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzs_f32(a: f32) -> u32 {
    // SAFETY: splat -> vector compare-against-zero -> extract lane 0 (always in bounds).
    unsafe { simd_extract!(vcgtz_f32(vdup_n_f32(a)), 0) }
}
#[doc = "Compare signed greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcgtzd_s64(a: i64) -> u64 {
    // SAFETY: `i64`/`u64` and the single-lane NEON vectors are layout-identical.
    unsafe { transmute(vcgtz_s64(transmute(a))) }
}
#[doc = "Floating-point compare greater than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcgtzh_f16(a: f16) -> u16 {
    // SAFETY: splat -> vector compare-against-zero -> extract lane 0 (always in bounds).
    unsafe { simd_extract!(vcgtz_f16(vdup_n_f16(a)), 0) }
}
#[doc = "Floating-point compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcle_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    // SAFETY: lane-wise compare of identically-shaped vectors; true lanes are
    // all-ones. (Asserted instruction is `fcmge` with swapped operands.)
    unsafe { simd_le(a, b) }
}
#[doc = "Floating-point compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcleq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    // SAFETY: lane-wise compare of identically-shaped vectors.
    unsafe { simd_le(a, b) }
}
#[doc = "Compare signed less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcle_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
    // SAFETY: lane-wise signed compare of identically-shaped vectors.
    unsafe { simd_le(a, b) }
}
#[doc = "Compare signed less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmge))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcleq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
    // SAFETY: lane-wise signed compare of identically-shaped vectors.
    unsafe { simd_le(a, b) }
}
#[doc = "Compare unsigned less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcle_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    // SAFETY: lane-wise unsigned compare of identically-shaped vectors.
    unsafe { simd_le(a, b) }
}
#[doc = "Compare unsigned less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcleq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // SAFETY: lane-wise unsigned compare of identically-shaped vectors.
    unsafe { simd_le(a, b) }
}
#[doc = "Floating-point compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcled_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcled_f64(a: f64, b: f64) -> u64 {
    // SAFETY: splat both -> vector compare -> extract lane 0 (always in bounds).
    unsafe { simd_extract!(vcle_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
}
#[doc = "Floating-point compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcles_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcles_f32(a: f32, b: f32) -> u32 {
    // SAFETY: splat both -> vector compare -> extract lane 0 (always in bounds).
    unsafe { simd_extract!(vcle_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
}
#[doc = "Compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcled_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcled_u64(a: u64, b: u64) -> u64 {
    // SAFETY: `u64` and the single-lane NEON vector `uint64x1_t` are layout-identical.
    unsafe { transmute(vcle_u64(transmute(a), transmute(b))) }
}
#[doc = "Compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcled_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcled_s64(a: i64, b: i64) -> u64 {
    // SAFETY: `i64`/`u64` and the single-lane NEON vectors are layout-identical.
    unsafe { transmute(vcle_s64(transmute(a), transmute(b))) }
}
#[doc = "Floating-point compare less than or equal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcleh_f16(a: f16, b: f16) -> u16 {
    // SAFETY: splat both -> vector compare -> extract lane 0 (always in bounds).
    unsafe { simd_extract!(vcle_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
}
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclez_f32(a: float32x2_t) -> uint32x2_t {
    let b: f32x2 = f32x2::new(0.0, 0.0);
    // SAFETY: the all-zero constant transmutes to the layout-identical NEON
    // type; `simd_le` is a lane-wise `<=` yielding all-ones/all-zeros lanes.
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezq_f32(a: float32x4_t) -> uint32x4_t {
    let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
    // SAFETY: zero constant is layout-identical to the NEON type; lane-wise `<=`.
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclez_f64(a: float64x1_t) -> uint64x1_t {
    let b: f64 = 0.0;
    // SAFETY: a plain `f64` zero is layout-identical to the 1-lane `float64x1_t`.
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezq_f64(a: float64x2_t) -> uint64x2_t {
    let b: f64x2 = f64x2::new(0.0, 0.0);
    // SAFETY: zero constant is layout-identical to the NEON type; lane-wise `<=`.
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclez_s8(a: int8x8_t) -> uint8x8_t {
    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    // SAFETY: the all-zero constant transmutes to the layout-identical NEON
    // type; `simd_le` is a lane-wise signed `<=`.
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezq_s8(a: int8x16_t) -> uint8x16_t {
    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    // SAFETY: zero constant is layout-identical to the NEON type; lane-wise `<=`.
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclez_s16(a: int16x4_t) -> uint16x4_t {
    let b: i16x4 = i16x4::new(0, 0, 0, 0);
    // SAFETY: zero constant is layout-identical to the NEON type; lane-wise `<=`.
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezq_s16(a: int16x8_t) -> uint16x8_t {
    let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    // SAFETY: zero constant is layout-identical to the NEON type; lane-wise `<=`.
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclez_s32(a: int32x2_t) -> uint32x2_t {
    let b: i32x2 = i32x2::new(0, 0);
    // SAFETY: zero constant is layout-identical to the NEON type; lane-wise `<=`.
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezq_s32(a: int32x4_t) -> uint32x4_t {
    let b: i32x4 = i32x4::new(0, 0, 0, 0);
    // SAFETY: zero constant is layout-identical to the NEON type; lane-wise `<=`.
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclez_s64(a: int64x1_t) -> uint64x1_t {
    let b: i64x1 = i64x1::new(0);
    // SAFETY: zero constant is layout-identical to the NEON type; lane-wise `<=`.
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Compare signed less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmle))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezq_s64(a: int64x2_t) -> uint64x2_t {
    let b: i64x2 = i64x2::new(0, 0);
    // SAFETY: zero constant is layout-identical to the NEON type; lane-wise `<=`.
    unsafe { simd_le(a, transmute(b)) }
}
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezd_f64(a: f64) -> u64 {
    // SAFETY: splat -> vector compare-against-zero -> extract lane 0 (always in bounds).
    unsafe { simd_extract!(vclez_f64(vdup_n_f64(a)), 0) }
}
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezs_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezs_f32(a: f32) -> u32 {
    // SAFETY: splat -> vector compare-against-zero -> extract lane 0 (always in bounds).
    unsafe { simd_extract!(vclez_f32(vdup_n_f32(a)), 0) }
}
#[doc = "Compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclezd_s64(a: i64) -> u64 {
    // SAFETY: `i64`/`u64` and the single-lane NEON vectors are layout-identical.
    unsafe { transmute(vclez_s64(transmute(a))) }
}
#[doc = "Floating-point compare less than or equal to zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vclezh_f16(a: f16) -> u16 {
    // SAFETY: splat -> vector compare-against-zero -> extract lane 0 (always in bounds).
    unsafe { simd_extract!(vclez_f16(vdup_n_f16(a)), 0) }
}
#[doc = "Floating-point compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t {
    // SAFETY: lane-wise compare of identically-shaped vectors. (The asserted
    // instruction is `fcmgt`: `a < b` lowers to a `>` with swapped operands.)
    unsafe { simd_lt(a, b) }
}
#[doc = "Floating-point compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t {
    // SAFETY: lane-wise compare of identically-shaped vectors.
    unsafe { simd_lt(a, b) }
}
#[doc = "Compare signed less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclt_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
    // SAFETY: lane-wise signed compare of identically-shaped vectors.
    unsafe { simd_lt(a, b) }
}
#[doc = "Compare signed less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmgt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
    // SAFETY: lane-wise signed compare of identically-shaped vectors.
    unsafe { simd_lt(a, b) }
}
#[doc = "Compare unsigned less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhi))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclt_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    // SAFETY: lane-wise unsigned compare of identically-shaped vectors.
    unsafe { simd_lt(a, b) }
}
#[doc = "Compare unsigned less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmhi))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // SAFETY: lane-wise unsigned compare of identically-shaped vectors.
    unsafe { simd_lt(a, b) }
}
2696#[doc = "Compare less than"]
2697#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltd_u64)"]
2698#[inline(always)]
2699#[target_feature(enable = "neon")]
2700#[cfg_attr(test, assert_instr(cmp))]
2701#[stable(feature = "neon_intrinsics", since = "1.59.0")]
2702pub fn vcltd_u64(a: u64, b: u64) -> u64 {
2703    unsafe { transmute(vclt_u64(transmute(a), transmute(b))) }
2704}
#[doc = "Compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltd_s64(a: i64, b: i64) -> u64 {
    // Scalar signed form: route through the 1-lane vector compare `vclt_s64`
    // and reinterpret the resulting mask lane as u64.
    unsafe { transmute(vclt_s64(transmute(a), transmute(b))) }
}
#[doc = "Floating-point compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclth_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vclth_f16(a: f16, b: f16) -> u16 {
    // Scalar f16 form: splat both operands into 4-lane vectors, run the
    // vector compare, and extract lane 0 of the mask.
    unsafe { simd_extract!(vclt_f16(vdup_n_f16(a), vdup_n_f16(b)), 0) }
}
#[doc = "Floating-point compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclts_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vclts_f32(a: f32, b: f32) -> u32 {
    // Scalar f32 form: splat, vector-compare, extract lane 0 of the mask.
    unsafe { simd_extract!(vclt_f32(vdup_n_f32(a), vdup_n_f32(b)), 0) }
}
#[doc = "Floating-point compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltd_f64(a: f64, b: f64) -> u64 {
    // Scalar f64 form: splat into 1-lane vectors, vector-compare, extract
    // the single mask lane.
    unsafe { simd_extract!(vclt_f64(vdup_n_f64(a), vdup_n_f64(b)), 0) }
}
#[doc = "Floating-point compare less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltz_f32(a: float32x2_t) -> uint32x2_t {
    // Build an all-zeros vector and compare each lane of `a` against it;
    // the transmute reinterprets the generator's `f32x2` as `float32x2_t`.
    let b: f32x2 = f32x2::new(0.0, 0.0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Floating-point compare less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltzq_f32(a: float32x4_t) -> uint32x4_t {
    // Quad-register variant: compare each of the 4 lanes against zero.
    let b: f32x4 = f32x4::new(0.0, 0.0, 0.0, 0.0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Floating-point compare less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltz_f64(a: float64x1_t) -> uint64x1_t {
    // A plain f64 zero reinterpreted as a 1-lane vector serves as the
    // comparison operand here (1 lane == one scalar).
    let b: f64 = 0.0;
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Floating-point compare less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltzq_f64(a: float64x2_t) -> uint64x2_t {
    // Quad-register variant: compare both f64 lanes against zero.
    let b: f64x2 = f64x2::new(0.0, 0.0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Compare signed less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltz_s8(a: int8x8_t) -> uint8x8_t {
    // Signed lane-wise `a < 0`: compare against an all-zeros vector,
    // lowered to the CMLT (compare less-than zero) instruction.
    let b: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Compare signed less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltzq_s8(a: int8x16_t) -> uint8x16_t {
    // Quad-register variant: all 16 signed byte lanes compared against zero.
    let b: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Compare signed less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltz_s16(a: int16x4_t) -> uint16x4_t {
    // Signed lane-wise `a < 0` against an all-zeros vector.
    let b: i16x4 = i16x4::new(0, 0, 0, 0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Compare signed less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltzq_s16(a: int16x8_t) -> uint16x8_t {
    // Quad-register variant: 8 signed halfword lanes compared against zero.
    let b: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Compare signed less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltz_s32(a: int32x2_t) -> uint32x2_t {
    // Signed lane-wise `a < 0` against an all-zeros vector.
    let b: i32x2 = i32x2::new(0, 0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Compare signed less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltzq_s32(a: int32x4_t) -> uint32x4_t {
    // Quad-register variant: 4 signed word lanes compared against zero.
    let b: i32x4 = i32x4::new(0, 0, 0, 0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Compare signed less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltz_s64(a: int64x1_t) -> uint64x1_t {
    // 1-lane signed `a < 0` against a zero vector.
    let b: i64x1 = i64x1::new(0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Compare signed less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmlt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltzq_s64(a: int64x2_t) -> uint64x2_t {
    // Quad-register variant: both signed doubleword lanes compared against zero.
    let b: i64x2 = i64x2::new(0, 0);
    unsafe { simd_lt(a, transmute(b)) }
}
#[doc = "Floating-point compare less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltzd_f64(a: f64) -> u64 {
    // Scalar form: splat into a 1-lane vector, run the vector compare-with-zero,
    // then extract the single mask lane.
    unsafe { simd_extract!(vcltz_f64(vdup_n_f64(a)), 0) }
}
#[doc = "Floating-point compare less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzs_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltzs_f32(a: f32) -> u32 {
    // Scalar form: splat, vector compare-with-zero, extract lane 0 of the mask.
    unsafe { simd_extract!(vcltz_f32(vdup_n_f32(a)), 0) }
}
#[doc = "Compare less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(asr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcltzd_s64(a: i64) -> u64 {
    // Scalar signed `a < 0`: reinterpret as a 1-lane vector, compare, and
    // reinterpret the mask lane back. Lowered to an arithmetic shift right
    // (`asr`) — presumably smearing the sign bit into a full mask.
    unsafe { transmute(vcltz_s64(transmute(a))) }
}
#[doc = "Floating-point compare less than zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcmp))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcltzh_f16(a: f16) -> u16 {
    // Scalar f16 form: splat, vector compare-with-zero, extract lane 0.
    unsafe { simd_extract!(vcltz_f16(vdup_n_f16(a)), 0) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t {
    // Direct binding to the LLVM FCMLA intrinsic with rotation 0 (`rot0`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot0.v4f16"
        )]
        fn _vcmla_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t;
    }
    // SAFETY: the required `fcma`/`fp16` features are enabled via
    // `#[target_feature]` on this function.
    unsafe { _vcmla_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t {
    // Quad-register binding to the LLVM FCMLA intrinsic, rotation 0 (`rot0`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot0.v8f16"
        )]
        fn _vcmlaq_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t;
    }
    // SAFETY: `fcma`/`fp16` are enabled via `#[target_feature]` above.
    unsafe { _vcmlaq_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
    // Direct binding to the LLVM FCMLA intrinsic with rotation 0 (`rot0`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot0.v2f32"
        )]
        fn _vcmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t;
    }
    // SAFETY: the `fcma` feature is enabled via `#[target_feature]` above.
    unsafe { _vcmla_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
    // Quad-register binding to the LLVM FCMLA intrinsic, rotation 0 (`rot0`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot0.v4f32"
        )]
        fn _vcmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t;
    }
    // SAFETY: the `fcma` feature is enabled via `#[target_feature]` above.
    unsafe { _vcmlaq_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    // Quad-register f64 binding to the LLVM FCMLA intrinsic, rotation 0.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot0.v2f64"
        )]
        fn _vcmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t;
    }
    // SAFETY: the `fcma` feature is enabled via `#[target_feature]` above.
    unsafe { _vcmlaq_f64(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_lane_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmla_lane_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x4_t,
) -> float16x4_t {
    // `c` holds 2 complex (real, imag) pairs, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast the selected pair (lanes 2*LANE, 2*LANE+1) across every
        // pair position of `c`, then reuse the full-vector intrinsic.
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_lane_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmlaq_lane_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x4_t,
) -> float16x8_t {
    // The 64-bit `c` holds 2 complex pairs, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Widen `c` to 8 lanes by repeating the selected pair
        // (lanes 2*LANE, 2*LANE+1) four times, then do the full-vector CMLA.
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_lane_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x2_t,
) -> float32x2_t {
    // `c` holds exactly one complex pair, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    unsafe {
        // Select the (real, imag) pair at lanes 2*LANE, 2*LANE+1 (identity
        // when LANE == 0), then reuse the full-vector intrinsic.
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_lane_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x2_t,
) -> float32x4_t {
    // The 64-bit `c` holds one complex pair, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    unsafe {
        // Widen `c` to 4 lanes by repeating the selected pair twice, then do
        // the full-vector CMLA.
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_laneq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmla_laneq_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x8_t,
) -> float16x4_t {
    // The 128-bit `c` holds 4 complex pairs, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Narrow `c` to 4 lanes by repeating the selected pair
        // (lanes 2*LANE, 2*LANE+1) twice, then do the 64-bit CMLA.
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_laneq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmlaq_laneq_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x8_t,
) -> float16x8_t {
    // `c` holds 4 complex pairs, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast the selected pair (lanes 2*LANE, 2*LANE+1) across all
        // four pair positions, then do the full-vector CMLA.
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_laneq_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x4_t,
) -> float32x2_t {
    // The 128-bit `c` holds 2 complex pairs, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Narrow `c` to the selected (real, imag) pair, then do the 64-bit CMLA.
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_laneq_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x4_t,
) -> float32x4_t {
    // `c` holds 2 complex pairs, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast the selected pair (lanes 2*LANE, 2*LANE+1) across both
        // pair positions, then do the full-vector CMLA.
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_rot180_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t {
    // Binding to the LLVM FCMLA intrinsic with rotation 180 (`rot180`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot180.v4f16"
        )]
        fn _vcmla_rot180_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t;
    }
    // SAFETY: `fcma`/`fp16` are enabled via `#[target_feature]` above.
    unsafe { _vcmla_rot180_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot180_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t {
    // Quad-register binding to the LLVM FCMLA intrinsic, rotation 180.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot180.v8f16"
        )]
        fn _vcmlaq_rot180_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t;
    }
    // SAFETY: `fcma`/`fp16` are enabled via `#[target_feature]` above.
    unsafe { _vcmlaq_rot180_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_rot180_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
    // Binding to the LLVM FCMLA intrinsic with rotation 180 (`rot180`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot180.v2f32"
        )]
        fn _vcmla_rot180_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t;
    }
    // SAFETY: the `fcma` feature is enabled via `#[target_feature]` above.
    unsafe { _vcmla_rot180_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot180_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
    // Quad-register binding to the LLVM FCMLA intrinsic, rotation 180.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot180.v4f32"
        )]
        fn _vcmlaq_rot180_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t;
    }
    // SAFETY: the `fcma` feature is enabled via `#[target_feature]` above.
    unsafe { _vcmlaq_rot180_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot180_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    // Quad-register f64 binding to the LLVM FCMLA intrinsic, rotation 180.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot180.v2f64"
        )]
        fn _vcmlaq_rot180_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t;
    }
    // SAFETY: the `fcma` feature is enabled via `#[target_feature]` above.
    unsafe { _vcmlaq_rot180_f64(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_lane_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmla_rot180_lane_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x4_t,
) -> float16x4_t {
    // `c` holds 2 complex (real, imag) pairs, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast the selected pair (lanes 2*LANE, 2*LANE+1) across every
        // pair position, then reuse the full-vector rot180 intrinsic.
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_rot180_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_lane_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmlaq_rot180_lane_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x4_t,
) -> float16x8_t {
    // The 64-bit `c` holds 2 complex pairs, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Widen `c` to 8 lanes by repeating the selected pair four times,
        // then do the full-vector rot180 CMLA.
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot180_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_rot180_lane_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x2_t,
) -> float32x2_t {
    // `c` holds exactly one complex pair, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    unsafe {
        // Identity shuffle when LANE == 0; kept for generator uniformity.
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_rot180_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_rot180_lane_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x2_t,
) -> float32x4_t {
    // The 64-bit `c` holds one complex pair, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    unsafe {
        // Widen `c` to 4 lanes by repeating the selected pair twice, then do
        // the full-vector rot180 CMLA.
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot180_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_laneq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmla_rot180_laneq_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x8_t,
) -> float16x4_t {
    // The 128-bit `c` holds 4 complex pairs, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Narrow `c` to 4 lanes by repeating the selected pair twice, then do
        // the 64-bit rot180 CMLA.
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_rot180_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_laneq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmlaq_rot180_laneq_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x8_t,
) -> float16x8_t {
    // `c` holds 4 complex pairs, so LANE must fit in 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast the selected pair (lanes 2*LANE, 2*LANE+1) across all
        // four pair positions, then do the full-vector rot180 CMLA.
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot180_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_rot180_laneq_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x4_t,
) -> float32x2_t {
    // The 128-bit `c` holds 2 complex pairs, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Narrow `c` to the selected (real, imag) pair, then do the 64-bit
        // rot180 CMLA.
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_rot180_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_rot180_laneq_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x4_t,
) -> float32x4_t {
    // `c` holds 2 complex numbers as (real, imag) element pairs; LANE picks one.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast the complex pair at `LANE` across both pairs of the
        // vector, then reuse the non-lane intrinsic.
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot180_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_rot270_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t {
    // Direct binding to the LLVM intrinsic that lowers to FCMLA with a
    // 270-degree rotation; there is no Rust-level fallback.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot270.v4f16"
        )]
        fn _vcmla_rot270_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t;
    }
    // SAFETY: the `#[target_feature]` attributes guarantee the required
    // features are enabled whenever this function can be called.
    unsafe { _vcmla_rot270_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot270_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t {
    // Direct binding to the LLVM intrinsic that lowers to FCMLA with a
    // 270-degree rotation; there is no Rust-level fallback.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot270.v8f16"
        )]
        fn _vcmlaq_rot270_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t;
    }
    // SAFETY: the `#[target_feature]` attributes guarantee the required
    // features are enabled whenever this function can be called.
    unsafe { _vcmlaq_rot270_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_rot270_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
    // Direct binding to the LLVM intrinsic that lowers to FCMLA with a
    // 270-degree rotation; there is no Rust-level fallback.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot270.v2f32"
        )]
        fn _vcmla_rot270_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t;
    }
    // SAFETY: the `#[target_feature]` attributes guarantee the required
    // features are enabled whenever this function can be called.
    unsafe { _vcmla_rot270_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot270_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
    // Direct binding to the LLVM intrinsic that lowers to FCMLA with a
    // 270-degree rotation; there is no Rust-level fallback.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot270.v4f32"
        )]
        fn _vcmlaq_rot270_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t;
    }
    // SAFETY: the `#[target_feature]` attributes guarantee the required
    // features are enabled whenever this function can be called.
    unsafe { _vcmlaq_rot270_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot270_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    // Direct binding to the LLVM intrinsic that lowers to FCMLA with a
    // 270-degree rotation; there is no Rust-level fallback.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot270.v2f64"
        )]
        fn _vcmlaq_rot270_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t;
    }
    // SAFETY: the `#[target_feature]` attributes guarantee the required
    // features are enabled whenever this function can be called.
    unsafe { _vcmlaq_rot270_f64(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_lane_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmla_rot270_lane_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x4_t,
) -> float16x4_t {
    // `c` holds 2 complex numbers as (real, imag) element pairs; LANE picks one.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast the complex pair at `LANE` (elements 2*LANE, 2*LANE+1)
        // across both pairs, then reuse the non-lane intrinsic.
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_rot270_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_lane_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmlaq_rot270_lane_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x4_t,
) -> float16x8_t {
    // The 64-bit `c` holds 2 complex numbers as (real, imag) pairs; LANE picks one.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Widen by broadcasting the selected pair across all four pairs of a
        // 128-bit vector, then reuse the non-lane intrinsic.
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot270_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_rot270_lane_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x2_t,
) -> float32x2_t {
    // A float32x2_t holds exactly one (real, imag) complex pair, so only
    // lane 0 is valid.
    static_assert!(LANE == 0);
    unsafe {
        // With LANE fixed at 0 this shuffle is an identity; it keeps the code
        // shape uniform with the other lane variants.
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_rot270_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_rot270_lane_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x2_t,
) -> float32x4_t {
    // The 64-bit `c` holds exactly one (real, imag) complex pair, so only
    // lane 0 is valid.
    static_assert!(LANE == 0);
    unsafe {
        // Widen by broadcasting the single pair across both pairs of a
        // 128-bit vector, then reuse the non-lane intrinsic.
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot270_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_laneq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmla_rot270_laneq_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x8_t,
) -> float16x4_t {
    // The 128-bit `c` holds 4 complex numbers as (real, imag) pairs, so the
    // complex lane index needs 2 bits.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Narrow by broadcasting the selected pair across both pairs of a
        // 64-bit vector, then reuse the non-lane intrinsic.
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_rot270_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_laneq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmlaq_rot270_laneq_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x8_t,
) -> float16x8_t {
    // `c` holds 4 complex numbers as (real, imag) element pairs, so the
    // complex lane index needs 2 bits.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast the complex pair at `LANE` across all four pairs, then
        // reuse the non-lane intrinsic.
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot270_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_rot270_laneq_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x4_t,
) -> float32x2_t {
    // The 128-bit `c` holds 2 complex numbers as (real, imag) pairs; LANE picks one.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Extract the selected pair into a 2-element vector, then reuse the
        // non-lane intrinsic.
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_rot270_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_rot270_laneq_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x4_t,
) -> float32x4_t {
    // `c` holds 2 complex numbers as (real, imag) element pairs; LANE picks one.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast the complex pair at `LANE` across both pairs, then reuse
        // the non-lane intrinsic.
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot270_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_rot90_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t {
    // Direct binding to the LLVM intrinsic that lowers to FCMLA with a
    // 90-degree rotation; there is no Rust-level fallback.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot90.v4f16"
        )]
        fn _vcmla_rot90_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t;
    }
    // SAFETY: the `#[target_feature]` attributes guarantee the required
    // features are enabled whenever this function can be called.
    unsafe { _vcmla_rot90_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot90_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t {
    // Direct binding to the LLVM intrinsic that lowers to FCMLA with a
    // 90-degree rotation; there is no Rust-level fallback.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot90.v8f16"
        )]
        fn _vcmlaq_rot90_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t;
    }
    // SAFETY: the `#[target_feature]` attributes guarantee the required
    // features are enabled whenever this function can be called.
    unsafe { _vcmlaq_rot90_f16(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmla_rot90_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
    // Direct binding to the LLVM intrinsic that lowers to FCMLA with a
    // 90-degree rotation; there is no Rust-level fallback.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot90.v2f32"
        )]
        fn _vcmla_rot90_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t;
    }
    // SAFETY: the `#[target_feature]` attributes guarantee the required
    // features are enabled whenever this function can be called.
    unsafe { _vcmla_rot90_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot90_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
    // Direct binding to the LLVM intrinsic that lowers to FCMLA with a
    // 90-degree rotation; there is no Rust-level fallback.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot90.v4f32"
        )]
        fn _vcmlaq_rot90_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t;
    }
    // SAFETY: the `#[target_feature]` attributes guarantee the required
    // features are enabled whenever this function can be called.
    unsafe { _vcmlaq_rot90_f32(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg_attr(test, assert_instr(fcmla))]
pub fn vcmlaq_rot90_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    // Direct binding to the LLVM intrinsic that lowers to FCMLA with a
    // 90-degree rotation; there is no Rust-level fallback.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcmla.rot90.v2f64"
        )]
        fn _vcmlaq_rot90_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t;
    }
    // SAFETY: the `#[target_feature]` attributes guarantee the required
    // features are enabled whenever this function can be called.
    unsafe { _vcmlaq_rot90_f64(a, b, c) }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_lane_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmla_rot90_lane_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x4_t,
) -> float16x4_t {
    // `c` holds 2 complex numbers as (real, imag) element pairs; LANE picks one.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast the complex pair at `LANE` (elements 2*LANE, 2*LANE+1)
        // across both pairs, then reuse the non-lane intrinsic.
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_rot90_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_lane_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmlaq_rot90_lane_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x4_t,
) -> float16x8_t {
    // The 64-bit `c` holds 2 complex numbers as (real, imag) pairs; LANE picks one.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Widen by broadcasting the selected pair across all four pairs of a
        // 128-bit vector, then reuse the non-lane intrinsic.
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot90_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_rot90_lane_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x2_t,
) -> float32x2_t {
    // A float32x2_t holds exactly one (real, imag) complex pair, so only
    // lane 0 is valid.
    static_assert!(LANE == 0);
    unsafe {
        // With LANE fixed at 0 this shuffle is an identity; it keeps the code
        // shape uniform with the other lane variants.
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_rot90_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_rot90_lane_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x2_t,
) -> float32x4_t {
    // The 64-bit `c` holds exactly one (real, imag) complex pair, so only
    // lane 0 is valid.
    static_assert!(LANE == 0);
    unsafe {
        // Widen by broadcasting the single pair across both pairs of a
        // 128-bit vector, then reuse the non-lane intrinsic.
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot90_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_laneq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmla_rot90_laneq_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x8_t,
) -> float16x4_t {
    // The 128-bit `c` holds 4 complex numbers as (real, imag) pairs, so the
    // complex lane index needs 2 bits.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Narrow by broadcasting the selected pair across both pairs of a
        // 64-bit vector, then reuse the non-lane intrinsic.
        let c: float16x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmla_rot90_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_laneq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcmlaq_rot90_laneq_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x8_t,
) -> float16x8_t {
    // `c` holds 4 complex numbers as (real, imag) element pairs, so the
    // complex lane index needs 2 bits.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast the complex pair at `LANE` across all four pairs, then
        // reuse the non-lane intrinsic.
        let c: float16x8_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot90_f16(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmla_rot90_laneq_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x4_t,
) -> float32x2_t {
    // The 128-bit `c` holds 2 complex numbers as (real, imag) pairs; LANE picks one.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Extract the selected pair into a 2-element vector, then reuse the
        // non-lane intrinsic.
        let c: float32x2_t = simd_shuffle!(c, c, [2 * LANE as u32, 2 * LANE as u32 + 1]);
        vcmla_rot90_f32(a, b, c)
    }
}
#[doc = "Floating-point complex multiply accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,fcma")]
#[cfg_attr(test, assert_instr(fcmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[unstable(feature = "stdarch_neon_fcma", issue = "117222")]
pub fn vcmlaq_rot90_laneq_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x4_t,
) -> float32x4_t {
    // `c` holds 2 complex numbers as (real, imag) element pairs; LANE picks one.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast the complex pair at `LANE` across both pairs, then reuse
        // the non-lane intrinsic.
        let c: float32x4_t = simd_shuffle!(
            c,
            c,
            [
                2 * LANE as u32,
                2 * LANE as u32 + 1,
                2 * LANE as u32,
                2 * LANE as u32 + 1
            ]
        );
        vcmlaq_rot90_f32(a, b, c)
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_f32<const LANE1: i32, const LANE2: i32>(
    a: float32x2_t,
    b: float32x2_t,
) -> float32x2_t {
    // LANE1 = destination lane in `a`; LANE2 = source lane in `b`.
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    unsafe {
        // simd_shuffle! needs const indices, so dispatch on LANE1; shuffle
        // index 2 + LANE2 addresses lane LANE2 of `b` (the second operand).
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            // SAFETY: the `& 0b1` mask limits the discriminant to the arms above.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_s8<const LANE1: i32, const LANE2: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    // LANE1 = destination lane in `a`; LANE2 = source lane in `b`.
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 3);
    unsafe {
        // simd_shuffle! needs const indices, so dispatch on LANE1; shuffle
        // index 8 + LANE2 addresses lane LANE2 of `b` (the second operand).
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            // SAFETY: the `& 0b111` mask limits the discriminant to 0..=7.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_s16<const LANE1: i32, const LANE2: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    // LANE1 = destination lane in `a`; LANE2 = source lane in `b`.
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 2);
    unsafe {
        // simd_shuffle! needs const indices, so dispatch on LANE1; shuffle
        // index 4 + LANE2 addresses lane LANE2 of `b` (the second operand).
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            // SAFETY: the `& 0b11` mask limits the discriminant to 0..=3.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_s32<const LANE1: i32, const LANE2: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    // LANE1 = destination lane in `a`; LANE2 = source lane in `b`.
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    unsafe {
        // simd_shuffle! needs const indices, so dispatch on LANE1; shuffle
        // index 2 + LANE2 addresses lane LANE2 of `b` (the second operand).
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            // SAFETY: the `& 0b1` mask limits the discriminant to the arms above.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_u8<const LANE1: i32, const LANE2: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    // LANE1 = destination lane in `a`; LANE2 = source lane in `b`.
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 3);
    unsafe {
        // simd_shuffle! needs const indices, so dispatch on LANE1; shuffle
        // index 8 + LANE2 addresses lane LANE2 of `b` (the second operand).
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            // SAFETY: the `& 0b111` mask limits the discriminant to 0..=7.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_u16<const LANE1: i32, const LANE2: i32>(
    a: uint16x4_t,
    b: uint16x4_t,
) -> uint16x4_t {
    // LANE1 = destination lane in `a`; LANE2 = source lane in `b`.
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 2);
    unsafe {
        // simd_shuffle! needs const indices, so dispatch on LANE1; shuffle
        // index 4 + LANE2 addresses lane LANE2 of `b` (the second operand).
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            // SAFETY: the `& 0b11` mask limits the discriminant to 0..=3.
            _ => unreachable_unchecked(),
        }
    }
}
4221#[doc = "Insert vector element from another vector element"]
4222#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_u32)"]
4223#[inline(always)]
4224#[target_feature(enable = "neon")]
4225#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
4226#[rustc_legacy_const_generics(1, 3)]
4227#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_u32<const LANE1: i32, const LANE2: i32>(
    a: uint32x2_t,
    b: uint32x2_t,
) -> uint32x2_t {
    // Returns `a` with lane LANE1 replaced by lane LANE2 of `b`.
    // Both lane indices must fit in 1 bit (0..=1) for 2-lane vectors.
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    // simd_shuffle! requires a const index array, so dispatch on LANE1;
    // indices 0..2 pick from `a`, 2 + LANE2 picks the source lane from `b`.
    unsafe {
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            // SAFETY: LANE1 & 0b1 is always 0 or 1, both covered above.
            _ => unreachable_unchecked(),
        }
    }
}
4242#[doc = "Insert vector element from another vector element"]
4243#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_p8)"]
4244#[inline(always)]
4245#[target_feature(enable = "neon")]
4246#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
4247#[rustc_legacy_const_generics(1, 3)]
4248#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_p8<const LANE1: i32, const LANE2: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    // Returns `a` with lane LANE1 replaced by lane LANE2 of `b`.
    // Both lane indices must fit in 3 bits (0..=7) for 8-lane vectors.
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 3);
    // simd_shuffle! requires a const index array, so dispatch on LANE1;
    // indices 0..8 pick from `a`, 8 + LANE2 picks the source lane from `b`.
    unsafe {
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            // SAFETY: LANE1 & 0b111 is always 0..=7, all covered above.
            _ => unreachable_unchecked(),
        }
    }
}
4266#[doc = "Insert vector element from another vector element"]
4267#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_p16)"]
4268#[inline(always)]
4269#[target_feature(enable = "neon")]
4270#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
4271#[rustc_legacy_const_generics(1, 3)]
4272#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_lane_p16<const LANE1: i32, const LANE2: i32>(
    a: poly16x4_t,
    b: poly16x4_t,
) -> poly16x4_t {
    // Returns `a` with lane LANE1 replaced by lane LANE2 of `b`.
    // Both lane indices must fit in 2 bits (0..=3) for 4-lane vectors.
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 2);
    // simd_shuffle! requires a const index array, so dispatch on LANE1;
    // indices 0..4 pick from `a`, 4 + LANE2 picks the source lane from `b`.
    unsafe {
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            // SAFETY: LANE1 & 0b11 is always 0..=3, all covered above.
            _ => unreachable_unchecked(),
        }
    }
}
4289#[doc = "Insert vector element from another vector element"]
4290#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_f32)"]
4291#[inline(always)]
4292#[target_feature(enable = "neon")]
4293#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
4294#[rustc_legacy_const_generics(1, 3)]
4295#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_f32<const LANE1: i32, const LANE2: i32>(
    a: float32x2_t,
    b: float32x4_t,
) -> float32x2_t {
    // Returns 2-lane `a` with lane LANE1 replaced by lane LANE2 of 4-lane `b`.
    static_assert_uimm_bits!(LANE1, 1); // destination lane: 0..=1
    static_assert_uimm_bits!(LANE2, 2); // source lane: 0..=3
    // simd_shuffle! needs equal-length operands, so widen `a` to 4 lanes
    // (the upper two lanes are never selected below).
    let a: float32x4_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3]) };
    // Dispatch on LANE1; index 4 + LANE2 selects the source lane from `b`.
    unsafe {
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32]),
            // SAFETY: LANE1 & 0b1 is always 0 or 1, both covered above.
            _ => unreachable_unchecked(),
        }
    }
}
4311#[doc = "Insert vector element from another vector element"]
4312#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_s8)"]
4313#[inline(always)]
4314#[target_feature(enable = "neon")]
4315#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
4316#[rustc_legacy_const_generics(1, 3)]
4317#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_s8<const LANE1: i32, const LANE2: i32>(a: int8x8_t, b: int8x16_t) -> int8x8_t {
    // Returns 8-lane `a` with lane LANE1 replaced by lane LANE2 of 16-lane `b`.
    static_assert_uimm_bits!(LANE1, 3); // destination lane: 0..=7
    static_assert_uimm_bits!(LANE2, 4); // source lane: 0..=15
    // simd_shuffle! needs equal-length operands, so widen `a` to 16 lanes
    // (the upper half is never selected below).
    let a: int8x16_t =
        unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
    // Dispatch on LANE1; index 16 + LANE2 selects the source lane from `b`.
    unsafe {
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]),
            // SAFETY: LANE1 & 0b111 is always 0..=7, all covered above.
            _ => unreachable_unchecked(),
        }
    }
}
4337#[doc = "Insert vector element from another vector element"]
4338#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_s16)"]
4339#[inline(always)]
4340#[target_feature(enable = "neon")]
4341#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
4342#[rustc_legacy_const_generics(1, 3)]
4343#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_s16<const LANE1: i32, const LANE2: i32>(
    a: int16x4_t,
    b: int16x8_t,
) -> int16x4_t {
    // Returns 4-lane `a` with lane LANE1 replaced by lane LANE2 of 8-lane `b`.
    static_assert_uimm_bits!(LANE1, 2); // destination lane: 0..=3
    static_assert_uimm_bits!(LANE2, 3); // source lane: 0..=7
    // simd_shuffle! needs equal-length operands, so widen `a` to 8 lanes
    // (the upper half is never selected below).
    let a: int16x8_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) };
    // Dispatch on LANE1; index 8 + LANE2 selects the source lane from `b`.
    unsafe {
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32]),
            // SAFETY: LANE1 & 0b11 is always 0..=3, all covered above.
            _ => unreachable_unchecked(),
        }
    }
}
4361#[doc = "Insert vector element from another vector element"]
4362#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_s32)"]
4363#[inline(always)]
4364#[target_feature(enable = "neon")]
4365#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
4366#[rustc_legacy_const_generics(1, 3)]
4367#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_s32<const LANE1: i32, const LANE2: i32>(
    a: int32x2_t,
    b: int32x4_t,
) -> int32x2_t {
    // Returns 2-lane `a` with lane LANE1 replaced by lane LANE2 of 4-lane `b`.
    static_assert_uimm_bits!(LANE1, 1); // destination lane: 0..=1
    static_assert_uimm_bits!(LANE2, 2); // source lane: 0..=3
    // simd_shuffle! needs equal-length operands, so widen `a` to 4 lanes
    // (the upper two lanes are never selected below).
    let a: int32x4_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3]) };
    // Dispatch on LANE1; index 4 + LANE2 selects the source lane from `b`.
    unsafe {
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32]),
            // SAFETY: LANE1 & 0b1 is always 0 or 1, both covered above.
            _ => unreachable_unchecked(),
        }
    }
}
4383#[doc = "Insert vector element from another vector element"]
4384#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_u8)"]
4385#[inline(always)]
4386#[target_feature(enable = "neon")]
4387#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
4388#[rustc_legacy_const_generics(1, 3)]
4389#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_u8<const LANE1: i32, const LANE2: i32>(
    a: uint8x8_t,
    b: uint8x16_t,
) -> uint8x8_t {
    // Returns 8-lane `a` with lane LANE1 replaced by lane LANE2 of 16-lane `b`.
    static_assert_uimm_bits!(LANE1, 3); // destination lane: 0..=7
    static_assert_uimm_bits!(LANE2, 4); // source lane: 0..=15
    // simd_shuffle! needs equal-length operands, so widen `a` to 16 lanes
    // (the upper half is never selected below).
    let a: uint8x16_t =
        unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
    // Dispatch on LANE1; index 16 + LANE2 selects the source lane from `b`.
    unsafe {
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]),
            // SAFETY: LANE1 & 0b111 is always 0..=7, all covered above.
            _ => unreachable_unchecked(),
        }
    }
}
4412#[doc = "Insert vector element from another vector element"]
4413#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_u16)"]
4414#[inline(always)]
4415#[target_feature(enable = "neon")]
4416#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
4417#[rustc_legacy_const_generics(1, 3)]
4418#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_u16<const LANE1: i32, const LANE2: i32>(
    a: uint16x4_t,
    b: uint16x8_t,
) -> uint16x4_t {
    // Returns 4-lane `a` with lane LANE1 replaced by lane LANE2 of 8-lane `b`.
    static_assert_uimm_bits!(LANE1, 2); // destination lane: 0..=3
    static_assert_uimm_bits!(LANE2, 3); // source lane: 0..=7
    // simd_shuffle! needs equal-length operands, so widen `a` to 8 lanes
    // (the upper half is never selected below).
    let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) };
    // Dispatch on LANE1; index 8 + LANE2 selects the source lane from `b`.
    unsafe {
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32]),
            // SAFETY: LANE1 & 0b11 is always 0..=3, all covered above.
            _ => unreachable_unchecked(),
        }
    }
}
4436#[doc = "Insert vector element from another vector element"]
4437#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_u32)"]
4438#[inline(always)]
4439#[target_feature(enable = "neon")]
4440#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
4441#[rustc_legacy_const_generics(1, 3)]
4442#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_u32<const LANE1: i32, const LANE2: i32>(
    a: uint32x2_t,
    b: uint32x4_t,
) -> uint32x2_t {
    // Returns 2-lane `a` with lane LANE1 replaced by lane LANE2 of 4-lane `b`.
    static_assert_uimm_bits!(LANE1, 1); // destination lane: 0..=1
    static_assert_uimm_bits!(LANE2, 2); // source lane: 0..=3
    // simd_shuffle! needs equal-length operands, so widen `a` to 4 lanes
    // (the upper two lanes are never selected below).
    let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3]) };
    // Dispatch on LANE1; index 4 + LANE2 selects the source lane from `b`.
    unsafe {
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32]),
            // SAFETY: LANE1 & 0b1 is always 0 or 1, both covered above.
            _ => unreachable_unchecked(),
        }
    }
}
4458#[doc = "Insert vector element from another vector element"]
4459#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_p8)"]
4460#[inline(always)]
4461#[target_feature(enable = "neon")]
4462#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
4463#[rustc_legacy_const_generics(1, 3)]
4464#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_p8<const LANE1: i32, const LANE2: i32>(
    a: poly8x8_t,
    b: poly8x16_t,
) -> poly8x8_t {
    // Returns 8-lane `a` with lane LANE1 replaced by lane LANE2 of 16-lane `b`.
    static_assert_uimm_bits!(LANE1, 3); // destination lane: 0..=7
    static_assert_uimm_bits!(LANE2, 4); // source lane: 0..=15
    // simd_shuffle! needs equal-length operands, so widen `a` to 16 lanes
    // (the upper half is never selected below).
    let a: poly8x16_t =
        unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
    // Dispatch on LANE1; index 16 + LANE2 selects the source lane from `b`.
    unsafe {
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]),
            // SAFETY: LANE1 & 0b111 is always 0..=7, all covered above.
            _ => unreachable_unchecked(),
        }
    }
}
4487#[doc = "Insert vector element from another vector element"]
4488#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_p16)"]
4489#[inline(always)]
4490#[target_feature(enable = "neon")]
4491#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
4492#[rustc_legacy_const_generics(1, 3)]
4493#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopy_laneq_p16<const LANE1: i32, const LANE2: i32>(
    a: poly16x4_t,
    b: poly16x8_t,
) -> poly16x4_t {
    // Returns 4-lane `a` with lane LANE1 replaced by lane LANE2 of 8-lane `b`.
    static_assert_uimm_bits!(LANE1, 2); // destination lane: 0..=3
    static_assert_uimm_bits!(LANE2, 3); // source lane: 0..=7
    // simd_shuffle! needs equal-length operands, so widen `a` to 8 lanes
    // (the upper half is never selected below).
    let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) };
    // Dispatch on LANE1; index 8 + LANE2 selects the source lane from `b`.
    unsafe {
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32]),
            // SAFETY: LANE1 & 0b11 is always 0..=3, all covered above.
            _ => unreachable_unchecked(),
        }
    }
}
4511#[doc = "Insert vector element from another vector element"]
4512#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_f32)"]
4513#[inline(always)]
4514#[target_feature(enable = "neon")]
4515#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
4516#[rustc_legacy_const_generics(1, 3)]
4517#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_f32<const LANE1: i32, const LANE2: i32>(
    a: float32x4_t,
    b: float32x2_t,
) -> float32x4_t {
    // Returns 4-lane `a` with lane LANE1 replaced by lane LANE2 of 2-lane `b`.
    static_assert_uimm_bits!(LANE1, 2); // destination lane: 0..=3
    static_assert_uimm_bits!(LANE2, 1); // source lane: 0..=1
    // simd_shuffle! needs equal-length operands, so widen `b` to 4 lanes
    // (the upper two lanes are never selected below).
    let b: float32x4_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3]) };
    // Dispatch on LANE1; index 4 + LANE2 selects the source lane from `b`.
    unsafe {
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            // SAFETY: LANE1 & 0b11 is always 0..=3, all covered above.
            _ => unreachable_unchecked(),
        }
    }
}
4535#[doc = "Insert vector element from another vector element"]
4536#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_f64)"]
4537#[inline(always)]
4538#[target_feature(enable = "neon")]
4539#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
4540#[rustc_legacy_const_generics(1, 3)]
4541#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_f64<const LANE1: i32, const LANE2: i32>(
    a: float64x2_t,
    b: float64x1_t,
) -> float64x2_t {
    // Returns 2-lane `a` with lane LANE1 replaced by the single lane of `b`.
    static_assert_uimm_bits!(LANE1, 1); // destination lane: 0..=1
    static_assert!(LANE2 == 0); // `b` has only one lane
    // simd_shuffle! needs equal-length operands, so widen `b` to 2 lanes
    // (the upper lane is never selected below since LANE2 == 0).
    let b: float64x2_t = unsafe { simd_shuffle!(b, b, [0, 1]) };
    // Dispatch on LANE1; index 2 + LANE2 selects the source lane from `b`.
    unsafe {
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            // SAFETY: LANE1 & 0b1 is always 0 or 1, both covered above.
            _ => unreachable_unchecked(),
        }
    }
}
4557#[doc = "Insert vector element from another vector element"]
4558#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s64)"]
4559#[inline(always)]
4560#[target_feature(enable = "neon")]
4561#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
4562#[rustc_legacy_const_generics(1, 3)]
4563#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_s64<const LANE1: i32, const LANE2: i32>(
    a: int64x2_t,
    b: int64x1_t,
) -> int64x2_t {
    // Returns 2-lane `a` with lane LANE1 replaced by the single lane of `b`.
    static_assert_uimm_bits!(LANE1, 1); // destination lane: 0..=1
    static_assert!(LANE2 == 0); // `b` has only one lane
    // simd_shuffle! needs equal-length operands, so widen `b` to 2 lanes
    // (the upper lane is never selected below since LANE2 == 0).
    let b: int64x2_t = unsafe { simd_shuffle!(b, b, [0, 1]) };
    // Dispatch on LANE1; index 2 + LANE2 selects the source lane from `b`.
    unsafe {
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            // SAFETY: LANE1 & 0b1 is always 0 or 1, both covered above.
            _ => unreachable_unchecked(),
        }
    }
}
4579#[doc = "Insert vector element from another vector element"]
4580#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u64)"]
4581#[inline(always)]
4582#[target_feature(enable = "neon")]
4583#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
4584#[rustc_legacy_const_generics(1, 3)]
4585#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_u64<const LANE1: i32, const LANE2: i32>(
    a: uint64x2_t,
    b: uint64x1_t,
) -> uint64x2_t {
    // Returns 2-lane `a` with lane LANE1 replaced by the single lane of `b`.
    static_assert_uimm_bits!(LANE1, 1); // destination lane: 0..=1
    static_assert!(LANE2 == 0); // `b` has only one lane
    // simd_shuffle! needs equal-length operands, so widen `b` to 2 lanes
    // (the upper lane is never selected below since LANE2 == 0).
    let b: uint64x2_t = unsafe { simd_shuffle!(b, b, [0, 1]) };
    // Dispatch on LANE1; index 2 + LANE2 selects the source lane from `b`.
    unsafe {
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            // SAFETY: LANE1 & 0b1 is always 0 or 1, both covered above.
            _ => unreachable_unchecked(),
        }
    }
}
4601#[doc = "Insert vector element from another vector element"]
4602#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p64)"]
4603#[inline(always)]
4604#[target_feature(enable = "neon")]
4605#[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))]
4606#[rustc_legacy_const_generics(1, 3)]
4607#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_p64<const LANE1: i32, const LANE2: i32>(
    a: poly64x2_t,
    b: poly64x1_t,
) -> poly64x2_t {
    // Returns 2-lane `a` with lane LANE1 replaced by the single lane of `b`.
    static_assert_uimm_bits!(LANE1, 1); // destination lane: 0..=1
    static_assert!(LANE2 == 0); // `b` has only one lane
    // simd_shuffle! needs equal-length operands, so widen `b` to 2 lanes
    // (the upper lane is never selected below since LANE2 == 0).
    let b: poly64x2_t = unsafe { simd_shuffle!(b, b, [0, 1]) };
    // Dispatch on LANE1; index 2 + LANE2 selects the source lane from `b`.
    unsafe {
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            // SAFETY: LANE1 & 0b1 is always 0 or 1, both covered above.
            _ => unreachable_unchecked(),
        }
    }
}
4623#[doc = "Insert vector element from another vector element"]
4624#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s8)"]
4625#[inline(always)]
4626#[target_feature(enable = "neon")]
4627#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
4628#[rustc_legacy_const_generics(1, 3)]
4629#[stable(feature = "neon_intrinsics", since = "1.59.0")]
4630pub fn vcopyq_lane_s8<const LANE1: i32, const LANE2: i32>(a: int8x16_t, b: int8x8_t) -> int8x16_t {
4631    static_assert_uimm_bits!(LANE1, 4);
4632    static_assert_uimm_bits!(LANE2, 3);
4633    let b: int8x16_t =
4634        unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
4635    unsafe {
4636        match LANE1 & 0b1111 {
4637            0 => simd_shuffle!(
4638                a,
4639                b,
4640                [
4641                    16 + LANE2 as u32,
4642                    1,
4643                    2,
4644                    3,
4645                    4,
4646                    5,
4647                    6,
4648                    7,
4649                    8,
4650                    9,
4651                    10,
4652                    11,
4653                    12,
4654                    13,
4655                    14,
4656                    15
4657                ]
4658            ),
4659            1 => simd_shuffle!(
4660                a,
4661                b,
4662                [
4663                    0,
4664                    16 + LANE2 as u32,
4665                    2,
4666                    3,
4667                    4,
4668                    5,
4669                    6,
4670                    7,
4671                    8,
4672                    9,
4673                    10,
4674                    11,
4675                    12,
4676                    13,
4677                    14,
4678                    15
4679                ]
4680            ),
4681            2 => simd_shuffle!(
4682                a,
4683                b,
4684                [
4685                    0,
4686                    1,
4687                    16 + LANE2 as u32,
4688                    3,
4689                    4,
4690                    5,
4691                    6,
4692                    7,
4693                    8,
4694                    9,
4695                    10,
4696                    11,
4697                    12,
4698                    13,
4699                    14,
4700                    15
4701                ]
4702            ),
4703            3 => simd_shuffle!(
4704                a,
4705                b,
4706                [
4707                    0,
4708                    1,
4709                    2,
4710                    16 + LANE2 as u32,
4711                    4,
4712                    5,
4713                    6,
4714                    7,
4715                    8,
4716                    9,
4717                    10,
4718                    11,
4719                    12,
4720                    13,
4721                    14,
4722                    15
4723                ]
4724            ),
4725            4 => simd_shuffle!(
4726                a,
4727                b,
4728                [
4729                    0,
4730                    1,
4731                    2,
4732                    3,
4733                    16 + LANE2 as u32,
4734                    5,
4735                    6,
4736                    7,
4737                    8,
4738                    9,
4739                    10,
4740                    11,
4741                    12,
4742                    13,
4743                    14,
4744                    15
4745                ]
4746            ),
4747            5 => simd_shuffle!(
4748                a,
4749                b,
4750                [
4751                    0,
4752                    1,
4753                    2,
4754                    3,
4755                    4,
4756                    16 + LANE2 as u32,
4757                    6,
4758                    7,
4759                    8,
4760                    9,
4761                    10,
4762                    11,
4763                    12,
4764                    13,
4765                    14,
4766                    15
4767                ]
4768            ),
4769            6 => simd_shuffle!(
4770                a,
4771                b,
4772                [
4773                    0,
4774                    1,
4775                    2,
4776                    3,
4777                    4,
4778                    5,
4779                    16 + LANE2 as u32,
4780                    7,
4781                    8,
4782                    9,
4783                    10,
4784                    11,
4785                    12,
4786                    13,
4787                    14,
4788                    15
4789                ]
4790            ),
4791            7 => simd_shuffle!(
4792                a,
4793                b,
4794                [
4795                    0,
4796                    1,
4797                    2,
4798                    3,
4799                    4,
4800                    5,
4801                    6,
4802                    16 + LANE2 as u32,
4803                    8,
4804                    9,
4805                    10,
4806                    11,
4807                    12,
4808                    13,
4809                    14,
4810                    15
4811                ]
4812            ),
4813            8 => simd_shuffle!(
4814                a,
4815                b,
4816                [
4817                    0,
4818                    1,
4819                    2,
4820                    3,
4821                    4,
4822                    5,
4823                    6,
4824                    7,
4825                    16 + LANE2 as u32,
4826                    9,
4827                    10,
4828                    11,
4829                    12,
4830                    13,
4831                    14,
4832                    15
4833                ]
4834            ),
4835            9 => simd_shuffle!(
4836                a,
4837                b,
4838                [
4839                    0,
4840                    1,
4841                    2,
4842                    3,
4843                    4,
4844                    5,
4845                    6,
4846                    7,
4847                    8,
4848                    16 + LANE2 as u32,
4849                    10,
4850                    11,
4851                    12,
4852                    13,
4853                    14,
4854                    15
4855                ]
4856            ),
4857            10 => simd_shuffle!(
4858                a,
4859                b,
4860                [
4861                    0,
4862                    1,
4863                    2,
4864                    3,
4865                    4,
4866                    5,
4867                    6,
4868                    7,
4869                    8,
4870                    9,
4871                    16 + LANE2 as u32,
4872                    11,
4873                    12,
4874                    13,
4875                    14,
4876                    15
4877                ]
4878            ),
4879            11 => simd_shuffle!(
4880                a,
4881                b,
4882                [
4883                    0,
4884                    1,
4885                    2,
4886                    3,
4887                    4,
4888                    5,
4889                    6,
4890                    7,
4891                    8,
4892                    9,
4893                    10,
4894                    16 + LANE2 as u32,
4895                    12,
4896                    13,
4897                    14,
4898                    15
4899                ]
4900            ),
4901            12 => simd_shuffle!(
4902                a,
4903                b,
4904                [
4905                    0,
4906                    1,
4907                    2,
4908                    3,
4909                    4,
4910                    5,
4911                    6,
4912                    7,
4913                    8,
4914                    9,
4915                    10,
4916                    11,
4917                    16 + LANE2 as u32,
4918                    13,
4919                    14,
4920                    15
4921                ]
4922            ),
4923            13 => simd_shuffle!(
4924                a,
4925                b,
4926                [
4927                    0,
4928                    1,
4929                    2,
4930                    3,
4931                    4,
4932                    5,
4933                    6,
4934                    7,
4935                    8,
4936                    9,
4937                    10,
4938                    11,
4939                    12,
4940                    16 + LANE2 as u32,
4941                    14,
4942                    15
4943                ]
4944            ),
4945            14 => simd_shuffle!(
4946                a,
4947                b,
4948                [
4949                    0,
4950                    1,
4951                    2,
4952                    3,
4953                    4,
4954                    5,
4955                    6,
4956                    7,
4957                    8,
4958                    9,
4959                    10,
4960                    11,
4961                    12,
4962                    13,
4963                    16 + LANE2 as u32,
4964                    15
4965                ]
4966            ),
4967            15 => simd_shuffle!(
4968                a,
4969                b,
4970                [
4971                    0,
4972                    1,
4973                    2,
4974                    3,
4975                    4,
4976                    5,
4977                    6,
4978                    7,
4979                    8,
4980                    9,
4981                    10,
4982                    11,
4983                    12,
4984                    13,
4985                    14,
4986                    16 + LANE2 as u32
4987                ]
4988            ),
4989            _ => unreachable_unchecked(),
4990        }
4991    }
4992}
4993#[doc = "Insert vector element from another vector element"]
4994#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s16)"]
4995#[inline(always)]
4996#[target_feature(enable = "neon")]
4997#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
4998#[rustc_legacy_const_generics(1, 3)]
4999#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_s16<const LANE1: i32, const LANE2: i32>(
    a: int16x8_t,
    b: int16x4_t,
) -> int16x8_t {
    // Returns 8-lane `a` with lane LANE1 replaced by lane LANE2 of 4-lane `b`.
    static_assert_uimm_bits!(LANE1, 3); // destination lane: 0..=7
    static_assert_uimm_bits!(LANE2, 2); // source lane: 0..=3
    // simd_shuffle! needs equal-length operands, so widen `b` to 8 lanes
    // (the upper half is never selected below).
    let b: int16x8_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]) };
    // Dispatch on LANE1; index 8 + LANE2 selects the source lane from `b`.
    unsafe {
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            // SAFETY: LANE1 & 0b111 is always 0..=7, all covered above.
            _ => unreachable_unchecked(),
        }
    }
}
5021#[doc = "Insert vector element from another vector element"]
5022#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s32)"]
5023#[inline(always)]
5024#[target_feature(enable = "neon")]
5025#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
5026#[rustc_legacy_const_generics(1, 3)]
5027#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_lane_s32<const LANE1: i32, const LANE2: i32>(
    a: int32x4_t,
    b: int32x2_t,
) -> int32x4_t {
    // Returns 4-lane `a` with lane LANE1 replaced by lane LANE2 of 2-lane `b`.
    static_assert_uimm_bits!(LANE1, 2); // destination lane: 0..=3
    static_assert_uimm_bits!(LANE2, 1); // source lane: 0..=1
    // simd_shuffle! needs equal-length operands, so widen `b` to 4 lanes
    // (the upper two lanes are never selected below).
    let b: int32x4_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3]) };
    // Dispatch on LANE1; index 4 + LANE2 selects the source lane from `b`.
    unsafe {
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            // SAFETY: LANE1 & 0b11 is always 0..=3, all covered above.
            _ => unreachable_unchecked(),
        }
    }
}
5045#[doc = "Insert vector element from another vector element"]
5046#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u8)"]
5047#[inline(always)]
5048#[target_feature(enable = "neon")]
5049#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
5050#[rustc_legacy_const_generics(1, 3)]
5051#[stable(feature = "neon_intrinsics", since = "1.59.0")]
5052pub fn vcopyq_lane_u8<const LANE1: i32, const LANE2: i32>(
5053    a: uint8x16_t,
5054    b: uint8x8_t,
5055) -> uint8x16_t {
5056    static_assert_uimm_bits!(LANE1, 4);
5057    static_assert_uimm_bits!(LANE2, 3);
5058    let b: uint8x16_t =
5059        unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
5060    unsafe {
5061        match LANE1 & 0b1111 {
5062            0 => simd_shuffle!(
5063                a,
5064                b,
5065                [
5066                    16 + LANE2 as u32,
5067                    1,
5068                    2,
5069                    3,
5070                    4,
5071                    5,
5072                    6,
5073                    7,
5074                    8,
5075                    9,
5076                    10,
5077                    11,
5078                    12,
5079                    13,
5080                    14,
5081                    15
5082                ]
5083            ),
5084            1 => simd_shuffle!(
5085                a,
5086                b,
5087                [
5088                    0,
5089                    16 + LANE2 as u32,
5090                    2,
5091                    3,
5092                    4,
5093                    5,
5094                    6,
5095                    7,
5096                    8,
5097                    9,
5098                    10,
5099                    11,
5100                    12,
5101                    13,
5102                    14,
5103                    15
5104                ]
5105            ),
5106            2 => simd_shuffle!(
5107                a,
5108                b,
5109                [
5110                    0,
5111                    1,
5112                    16 + LANE2 as u32,
5113                    3,
5114                    4,
5115                    5,
5116                    6,
5117                    7,
5118                    8,
5119                    9,
5120                    10,
5121                    11,
5122                    12,
5123                    13,
5124                    14,
5125                    15
5126                ]
5127            ),
5128            3 => simd_shuffle!(
5129                a,
5130                b,
5131                [
5132                    0,
5133                    1,
5134                    2,
5135                    16 + LANE2 as u32,
5136                    4,
5137                    5,
5138                    6,
5139                    7,
5140                    8,
5141                    9,
5142                    10,
5143                    11,
5144                    12,
5145                    13,
5146                    14,
5147                    15
5148                ]
5149            ),
5150            4 => simd_shuffle!(
5151                a,
5152                b,
5153                [
5154                    0,
5155                    1,
5156                    2,
5157                    3,
5158                    16 + LANE2 as u32,
5159                    5,
5160                    6,
5161                    7,
5162                    8,
5163                    9,
5164                    10,
5165                    11,
5166                    12,
5167                    13,
5168                    14,
5169                    15
5170                ]
5171            ),
5172            5 => simd_shuffle!(
5173                a,
5174                b,
5175                [
5176                    0,
5177                    1,
5178                    2,
5179                    3,
5180                    4,
5181                    16 + LANE2 as u32,
5182                    6,
5183                    7,
5184                    8,
5185                    9,
5186                    10,
5187                    11,
5188                    12,
5189                    13,
5190                    14,
5191                    15
5192                ]
5193            ),
5194            6 => simd_shuffle!(
5195                a,
5196                b,
5197                [
5198                    0,
5199                    1,
5200                    2,
5201                    3,
5202                    4,
5203                    5,
5204                    16 + LANE2 as u32,
5205                    7,
5206                    8,
5207                    9,
5208                    10,
5209                    11,
5210                    12,
5211                    13,
5212                    14,
5213                    15
5214                ]
5215            ),
5216            7 => simd_shuffle!(
5217                a,
5218                b,
5219                [
5220                    0,
5221                    1,
5222                    2,
5223                    3,
5224                    4,
5225                    5,
5226                    6,
5227                    16 + LANE2 as u32,
5228                    8,
5229                    9,
5230                    10,
5231                    11,
5232                    12,
5233                    13,
5234                    14,
5235                    15
5236                ]
5237            ),
5238            8 => simd_shuffle!(
5239                a,
5240                b,
5241                [
5242                    0,
5243                    1,
5244                    2,
5245                    3,
5246                    4,
5247                    5,
5248                    6,
5249                    7,
5250                    16 + LANE2 as u32,
5251                    9,
5252                    10,
5253                    11,
5254                    12,
5255                    13,
5256                    14,
5257                    15
5258                ]
5259            ),
5260            9 => simd_shuffle!(
5261                a,
5262                b,
5263                [
5264                    0,
5265                    1,
5266                    2,
5267                    3,
5268                    4,
5269                    5,
5270                    6,
5271                    7,
5272                    8,
5273                    16 + LANE2 as u32,
5274                    10,
5275                    11,
5276                    12,
5277                    13,
5278                    14,
5279                    15
5280                ]
5281            ),
5282            10 => simd_shuffle!(
5283                a,
5284                b,
5285                [
5286                    0,
5287                    1,
5288                    2,
5289                    3,
5290                    4,
5291                    5,
5292                    6,
5293                    7,
5294                    8,
5295                    9,
5296                    16 + LANE2 as u32,
5297                    11,
5298                    12,
5299                    13,
5300                    14,
5301                    15
5302                ]
5303            ),
5304            11 => simd_shuffle!(
5305                a,
5306                b,
5307                [
5308                    0,
5309                    1,
5310                    2,
5311                    3,
5312                    4,
5313                    5,
5314                    6,
5315                    7,
5316                    8,
5317                    9,
5318                    10,
5319                    16 + LANE2 as u32,
5320                    12,
5321                    13,
5322                    14,
5323                    15
5324                ]
5325            ),
5326            12 => simd_shuffle!(
5327                a,
5328                b,
5329                [
5330                    0,
5331                    1,
5332                    2,
5333                    3,
5334                    4,
5335                    5,
5336                    6,
5337                    7,
5338                    8,
5339                    9,
5340                    10,
5341                    11,
5342                    16 + LANE2 as u32,
5343                    13,
5344                    14,
5345                    15
5346                ]
5347            ),
5348            13 => simd_shuffle!(
5349                a,
5350                b,
5351                [
5352                    0,
5353                    1,
5354                    2,
5355                    3,
5356                    4,
5357                    5,
5358                    6,
5359                    7,
5360                    8,
5361                    9,
5362                    10,
5363                    11,
5364                    12,
5365                    16 + LANE2 as u32,
5366                    14,
5367                    15
5368                ]
5369            ),
5370            14 => simd_shuffle!(
5371                a,
5372                b,
5373                [
5374                    0,
5375                    1,
5376                    2,
5377                    3,
5378                    4,
5379                    5,
5380                    6,
5381                    7,
5382                    8,
5383                    9,
5384                    10,
5385                    11,
5386                    12,
5387                    13,
5388                    16 + LANE2 as u32,
5389                    15
5390                ]
5391            ),
5392            15 => simd_shuffle!(
5393                a,
5394                b,
5395                [
5396                    0,
5397                    1,
5398                    2,
5399                    3,
5400                    4,
5401                    5,
5402                    6,
5403                    7,
5404                    8,
5405                    9,
5406                    10,
5407                    11,
5408                    12,
5409                    13,
5410                    14,
5411                    16 + LANE2 as u32
5412                ]
5413            ),
5414            _ => unreachable_unchecked(),
5415        }
5416    }
5417}
5418#[doc = "Insert vector element from another vector element"]
5419#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u16)"]
5420#[inline(always)]
5421#[target_feature(enable = "neon")]
5422#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
5423#[rustc_legacy_const_generics(1, 3)]
5424#[stable(feature = "neon_intrinsics", since = "1.59.0")]
5425pub fn vcopyq_lane_u16<const LANE1: i32, const LANE2: i32>(
5426    a: uint16x8_t,
5427    b: uint16x4_t,
5428) -> uint16x8_t {
5429    static_assert_uimm_bits!(LANE1, 3);
5430    static_assert_uimm_bits!(LANE2, 2);
5431    let b: uint16x8_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]) };
5432    unsafe {
5433        match LANE1 & 0b111 {
5434            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
5435            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
5436            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
5437            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
5438            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
5439            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
5440            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
5441            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
5442            _ => unreachable_unchecked(),
5443        }
5444    }
5445}
5446#[doc = "Insert vector element from another vector element"]
5447#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u32)"]
5448#[inline(always)]
5449#[target_feature(enable = "neon")]
5450#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
5451#[rustc_legacy_const_generics(1, 3)]
5452#[stable(feature = "neon_intrinsics", since = "1.59.0")]
5453pub fn vcopyq_lane_u32<const LANE1: i32, const LANE2: i32>(
5454    a: uint32x4_t,
5455    b: uint32x2_t,
5456) -> uint32x4_t {
5457    static_assert_uimm_bits!(LANE1, 2);
5458    static_assert_uimm_bits!(LANE2, 1);
5459    let b: uint32x4_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3]) };
5460    unsafe {
5461        match LANE1 & 0b11 {
5462            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
5463            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
5464            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
5465            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
5466            _ => unreachable_unchecked(),
5467        }
5468    }
5469}
5470#[doc = "Insert vector element from another vector element"]
5471#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p8)"]
5472#[inline(always)]
5473#[target_feature(enable = "neon")]
5474#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
5475#[rustc_legacy_const_generics(1, 3)]
5476#[stable(feature = "neon_intrinsics", since = "1.59.0")]
5477pub fn vcopyq_lane_p8<const LANE1: i32, const LANE2: i32>(
5478    a: poly8x16_t,
5479    b: poly8x8_t,
5480) -> poly8x16_t {
5481    static_assert_uimm_bits!(LANE1, 4);
5482    static_assert_uimm_bits!(LANE2, 3);
5483    let b: poly8x16_t =
5484        unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };
5485    unsafe {
5486        match LANE1 & 0b1111 {
5487            0 => simd_shuffle!(
5488                a,
5489                b,
5490                [
5491                    16 + LANE2 as u32,
5492                    1,
5493                    2,
5494                    3,
5495                    4,
5496                    5,
5497                    6,
5498                    7,
5499                    8,
5500                    9,
5501                    10,
5502                    11,
5503                    12,
5504                    13,
5505                    14,
5506                    15
5507                ]
5508            ),
5509            1 => simd_shuffle!(
5510                a,
5511                b,
5512                [
5513                    0,
5514                    16 + LANE2 as u32,
5515                    2,
5516                    3,
5517                    4,
5518                    5,
5519                    6,
5520                    7,
5521                    8,
5522                    9,
5523                    10,
5524                    11,
5525                    12,
5526                    13,
5527                    14,
5528                    15
5529                ]
5530            ),
5531            2 => simd_shuffle!(
5532                a,
5533                b,
5534                [
5535                    0,
5536                    1,
5537                    16 + LANE2 as u32,
5538                    3,
5539                    4,
5540                    5,
5541                    6,
5542                    7,
5543                    8,
5544                    9,
5545                    10,
5546                    11,
5547                    12,
5548                    13,
5549                    14,
5550                    15
5551                ]
5552            ),
5553            3 => simd_shuffle!(
5554                a,
5555                b,
5556                [
5557                    0,
5558                    1,
5559                    2,
5560                    16 + LANE2 as u32,
5561                    4,
5562                    5,
5563                    6,
5564                    7,
5565                    8,
5566                    9,
5567                    10,
5568                    11,
5569                    12,
5570                    13,
5571                    14,
5572                    15
5573                ]
5574            ),
5575            4 => simd_shuffle!(
5576                a,
5577                b,
5578                [
5579                    0,
5580                    1,
5581                    2,
5582                    3,
5583                    16 + LANE2 as u32,
5584                    5,
5585                    6,
5586                    7,
5587                    8,
5588                    9,
5589                    10,
5590                    11,
5591                    12,
5592                    13,
5593                    14,
5594                    15
5595                ]
5596            ),
5597            5 => simd_shuffle!(
5598                a,
5599                b,
5600                [
5601                    0,
5602                    1,
5603                    2,
5604                    3,
5605                    4,
5606                    16 + LANE2 as u32,
5607                    6,
5608                    7,
5609                    8,
5610                    9,
5611                    10,
5612                    11,
5613                    12,
5614                    13,
5615                    14,
5616                    15
5617                ]
5618            ),
5619            6 => simd_shuffle!(
5620                a,
5621                b,
5622                [
5623                    0,
5624                    1,
5625                    2,
5626                    3,
5627                    4,
5628                    5,
5629                    16 + LANE2 as u32,
5630                    7,
5631                    8,
5632                    9,
5633                    10,
5634                    11,
5635                    12,
5636                    13,
5637                    14,
5638                    15
5639                ]
5640            ),
5641            7 => simd_shuffle!(
5642                a,
5643                b,
5644                [
5645                    0,
5646                    1,
5647                    2,
5648                    3,
5649                    4,
5650                    5,
5651                    6,
5652                    16 + LANE2 as u32,
5653                    8,
5654                    9,
5655                    10,
5656                    11,
5657                    12,
5658                    13,
5659                    14,
5660                    15
5661                ]
5662            ),
5663            8 => simd_shuffle!(
5664                a,
5665                b,
5666                [
5667                    0,
5668                    1,
5669                    2,
5670                    3,
5671                    4,
5672                    5,
5673                    6,
5674                    7,
5675                    16 + LANE2 as u32,
5676                    9,
5677                    10,
5678                    11,
5679                    12,
5680                    13,
5681                    14,
5682                    15
5683                ]
5684            ),
5685            9 => simd_shuffle!(
5686                a,
5687                b,
5688                [
5689                    0,
5690                    1,
5691                    2,
5692                    3,
5693                    4,
5694                    5,
5695                    6,
5696                    7,
5697                    8,
5698                    16 + LANE2 as u32,
5699                    10,
5700                    11,
5701                    12,
5702                    13,
5703                    14,
5704                    15
5705                ]
5706            ),
5707            10 => simd_shuffle!(
5708                a,
5709                b,
5710                [
5711                    0,
5712                    1,
5713                    2,
5714                    3,
5715                    4,
5716                    5,
5717                    6,
5718                    7,
5719                    8,
5720                    9,
5721                    16 + LANE2 as u32,
5722                    11,
5723                    12,
5724                    13,
5725                    14,
5726                    15
5727                ]
5728            ),
5729            11 => simd_shuffle!(
5730                a,
5731                b,
5732                [
5733                    0,
5734                    1,
5735                    2,
5736                    3,
5737                    4,
5738                    5,
5739                    6,
5740                    7,
5741                    8,
5742                    9,
5743                    10,
5744                    16 + LANE2 as u32,
5745                    12,
5746                    13,
5747                    14,
5748                    15
5749                ]
5750            ),
5751            12 => simd_shuffle!(
5752                a,
5753                b,
5754                [
5755                    0,
5756                    1,
5757                    2,
5758                    3,
5759                    4,
5760                    5,
5761                    6,
5762                    7,
5763                    8,
5764                    9,
5765                    10,
5766                    11,
5767                    16 + LANE2 as u32,
5768                    13,
5769                    14,
5770                    15
5771                ]
5772            ),
5773            13 => simd_shuffle!(
5774                a,
5775                b,
5776                [
5777                    0,
5778                    1,
5779                    2,
5780                    3,
5781                    4,
5782                    5,
5783                    6,
5784                    7,
5785                    8,
5786                    9,
5787                    10,
5788                    11,
5789                    12,
5790                    16 + LANE2 as u32,
5791                    14,
5792                    15
5793                ]
5794            ),
5795            14 => simd_shuffle!(
5796                a,
5797                b,
5798                [
5799                    0,
5800                    1,
5801                    2,
5802                    3,
5803                    4,
5804                    5,
5805                    6,
5806                    7,
5807                    8,
5808                    9,
5809                    10,
5810                    11,
5811                    12,
5812                    13,
5813                    16 + LANE2 as u32,
5814                    15
5815                ]
5816            ),
5817            15 => simd_shuffle!(
5818                a,
5819                b,
5820                [
5821                    0,
5822                    1,
5823                    2,
5824                    3,
5825                    4,
5826                    5,
5827                    6,
5828                    7,
5829                    8,
5830                    9,
5831                    10,
5832                    11,
5833                    12,
5834                    13,
5835                    14,
5836                    16 + LANE2 as u32
5837                ]
5838            ),
5839            _ => unreachable_unchecked(),
5840        }
5841    }
5842}
5843#[doc = "Insert vector element from another vector element"]
5844#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p16)"]
5845#[inline(always)]
5846#[target_feature(enable = "neon")]
5847#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
5848#[rustc_legacy_const_generics(1, 3)]
5849#[stable(feature = "neon_intrinsics", since = "1.59.0")]
5850pub fn vcopyq_lane_p16<const LANE1: i32, const LANE2: i32>(
5851    a: poly16x8_t,
5852    b: poly16x4_t,
5853) -> poly16x8_t {
5854    static_assert_uimm_bits!(LANE1, 3);
5855    static_assert_uimm_bits!(LANE2, 2);
5856    let b: poly16x8_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]) };
5857    unsafe {
5858        match LANE1 & 0b111 {
5859            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
5860            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
5861            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
5862            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
5863            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
5864            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
5865            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
5866            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
5867            _ => unreachable_unchecked(),
5868        }
5869    }
5870}
5871#[doc = "Insert vector element from another vector element"]
5872#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_f32)"]
5873#[inline(always)]
5874#[target_feature(enable = "neon")]
5875#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
5876#[rustc_legacy_const_generics(1, 3)]
5877#[stable(feature = "neon_intrinsics", since = "1.59.0")]
5878pub fn vcopyq_laneq_f32<const LANE1: i32, const LANE2: i32>(
5879    a: float32x4_t,
5880    b: float32x4_t,
5881) -> float32x4_t {
5882    static_assert_uimm_bits!(LANE1, 2);
5883    static_assert_uimm_bits!(LANE2, 2);
5884    unsafe {
5885        match LANE1 & 0b11 {
5886            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
5887            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
5888            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
5889            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
5890            _ => unreachable_unchecked(),
5891        }
5892    }
5893}
5894#[doc = "Insert vector element from another vector element"]
5895#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_f64)"]
5896#[inline(always)]
5897#[target_feature(enable = "neon")]
5898#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
5899#[rustc_legacy_const_generics(1, 3)]
5900#[stable(feature = "neon_intrinsics", since = "1.59.0")]
5901pub fn vcopyq_laneq_f64<const LANE1: i32, const LANE2: i32>(
5902    a: float64x2_t,
5903    b: float64x2_t,
5904) -> float64x2_t {
5905    static_assert_uimm_bits!(LANE1, 1);
5906    static_assert_uimm_bits!(LANE2, 1);
5907    unsafe {
5908        match LANE1 & 0b1 {
5909            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
5910            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
5911            _ => unreachable_unchecked(),
5912        }
5913    }
5914}
5915#[doc = "Insert vector element from another vector element"]
5916#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s8)"]
5917#[inline(always)]
5918#[target_feature(enable = "neon")]
5919#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
5920#[rustc_legacy_const_generics(1, 3)]
5921#[stable(feature = "neon_intrinsics", since = "1.59.0")]
5922pub fn vcopyq_laneq_s8<const LANE1: i32, const LANE2: i32>(
5923    a: int8x16_t,
5924    b: int8x16_t,
5925) -> int8x16_t {
5926    static_assert_uimm_bits!(LANE1, 4);
5927    static_assert_uimm_bits!(LANE2, 4);
5928    unsafe {
5929        match LANE1 & 0b1111 {
5930            0 => simd_shuffle!(
5931                a,
5932                b,
5933                [
5934                    16 + LANE2 as u32,
5935                    1,
5936                    2,
5937                    3,
5938                    4,
5939                    5,
5940                    6,
5941                    7,
5942                    8,
5943                    9,
5944                    10,
5945                    11,
5946                    12,
5947                    13,
5948                    14,
5949                    15
5950                ]
5951            ),
5952            1 => simd_shuffle!(
5953                a,
5954                b,
5955                [
5956                    0,
5957                    16 + LANE2 as u32,
5958                    2,
5959                    3,
5960                    4,
5961                    5,
5962                    6,
5963                    7,
5964                    8,
5965                    9,
5966                    10,
5967                    11,
5968                    12,
5969                    13,
5970                    14,
5971                    15
5972                ]
5973            ),
5974            2 => simd_shuffle!(
5975                a,
5976                b,
5977                [
5978                    0,
5979                    1,
5980                    16 + LANE2 as u32,
5981                    3,
5982                    4,
5983                    5,
5984                    6,
5985                    7,
5986                    8,
5987                    9,
5988                    10,
5989                    11,
5990                    12,
5991                    13,
5992                    14,
5993                    15
5994                ]
5995            ),
5996            3 => simd_shuffle!(
5997                a,
5998                b,
5999                [
6000                    0,
6001                    1,
6002                    2,
6003                    16 + LANE2 as u32,
6004                    4,
6005                    5,
6006                    6,
6007                    7,
6008                    8,
6009                    9,
6010                    10,
6011                    11,
6012                    12,
6013                    13,
6014                    14,
6015                    15
6016                ]
6017            ),
6018            4 => simd_shuffle!(
6019                a,
6020                b,
6021                [
6022                    0,
6023                    1,
6024                    2,
6025                    3,
6026                    16 + LANE2 as u32,
6027                    5,
6028                    6,
6029                    7,
6030                    8,
6031                    9,
6032                    10,
6033                    11,
6034                    12,
6035                    13,
6036                    14,
6037                    15
6038                ]
6039            ),
6040            5 => simd_shuffle!(
6041                a,
6042                b,
6043                [
6044                    0,
6045                    1,
6046                    2,
6047                    3,
6048                    4,
6049                    16 + LANE2 as u32,
6050                    6,
6051                    7,
6052                    8,
6053                    9,
6054                    10,
6055                    11,
6056                    12,
6057                    13,
6058                    14,
6059                    15
6060                ]
6061            ),
6062            6 => simd_shuffle!(
6063                a,
6064                b,
6065                [
6066                    0,
6067                    1,
6068                    2,
6069                    3,
6070                    4,
6071                    5,
6072                    16 + LANE2 as u32,
6073                    7,
6074                    8,
6075                    9,
6076                    10,
6077                    11,
6078                    12,
6079                    13,
6080                    14,
6081                    15
6082                ]
6083            ),
6084            7 => simd_shuffle!(
6085                a,
6086                b,
6087                [
6088                    0,
6089                    1,
6090                    2,
6091                    3,
6092                    4,
6093                    5,
6094                    6,
6095                    16 + LANE2 as u32,
6096                    8,
6097                    9,
6098                    10,
6099                    11,
6100                    12,
6101                    13,
6102                    14,
6103                    15
6104                ]
6105            ),
6106            8 => simd_shuffle!(
6107                a,
6108                b,
6109                [
6110                    0,
6111                    1,
6112                    2,
6113                    3,
6114                    4,
6115                    5,
6116                    6,
6117                    7,
6118                    16 + LANE2 as u32,
6119                    9,
6120                    10,
6121                    11,
6122                    12,
6123                    13,
6124                    14,
6125                    15
6126                ]
6127            ),
6128            9 => simd_shuffle!(
6129                a,
6130                b,
6131                [
6132                    0,
6133                    1,
6134                    2,
6135                    3,
6136                    4,
6137                    5,
6138                    6,
6139                    7,
6140                    8,
6141                    16 + LANE2 as u32,
6142                    10,
6143                    11,
6144                    12,
6145                    13,
6146                    14,
6147                    15
6148                ]
6149            ),
6150            10 => simd_shuffle!(
6151                a,
6152                b,
6153                [
6154                    0,
6155                    1,
6156                    2,
6157                    3,
6158                    4,
6159                    5,
6160                    6,
6161                    7,
6162                    8,
6163                    9,
6164                    16 + LANE2 as u32,
6165                    11,
6166                    12,
6167                    13,
6168                    14,
6169                    15
6170                ]
6171            ),
6172            11 => simd_shuffle!(
6173                a,
6174                b,
6175                [
6176                    0,
6177                    1,
6178                    2,
6179                    3,
6180                    4,
6181                    5,
6182                    6,
6183                    7,
6184                    8,
6185                    9,
6186                    10,
6187                    16 + LANE2 as u32,
6188                    12,
6189                    13,
6190                    14,
6191                    15
6192                ]
6193            ),
6194            12 => simd_shuffle!(
6195                a,
6196                b,
6197                [
6198                    0,
6199                    1,
6200                    2,
6201                    3,
6202                    4,
6203                    5,
6204                    6,
6205                    7,
6206                    8,
6207                    9,
6208                    10,
6209                    11,
6210                    16 + LANE2 as u32,
6211                    13,
6212                    14,
6213                    15
6214                ]
6215            ),
6216            13 => simd_shuffle!(
6217                a,
6218                b,
6219                [
6220                    0,
6221                    1,
6222                    2,
6223                    3,
6224                    4,
6225                    5,
6226                    6,
6227                    7,
6228                    8,
6229                    9,
6230                    10,
6231                    11,
6232                    12,
6233                    16 + LANE2 as u32,
6234                    14,
6235                    15
6236                ]
6237            ),
6238            14 => simd_shuffle!(
6239                a,
6240                b,
6241                [
6242                    0,
6243                    1,
6244                    2,
6245                    3,
6246                    4,
6247                    5,
6248                    6,
6249                    7,
6250                    8,
6251                    9,
6252                    10,
6253                    11,
6254                    12,
6255                    13,
6256                    16 + LANE2 as u32,
6257                    15
6258                ]
6259            ),
6260            15 => simd_shuffle!(
6261                a,
6262                b,
6263                [
6264                    0,
6265                    1,
6266                    2,
6267                    3,
6268                    4,
6269                    5,
6270                    6,
6271                    7,
6272                    8,
6273                    9,
6274                    10,
6275                    11,
6276                    12,
6277                    13,
6278                    14,
6279                    16 + LANE2 as u32
6280                ]
6281            ),
6282            _ => unreachable_unchecked(),
6283        }
6284    }
6285}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_s16<const LANE1: i32, const LANE2: i32>(
    a: int16x8_t,
    b: int16x8_t,
) -> int16x8_t {
    // Both lane indices must fit in 3 bits (valid lanes 0..=7 of an 8-lane vector).
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 3);
    unsafe {
        // Shuffle index space: 0..=7 selects lanes of `a`, 8..=15 selects lanes
        // of `b`. Each arm reproduces `a` unchanged except at position LANE1,
        // where index `8 + LANE2` inserts lane LANE2 of `b`. The match (rather
        // than a computed index array) keeps the shuffle mask a literal so the
        // backend selects the single `mov`/`ins` checked by assert_instr.
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            // SAFETY: `LANE1 & 0b111` is always in 0..=7, so this arm is dead.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_s32<const LANE1: i32, const LANE2: i32>(
    a: int32x4_t,
    b: int32x4_t,
) -> int32x4_t {
    // Both lane indices must fit in 2 bits (valid lanes 0..=3 of a 4-lane vector).
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 2);
    unsafe {
        // Shuffle index space: 0..=3 selects lanes of `a`, 4..=7 selects lanes
        // of `b`. Each arm copies `a` verbatim except position LANE1, where
        // index `4 + LANE2` inserts lane LANE2 of `b`.
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            // SAFETY: `LANE1 & 0b11` is always in 0..=3, so this arm is dead.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_s64<const LANE1: i32, const LANE2: i32>(
    a: int64x2_t,
    b: int64x2_t,
) -> int64x2_t {
    // Both lane indices must fit in 1 bit (valid lanes 0..=1 of a 2-lane vector).
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    unsafe {
        // Shuffle index space: 0..=1 selects lanes of `a`, 2..=3 selects lanes
        // of `b`. Index `2 + LANE2` at position LANE1 inserts b[LANE2] into a.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            // SAFETY: `LANE1 & 0b1` is always 0 or 1, so this arm is dead.
            _ => unreachable_unchecked(),
        }
    }
}
6357#[doc = "Insert vector element from another vector element"]
6358#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u8)"]
6359#[inline(always)]
6360#[target_feature(enable = "neon")]
6361#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
6362#[rustc_legacy_const_generics(1, 3)]
6363#[stable(feature = "neon_intrinsics", since = "1.59.0")]
6364pub fn vcopyq_laneq_u8<const LANE1: i32, const LANE2: i32>(
6365    a: uint8x16_t,
6366    b: uint8x16_t,
6367) -> uint8x16_t {
6368    static_assert_uimm_bits!(LANE1, 4);
6369    static_assert_uimm_bits!(LANE2, 4);
6370    unsafe {
6371        match LANE1 & 0b1111 {
6372            0 => simd_shuffle!(
6373                a,
6374                b,
6375                [
6376                    16 + LANE2 as u32,
6377                    1,
6378                    2,
6379                    3,
6380                    4,
6381                    5,
6382                    6,
6383                    7,
6384                    8,
6385                    9,
6386                    10,
6387                    11,
6388                    12,
6389                    13,
6390                    14,
6391                    15
6392                ]
6393            ),
6394            1 => simd_shuffle!(
6395                a,
6396                b,
6397                [
6398                    0,
6399                    16 + LANE2 as u32,
6400                    2,
6401                    3,
6402                    4,
6403                    5,
6404                    6,
6405                    7,
6406                    8,
6407                    9,
6408                    10,
6409                    11,
6410                    12,
6411                    13,
6412                    14,
6413                    15
6414                ]
6415            ),
6416            2 => simd_shuffle!(
6417                a,
6418                b,
6419                [
6420                    0,
6421                    1,
6422                    16 + LANE2 as u32,
6423                    3,
6424                    4,
6425                    5,
6426                    6,
6427                    7,
6428                    8,
6429                    9,
6430                    10,
6431                    11,
6432                    12,
6433                    13,
6434                    14,
6435                    15
6436                ]
6437            ),
6438            3 => simd_shuffle!(
6439                a,
6440                b,
6441                [
6442                    0,
6443                    1,
6444                    2,
6445                    16 + LANE2 as u32,
6446                    4,
6447                    5,
6448                    6,
6449                    7,
6450                    8,
6451                    9,
6452                    10,
6453                    11,
6454                    12,
6455                    13,
6456                    14,
6457                    15
6458                ]
6459            ),
6460            4 => simd_shuffle!(
6461                a,
6462                b,
6463                [
6464                    0,
6465                    1,
6466                    2,
6467                    3,
6468                    16 + LANE2 as u32,
6469                    5,
6470                    6,
6471                    7,
6472                    8,
6473                    9,
6474                    10,
6475                    11,
6476                    12,
6477                    13,
6478                    14,
6479                    15
6480                ]
6481            ),
6482            5 => simd_shuffle!(
6483                a,
6484                b,
6485                [
6486                    0,
6487                    1,
6488                    2,
6489                    3,
6490                    4,
6491                    16 + LANE2 as u32,
6492                    6,
6493                    7,
6494                    8,
6495                    9,
6496                    10,
6497                    11,
6498                    12,
6499                    13,
6500                    14,
6501                    15
6502                ]
6503            ),
6504            6 => simd_shuffle!(
6505                a,
6506                b,
6507                [
6508                    0,
6509                    1,
6510                    2,
6511                    3,
6512                    4,
6513                    5,
6514                    16 + LANE2 as u32,
6515                    7,
6516                    8,
6517                    9,
6518                    10,
6519                    11,
6520                    12,
6521                    13,
6522                    14,
6523                    15
6524                ]
6525            ),
6526            7 => simd_shuffle!(
6527                a,
6528                b,
6529                [
6530                    0,
6531                    1,
6532                    2,
6533                    3,
6534                    4,
6535                    5,
6536                    6,
6537                    16 + LANE2 as u32,
6538                    8,
6539                    9,
6540                    10,
6541                    11,
6542                    12,
6543                    13,
6544                    14,
6545                    15
6546                ]
6547            ),
6548            8 => simd_shuffle!(
6549                a,
6550                b,
6551                [
6552                    0,
6553                    1,
6554                    2,
6555                    3,
6556                    4,
6557                    5,
6558                    6,
6559                    7,
6560                    16 + LANE2 as u32,
6561                    9,
6562                    10,
6563                    11,
6564                    12,
6565                    13,
6566                    14,
6567                    15
6568                ]
6569            ),
6570            9 => simd_shuffle!(
6571                a,
6572                b,
6573                [
6574                    0,
6575                    1,
6576                    2,
6577                    3,
6578                    4,
6579                    5,
6580                    6,
6581                    7,
6582                    8,
6583                    16 + LANE2 as u32,
6584                    10,
6585                    11,
6586                    12,
6587                    13,
6588                    14,
6589                    15
6590                ]
6591            ),
6592            10 => simd_shuffle!(
6593                a,
6594                b,
6595                [
6596                    0,
6597                    1,
6598                    2,
6599                    3,
6600                    4,
6601                    5,
6602                    6,
6603                    7,
6604                    8,
6605                    9,
6606                    16 + LANE2 as u32,
6607                    11,
6608                    12,
6609                    13,
6610                    14,
6611                    15
6612                ]
6613            ),
6614            11 => simd_shuffle!(
6615                a,
6616                b,
6617                [
6618                    0,
6619                    1,
6620                    2,
6621                    3,
6622                    4,
6623                    5,
6624                    6,
6625                    7,
6626                    8,
6627                    9,
6628                    10,
6629                    16 + LANE2 as u32,
6630                    12,
6631                    13,
6632                    14,
6633                    15
6634                ]
6635            ),
6636            12 => simd_shuffle!(
6637                a,
6638                b,
6639                [
6640                    0,
6641                    1,
6642                    2,
6643                    3,
6644                    4,
6645                    5,
6646                    6,
6647                    7,
6648                    8,
6649                    9,
6650                    10,
6651                    11,
6652                    16 + LANE2 as u32,
6653                    13,
6654                    14,
6655                    15
6656                ]
6657            ),
6658            13 => simd_shuffle!(
6659                a,
6660                b,
6661                [
6662                    0,
6663                    1,
6664                    2,
6665                    3,
6666                    4,
6667                    5,
6668                    6,
6669                    7,
6670                    8,
6671                    9,
6672                    10,
6673                    11,
6674                    12,
6675                    16 + LANE2 as u32,
6676                    14,
6677                    15
6678                ]
6679            ),
6680            14 => simd_shuffle!(
6681                a,
6682                b,
6683                [
6684                    0,
6685                    1,
6686                    2,
6687                    3,
6688                    4,
6689                    5,
6690                    6,
6691                    7,
6692                    8,
6693                    9,
6694                    10,
6695                    11,
6696                    12,
6697                    13,
6698                    16 + LANE2 as u32,
6699                    15
6700                ]
6701            ),
6702            15 => simd_shuffle!(
6703                a,
6704                b,
6705                [
6706                    0,
6707                    1,
6708                    2,
6709                    3,
6710                    4,
6711                    5,
6712                    6,
6713                    7,
6714                    8,
6715                    9,
6716                    10,
6717                    11,
6718                    12,
6719                    13,
6720                    14,
6721                    16 + LANE2 as u32
6722                ]
6723            ),
6724            _ => unreachable_unchecked(),
6725        }
6726    }
6727}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_u16<const LANE1: i32, const LANE2: i32>(
    a: uint16x8_t,
    b: uint16x8_t,
) -> uint16x8_t {
    // Both lane indices must fit in 3 bits (valid lanes 0..=7 of an 8-lane vector).
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 3);
    unsafe {
        // Shuffle index space: 0..=7 selects lanes of `a`, 8..=15 selects lanes
        // of `b`. Each arm reproduces `a` unchanged except at position LANE1,
        // where index `8 + LANE2` inserts lane LANE2 of `b`.
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            // SAFETY: `LANE1 & 0b111` is always in 0..=7, so this arm is dead.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_u32<const LANE1: i32, const LANE2: i32>(
    a: uint32x4_t,
    b: uint32x4_t,
) -> uint32x4_t {
    // Both lane indices must fit in 2 bits (valid lanes 0..=3 of a 4-lane vector).
    static_assert_uimm_bits!(LANE1, 2);
    static_assert_uimm_bits!(LANE2, 2);
    unsafe {
        // Shuffle index space: 0..=3 selects lanes of `a`, 4..=7 selects lanes
        // of `b`. Index `4 + LANE2` at position LANE1 inserts b[LANE2] into a.
        match LANE1 & 0b11 {
            0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]),
            1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]),
            2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]),
            // SAFETY: `LANE1 & 0b11` is always in 0..=3, so this arm is dead.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_u64<const LANE1: i32, const LANE2: i32>(
    a: uint64x2_t,
    b: uint64x2_t,
) -> uint64x2_t {
    // Both lane indices must fit in 1 bit (valid lanes 0..=1 of a 2-lane vector).
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    unsafe {
        // Shuffle index space: 0..=1 selects lanes of `a`, 2..=3 selects lanes
        // of `b`. Index `2 + LANE2` at position LANE1 inserts b[LANE2] into a.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            // SAFETY: `LANE1 & 0b1` is always 0 or 1, so this arm is dead.
            _ => unreachable_unchecked(),
        }
    }
}
6799#[doc = "Insert vector element from another vector element"]
6800#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p8)"]
6801#[inline(always)]
6802#[target_feature(enable = "neon")]
6803#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
6804#[rustc_legacy_const_generics(1, 3)]
6805#[stable(feature = "neon_intrinsics", since = "1.59.0")]
6806pub fn vcopyq_laneq_p8<const LANE1: i32, const LANE2: i32>(
6807    a: poly8x16_t,
6808    b: poly8x16_t,
6809) -> poly8x16_t {
6810    static_assert_uimm_bits!(LANE1, 4);
6811    static_assert_uimm_bits!(LANE2, 4);
6812    unsafe {
6813        match LANE1 & 0b1111 {
6814            0 => simd_shuffle!(
6815                a,
6816                b,
6817                [
6818                    16 + LANE2 as u32,
6819                    1,
6820                    2,
6821                    3,
6822                    4,
6823                    5,
6824                    6,
6825                    7,
6826                    8,
6827                    9,
6828                    10,
6829                    11,
6830                    12,
6831                    13,
6832                    14,
6833                    15
6834                ]
6835            ),
6836            1 => simd_shuffle!(
6837                a,
6838                b,
6839                [
6840                    0,
6841                    16 + LANE2 as u32,
6842                    2,
6843                    3,
6844                    4,
6845                    5,
6846                    6,
6847                    7,
6848                    8,
6849                    9,
6850                    10,
6851                    11,
6852                    12,
6853                    13,
6854                    14,
6855                    15
6856                ]
6857            ),
6858            2 => simd_shuffle!(
6859                a,
6860                b,
6861                [
6862                    0,
6863                    1,
6864                    16 + LANE2 as u32,
6865                    3,
6866                    4,
6867                    5,
6868                    6,
6869                    7,
6870                    8,
6871                    9,
6872                    10,
6873                    11,
6874                    12,
6875                    13,
6876                    14,
6877                    15
6878                ]
6879            ),
6880            3 => simd_shuffle!(
6881                a,
6882                b,
6883                [
6884                    0,
6885                    1,
6886                    2,
6887                    16 + LANE2 as u32,
6888                    4,
6889                    5,
6890                    6,
6891                    7,
6892                    8,
6893                    9,
6894                    10,
6895                    11,
6896                    12,
6897                    13,
6898                    14,
6899                    15
6900                ]
6901            ),
6902            4 => simd_shuffle!(
6903                a,
6904                b,
6905                [
6906                    0,
6907                    1,
6908                    2,
6909                    3,
6910                    16 + LANE2 as u32,
6911                    5,
6912                    6,
6913                    7,
6914                    8,
6915                    9,
6916                    10,
6917                    11,
6918                    12,
6919                    13,
6920                    14,
6921                    15
6922                ]
6923            ),
6924            5 => simd_shuffle!(
6925                a,
6926                b,
6927                [
6928                    0,
6929                    1,
6930                    2,
6931                    3,
6932                    4,
6933                    16 + LANE2 as u32,
6934                    6,
6935                    7,
6936                    8,
6937                    9,
6938                    10,
6939                    11,
6940                    12,
6941                    13,
6942                    14,
6943                    15
6944                ]
6945            ),
6946            6 => simd_shuffle!(
6947                a,
6948                b,
6949                [
6950                    0,
6951                    1,
6952                    2,
6953                    3,
6954                    4,
6955                    5,
6956                    16 + LANE2 as u32,
6957                    7,
6958                    8,
6959                    9,
6960                    10,
6961                    11,
6962                    12,
6963                    13,
6964                    14,
6965                    15
6966                ]
6967            ),
6968            7 => simd_shuffle!(
6969                a,
6970                b,
6971                [
6972                    0,
6973                    1,
6974                    2,
6975                    3,
6976                    4,
6977                    5,
6978                    6,
6979                    16 + LANE2 as u32,
6980                    8,
6981                    9,
6982                    10,
6983                    11,
6984                    12,
6985                    13,
6986                    14,
6987                    15
6988                ]
6989            ),
6990            8 => simd_shuffle!(
6991                a,
6992                b,
6993                [
6994                    0,
6995                    1,
6996                    2,
6997                    3,
6998                    4,
6999                    5,
7000                    6,
7001                    7,
7002                    16 + LANE2 as u32,
7003                    9,
7004                    10,
7005                    11,
7006                    12,
7007                    13,
7008                    14,
7009                    15
7010                ]
7011            ),
7012            9 => simd_shuffle!(
7013                a,
7014                b,
7015                [
7016                    0,
7017                    1,
7018                    2,
7019                    3,
7020                    4,
7021                    5,
7022                    6,
7023                    7,
7024                    8,
7025                    16 + LANE2 as u32,
7026                    10,
7027                    11,
7028                    12,
7029                    13,
7030                    14,
7031                    15
7032                ]
7033            ),
7034            10 => simd_shuffle!(
7035                a,
7036                b,
7037                [
7038                    0,
7039                    1,
7040                    2,
7041                    3,
7042                    4,
7043                    5,
7044                    6,
7045                    7,
7046                    8,
7047                    9,
7048                    16 + LANE2 as u32,
7049                    11,
7050                    12,
7051                    13,
7052                    14,
7053                    15
7054                ]
7055            ),
7056            11 => simd_shuffle!(
7057                a,
7058                b,
7059                [
7060                    0,
7061                    1,
7062                    2,
7063                    3,
7064                    4,
7065                    5,
7066                    6,
7067                    7,
7068                    8,
7069                    9,
7070                    10,
7071                    16 + LANE2 as u32,
7072                    12,
7073                    13,
7074                    14,
7075                    15
7076                ]
7077            ),
7078            12 => simd_shuffle!(
7079                a,
7080                b,
7081                [
7082                    0,
7083                    1,
7084                    2,
7085                    3,
7086                    4,
7087                    5,
7088                    6,
7089                    7,
7090                    8,
7091                    9,
7092                    10,
7093                    11,
7094                    16 + LANE2 as u32,
7095                    13,
7096                    14,
7097                    15
7098                ]
7099            ),
7100            13 => simd_shuffle!(
7101                a,
7102                b,
7103                [
7104                    0,
7105                    1,
7106                    2,
7107                    3,
7108                    4,
7109                    5,
7110                    6,
7111                    7,
7112                    8,
7113                    9,
7114                    10,
7115                    11,
7116                    12,
7117                    16 + LANE2 as u32,
7118                    14,
7119                    15
7120                ]
7121            ),
7122            14 => simd_shuffle!(
7123                a,
7124                b,
7125                [
7126                    0,
7127                    1,
7128                    2,
7129                    3,
7130                    4,
7131                    5,
7132                    6,
7133                    7,
7134                    8,
7135                    9,
7136                    10,
7137                    11,
7138                    12,
7139                    13,
7140                    16 + LANE2 as u32,
7141                    15
7142                ]
7143            ),
7144            15 => simd_shuffle!(
7145                a,
7146                b,
7147                [
7148                    0,
7149                    1,
7150                    2,
7151                    3,
7152                    4,
7153                    5,
7154                    6,
7155                    7,
7156                    8,
7157                    9,
7158                    10,
7159                    11,
7160                    12,
7161                    13,
7162                    14,
7163                    16 + LANE2 as u32
7164                ]
7165            ),
7166            _ => unreachable_unchecked(),
7167        }
7168    }
7169}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_p16<const LANE1: i32, const LANE2: i32>(
    a: poly16x8_t,
    b: poly16x8_t,
) -> poly16x8_t {
    // Both lane indices must fit in 3 bits (0..=7) for these 8-lane vectors.
    static_assert_uimm_bits!(LANE1, 3);
    static_assert_uimm_bits!(LANE2, 3);
    unsafe {
        // `simd_shuffle!` requires fully-const index arrays, so every possible
        // destination lane LANE1 gets its own match arm.  In the concatenated
        // [a, b] index space, `8 + LANE2` selects lane LANE2 of `b`.
        match LANE1 & 0b111 {
            0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]),
            1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]),
            2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]),
            3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]),
            4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]),
            5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]),
            6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]),
            7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]),
            // SAFETY: LANE1 was masked with 0b111, so 0..=7 is exhaustive.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))]
#[rustc_legacy_const_generics(1, 3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcopyq_laneq_p64<const LANE1: i32, const LANE2: i32>(
    a: poly64x2_t,
    b: poly64x2_t,
) -> poly64x2_t {
    // 2-lane vectors: each lane index is a single bit (0..=1).
    static_assert_uimm_bits!(LANE1, 1);
    static_assert_uimm_bits!(LANE2, 1);
    unsafe {
        // One const-index shuffle per destination lane; `2 + LANE2` addresses
        // lane LANE2 of `b` in the concatenated [a, b] index space.
        match LANE1 & 0b1 {
            0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]),
            1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]),
            // SAFETY: LANE1 was masked with 0b1, so 0..=1 is exhaustive.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// NOTE(review): the #[doc] summary above reads like the vcopy family's text;
// vcreate builds a vector from a u64 bit pattern — confirm against the spec
// files before regenerating.
pub fn vcreate_f64(a: u64) -> float64x1_t {
    // Pure bit-level reinterpretation of the u64 as a one-lane f64 vector;
    // no instruction is expected (assert_instr(nop) above).
    unsafe { transmute(a) }
}
#[doc = "Floating-point convert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f32_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_f32_f64(a: float64x2_t) -> float32x2_t {
    // Lane-wise f64 -> f32 narrowing; lowers to FCVTN (assert_instr above).
    unsafe { simd_cast(a) }
}
#[doc = "Floating-point convert to higher precision long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f64_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_f64_f32(a: float32x2_t) -> float64x2_t {
    // Lane-wise f32 -> f64 widening; lowers to FCVTL (assert_instr above).
    unsafe { simd_cast(a) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f64_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_f64_s64(a: int64x1_t) -> float64x1_t {
    // Signed i64 -> f64 conversion; lowers to SCVTF (assert_instr above).
    unsafe { simd_cast(a) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f64_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_f64_s64(a: int64x2_t) -> float64x2_t {
    // Two-lane signed i64 -> f64 conversion; lowers to SCVTF.
    unsafe { simd_cast(a) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f64_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_f64_u64(a: uint64x1_t) -> float64x1_t {
    // Unsigned u64 -> f64 conversion; lowers to UCVTF (assert_instr above).
    unsafe { simd_cast(a) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f64_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_f64_u64(a: uint64x2_t) -> float64x2_t {
    // Two-lane unsigned u64 -> f64 conversion; lowers to UCVTF.
    unsafe { simd_cast(a) }
}
#[doc = "Floating-point convert to lower precision"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f16_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtn2))]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
// NOTE(review): this enables only `neon`, while the scalar f16 intrinsics in
// this file enable `neon,fp16` — confirm that is intentional in the spec.
pub fn vcvt_high_f16_f32(a: float16x4_t, b: float32x4_t) -> float16x8_t {
    // Narrow `b` to f16 and append it as the upper half of the result, with
    // `a` untouched in the lower half (FCVTN2 semantics per assert_instr).
    vcombine_f16(a, vcvt_f16_f32(b))
}
#[doc = "Floating-point convert to higher precision"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f32_f16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtl2))]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvt_high_f32_f16(a: float16x8_t) -> float32x4_t {
    // Widen only the upper four f16 lanes of `a` to f32 (FCVTL2 semantics).
    vcvt_f32_f16(vget_high_f16(a))
}
#[doc = "Floating-point convert to lower precision narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f32_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_high_f32_f64(a: float32x2_t, b: float64x2_t) -> float32x4_t {
    // Narrow `b` (f64x2 -> f32x2) and concatenate after `a`'s two lanes:
    // indices 0-1 pick `a`, 2-3 pick the converted `b` (FCVTN2 semantics).
    unsafe { simd_shuffle!(a, simd_cast(b), [0, 1, 2, 3]) }
}
#[doc = "Floating-point convert to higher precision long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f64_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_high_f64_f32(a: float32x4_t) -> float64x2_t {
    unsafe {
        // Extract the upper two f32 lanes (indices 2, 3)...
        let b: float32x2_t = simd_shuffle!(a, a, [2, 3]);
        // ...and widen them to f64 (FCVTL2 semantics per assert_instr).
        simd_cast(b)
    }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f64_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_n_f64_s64<const N: i32>(a: int64x1_t) -> float64x1_t {
    // N (the fixed-point fractional-bit count) must be in 1..=64.
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        // Direct binding to the LLVM fixed-point -> FP intrinsic (SCVTF #N).
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxs2fp.v1f64.v1i64"
        )]
        fn _vcvt_n_f64_s64(a: int64x1_t, n: i32) -> float64x1_t;
    }
    // SAFETY: N was range-checked at compile time above; the `neon` target
    // feature required by the intrinsic is guaranteed by #[target_feature].
    unsafe { _vcvt_n_f64_s64(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f64_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_n_f64_s64<const N: i32>(a: int64x2_t) -> float64x2_t {
    // N (the fixed-point fractional-bit count) must be in 1..=64.
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        // Two-lane variant of the fixed-point -> FP intrinsic (SCVTF #N).
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxs2fp.v2f64.v2i64"
        )]
        fn _vcvtq_n_f64_s64(a: int64x2_t, n: i32) -> float64x2_t;
    }
    // SAFETY: N was range-checked at compile time above.
    unsafe { _vcvtq_n_f64_s64(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f64_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_n_f64_u64<const N: i32>(a: uint64x1_t) -> float64x1_t {
    // N (the fixed-point fractional-bit count) must be in 1..=64.
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        // Unsigned fixed-point -> FP intrinsic (UCVTF #N).
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxu2fp.v1f64.v1i64"
        )]
        fn _vcvt_n_f64_u64(a: uint64x1_t, n: i32) -> float64x1_t;
    }
    // SAFETY: N was range-checked at compile time above.
    unsafe { _vcvt_n_f64_u64(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f64_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_n_f64_u64<const N: i32>(a: uint64x2_t) -> float64x2_t {
    // N (the fixed-point fractional-bit count) must be in 1..=64.
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        // Two-lane unsigned fixed-point -> FP intrinsic (UCVTF #N).
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxu2fp.v2f64.v2i64"
        )]
        fn _vcvtq_n_f64_u64(a: uint64x2_t, n: i32) -> float64x2_t;
    }
    // SAFETY: N was range-checked at compile time above.
    unsafe { _vcvtq_n_f64_u64(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_n_s64_f64<const N: i32>(a: float64x1_t) -> int64x1_t {
    // N (the fixed-point fractional-bit count) must be in 1..=64.
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        // FP -> signed fixed-point intrinsic (FCVTZS #N).
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxs.v1i64.v1f64"
        )]
        fn _vcvt_n_s64_f64(a: float64x1_t, n: i32) -> int64x1_t;
    }
    // SAFETY: N was range-checked at compile time above.
    unsafe { _vcvt_n_s64_f64(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_n_s64_f64<const N: i32>(a: float64x2_t) -> int64x2_t {
    // N (the fixed-point fractional-bit count) must be in 1..=64.
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        // Two-lane FP -> signed fixed-point intrinsic (FCVTZS #N).
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxs.v2i64.v2f64"
        )]
        fn _vcvtq_n_s64_f64(a: float64x2_t, n: i32) -> int64x2_t;
    }
    // SAFETY: N was range-checked at compile time above.
    unsafe { _vcvtq_n_s64_f64(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_n_u64_f64<const N: i32>(a: float64x1_t) -> uint64x1_t {
    // N (the fixed-point fractional-bit count) must be in 1..=64.
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        // FP -> unsigned fixed-point intrinsic (FCVTZU #N).
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxu.v1i64.v1f64"
        )]
        fn _vcvt_n_u64_f64(a: float64x1_t, n: i32) -> uint64x1_t;
    }
    // SAFETY: N was range-checked at compile time above.
    unsafe { _vcvt_n_u64_f64(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_n_u64_f64<const N: i32>(a: float64x2_t) -> uint64x2_t {
    // N (the fixed-point fractional-bit count) must be in 1..=64.
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        // Two-lane FP -> unsigned fixed-point intrinsic (FCVTZU #N).
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxu.v2i64.v2f64"
        )]
        fn _vcvtq_n_u64_f64(a: float64x2_t, n: i32) -> uint64x2_t;
    }
    // SAFETY: N was range-checked at compile time above.
    unsafe { _vcvtq_n_u64_f64(a, N) }
}
#[doc = "Floating-point convert to signed fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_s64_f64(a: float64x1_t) -> int64x1_t {
    unsafe extern "unadjusted" {
        // LLVM's generic saturating fptosi intrinsic; lowers to FCVTZS.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.fptosi.sat.v1i64.v1f64"
        )]
        fn _vcvt_s64_f64(a: float64x1_t) -> int64x1_t;
    }
    // SAFETY: intrinsic has no preconditions beyond the `neon` target
    // feature guaranteed by #[target_feature] above.
    unsafe { _vcvt_s64_f64(a) }
}
#[doc = "Floating-point convert to signed fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_s64_f64(a: float64x2_t) -> int64x2_t {
    unsafe extern "unadjusted" {
        // Two-lane saturating fptosi; lowers to FCVTZS.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.fptosi.sat.v2i64.v2f64"
        )]
        fn _vcvtq_s64_f64(a: float64x2_t) -> int64x2_t;
    }
    // SAFETY: no preconditions beyond the enabled `neon` target feature.
    unsafe { _vcvtq_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvt_u64_f64(a: float64x1_t) -> uint64x1_t {
    unsafe extern "unadjusted" {
        // LLVM's generic saturating fptoui intrinsic; lowers to FCVTZU.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.fptoui.sat.v1i64.v1f64"
        )]
        fn _vcvt_u64_f64(a: float64x1_t) -> uint64x1_t;
    }
    // SAFETY: no preconditions beyond the enabled `neon` target feature.
    unsafe { _vcvt_u64_f64(a) }
}
#[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtq_u64_f64(a: float64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        // Two-lane saturating fptoui; lowers to FCVTZU.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.fptoui.sat.v2i64.v2f64"
        )]
        fn _vcvtq_u64_f64(a: float64x2_t) -> uint64x2_t;
    }
    // SAFETY: no preconditions beyond the enabled `neon` target feature.
    unsafe { _vcvtq_u64_f64(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtas))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvta_s16_f16(a: float16x4_t) -> int16x4_t {
    unsafe extern "unadjusted" {
        // Binding to the FCVTAS (round-to-nearest, ties away) intrinsic.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.v4i16.v4f16"
        )]
        fn _vcvta_s16_f16(a: float16x4_t) -> int16x4_t;
    }
    // SAFETY: no preconditions beyond the enabled `neon,fp16` features.
    unsafe { _vcvta_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtas))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtaq_s16_f16(a: float16x8_t) -> int16x8_t {
    unsafe extern "unadjusted" {
        // 8-lane FCVTAS (round-to-nearest, ties away) intrinsic binding.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.v8i16.v8f16"
        )]
        fn _vcvtaq_s16_f16(a: float16x8_t) -> int16x8_t;
    }
    // SAFETY: no preconditions beyond the enabled `neon,fp16` features.
    unsafe { _vcvtaq_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtas))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvta_s32_f32(a: float32x2_t) -> int32x2_t {
    unsafe extern "unadjusted" {
        // Two-lane f32 -> i32 FCVTAS intrinsic binding.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.v2i32.v2f32"
        )]
        fn _vcvta_s32_f32(a: float32x2_t) -> int32x2_t;
    }
    // SAFETY: no preconditions beyond the enabled `neon` target feature.
    unsafe { _vcvta_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtas))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtaq_s32_f32(a: float32x4_t) -> int32x4_t {
    unsafe extern "unadjusted" {
        // Four-lane f32 -> i32 FCVTAS intrinsic binding.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.v4i32.v4f32"
        )]
        fn _vcvtaq_s32_f32(a: float32x4_t) -> int32x4_t;
    }
    // SAFETY: no preconditions beyond the enabled `neon` target feature.
    unsafe { _vcvtaq_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtas))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvta_s64_f64(a: float64x1_t) -> int64x1_t {
    unsafe extern "unadjusted" {
        // One-lane f64 -> i64 FCVTAS intrinsic binding.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.v1i64.v1f64"
        )]
        fn _vcvta_s64_f64(a: float64x1_t) -> int64x1_t;
    }
    // SAFETY: no preconditions beyond the enabled `neon` target feature.
    unsafe { _vcvta_s64_f64(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtas))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtaq_s64_f64(a: float64x2_t) -> int64x2_t {
    unsafe extern "unadjusted" {
        // Two-lane f64 -> i64 FCVTAS intrinsic binding.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.v2i64.v2f64"
        )]
        fn _vcvtaq_s64_f64(a: float64x2_t) -> int64x2_t;
    }
    // SAFETY: no preconditions beyond the enabled `neon` target feature.
    unsafe { _vcvtaq_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtau))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvta_u16_f16(a: float16x4_t) -> uint16x4_t {
    unsafe extern "unadjusted" {
        // Binding to the FCVTAU (unsigned, ties-away rounding) intrinsic.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.v4i16.v4f16"
        )]
        fn _vcvta_u16_f16(a: float16x4_t) -> uint16x4_t;
    }
    // SAFETY: no preconditions beyond the enabled `neon,fp16` features.
    unsafe { _vcvta_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtau))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtaq_u16_f16(a: float16x8_t) -> uint16x8_t {
    unsafe extern "unadjusted" {
        // 8-lane FCVTAU (unsigned, ties-away rounding) intrinsic binding.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.v8i16.v8f16"
        )]
        fn _vcvtaq_u16_f16(a: float16x8_t) -> uint16x8_t;
    }
    // SAFETY: no preconditions beyond the enabled `neon,fp16` features.
    unsafe { _vcvtaq_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtau))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvta_u32_f32(a: float32x2_t) -> uint32x2_t {
    unsafe extern "unadjusted" {
        // Two-lane f32 -> u32 FCVTAU intrinsic binding.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.v2i32.v2f32"
        )]
        fn _vcvta_u32_f32(a: float32x2_t) -> uint32x2_t;
    }
    // SAFETY: no preconditions beyond the enabled `neon` target feature.
    unsafe { _vcvta_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtau))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtaq_u32_f32(a: float32x4_t) -> uint32x4_t {
    unsafe extern "unadjusted" {
        // Four-lane f32 -> u32 FCVTAU intrinsic binding.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.v4i32.v4f32"
        )]
        fn _vcvtaq_u32_f32(a: float32x4_t) -> uint32x4_t;
    }
    // SAFETY: no preconditions beyond the enabled `neon` target feature.
    unsafe { _vcvtaq_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtau))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvta_u64_f64(a: float64x1_t) -> uint64x1_t {
    unsafe extern "unadjusted" {
        // One-lane f64 -> u64 FCVTAU intrinsic binding.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.v1i64.v1f64"
        )]
        fn _vcvta_u64_f64(a: float64x1_t) -> uint64x1_t;
    }
    // SAFETY: no preconditions beyond the enabled `neon` target feature.
    unsafe { _vcvta_u64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtau))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtaq_u64_f64(a: float64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        // Two-lane f64 -> u64 FCVTAU intrinsic binding.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.v2i64.v2f64"
        )]
        fn _vcvtaq_u64_f64(a: float64x2_t) -> uint64x2_t;
    }
    // SAFETY: no preconditions beyond the enabled `neon` target feature.
    unsafe { _vcvtaq_u64_f64(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtas))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtah_s16_f16(a: f16) -> i16 {
    // Converts via the 32-bit FCVTAS intrinsic, then truncates to i16.
    // NOTE(review): f16 can represent values up to 65504 > i16::MAX, so the
    // `as i16` cast wraps rather than saturates for such inputs — confirm
    // this matches the ACLE definition / Clang's lowering.
    vcvtah_s32_f16(a) as i16
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_s32_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtas))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtah_s32_f16(a: f16) -> i32 {
    unsafe extern "unadjusted" {
        // Scalar f16 -> i32 FCVTAS intrinsic binding.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.i32.f16"
        )]
        fn _vcvtah_s32_f16(a: f16) -> i32;
    }
    // SAFETY: no preconditions beyond the enabled `neon,fp16` features.
    unsafe { _vcvtah_s32_f16(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_s64_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtas))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtah_s64_f16(a: f16) -> i64 {
    unsafe extern "unadjusted" {
        // Scalar f16 -> i64 FCVTAS intrinsic binding.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.i64.f16"
        )]
        fn _vcvtah_s64_f16(a: f16) -> i64;
    }
    // SAFETY: no preconditions beyond the enabled `neon,fp16` features.
    unsafe { _vcvtah_s64_f16(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtau))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtah_u16_f16(a: f16) -> u16 {
    // Converts via the 32-bit FCVTAU intrinsic, then truncates to u16.
    // NOTE(review): f16 can represent values up to 65504, which still fits
    // u16::MAX (65535), but the truncating `as u16` wraps for any wider
    // intermediate — confirm against the ACLE definition.
    vcvtah_u32_f16(a) as u16
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_u32_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtau))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtah_u32_f16(a: f16) -> u32 {
    unsafe extern "unadjusted" {
        // Scalar f16 -> u32 FCVTAU intrinsic binding.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.i32.f16"
        )]
        fn _vcvtah_u32_f16(a: f16) -> u32;
    }
    // SAFETY: no preconditions beyond the enabled `neon,fp16` features.
    unsafe { _vcvtah_u32_f16(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_u64_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtau))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtah_u64_f16(a: f16) -> u64 {
    unsafe extern "unadjusted" {
        // Scalar f16 -> u64 FCVTAU intrinsic binding.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.i64.f16"
        )]
        fn _vcvtah_u64_f16(a: f16) -> u64;
    }
    // SAFETY: no preconditions beyond the enabled `neon,fp16` features.
    unsafe { _vcvtah_u64_f16(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtas_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtas))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtas_s32_f32(a: f32) -> i32 {
    unsafe extern "unadjusted" {
        // Scalar f32 -> i32 FCVTAS intrinsic binding.
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.i32.f32"
        )]
        fn _vcvtas_s32_f32(a: f32) -> i32;
    }
    // SAFETY: no preconditions beyond the enabled `neon` target feature.
    unsafe { _vcvtas_s32_f32(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtad_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtas))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtad_s64_f64(a: f64) -> i64 {
    // Declaration of the underlying LLVM intrinsic, resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtas.i64.f64"
        )]
        fn _vcvtad_s64_f64(a: f64) -> i64;
    }
    // SAFETY: the extern signature matches this function's; the intrinsic has
    // no preconditions beyond the target feature enabled above.
    unsafe { _vcvtad_s64_f64(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtas_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtau))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtas_u32_f32(a: f32) -> u32 {
    // Declaration of the underlying LLVM intrinsic, resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.i32.f32"
        )]
        fn _vcvtas_u32_f32(a: f32) -> u32;
    }
    // SAFETY: the extern signature matches this function's; the intrinsic has
    // no preconditions beyond the target feature enabled above.
    unsafe { _vcvtas_u32_f32(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtad_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtau))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtad_u64_f64(a: f64) -> u64 {
    // Declaration of the underlying LLVM intrinsic, resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtau.i64.f64"
        )]
        fn _vcvtad_u64_f64(a: f64) -> u64;
    }
    // SAFETY: the extern signature matches this function's; the intrinsic has
    // no preconditions beyond the target feature enabled above.
    unsafe { _vcvtad_u64_f64(a) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_f64_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtd_f64_s64(a: i64) -> f64 {
    // Plain numeric cast; lowered to the `scvtf` instruction asserted above.
    a as f64
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_f32_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvts_f32_s32(a: i32) -> f32 {
    // Plain numeric cast; lowered to the `scvtf` instruction asserted above.
    a as f32
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_s16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(scvtf))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_f16_s16(a: i16) -> f16 {
    // Plain numeric cast; lowered to the `scvtf` instruction asserted above.
    a as f16
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_s32)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(scvtf))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_f16_s32(a: i32) -> f16 {
    // Plain numeric cast; lowered to the `scvtf` instruction asserted above.
    a as f16
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_s64)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(scvtf))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_f16_s64(a: i64) -> f16 {
    // Plain numeric cast; lowered to the `scvtf` instruction asserted above.
    a as f16
}
#[doc = "Unsigned fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_u16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(ucvtf))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_f16_u16(a: u16) -> f16 {
    // Plain numeric cast; lowered to the `ucvtf` instruction asserted above.
    a as f16
}
#[doc = "Unsigned fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_u32)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(ucvtf))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_f16_u32(a: u32) -> f16 {
    // Plain numeric cast; lowered to the `ucvtf` instruction asserted above.
    a as f16
}
#[doc = "Unsigned fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_u64)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(ucvtf))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_f16_u64(a: u64) -> f16 {
    // Plain numeric cast; lowered to the `ucvtf` instruction asserted above.
    a as f16
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_s16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_f16_s16<const N: i32>(a: i16) -> f16 {
    // N is the number of fractional bits; the instruction accepts 1..=16.
    static_assert!(N >= 1 && N <= 16);
    // Delegate to the 32-bit variant after a value-preserving sign extension.
    vcvth_n_f16_s32::<N>(a as i32)
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_s32)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_f16_s32<const N: i32>(a: i32) -> f16 {
    // N is the number of fractional bits; the instruction accepts 1..=16.
    static_assert!(N >= 1 && N <= 16);
    // Declaration of the underlying LLVM intrinsic, resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxs2fp.f16.i32"
        )]
        fn _vcvth_n_f16_s32(a: i32, n: i32) -> f16;
    }
    // SAFETY: the extern signature matches this function's (plus the validated
    // const N); no preconditions beyond the target features enabled above.
    unsafe { _vcvth_n_f16_s32(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_s64)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_f16_s64<const N: i32>(a: i64) -> f16 {
    // N is the number of fractional bits; the instruction accepts 1..=16.
    static_assert!(N >= 1 && N <= 16);
    // Declaration of the underlying LLVM intrinsic, resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxs2fp.f16.i64"
        )]
        fn _vcvth_n_f16_s64(a: i64, n: i32) -> f16;
    }
    // SAFETY: the extern signature matches this function's (plus the validated
    // const N); no preconditions beyond the target features enabled above.
    unsafe { _vcvth_n_f16_s64(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_u16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_f16_u16<const N: i32>(a: u16) -> f16 {
    // N is the number of fractional bits; the instruction accepts 1..=16.
    static_assert!(N >= 1 && N <= 16);
    // Delegate to the 32-bit variant after a value-preserving zero extension.
    vcvth_n_f16_u32::<N>(a as u32)
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_u32)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_f16_u32<const N: i32>(a: u32) -> f16 {
    // N is the number of fractional bits; the instruction accepts 1..=16.
    static_assert!(N >= 1 && N <= 16);
    // Declaration of the underlying LLVM intrinsic, resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxu2fp.f16.i32"
        )]
        fn _vcvth_n_f16_u32(a: u32, n: i32) -> f16;
    }
    // SAFETY: the extern signature matches this function's (plus the validated
    // const N); no preconditions beyond the target features enabled above.
    unsafe { _vcvth_n_f16_u32(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_u64)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_f16_u64<const N: i32>(a: u64) -> f16 {
    // N is the number of fractional bits; the instruction accepts 1..=16.
    static_assert!(N >= 1 && N <= 16);
    // Declaration of the underlying LLVM intrinsic, resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxu2fp.f16.i64"
        )]
        fn _vcvth_n_f16_u64(a: u64, n: i32) -> f16;
    }
    // SAFETY: the extern signature matches this function's (plus the validated
    // const N); no preconditions beyond the target features enabled above.
    unsafe { _vcvth_n_f16_u64(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_s16_f16<const N: i32>(a: f16) -> i16 {
    // N is the number of fractional bits; the instruction accepts 1..=16.
    static_assert!(N >= 1 && N <= 16);
    // Convert via the 32-bit variant, then narrow (two's-complement truncation).
    vcvth_n_s32_f16::<N>(a) as i16
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_s32_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_s32_f16<const N: i32>(a: f16) -> i32 {
    // N is the number of fractional bits; the instruction accepts 1..=16.
    static_assert!(N >= 1 && N <= 16);
    // Declaration of the underlying LLVM intrinsic, resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxs.i32.f16"
        )]
        fn _vcvth_n_s32_f16(a: f16, n: i32) -> i32;
    }
    // SAFETY: the extern signature matches this function's (plus the validated
    // const N); no preconditions beyond the target features enabled above.
    unsafe { _vcvth_n_s32_f16(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_s64_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_s64_f16<const N: i32>(a: f16) -> i64 {
    // N is the number of fractional bits; the instruction accepts 1..=16.
    static_assert!(N >= 1 && N <= 16);
    // Declaration of the underlying LLVM intrinsic, resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxs.i64.f16"
        )]
        fn _vcvth_n_s64_f16(a: f16, n: i32) -> i64;
    }
    // SAFETY: the extern signature matches this function's (plus the validated
    // const N); no preconditions beyond the target features enabled above.
    unsafe { _vcvth_n_s64_f16(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_u16_f16<const N: i32>(a: f16) -> u16 {
    // N is the number of fractional bits; the instruction accepts 1..=16.
    static_assert!(N >= 1 && N <= 16);
    // Convert via the 32-bit variant, then narrow (truncating cast).
    vcvth_n_u32_f16::<N>(a) as u16
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_u32_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_u32_f16<const N: i32>(a: f16) -> u32 {
    // N is the number of fractional bits; the instruction accepts 1..=16.
    static_assert!(N >= 1 && N <= 16);
    // Declaration of the underlying LLVM intrinsic, resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxu.i32.f16"
        )]
        fn _vcvth_n_u32_f16(a: f16, n: i32) -> u32;
    }
    // SAFETY: the extern signature matches this function's (plus the validated
    // const N); no preconditions beyond the target features enabled above.
    unsafe { _vcvth_n_u32_f16(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_u64_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_n_u64_f16<const N: i32>(a: f16) -> u64 {
    // N is the number of fractional bits; the instruction accepts 1..=16.
    static_assert!(N >= 1 && N <= 16);
    // Declaration of the underlying LLVM intrinsic, resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxu.i64.f16"
        )]
        fn _vcvth_n_u64_f16(a: f16, n: i32) -> u64;
    }
    // SAFETY: the extern signature matches this function's (plus the validated
    // const N); no preconditions beyond the target features enabled above.
    unsafe { _vcvth_n_u64_f16(a, N) }
}
#[doc = "Floating-point convert to signed fixed-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtzs))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_s16_f16(a: f16) -> i16 {
    // Plain numeric cast; lowered to the `fcvtzs` instruction asserted above.
    a as i16
}
#[doc = "Floating-point convert to signed fixed-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_s32_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtzs))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_s32_f16(a: f16) -> i32 {
    // Plain numeric cast; lowered to the `fcvtzs` instruction asserted above.
    a as i32
}
#[doc = "Floating-point convert to signed fixed-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_s64_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtzs))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_s64_f16(a: f16) -> i64 {
    // Plain numeric cast; lowered to the `fcvtzs` instruction asserted above.
    a as i64
}
#[doc = "Floating-point convert to unsigned fixed-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtzu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_u16_f16(a: f16) -> u16 {
    // Plain numeric cast; lowered to the `fcvtzu` instruction asserted above.
    a as u16
}
#[doc = "Floating-point convert to unsigned fixed-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_u32_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtzu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_u32_f16(a: f16) -> u32 {
    // Plain numeric cast; lowered to the `fcvtzu` instruction asserted above.
    a as u32
}
#[doc = "Floating-point convert to unsigned fixed-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_u64_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtzu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvth_u64_f16(a: f16) -> u64 {
    // Plain numeric cast; lowered to the `fcvtzu` instruction asserted above.
    a as u64
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtms))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtm_s16_f16(a: float16x4_t) -> int16x4_t {
    // Declaration of the underlying LLVM intrinsic, resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.v4i16.v4f16"
        )]
        fn _vcvtm_s16_f16(a: float16x4_t) -> int16x4_t;
    }
    // SAFETY: the extern signature matches this function's; the intrinsic has
    // no preconditions beyond the target features enabled above.
    unsafe { _vcvtm_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtms))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtmq_s16_f16(a: float16x8_t) -> int16x8_t {
    // Declaration of the underlying LLVM intrinsic, resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.v8i16.v8f16"
        )]
        fn _vcvtmq_s16_f16(a: float16x8_t) -> int16x8_t;
    }
    // SAFETY: the extern signature matches this function's; the intrinsic has
    // no preconditions beyond the target features enabled above.
    unsafe { _vcvtmq_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtm_s32_f32(a: float32x2_t) -> int32x2_t {
    // Declaration of the underlying LLVM intrinsic, resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.v2i32.v2f32"
        )]
        fn _vcvtm_s32_f32(a: float32x2_t) -> int32x2_t;
    }
    // SAFETY: the extern signature matches this function's; the intrinsic has
    // no preconditions beyond the target feature enabled above.
    unsafe { _vcvtm_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtmq_s32_f32(a: float32x4_t) -> int32x4_t {
    // Declaration of the underlying LLVM intrinsic, resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.v4i32.v4f32"
        )]
        fn _vcvtmq_s32_f32(a: float32x4_t) -> int32x4_t;
    }
    // SAFETY: the extern signature matches this function's; the intrinsic has
    // no preconditions beyond the target feature enabled above.
    unsafe { _vcvtmq_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtm_s64_f64(a: float64x1_t) -> int64x1_t {
    // Declaration of the underlying LLVM intrinsic, resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.v1i64.v1f64"
        )]
        fn _vcvtm_s64_f64(a: float64x1_t) -> int64x1_t;
    }
    // SAFETY: the extern signature matches this function's; the intrinsic has
    // no preconditions beyond the target feature enabled above.
    unsafe { _vcvtm_s64_f64(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtmq_s64_f64(a: float64x2_t) -> int64x2_t {
    // Declaration of the underlying LLVM intrinsic, resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.v2i64.v2f64"
        )]
        fn _vcvtmq_s64_f64(a: float64x2_t) -> int64x2_t;
    }
    // SAFETY: the extern signature matches this function's; the intrinsic has
    // no preconditions beyond the target feature enabled above.
    unsafe { _vcvtmq_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtm_u16_f16(a: float16x4_t) -> uint16x4_t {
    // Declaration of the underlying LLVM intrinsic, resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.v4i16.v4f16"
        )]
        fn _vcvtm_u16_f16(a: float16x4_t) -> uint16x4_t;
    }
    // SAFETY: the extern signature matches this function's; the intrinsic has
    // no preconditions beyond the target features enabled above.
    unsafe { _vcvtm_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtmq_u16_f16(a: float16x8_t) -> uint16x8_t {
    // Declaration of the underlying LLVM intrinsic, resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.v8i16.v8f16"
        )]
        fn _vcvtmq_u16_f16(a: float16x8_t) -> uint16x8_t;
    }
    // SAFETY: the extern signature matches this function's; the intrinsic has
    // no preconditions beyond the target features enabled above.
    unsafe { _vcvtmq_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtm_u32_f32(a: float32x2_t) -> uint32x2_t {
    // Declaration of the underlying LLVM intrinsic, resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.v2i32.v2f32"
        )]
        fn _vcvtm_u32_f32(a: float32x2_t) -> uint32x2_t;
    }
    // SAFETY: the extern signature matches this function's; the intrinsic has
    // no preconditions beyond the target feature enabled above.
    unsafe { _vcvtm_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtmq_u32_f32(a: float32x4_t) -> uint32x4_t {
    // Declaration of the underlying LLVM intrinsic, resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.v4i32.v4f32"
        )]
        fn _vcvtmq_u32_f32(a: float32x4_t) -> uint32x4_t;
    }
    // SAFETY: the extern signature matches this function's; the intrinsic has
    // no preconditions beyond the target feature enabled above.
    unsafe { _vcvtmq_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtm_u64_f64(a: float64x1_t) -> uint64x1_t {
    // Declaration of the underlying LLVM intrinsic, resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.v1i64.v1f64"
        )]
        fn _vcvtm_u64_f64(a: float64x1_t) -> uint64x1_t;
    }
    // SAFETY: the extern signature matches this function's; the intrinsic has
    // no preconditions beyond the target feature enabled above.
    unsafe { _vcvtm_u64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtmq_u64_f64(a: float64x2_t) -> uint64x2_t {
    // Declaration of the underlying LLVM intrinsic, resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.v2i64.v2f64"
        )]
        fn _vcvtmq_u64_f64(a: float64x2_t) -> uint64x2_t;
    }
    // SAFETY: the extern signature matches this function's; the intrinsic has
    // no preconditions beyond the target feature enabled above.
    unsafe { _vcvtmq_u64_f64(a) }
}
#[doc = "Floating-point convert to integer, rounding towards minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtms))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtmh_s16_f16(a: f16) -> i16 {
    // Convert via the 32-bit variant, then narrow (two's-complement truncation).
    vcvtmh_s32_f16(a) as i16
}
#[doc = "Floating-point convert to integer, rounding towards minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_s32_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtms))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtmh_s32_f16(a: f16) -> i32 {
    // Declaration of the underlying LLVM intrinsic, resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.i32.f16"
        )]
        fn _vcvtmh_s32_f16(a: f16) -> i32;
    }
    // SAFETY: the extern signature matches this function's; the intrinsic has
    // no preconditions beyond the target features enabled above.
    unsafe { _vcvtmh_s32_f16(a) }
}
#[doc = "Floating-point convert to integer, rounding towards minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_s64_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtms))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtmh_s64_f16(a: f16) -> i64 {
    // Declaration of the underlying LLVM intrinsic, resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.i64.f16"
        )]
        fn _vcvtmh_s64_f16(a: f16) -> i64;
    }
    // SAFETY: the extern signature matches this function's; the intrinsic has
    // no preconditions beyond the target features enabled above.
    unsafe { _vcvtmh_s64_f16(a) }
}
#[doc = "Floating-point convert to integer, rounding towards minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtmh_u16_f16(a: f16) -> u16 {
    // Convert via the 32-bit variant, then narrow (truncating cast).
    vcvtmh_u32_f16(a) as u16
}
#[doc = "Floating-point convert to unsigned integer, rounding towards minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_u32_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtmh_u32_f16(a: f16) -> u32 {
    // Declaration of the underlying LLVM intrinsic, resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.i32.f16"
        )]
        fn _vcvtmh_u32_f16(a: f16) -> u32;
    }
    // SAFETY: the extern signature matches this function's; the intrinsic has
    // no preconditions beyond the target features enabled above.
    unsafe { _vcvtmh_u32_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding towards minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_u64_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtmu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtmh_u64_f16(a: f16) -> u64 {
    // Declaration of the underlying LLVM intrinsic, resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtmu.i64.f16"
        )]
        fn _vcvtmh_u64_f16(a: f16) -> u64;
    }
    // SAFETY: the extern signature matches this function's; the intrinsic has
    // no preconditions beyond the target features enabled above.
    unsafe { _vcvtmh_u64_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtms_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtms))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtms_s32_f32(a: f32) -> i32 {
    // Declaration of the underlying LLVM intrinsic, resolved via `link_name`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtms.i32.f32"
        )]
        fn _vcvtms_s32_f32(a: f32) -> i32;
    }
    // SAFETY: the extern signature matches this function's; the intrinsic has
    // no preconditions beyond the target feature enabled above.
    unsafe { _vcvtms_s32_f32(a) }
}
8516#[doc = "Floating-point convert to signed integer, rounding toward minus infinity"]
8517#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmd_s64_f64)"]
8518#[inline(always)]
8519#[target_feature(enable = "neon")]
8520#[cfg_attr(test, assert_instr(fcvtms))]
8521#[stable(feature = "neon_intrinsics", since = "1.59.0")]
8522pub fn vcvtmd_s64_f64(a: f64) -> i64 {
8523    unsafe extern "unadjusted" {
8524        #[cfg_attr(
8525            any(target_arch = "aarch64", target_arch = "arm64ec"),
8526            link_name = "llvm.aarch64.neon.fcvtms.i64.f64"
8527        )]
8528        fn _vcvtmd_s64_f64(a: f64) -> i64;
8529    }
8530    unsafe { _vcvtmd_s64_f64(a) }
8531}
8532#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
8533#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtms_u32_f32)"]
8534#[inline(always)]
8535#[target_feature(enable = "neon")]
8536#[cfg_attr(test, assert_instr(fcvtmu))]
8537#[stable(feature = "neon_intrinsics", since = "1.59.0")]
8538pub fn vcvtms_u32_f32(a: f32) -> u32 {
8539    unsafe extern "unadjusted" {
8540        #[cfg_attr(
8541            any(target_arch = "aarch64", target_arch = "arm64ec"),
8542            link_name = "llvm.aarch64.neon.fcvtmu.i32.f32"
8543        )]
8544        fn _vcvtms_u32_f32(a: f32) -> u32;
8545    }
8546    unsafe { _vcvtms_u32_f32(a) }
8547}
8548#[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"]
8549#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmd_u64_f64)"]
8550#[inline(always)]
8551#[target_feature(enable = "neon")]
8552#[cfg_attr(test, assert_instr(fcvtmu))]
8553#[stable(feature = "neon_intrinsics", since = "1.59.0")]
8554pub fn vcvtmd_u64_f64(a: f64) -> u64 {
8555    unsafe extern "unadjusted" {
8556        #[cfg_attr(
8557            any(target_arch = "aarch64", target_arch = "arm64ec"),
8558            link_name = "llvm.aarch64.neon.fcvtmu.i64.f64"
8559        )]
8560        fn _vcvtmd_u64_f64(a: f64) -> u64;
8561    }
8562    unsafe { _vcvtmd_u64_f64(a) }
8563}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtns))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtn_s16_f16(a: float16x4_t) -> int16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.v4i16.v4f16"
        )]
        fn _vcvtn_s16_f16(a: float16x4_t) -> int16x4_t;
    }
    // SAFETY: pure lane-wise conversion; no preconditions beyond the target features enabled above.
    unsafe { _vcvtn_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtns))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtnq_s16_f16(a: float16x8_t) -> int16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.v8i16.v8f16"
        )]
        fn _vcvtnq_s16_f16(a: float16x8_t) -> int16x8_t;
    }
    // SAFETY: pure lane-wise conversion; no preconditions beyond the target features enabled above.
    unsafe { _vcvtnq_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtn_s32_f32(a: float32x2_t) -> int32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.v2i32.v2f32"
        )]
        fn _vcvtn_s32_f32(a: float32x2_t) -> int32x2_t;
    }
    // SAFETY: pure lane-wise conversion; no preconditions beyond the target features enabled above.
    unsafe { _vcvtn_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtnq_s32_f32(a: float32x4_t) -> int32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.v4i32.v4f32"
        )]
        fn _vcvtnq_s32_f32(a: float32x4_t) -> int32x4_t;
    }
    // SAFETY: pure lane-wise conversion; no preconditions beyond the target features enabled above.
    unsafe { _vcvtnq_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtn_s64_f64(a: float64x1_t) -> int64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.v1i64.v1f64"
        )]
        fn _vcvtn_s64_f64(a: float64x1_t) -> int64x1_t;
    }
    // SAFETY: pure lane-wise conversion; no preconditions beyond the target features enabled above.
    unsafe { _vcvtn_s64_f64(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtnq_s64_f64(a: float64x2_t) -> int64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.v2i64.v2f64"
        )]
        fn _vcvtnq_s64_f64(a: float64x2_t) -> int64x2_t;
    }
    // SAFETY: pure lane-wise conversion; no preconditions beyond the target features enabled above.
    unsafe { _vcvtnq_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtn_u16_f16(a: float16x4_t) -> uint16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.v4i16.v4f16"
        )]
        fn _vcvtn_u16_f16(a: float16x4_t) -> uint16x4_t;
    }
    // SAFETY: pure lane-wise conversion; no preconditions beyond the target features enabled above.
    unsafe { _vcvtn_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtnq_u16_f16(a: float16x8_t) -> uint16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.v8i16.v8f16"
        )]
        fn _vcvtnq_u16_f16(a: float16x8_t) -> uint16x8_t;
    }
    // SAFETY: pure lane-wise conversion; no preconditions beyond the target features enabled above.
    unsafe { _vcvtnq_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtn_u32_f32(a: float32x2_t) -> uint32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.v2i32.v2f32"
        )]
        fn _vcvtn_u32_f32(a: float32x2_t) -> uint32x2_t;
    }
    // SAFETY: pure lane-wise conversion; no preconditions beyond the target features enabled above.
    unsafe { _vcvtn_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtnq_u32_f32(a: float32x4_t) -> uint32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.v4i32.v4f32"
        )]
        fn _vcvtnq_u32_f32(a: float32x4_t) -> uint32x4_t;
    }
    // SAFETY: pure lane-wise conversion; no preconditions beyond the target features enabled above.
    unsafe { _vcvtnq_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtn_u64_f64(a: float64x1_t) -> uint64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.v1i64.v1f64"
        )]
        fn _vcvtn_u64_f64(a: float64x1_t) -> uint64x1_t;
    }
    // SAFETY: pure lane-wise conversion; no preconditions beyond the target features enabled above.
    unsafe { _vcvtn_u64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtnq_u64_f64(a: float64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.v2i64.v2f64"
        )]
        fn _vcvtnq_u64_f64(a: float64x2_t) -> uint64x2_t;
    }
    // SAFETY: pure lane-wise conversion; no preconditions beyond the target features enabled above.
    unsafe { _vcvtnq_u64_f64(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtns))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtnh_s16_f16(a: f16) -> i16 {
    // Delegates to the 32-bit variant and truncates the result to 16 bits.
    vcvtnh_s32_f16(a) as i16
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_s32_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtns))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtnh_s32_f16(a: f16) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.i32.f16"
        )]
        fn _vcvtnh_s32_f16(a: f16) -> i32;
    }
    // SAFETY: pure value conversion; no preconditions beyond the target features enabled above.
    unsafe { _vcvtnh_s32_f16(a) }
}
#[doc = "Floating-point convert to integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_s64_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtns))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtnh_s64_f16(a: f16) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.i64.f16"
        )]
        fn _vcvtnh_s64_f16(a: f16) -> i64;
    }
    // SAFETY: pure value conversion; no preconditions beyond the target features enabled above.
    unsafe { _vcvtnh_s64_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtnh_u16_f16(a: f16) -> u16 {
    // Delegates to the 32-bit variant and truncates the result to 16 bits.
    vcvtnh_u32_f16(a) as u16
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_u32_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtnh_u32_f16(a: f16) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.i32.f16"
        )]
        fn _vcvtnh_u32_f16(a: f16) -> u32;
    }
    // SAFETY: pure value conversion; no preconditions beyond the target features enabled above.
    unsafe { _vcvtnh_u32_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_u64_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtnh_u64_f16(a: f16) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.i64.f16"
        )]
        fn _vcvtnh_u64_f16(a: f16) -> u64;
    }
    // SAFETY: pure value conversion; no preconditions beyond the target features enabled above.
    unsafe { _vcvtnh_u64_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtns_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtns_s32_f32(a: f32) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.i32.f32"
        )]
        fn _vcvtns_s32_f32(a: f32) -> i32;
    }
    // SAFETY: pure value conversion; no preconditions beyond the target features enabled above.
    unsafe { _vcvtns_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnd_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtns))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtnd_s64_f64(a: f64) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtns.i64.f64"
        )]
        fn _vcvtnd_s64_f64(a: f64) -> i64;
    }
    // SAFETY: pure value conversion; no preconditions beyond the target features enabled above.
    unsafe { _vcvtnd_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtns_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtns_u32_f32(a: f32) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.i32.f32"
        )]
        fn _vcvtns_u32_f32(a: f32) -> u32;
    }
    // SAFETY: pure value conversion; no preconditions beyond the target features enabled above.
    unsafe { _vcvtns_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnd_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtnu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtnd_u64_f64(a: f64) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtnu.i64.f64"
        )]
        fn _vcvtnd_u64_f64(a: f64) -> u64;
    }
    // SAFETY: pure value conversion; no preconditions beyond the target features enabled above.
    unsafe { _vcvtnd_u64_f64(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtps))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtp_s16_f16(a: float16x4_t) -> int16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.v4i16.v4f16"
        )]
        fn _vcvtp_s16_f16(a: float16x4_t) -> int16x4_t;
    }
    // SAFETY: pure lane-wise conversion; no preconditions beyond the target features enabled above.
    unsafe { _vcvtp_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtps))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtpq_s16_f16(a: float16x8_t) -> int16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.v8i16.v8f16"
        )]
        fn _vcvtpq_s16_f16(a: float16x8_t) -> int16x8_t;
    }
    // SAFETY: pure lane-wise conversion; no preconditions beyond the target features enabled above.
    unsafe { _vcvtpq_s16_f16(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtp_s32_f32(a: float32x2_t) -> int32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.v2i32.v2f32"
        )]
        fn _vcvtp_s32_f32(a: float32x2_t) -> int32x2_t;
    }
    // SAFETY: pure lane-wise conversion; no preconditions beyond the target features enabled above.
    unsafe { _vcvtp_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtpq_s32_f32(a: float32x4_t) -> int32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.v4i32.v4f32"
        )]
        fn _vcvtpq_s32_f32(a: float32x4_t) -> int32x4_t;
    }
    // SAFETY: pure lane-wise conversion; no preconditions beyond the target features enabled above.
    unsafe { _vcvtpq_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtp_s64_f64(a: float64x1_t) -> int64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.v1i64.v1f64"
        )]
        fn _vcvtp_s64_f64(a: float64x1_t) -> int64x1_t;
    }
    // SAFETY: pure lane-wise conversion; no preconditions beyond the target features enabled above.
    unsafe { _vcvtp_s64_f64(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtpq_s64_f64(a: float64x2_t) -> int64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.v2i64.v2f64"
        )]
        fn _vcvtpq_s64_f64(a: float64x2_t) -> int64x2_t;
    }
    // SAFETY: pure lane-wise conversion; no preconditions beyond the target features enabled above.
    unsafe { _vcvtpq_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtp_u16_f16(a: float16x4_t) -> uint16x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.v4i16.v4f16"
        )]
        fn _vcvtp_u16_f16(a: float16x4_t) -> uint16x4_t;
    }
    // SAFETY: pure lane-wise conversion; no preconditions beyond the target features enabled above.
    unsafe { _vcvtp_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtpq_u16_f16(a: float16x8_t) -> uint16x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.v8i16.v8f16"
        )]
        fn _vcvtpq_u16_f16(a: float16x8_t) -> uint16x8_t;
    }
    // SAFETY: pure lane-wise conversion; no preconditions beyond the target features enabled above.
    unsafe { _vcvtpq_u16_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtp_u32_f32(a: float32x2_t) -> uint32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.v2i32.v2f32"
        )]
        fn _vcvtp_u32_f32(a: float32x2_t) -> uint32x2_t;
    }
    // SAFETY: pure lane-wise conversion; no preconditions beyond the target features enabled above.
    unsafe { _vcvtp_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtpq_u32_f32(a: float32x4_t) -> uint32x4_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.v4i32.v4f32"
        )]
        fn _vcvtpq_u32_f32(a: float32x4_t) -> uint32x4_t;
    }
    // SAFETY: pure lane-wise conversion; no preconditions beyond the target features enabled above.
    unsafe { _vcvtpq_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtp_u64_f64(a: float64x1_t) -> uint64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.v1i64.v1f64"
        )]
        fn _vcvtp_u64_f64(a: float64x1_t) -> uint64x1_t;
    }
    // SAFETY: pure lane-wise conversion; no preconditions beyond the target features enabled above.
    unsafe { _vcvtp_u64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtpq_u64_f64(a: float64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.v2i64.v2f64"
        )]
        fn _vcvtpq_u64_f64(a: float64x2_t) -> uint64x2_t;
    }
    // SAFETY: pure lane-wise conversion; no preconditions beyond the target features enabled above.
    unsafe { _vcvtpq_u64_f64(a) }
}
#[doc = "Floating-point convert to integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_s16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtps))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtph_s16_f16(a: f16) -> i16 {
    // Delegates to the 32-bit variant and truncates the result to 16 bits.
    vcvtph_s32_f16(a) as i16
}
#[doc = "Floating-point convert to integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_s32_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtps))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtph_s32_f16(a: f16) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.i32.f16"
        )]
        fn _vcvtph_s32_f16(a: f16) -> i32;
    }
    // SAFETY: pure value conversion; no preconditions beyond the target features enabled above.
    unsafe { _vcvtph_s32_f16(a) }
}
#[doc = "Floating-point convert to integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_s64_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtps))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtph_s64_f16(a: f16) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.i64.f16"
        )]
        fn _vcvtph_s64_f16(a: f16) -> i64;
    }
    // SAFETY: pure value conversion; no preconditions beyond the target features enabled above.
    unsafe { _vcvtph_s64_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_u16_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtph_u16_f16(a: f16) -> u16 {
    // Delegates to the 32-bit variant and truncates the result to 16 bits.
    vcvtph_u32_f16(a) as u16
}
#[doc = "Floating-point convert to unsigned integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_u32_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtph_u32_f16(a: f16) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.i32.f16"
        )]
        fn _vcvtph_u32_f16(a: f16) -> u32;
    }
    // SAFETY: pure value conversion; no preconditions beyond the target features enabled above.
    unsafe { _vcvtph_u32_f16(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding to plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_u64_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vcvtph_u64_f16(a: f16) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.i64.f16"
        )]
        fn _vcvtph_u64_f16(a: f16) -> u64;
    }
    // SAFETY: pure value conversion; no preconditions beyond the target features enabled above.
    unsafe { _vcvtph_u64_f16(a) }
}
// Scalar f32/f64 -> integer conversions, rounding toward +infinity
// (FCVTPS for signed, FCVTPU for unsigned). Each wrapper binds the
// matching LLVM intrinsic by link name.
#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtps_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtps_s32_f32(a: f32) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.i32.f32"
        )]
        fn _vcvtps_s32_f32(a: f32) -> i32;
    }
    // SAFETY: scalar-in/scalar-out intrinsic; `neon` is enabled above.
    unsafe { _vcvtps_s32_f32(a) }
}
#[doc = "Floating-point convert to signed integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpd_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtpd_s64_f64(a: f64) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtps.i64.f64"
        )]
        fn _vcvtpd_s64_f64(a: f64) -> i64;
    }
    unsafe { _vcvtpd_s64_f64(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtps_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtps_u32_f32(a: f32) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.i32.f32"
        )]
        fn _vcvtps_u32_f32(a: f32) -> u32;
    }
    unsafe { _vcvtps_u32_f32(a) }
}
#[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpd_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtpu))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtpd_u64_f64(a: f64) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtpu.i64.f64"
        )]
        fn _vcvtpd_u64_f64(a: f64) -> u64;
    }
    unsafe { _vcvtpd_u64_f64(a) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_f32_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvts_f32_u32(a: u32) -> f32 {
    // Plain Rust cast; the `assert_instr` above checks it lowers to UCVTF.
    // NOTE(review): this relies on the `as` cast's round-to-nearest matching
    // UCVTF under the default FPCR rounding mode — confirm.
    a as f32
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_f64_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtd_f64_u64(a: u64) -> f64 {
    // Same pattern as `vcvts_f32_u32`, for the 64-bit scalar.
    a as f64
}
9278#[doc = "Fixed-point convert to floating-point"]
9279#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_f32_s32)"]
9280#[inline(always)]
9281#[target_feature(enable = "neon")]
9282#[cfg_attr(test, assert_instr(scvtf, N = 2))]
9283#[rustc_legacy_const_generics(1)]
9284#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9285pub fn vcvts_n_f32_s32<const N: i32>(a: i32) -> f32 {
9286    static_assert!(N >= 1 && N <= 64);
9287    unsafe extern "unadjusted" {
9288        #[cfg_attr(
9289            any(target_arch = "aarch64", target_arch = "arm64ec"),
9290            link_name = "llvm.aarch64.neon.vcvtfxs2fp.f32.i32"
9291        )]
9292        fn _vcvts_n_f32_s32(a: i32, n: i32) -> f32;
9293    }
9294    unsafe { _vcvts_n_f32_s32(a, N) }
9295}
// Fixed-point -> floating-point conversions with a const fractional-bits
// immediate `N` (SCVTF/UCVTF #fbits). `N` is range-checked at compile time:
// 1..=32 for 32-bit sources, 1..=64 for 64-bit sources.
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_f64_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(scvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtd_n_f64_s64<const N: i32>(a: i64) -> f64 {
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxs2fp.f64.i64"
        )]
        fn _vcvtd_n_f64_s64(a: i64, n: i32) -> f64;
    }
    // SAFETY: scalar intrinsic call; `N` was validated above.
    unsafe { _vcvtd_n_f64_s64(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_f32_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvts_n_f32_u32<const N: i32>(a: u32) -> f32 {
    static_assert!(N >= 1 && N <= 32);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxu2fp.f32.i32"
        )]
        fn _vcvts_n_f32_u32(a: u32, n: i32) -> f32;
    }
    unsafe { _vcvts_n_f32_u32(a, N) }
}
#[doc = "Fixed-point convert to floating-point"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_f64_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ucvtf, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtd_n_f64_u64<const N: i32>(a: u64) -> f64 {
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfxu2fp.f64.i64"
        )]
        fn _vcvtd_n_f64_u64(a: u64, n: i32) -> f64;
    }
    unsafe { _vcvtd_n_f64_u64(a, N) }
}
// Floating-point -> fixed-point conversions with a const fractional-bits
// immediate `N` (FCVTZS/FCVTZU #fbits, rounding toward zero). `N` is
// range-checked at compile time: 1..=32 for f32, 1..=64 for f64.
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_s32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvts_n_s32_f32<const N: i32>(a: f32) -> i32 {
    static_assert!(N >= 1 && N <= 32);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxs.i32.f32"
        )]
        fn _vcvts_n_s32_f32(a: f32, n: i32) -> i32;
    }
    // SAFETY: scalar intrinsic call; `N` was validated above.
    unsafe { _vcvts_n_s32_f32(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzs, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtd_n_s64_f64<const N: i32>(a: f64) -> i64 {
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxs.i64.f64"
        )]
        fn _vcvtd_n_s64_f64(a: f64, n: i32) -> i64;
    }
    unsafe { _vcvtd_n_s64_f64(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_u32_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvts_n_u32_f32<const N: i32>(a: f32) -> u32 {
    static_assert!(N >= 1 && N <= 32);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxu.i32.f32"
        )]
        fn _vcvts_n_u32_f32(a: f32, n: i32) -> u32;
    }
    unsafe { _vcvts_n_u32_f32(a, N) }
}
#[doc = "Floating-point convert to fixed-point, rounding toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtzu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtd_n_u64_f64<const N: i32>(a: f64) -> u64 {
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vcvtfp2fxu.i64.f64"
        )]
        fn _vcvtd_n_u64_f64(a: f64, n: i32) -> u64;
    }
    unsafe { _vcvtd_n_u64_f64(a, N) }
}
9422#[doc = "Fixed-point convert to floating-point"]
9423#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_s32_f32)"]
9424#[inline(always)]
9425#[target_feature(enable = "neon")]
9426#[cfg_attr(test, assert_instr(fcvtzs))]
9427#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9428pub fn vcvts_s32_f32(a: f32) -> i32 {
9429    a as i32
9430}
9431#[doc = "Fixed-point convert to floating-point"]
9432#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_s64_f64)"]
9433#[inline(always)]
9434#[target_feature(enable = "neon")]
9435#[cfg_attr(test, assert_instr(fcvtzs))]
9436#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9437pub fn vcvtd_s64_f64(a: f64) -> i64 {
9438    a as i64
9439}
9440#[doc = "Fixed-point convert to floating-point"]
9441#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_u32_f32)"]
9442#[inline(always)]
9443#[target_feature(enable = "neon")]
9444#[cfg_attr(test, assert_instr(fcvtzu))]
9445#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9446pub fn vcvts_u32_f32(a: f32) -> u32 {
9447    a as u32
9448}
9449#[doc = "Fixed-point convert to floating-point"]
9450#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_u64_f64)"]
9451#[inline(always)]
9452#[target_feature(enable = "neon")]
9453#[cfg_attr(test, assert_instr(fcvtzu))]
9454#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9455pub fn vcvtd_u64_f64(a: f64) -> u64 {
9456    a as u64
9457}
// FCVTXN family: narrow f64 -> f32 with round-to-odd.
#[doc = "Floating-point convert to lower precision narrow, rounding to odd"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtx_f32_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtxn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtx_f32_f64(a: float64x2_t) -> float32x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fcvtxn.v2f32.v2f64"
        )]
        fn _vcvtx_f32_f64(a: float64x2_t) -> float32x2_t;
    }
    // SAFETY: vector-in/vector-out intrinsic; `neon` is enabled above.
    unsafe { _vcvtx_f32_f64(a) }
}
#[doc = "Floating-point convert to lower precision narrow, rounding to odd"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtx_high_f32_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtxn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtx_high_f32_f64(a: float32x2_t, b: float64x2_t) -> float32x4_t {
    // Concatenates `a` (lanes 0-1) with the narrowed `b` (lanes 2-3),
    // which is exactly what FCVTXN2 writes to the upper half.
    unsafe { simd_shuffle!(a, vcvtx_f32_f64(b), [0, 1, 2, 3]) }
}
#[doc = "Floating-point convert to lower precision narrow, rounding to odd"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtxd_f32_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fcvtxn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vcvtxd_f32_f64(a: f64) -> f32 {
    // Scalar form: splat the input into a vector so the vector intrinsic
    // can be reused, then take lane 0 of the result.
    unsafe { simd_extract!(vcvtx_f32_f64(vdupq_n_f64(a)), 0) }
}
// FDIV family: lane-wise (or scalar) floating-point division via the
// portable `simd_div` operation; `assert_instr` checks it lowers to FDIV.
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdiv_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn vdiv_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // SAFETY: lane-wise division of two equally-shaped vectors.
    unsafe { simd_div(a, b) }
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn vdivq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    unsafe { simd_div(a, b) }
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdiv_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn vdiv_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    unsafe { simd_div(a, b) }
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn vdivq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    unsafe { simd_div(a, b) }
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdiv_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn vdiv_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    unsafe { simd_div(a, b) }
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn vdivq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe { simd_div(a, b) }
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn vdivh_f16(a: f16, b: f16) -> f16 {
    // Scalar f16 division; plain `/` suffices, no intrinsic binding needed.
    a / b
}
// Lane-broadcast into 1-lane f64/p64 vectors. For a single-lane source
// the broadcast is the identity; the `laneq` forms pick one lane out of a
// 2-lane source.
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdup_lane_f64<const N: i32>(a: float64x1_t) -> float64x1_t {
    // Only lane 0 exists, so the result is the input itself (hence `nop`).
    static_assert!(N == 0);
    a
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdup_lane_p64<const N: i32>(a: poly64x1_t) -> poly64x1_t {
    static_assert!(N == 0);
    a
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdup_laneq_f64<const N: i32>(a: float64x2_t) -> float64x1_t {
    // 2-lane source, so the lane index needs 1 bit (0..=1).
    static_assert_uimm_bits!(N, 1);
    // Extract lane N and rewrap the scalar as a 1-lane vector.
    unsafe { transmute::<f64, _>(simd_extract!(a, N as u32)) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdup_laneq_p64<const N: i32>(a: poly64x2_t) -> poly64x1_t {
    static_assert_uimm_bits!(N, 1);
    unsafe { transmute::<u64, _>(simd_extract!(a, N as u32)) }
}
// Scalar lane extracts for 8/16-bit element vectors. The
// `static_assert_uimm_bits!` bound matches the source lane count:
// 3 bits (0..=7) for 8-lane vectors, 4 bits (0..=15) for 16-lane vectors.
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_lane_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 4))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupb_lane_s8<const N: i32>(a: int8x8_t) -> i8 {
    static_assert_uimm_bits!(N, 3);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 4))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vduph_laneq_s16<const N: i32>(a: int16x8_t) -> i16 {
    static_assert_uimm_bits!(N, 3);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_lane_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 4))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupb_lane_u8<const N: i32>(a: uint8x8_t) -> u8 {
    static_assert_uimm_bits!(N, 3);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 4))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vduph_laneq_u16<const N: i32>(a: uint16x8_t) -> u16 {
    static_assert_uimm_bits!(N, 3);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_lane_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 4))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupb_lane_p8<const N: i32>(a: poly8x8_t) -> p8 {
    static_assert_uimm_bits!(N, 3);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 4))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vduph_laneq_p16<const N: i32>(a: poly16x8_t) -> p16 {
    static_assert_uimm_bits!(N, 3);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Extract an element from a vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 8))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupb_laneq_s8<const N: i32>(a: int8x16_t) -> i8 {
    // 16-lane source: 4-bit lane index.
    static_assert_uimm_bits!(N, 4);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Extract an element from a vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 8))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupb_laneq_u8<const N: i32>(a: uint8x16_t) -> u8 {
    static_assert_uimm_bits!(N, 4);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Extract an element from a vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 8))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupb_laneq_p8<const N: i32>(a: poly8x16_t) -> p8 {
    static_assert_uimm_bits!(N, 4);
    unsafe { simd_extract!(a, N as u32) }
}
// Scalar lane extracts from 1-lane 64-bit vectors (only lane 0 exists,
// so the operation compiles to a `nop`) and from 4-lane f16 vectors.
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupd_lane_f64<const N: i32>(a: float64x1_t) -> f64 {
    static_assert!(N == 0);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_lane_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupd_lane_s64<const N: i32>(a: int64x1_t) -> i64 {
    static_assert!(N == 0);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_lane_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupd_lane_u64<const N: i32>(a: uint64x1_t) -> u64 {
    static_assert!(N == 0);
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vduph_lane_f16<const N: i32>(a: float16x4_t) -> f16 {
    // 4-lane source: 2-bit lane index (0..=3).
    static_assert_uimm_bits!(N, 2);
    unsafe { simd_extract!(a, N as u32) }
}
9746#[doc = "Extract an element from a vector"]
9747#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_f16)"]
9748#[inline(always)]
9749#[cfg_attr(test, assert_instr(nop, N = 4))]
9750#[rustc_legacy_const_generics(1)]
9751#[target_feature(enable = "neon,fp16")]
9752#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
9753#[cfg(not(target_arch = "arm64ec"))]
9754pub fn vduph_laneq_f16<const N: i32>(a: float16x8_t) -> f16 {
9755    static_assert_uimm_bits!(N, 4);
9756    unsafe { simd_extract!(a, N as u32) }
9757}
// Lane broadcasts into 2-lane f64/p64 vectors: `simd_shuffle!` with both
// result indices set to `N` replicates that lane (lowers to DUP).
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(dup, N = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupq_lane_f64<const N: i32>(a: float64x1_t) -> float64x2_t {
    // 1-lane source: only N == 0 is valid.
    static_assert!(N == 0);
    unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(dup, N = 0))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupq_lane_p64<const N: i32>(a: poly64x1_t) -> poly64x2_t {
    static_assert!(N == 0);
    unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(dup, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupq_laneq_f64<const N: i32>(a: float64x2_t) -> float64x2_t {
    // 2-lane source: 1-bit lane index (0..=1).
    static_assert_uimm_bits!(N, 1);
    unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(dup, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupq_laneq_p64<const N: i32>(a: poly64x2_t) -> poly64x2_t {
    static_assert_uimm_bits!(N, 1);
    unsafe { simd_shuffle!(a, a, [N as u32, N as u32]) }
}
9802#[doc = "Set all vector lanes to the same value"]
9803#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_f32)"]
9804#[inline(always)]
9805#[target_feature(enable = "neon")]
9806#[cfg_attr(test, assert_instr(nop, N = 1))]
9807#[rustc_legacy_const_generics(1)]
9808#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9809pub fn vdups_lane_f32<const N: i32>(a: float32x2_t) -> f32 {
9810    static_assert_uimm_bits!(N, 1);
9811    unsafe { simd_extract!(a, N as u32) }
9812}
9813#[doc = "Set all vector lanes to the same value"]
9814#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_f64)"]
9815#[inline(always)]
9816#[target_feature(enable = "neon")]
9817#[cfg_attr(test, assert_instr(nop, N = 1))]
9818#[rustc_legacy_const_generics(1)]
9819#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9820pub fn vdupd_laneq_f64<const N: i32>(a: float64x2_t) -> f64 {
9821    static_assert_uimm_bits!(N, 1);
9822    unsafe { simd_extract!(a, N as u32) }
9823}
9824#[doc = "Set all vector lanes to the same value"]
9825#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_s32)"]
9826#[inline(always)]
9827#[target_feature(enable = "neon")]
9828#[cfg_attr(test, assert_instr(nop, N = 1))]
9829#[rustc_legacy_const_generics(1)]
9830#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9831pub fn vdups_lane_s32<const N: i32>(a: int32x2_t) -> i32 {
9832    static_assert_uimm_bits!(N, 1);
9833    unsafe { simd_extract!(a, N as u32) }
9834}
9835#[doc = "Set all vector lanes to the same value"]
9836#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_s64)"]
9837#[inline(always)]
9838#[target_feature(enable = "neon")]
9839#[cfg_attr(test, assert_instr(nop, N = 1))]
9840#[rustc_legacy_const_generics(1)]
9841#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9842pub fn vdupd_laneq_s64<const N: i32>(a: int64x2_t) -> i64 {
9843    static_assert_uimm_bits!(N, 1);
9844    unsafe { simd_extract!(a, N as u32) }
9845}
9846#[doc = "Set all vector lanes to the same value"]
9847#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_u32)"]
9848#[inline(always)]
9849#[target_feature(enable = "neon")]
9850#[cfg_attr(test, assert_instr(nop, N = 1))]
9851#[rustc_legacy_const_generics(1)]
9852#[stable(feature = "neon_intrinsics", since = "1.59.0")]
9853pub fn vdups_lane_u32<const N: i32>(a: uint32x2_t) -> u32 {
9854    static_assert_uimm_bits!(N, 1);
9855    unsafe { simd_extract!(a, N as u32) }
9856}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdupd_laneq_u64<const N: i32>(a: uint64x2_t) -> u64 {
    // Lane index must fit in 1 bit: valid lanes are 0..=1 for a 2-lane vector.
    static_assert_uimm_bits!(N, 1);
    // SAFETY: the static assert above guarantees N is an in-bounds lane index.
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdups_laneq_f32<const N: i32>(a: float32x4_t) -> f32 {
    // Lane index must fit in 2 bits: valid lanes are 0..=3 for a 4-lane vector.
    static_assert_uimm_bits!(N, 2);
    // SAFETY: the static assert above guarantees N is an in-bounds lane index.
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vduph_lane_s16<const N: i32>(a: int16x4_t) -> i16 {
    // Lane index must fit in 2 bits: valid lanes are 0..=3 for a 4-lane vector.
    static_assert_uimm_bits!(N, 2);
    // SAFETY: the static assert above guarantees N is an in-bounds lane index.
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdups_laneq_s32<const N: i32>(a: int32x4_t) -> i32 {
    // Lane index must fit in 2 bits: valid lanes are 0..=3 for a 4-lane vector.
    static_assert_uimm_bits!(N, 2);
    // SAFETY: the static assert above guarantees N is an in-bounds lane index.
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vduph_lane_u16<const N: i32>(a: uint16x4_t) -> u16 {
    // Lane index must fit in 2 bits: valid lanes are 0..=3 for a 4-lane vector.
    static_assert_uimm_bits!(N, 2);
    // SAFETY: the static assert above guarantees N is an in-bounds lane index.
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vdups_laneq_u32<const N: i32>(a: uint32x4_t) -> u32 {
    // Lane index must fit in 2 bits: valid lanes are 0..=3 for a 4-lane vector.
    static_assert_uimm_bits!(N, 2);
    // SAFETY: the static assert above guarantees N is an in-bounds lane index.
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Set all vector lanes to the same value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vduph_lane_p16<const N: i32>(a: poly16x4_t) -> p16 {
    // Lane index must fit in 2 bits: valid lanes are 0..=3 for a 4-lane vector.
    static_assert_uimm_bits!(N, 2);
    // SAFETY: the static assert above guarantees N is an in-bounds lane index.
    unsafe { simd_extract!(a, N as u32) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn veor3q_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t {
    // Three-way XOR in a single EOR3 instruction (requires the sha3 feature).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3s.v16i8"
        )]
        fn _veor3q_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t;
    }
    // SAFETY: the LLVM intrinsic is available because #[target_feature] enables sha3.
    unsafe { _veor3q_s8(a, b, c) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s16)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn veor3q_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    // Three-way XOR in a single EOR3 instruction (requires the sha3 feature).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3s.v8i16"
        )]
        fn _veor3q_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t;
    }
    // SAFETY: the LLVM intrinsic is available because #[target_feature] enables sha3.
    unsafe { _veor3q_s16(a, b, c) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s32)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn veor3q_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    // Three-way XOR in a single EOR3 instruction (requires the sha3 feature).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3s.v4i32"
        )]
        fn _veor3q_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t;
    }
    // SAFETY: the LLVM intrinsic is available because #[target_feature] enables sha3.
    unsafe { _veor3q_s32(a, b, c) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s64)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn veor3q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t {
    // Three-way XOR in a single EOR3 instruction (requires the sha3 feature).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3s.v2i64"
        )]
        fn _veor3q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t;
    }
    // SAFETY: the LLVM intrinsic is available because #[target_feature] enables sha3.
    unsafe { _veor3q_s64(a, b, c) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn veor3q_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t {
    // Three-way XOR in a single EOR3 instruction (requires the sha3 feature).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3u.v16i8"
        )]
        fn _veor3q_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t;
    }
    // SAFETY: the LLVM intrinsic is available because #[target_feature] enables sha3.
    unsafe { _veor3q_u8(a, b, c) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u16)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn veor3q_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t {
    // Three-way XOR in a single EOR3 instruction (requires the sha3 feature).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3u.v8i16"
        )]
        fn _veor3q_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t;
    }
    // SAFETY: the LLVM intrinsic is available because #[target_feature] enables sha3.
    unsafe { _veor3q_u16(a, b, c) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u32)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn veor3q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    // Three-way XOR in a single EOR3 instruction (requires the sha3 feature).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3u.v4i32"
        )]
        fn _veor3q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
    }
    // SAFETY: the LLVM intrinsic is available because #[target_feature] enables sha3.
    unsafe { _veor3q_u32(a, b, c) }
}
#[doc = "Three-way exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u64)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
#[cfg_attr(test, assert_instr(eor3))]
pub fn veor3q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
    // Three-way XOR in a single EOR3 instruction (requires the sha3 feature).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.eor3u.v2i64"
        )]
        fn _veor3q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
    }
    // SAFETY: the LLVM intrinsic is available because #[target_feature] enables sha3.
    unsafe { _veor3q_u64(a, b, c) }
}
#[doc = "Extract vector from pair of vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ext, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vextq_f64<const N: i32>(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // N must fit in 1 bit (0 or 1 for 2-lane vectors).
    static_assert_uimm_bits!(N, 1);
    unsafe {
        // N selects the starting lane within the concatenation [a0, a1, b0, b1].
        match N & 0b1 {
            0 => simd_shuffle!(a, b, [0, 1]),
            1 => simd_shuffle!(a, b, [1, 2]),
            // SAFETY: N & 0b1 can only be 0 or 1, so this arm is unreachable.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Extract vector from pair of vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ext, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vextq_p64<const N: i32>(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    // N must fit in 1 bit (0 or 1 for 2-lane vectors).
    static_assert_uimm_bits!(N, 1);
    unsafe {
        // N selects the starting lane within the concatenation [a0, a1, b0, b1].
        match N & 0b1 {
            0 => simd_shuffle!(a, b, [0, 1]),
            1 => simd_shuffle!(a, b, [1, 2]),
            // SAFETY: N & 0b1 can only be 0 or 1, so this arm is unreachable.
            _ => unreachable_unchecked(),
        }
    }
}
#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmadd))]
pub fn vfma_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t {
    // Returns b * c + a with a single rounding (fused); note the argument order
    // of simd_fma is (multiplicand, multiplier, addend).
    unsafe { simd_fma(b, c, a) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_lane_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfma_lane_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x4_t,
) -> float16x4_t {
    // LANE must fit in 2 bits: valid lanes are 0..=3 for a 4-lane vector.
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast the selected lane of c, then fuse-multiply-add with b into a.
    unsafe { vfma_f16(a, b, vdup_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_laneq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfma_laneq_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x8_t,
) -> float16x4_t {
    // LANE must fit in 3 bits: valid lanes are 0..=7 for an 8-lane vector.
    static_assert_uimm_bits!(LANE, 3);
    // Broadcast the selected lane of c, then fuse-multiply-add with b into a.
    unsafe { vfma_f16(a, b, vdup_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_lane_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmaq_lane_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x4_t,
) -> float16x8_t {
    // LANE must fit in 2 bits: valid lanes are 0..=3 for a 4-lane vector.
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast the selected lane of c, then fuse-multiply-add with b into a.
    unsafe { vfmaq_f16(a, b, vdupq_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_laneq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmaq_laneq_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x8_t,
) -> float16x8_t {
    // LANE must fit in 3 bits: valid lanes are 0..=7 for an 8-lane vector.
    static_assert_uimm_bits!(LANE, 3);
    // Broadcast the selected lane of c, then fuse-multiply-add with b into a.
    unsafe { vfmaq_f16(a, b, vdupq_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfma_lane_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x2_t,
) -> float32x2_t {
    // LANE must fit in 1 bit: valid lanes are 0..=1 for a 2-lane vector.
    static_assert_uimm_bits!(LANE, 1);
    // Broadcast the selected lane of c, then fuse-multiply-add with b into a.
    unsafe { vfma_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfma_laneq_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x4_t,
) -> float32x2_t {
    // LANE must fit in 2 bits: valid lanes are 0..=3 for a 4-lane vector.
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast the selected lane of c, then fuse-multiply-add with b into a.
    unsafe { vfma_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmaq_lane_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x2_t,
) -> float32x4_t {
    // LANE must fit in 1 bit: valid lanes are 0..=1 for a 2-lane vector.
    static_assert_uimm_bits!(LANE, 1);
    // Broadcast the selected lane of c, then fuse-multiply-add with b into a.
    unsafe { vfmaq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmaq_laneq_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x4_t,
) -> float32x4_t {
    // LANE must fit in 2 bits: valid lanes are 0..=3 for a 4-lane vector.
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast the selected lane of c, then fuse-multiply-add with b into a.
    unsafe { vfmaq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmaq_laneq_f64<const LANE: i32>(
    a: float64x2_t,
    b: float64x2_t,
    c: float64x2_t,
) -> float64x2_t {
    // LANE must fit in 1 bit: valid lanes are 0..=1 for a 2-lane vector.
    static_assert_uimm_bits!(LANE, 1);
    // Broadcast the selected lane of c, then fuse-multiply-add with b into a.
    unsafe { vfmaq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfma_lane_f64<const LANE: i32>(
    a: float64x1_t,
    b: float64x1_t,
    c: float64x1_t,
) -> float64x1_t {
    // c has a single lane, so the only valid lane index is 0.
    static_assert!(LANE == 0);
    // Broadcast the selected lane of c, then fuse-multiply-add with b into a.
    unsafe { vfma_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfma_laneq_f64<const LANE: i32>(
    a: float64x1_t,
    b: float64x1_t,
    c: float64x2_t,
) -> float64x1_t {
    // LANE must fit in 1 bit: valid lanes are 0..=1 for a 2-lane vector.
    static_assert_uimm_bits!(LANE, 1);
    // Broadcast the selected lane of c, then fuse-multiply-add with b into a.
    unsafe { vfma_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add to accumulator (by scalar)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_n_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmla))]
pub fn vfma_n_f16(a: float16x4_t, b: float16x4_t, c: f16) -> float16x4_t {
    // NOTE(review): the generated doc title said "Multiply-Subtract", but this
    // delegates to vfma_f16 (fused multiply-ADD); the spec in
    // crates/stdarch-gen-arm/spec should be fixed to match.
    // Broadcast scalar c to all lanes, then compute a + b * c (fused).
    vfma_f16(a, b, vdup_n_f16(c))
}
#[doc = "Floating-point fused Multiply-Add to accumulator (by scalar)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_n_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmla))]
pub fn vfmaq_n_f16(a: float16x8_t, b: float16x8_t, c: f16) -> float16x8_t {
    // NOTE(review): the generated doc title said "Multiply-Subtract", but this
    // delegates to vfmaq_f16 (fused multiply-ADD); the spec in
    // crates/stdarch-gen-arm/spec should be fixed to match.
    // Broadcast scalar c to all lanes, then compute a + b * c (fused).
    vfmaq_f16(a, b, vdupq_n_f16(c))
}
#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_n_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmadd))]
pub fn vfma_n_f64(a: float64x1_t, b: float64x1_t, c: f64) -> float64x1_t {
    // Broadcast scalar c to all lanes, then compute a + b * c (fused).
    vfma_f64(a, b, vdup_n_f64(c))
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmad_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmad_lane_f64<const LANE: i32>(a: f64, b: f64, c: float64x1_t) -> f64 {
    // c has a single lane, so the only valid lane index is 0.
    static_assert!(LANE == 0);
    unsafe {
        // Extract the lane to a scalar, then return b * c + a with a single rounding.
        let c: f64 = simd_extract!(c, LANE as u32);
        fmaf64(b, c, a)
    }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmah_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmadd))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmah_f16(a: f16, b: f16, c: f16) -> f16 {
    // Scalar fused multiply-add: returns b * c + a with a single rounding.
    fmaf16(b, c, a)
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmah_lane_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmah_lane_f16<const LANE: i32>(a: f16, b: f16, v: float16x4_t) -> f16 {
    // LANE must fit in 2 bits: valid lanes are 0..=3 for a 4-lane vector.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Extract the lane to a scalar, then do the scalar fused multiply-add.
        let c: f16 = simd_extract!(v, LANE as u32);
        vfmah_f16(a, b, c)
    }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmah_laneq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmah_laneq_f16<const LANE: i32>(a: f16, b: f16, v: float16x8_t) -> f16 {
    // LANE must fit in 3 bits: valid lanes are 0..=7 for an 8-lane vector.
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        // Extract the lane to a scalar, then do the scalar fused multiply-add.
        let c: f16 = simd_extract!(v, LANE as u32);
        vfmah_f16(a, b, c)
    }
}
#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmla))]
pub fn vfmaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    // Returns b * c + a with a single rounding (fused); note the argument order
    // of simd_fma is (multiplicand, multiplier, addend).
    unsafe { simd_fma(b, c, a) }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmla, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmaq_lane_f64<const LANE: i32>(
    a: float64x2_t,
    b: float64x2_t,
    c: float64x1_t,
) -> float64x2_t {
    // c has a single lane, so the only valid lane index is 0.
    static_assert!(LANE == 0);
    // Broadcast the selected lane of c, then fuse-multiply-add with b into a.
    unsafe { vfmaq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add to accumulator(vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_n_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmla))]
pub fn vfmaq_n_f64(a: float64x2_t, b: float64x2_t, c: f64) -> float64x2_t {
    // Broadcast scalar c to all lanes, then compute a + b * c (fused).
    vfmaq_f64(a, b, vdupq_n_f64(c))
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmas_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmas_lane_f32<const LANE: i32>(a: f32, b: f32, c: float32x2_t) -> f32 {
    // LANE must fit in 1 bit: valid lanes are 0..=1 for a 2-lane vector.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Extract the lane to a scalar, then return b * c + a with a single rounding.
        let c: f32 = simd_extract!(c, LANE as u32);
        fmaf32(b, c, a)
    }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmas_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmas_laneq_f32<const LANE: i32>(a: f32, b: f32, c: float32x4_t) -> f32 {
    // LANE must fit in 2 bits: valid lanes are 0..=3 for a 4-lane vector.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Extract the lane to a scalar, then return b * c + a with a single rounding.
        let c: f32 = simd_extract!(c, LANE as u32);
        fmaf32(b, c, a)
    }
}
#[doc = "Floating-point fused multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmad_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmadd, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmad_laneq_f64<const LANE: i32>(a: f64, b: f64, c: float64x2_t) -> f64 {
    // LANE must fit in 1 bit: valid lanes are 0..=1 for a 2-lane vector.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Extract the lane to a scalar, then return b * c + a with a single rounding.
        let c: f64 = simd_extract!(c, LANE as u32);
        fmaf64(b, c, a)
    }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_high_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmlal2))]
pub fn vfmlal_high_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t {
    // Widening FMA over the high halves of a and b (FMLAL2; requires FEAT_FHM).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlal2.v2f32.v4f16"
        )]
        fn _vfmlal_high_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t;
    }
    // SAFETY: the LLVM intrinsic is available because #[target_feature] enables fp16/fhm.
    unsafe { _vfmlal_high_f16(r, a, b) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_high_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmlal2))]
pub fn vfmlalq_high_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t {
    // Widening FMA over the high halves of a and b (FMLAL2; requires FEAT_FHM).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlal2.v4f32.v8f16"
        )]
        fn _vfmlalq_high_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t;
    }
    // SAFETY: the LLVM intrinsic is available because #[target_feature] enables fp16/fhm.
    unsafe { _vfmlalq_high_f16(r, a, b) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_lane_high_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlal2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlal_lane_high_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x4_t,
) -> float32x2_t {
    // LANE must fit in 2 bits: valid lanes are 0..=3 for a 4-lane vector.
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast the selected lane of b, then widening-FMA over the high halves.
    unsafe { vfmlal_high_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_laneq_high_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlal2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlal_laneq_high_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x8_t,
) -> float32x2_t {
    // LANE must fit in 3 bits: valid lanes are 0..=7 for an 8-lane vector.
    static_assert_uimm_bits!(LANE, 3);
    // Broadcast the selected lane of b, then widening-FMA over the high halves.
    unsafe { vfmlal_high_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_lane_high_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlal2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlalq_lane_high_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x4_t,
) -> float32x4_t {
    // LANE must fit in 2 bits: valid lanes are 0..=3 for a 4-lane vector.
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast the selected lane of b, then widening-FMA over the high halves.
    unsafe { vfmlalq_high_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_laneq_high_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlal2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlalq_laneq_high_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x8_t,
) -> float32x4_t {
    // LANE must fit in 3 bits: valid lanes are 0..=7 for an 8-lane vector.
    static_assert_uimm_bits!(LANE, 3);
    // Broadcast the selected lane of b, then widening-FMA over the high halves.
    unsafe { vfmlalq_high_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_lane_low_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlal, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlal_lane_low_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x4_t,
) -> float32x2_t {
    // `b` has 4 lanes, so the lane immediate is limited to 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Splat the selected lane of `b` and reuse the vector-form intrinsic.
    unsafe { vfmlal_low_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_laneq_low_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlal, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlal_laneq_low_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x8_t,
) -> float32x2_t {
    // `b` has 8 lanes, so the lane immediate is limited to 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // Splat the selected lane of `b` and reuse the vector-form intrinsic.
    unsafe { vfmlal_low_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_lane_low_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlal, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlalq_lane_low_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x4_t,
) -> float32x4_t {
    // `b` has 4 lanes, so the lane immediate is limited to 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Splat the selected lane of `b` and reuse the vector-form intrinsic.
    unsafe { vfmlalq_low_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_laneq_low_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlal, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlalq_laneq_low_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x8_t,
) -> float32x4_t {
    // `b` has 8 lanes, so the lane immediate is limited to 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // Splat the selected lane of `b` and reuse the vector-form intrinsic.
    unsafe { vfmlalq_low_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_low_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmlal))]
pub fn vfmlal_low_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t {
    // Thin forwarding shim: the local extern block carries the `link_name`
    // that binds directly to the LLVM intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlal.v2f32.v4f16"
        )]
        fn _vfmlal_low_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t;
    }
    unsafe { _vfmlal_low_f16(r, a, b) }
}
#[doc = "Floating-point fused Multiply-Add Long to accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_low_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmlal))]
pub fn vfmlalq_low_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t {
    // Thin forwarding shim around the LLVM intrinsic (128-bit q-form).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlal.v4f32.v8f16"
        )]
        fn _vfmlalq_low_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t;
    }
    unsafe { _vfmlalq_low_f16(r, a, b) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_high_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmlsl2))]
pub fn vfmlsl_high_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t {
    // Thin forwarding shim around the LLVM `fmlsl2` (high-half) intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlsl2.v2f32.v4f16"
        )]
        fn _vfmlsl_high_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t;
    }
    unsafe { _vfmlsl_high_f16(r, a, b) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_high_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmlsl2))]
pub fn vfmlslq_high_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t {
    // Thin forwarding shim around the LLVM `fmlsl2` intrinsic (q-form).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlsl2.v4f32.v8f16"
        )]
        fn _vfmlslq_high_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t;
    }
    unsafe { _vfmlslq_high_f16(r, a, b) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_lane_high_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlsl2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlsl_lane_high_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x4_t,
) -> float32x2_t {
    // `b` has 4 lanes, so the lane immediate is limited to 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Splat the selected lane of `b` and reuse the vector-form intrinsic.
    unsafe { vfmlsl_high_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_laneq_high_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlsl2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlsl_laneq_high_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x8_t,
) -> float32x2_t {
    // `b` has 8 lanes, so the lane immediate is limited to 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // Splat the selected lane of `b` and reuse the vector-form intrinsic.
    unsafe { vfmlsl_high_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_lane_high_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlsl2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlslq_lane_high_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x4_t,
) -> float32x4_t {
    // `b` has 4 lanes, so the lane immediate is limited to 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Splat the selected lane of `b` and reuse the vector-form intrinsic.
    unsafe { vfmlslq_high_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_laneq_high_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlsl2, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlslq_laneq_high_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x8_t,
) -> float32x4_t {
    // `b` has 8 lanes, so the lane immediate is limited to 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // Splat the selected lane of `b` and reuse the vector-form intrinsic.
    unsafe { vfmlslq_high_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_lane_low_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlsl, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlsl_lane_low_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x4_t,
) -> float32x2_t {
    // `b` has 4 lanes, so the lane immediate is limited to 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Splat the selected lane of `b` and reuse the vector-form intrinsic.
    unsafe { vfmlsl_low_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_laneq_low_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlsl, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlsl_laneq_low_f16<const LANE: i32>(
    r: float32x2_t,
    a: float16x4_t,
    b: float16x8_t,
) -> float32x2_t {
    // `b` has 8 lanes, so the lane immediate is limited to 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // Splat the selected lane of `b` and reuse the vector-form intrinsic.
    unsafe { vfmlsl_low_f16(r, a, vdup_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_lane_low_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlsl, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlslq_lane_low_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x4_t,
) -> float32x4_t {
    // `b` has 4 lanes, so the lane immediate is limited to 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Splat the selected lane of `b` and reuse the vector-form intrinsic.
    unsafe { vfmlslq_low_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_laneq_low_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmlsl, LANE = 0))]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmlslq_laneq_low_f16<const LANE: i32>(
    r: float32x4_t,
    a: float16x8_t,
    b: float16x8_t,
) -> float32x4_t {
    // `b` has 8 lanes, so the lane immediate is limited to 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // Splat the selected lane of `b` and reuse the vector-form intrinsic.
    unsafe { vfmlslq_low_f16(r, a, vdupq_n_f16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_low_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmlsl))]
pub fn vfmlsl_low_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t {
    // Thin forwarding shim around the LLVM `fmlsl` (low-half) intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlsl.v2f32.v4f16"
        )]
        fn _vfmlsl_low_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32x2_t;
    }
    unsafe { _vfmlsl_low_f16(r, a, b) }
}
#[doc = "Floating-point fused Multiply-Subtract Long from accumulator (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_low_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmlsl))]
pub fn vfmlslq_low_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t {
    // Thin forwarding shim around the LLVM `fmlsl` intrinsic (q-form).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmlsl.v4f32.v8f16"
        )]
        fn _vfmlslq_low_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float32x4_t;
    }
    unsafe { _vfmlslq_low_f16(r, a, b) }
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfms_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t {
    // fms(a, b, c) = a - b * c, computed as fma(a, -b, c).
    unsafe {
        let b: float64x1_t = simd_neg(b);
        vfma_f64(a, b, c)
    }
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_lane_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfms_lane_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x4_t,
) -> float16x4_t {
    // `c` has 4 lanes, so the lane immediate is limited to 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Splat the selected lane of `c` and reuse the vector-form vfms.
    unsafe { vfms_f16(a, b, vdup_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_laneq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfms_laneq_f16<const LANE: i32>(
    a: float16x4_t,
    b: float16x4_t,
    c: float16x8_t,
) -> float16x4_t {
    // `c` has 8 lanes, so the lane immediate is limited to 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // Splat the selected lane of `c` and reuse the vector-form vfms.
    unsafe { vfms_f16(a, b, vdup_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_lane_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmsq_lane_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x4_t,
) -> float16x8_t {
    // `c` has 4 lanes, so the lane immediate is limited to 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Splat the selected lane of `c` and reuse the vector-form vfmsq.
    unsafe { vfmsq_f16(a, b, vdupq_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_laneq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmsq_laneq_f16<const LANE: i32>(
    a: float16x8_t,
    b: float16x8_t,
    c: float16x8_t,
) -> float16x8_t {
    // `c` has 8 lanes, so the lane immediate is limited to 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    // Splat the selected lane of `c` and reuse the vector-form vfmsq.
    unsafe { vfmsq_f16(a, b, vdupq_n_f16(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfms_lane_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x2_t,
) -> float32x2_t {
    // `c` has 2 lanes, so the lane immediate is limited to 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // Splat the selected lane of `c` and reuse the vector-form vfms.
    unsafe { vfms_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfms_laneq_f32<const LANE: i32>(
    a: float32x2_t,
    b: float32x2_t,
    c: float32x4_t,
) -> float32x2_t {
    // `c` has 4 lanes, so the lane immediate is limited to 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Splat the selected lane of `c` and reuse the vector-form vfms.
    unsafe { vfms_f32(a, b, vdup_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsq_lane_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x2_t,
) -> float32x4_t {
    // `c` has 2 lanes, so the lane immediate is limited to 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // Splat the selected lane of `c` and reuse the vector-form vfmsq.
    unsafe { vfmsq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsq_laneq_f32<const LANE: i32>(
    a: float32x4_t,
    b: float32x4_t,
    c: float32x4_t,
) -> float32x4_t {
    // `c` has 4 lanes, so the lane immediate is limited to 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    // Splat the selected lane of `c` and reuse the vector-form vfmsq.
    unsafe { vfmsq_f32(a, b, vdupq_n_f32(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsq_laneq_f64<const LANE: i32>(
    a: float64x2_t,
    b: float64x2_t,
    c: float64x2_t,
) -> float64x2_t {
    // `c` has 2 lanes, so the lane immediate is limited to 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // Splat the selected lane of `c` and reuse the vector-form vfmsq.
    unsafe { vfmsq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfms_lane_f64<const LANE: i32>(
    a: float64x1_t,
    b: float64x1_t,
    c: float64x1_t,
) -> float64x1_t {
    // `c` is a single-lane vector: only lane 0 exists.
    static_assert!(LANE == 0);
    // Splat the selected lane of `c` and reuse the vector-form vfms.
    unsafe { vfms_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfms_laneq_f64<const LANE: i32>(
    a: float64x1_t,
    b: float64x1_t,
    c: float64x2_t,
) -> float64x1_t {
    // `c` has 2 lanes, so the lane immediate is limited to 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // Splat the selected lane of `c` and reuse the vector-form vfms.
    unsafe { vfms_f64(a, b, vdup_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-Subtract from accumulator."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_n_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmls))]
pub fn vfms_n_f16(a: float16x4_t, b: float16x4_t, c: f16) -> float16x4_t {
    // Broadcast the scalar `c` to every lane, then reuse the vector form.
    vfms_f16(a, b, vdup_n_f16(c))
}
#[doc = "Floating-point fused Multiply-Subtract from accumulator."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_n_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmls))]
pub fn vfmsq_n_f16(a: float16x8_t, b: float16x8_t, c: f16) -> float16x8_t {
    // Broadcast the scalar `c` to every lane, then reuse the vector form.
    vfmsq_f16(a, b, vdupq_n_f16(c))
}
#[doc = "Floating-point fused Multiply-subtract to accumulator(vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_n_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfms_n_f64(a: float64x1_t, b: float64x1_t, c: f64) -> float64x1_t {
    // Broadcast the scalar `c` into the (single-lane) vector and reuse vfms.
    vfms_f64(a, b, vdup_n_f64(c))
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmsub))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmsh_f16(a: f16, b: f16, c: f16) -> f16 {
    // Scalar fms: a - b * c, computed as fma(a, -b, c).
    vfmah_f16(a, -b, c)
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsh_lane_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmsh_lane_f16<const LANE: i32>(a: f16, b: f16, v: float16x4_t) -> f16 {
    // `v` has 4 lanes, so the lane immediate is limited to 2 bits (0..=3).
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Extract the multiplier lane, then fall through to the scalar fms.
        let c: f16 = simd_extract!(v, LANE as u32);
        vfmsh_f16(a, b, c)
    }
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsh_laneq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vfmsh_laneq_f16<const LANE: i32>(a: f16, b: f16, v: float16x8_t) -> f16 {
    // `v` has 8 lanes, so the lane immediate is limited to 3 bits (0..=7).
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        // Extract the multiplier lane, then fall through to the scalar fms.
        let c: f16 = simd_extract!(v, LANE as u32);
        vfmsh_f16(a, b, c)
    }
}
#[doc = "Floating-point fused multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    // fms(a, b, c) = a - b * c, computed as fma(a, -b, c) lane-wise.
    unsafe {
        let b: float64x2_t = simd_neg(b);
        vfmaq_f64(a, b, c)
    }
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsq_lane_f64<const LANE: i32>(
    a: float64x2_t,
    b: float64x2_t,
    c: float64x1_t,
) -> float64x2_t {
    // `c` is a single-lane vector: only lane 0 exists.
    static_assert!(LANE == 0);
    // Splat the selected lane of `c` and reuse the vector-form vfmsq.
    unsafe { vfmsq_f64(a, b, vdupq_n_f64(simd_extract!(c, LANE as u32))) }
}
#[doc = "Floating-point fused Multiply-subtract to accumulator(vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_n_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmls))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsq_n_f64(a: float64x2_t, b: float64x2_t, c: f64) -> float64x2_t {
    // Broadcast the scalar `c` to both lanes, then reuse the vector form.
    vfmsq_f64(a, b, vdupq_n_f64(c))
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmss_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmss_lane_f32<const LANE: i32>(a: f32, b: f32, c: float32x2_t) -> f32 {
    // fms = fma with `b` negated; the fma-by-lane form checks LANE's range.
    vfmas_lane_f32::<LANE>(a, -b, c)
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmss_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmss_laneq_f32<const LANE: i32>(a: f32, b: f32, c: float32x4_t) -> f32 {
    // fms = fma with `b` negated; the fma-by-lane form checks LANE's range.
    vfmas_laneq_f32::<LANE>(a, -b, c)
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsd_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsd_lane_f64<const LANE: i32>(a: f64, b: f64, c: float64x1_t) -> f64 {
    // fms = fma with `b` negated; the fma-by-lane form checks LANE's range.
    vfmad_lane_f64::<LANE>(a, -b, c)
}
#[doc = "Floating-point fused multiply-subtract to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsd_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmsub, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vfmsd_laneq_f64<const LANE: i32>(a: f64, b: f64, c: float64x2_t) -> f64 {
    // fms = fma with `b` negated; the fma-by-lane form checks LANE's range.
    vfmad_laneq_f64::<LANE>(a, -b, c)
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(test, assert_instr(ldr))]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub unsafe fn vld1_f16(ptr: *const f16) -> float16x4_t {
    // Unaligned 64-bit load; caller must guarantee `ptr` is valid for
    // reading 4 consecutive f16 values (no alignment requirement).
    crate::ptr::read_unaligned(ptr.cast())
}
11186#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
11187#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f16)"]
11188#[doc = "## Safety"]
11189#[doc = "  * Neon intrinsic unsafe"]
11190#[inline(always)]
11191#[target_feature(enable = "neon,fp16")]
11192#[cfg_attr(test, assert_instr(ldr))]
11193#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
11194#[cfg(not(target_arch = "arm64ec"))]
11195pub unsafe fn vld1q_f16(ptr: *const f16) -> float16x8_t {
11196    crate::ptr::read_unaligned(ptr.cast())
11197}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_f32(ptr: *const f32) -> float32x2_t {
    // Whole-vector unaligned read through the cast pointer; assert_instr
    // checks this lowers to a plain `ldr`.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_f32(ptr: *const f32) -> float32x4_t {
    // 128-bit variant; same unaligned whole-vector read.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_f64(ptr: *const f64) -> float64x1_t {
    // One-lane f64 vector; still an unaligned read of the full vector width.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_f64(ptr: *const f64) -> float64x2_t {
    // 128-bit variant; same unaligned whole-vector read.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_s8(ptr: *const i8) -> int8x8_t {
    // Whole-vector unaligned read through the cast pointer; assert_instr
    // checks this lowers to a plain `ldr`. The same pattern is repeated for
    // every element type / vector width below.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_s8(ptr: *const i8) -> int8x16_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_s16(ptr: *const i16) -> int16x4_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_s16(ptr: *const i16) -> int16x8_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_s32(ptr: *const i32) -> int32x2_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_s32(ptr: *const i32) -> int32x4_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_s64(ptr: *const i64) -> int64x1_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_s64(ptr: *const i64) -> int64x2_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_u8(ptr: *const u8) -> uint8x8_t {
    // Unsigned counterparts of the vld1_s* loads above: a whole-vector
    // unaligned read through the cast pointer, lowering to `ldr`.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_u8(ptr: *const u8) -> uint8x16_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_u16(ptr: *const u16) -> uint16x4_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_u16(ptr: *const u16) -> uint16x8_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_u32(ptr: *const u32) -> uint32x2_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_u32(ptr: *const u32) -> uint32x4_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_u64(ptr: *const u64) -> uint64x1_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_u64(ptr: *const u64) -> uint64x2_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_p8(ptr: *const p8) -> poly8x8_t {
    // Polynomial-element variants of the vld1 loads: same whole-vector
    // unaligned read through the cast pointer, lowering to `ldr`.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_p8(ptr: *const p8) -> poly8x16_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_p16(ptr: *const p16) -> poly16x4_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_p16(ptr: *const p16) -> poly16x8_t {
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1_p64(ptr: *const p64) -> poly64x1_t {
    // p64 loads additionally require the `aes` feature (see target_feature
    // above); otherwise identical to the other vld1 unaligned reads.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ldr))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld1q_p64(ptr: *const p64) -> poly64x2_t {
    // 128-bit variant; same aes-gated unaligned whole-vector read.
    crate::ptr::read_unaligned(ptr.cast())
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld1))]
pub unsafe fn vld1_f64_x2(a: *const f64) -> float64x1x2_t {
    // Direct binding to the LLVM `ld1x2` intrinsic (see link_name below):
    // loads two consecutive f64 vectors into a register pair.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld1x2.v1f64.p0"
        )]
        fn _vld1_f64_x2(a: *const f64) -> float64x1x2_t;
    }
    _vld1_f64_x2(a)
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64_x3)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld1))]
pub unsafe fn vld1_f64_x3(a: *const f64) -> float64x1x3_t {
    // LLVM `ld1x3` binding: three consecutive vectors.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld1x3.v1f64.p0"
        )]
        fn _vld1_f64_x3(a: *const f64) -> float64x1x3_t;
    }
    _vld1_f64_x3(a)
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64_x4)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld1))]
pub unsafe fn vld1_f64_x4(a: *const f64) -> float64x1x4_t {
    // LLVM `ld1x4` binding: four consecutive vectors.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld1x4.v1f64.p0"
        )]
        fn _vld1_f64_x4(a: *const f64) -> float64x1x4_t;
    }
    _vld1_f64_x4(a)
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld1))]
pub unsafe fn vld1q_f64_x2(a: *const f64) -> float64x2x2_t {
    // 128-bit (v2f64) variant of the `ld1x2` binding.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld1x2.v2f64.p0"
        )]
        fn _vld1q_f64_x2(a: *const f64) -> float64x2x2_t;
    }
    _vld1q_f64_x2(a)
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x3)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld1))]
pub unsafe fn vld1q_f64_x3(a: *const f64) -> float64x2x3_t {
    // 128-bit (v2f64) variant of the `ld1x3` binding.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld1x3.v2f64.p0"
        )]
        fn _vld1q_f64_x3(a: *const f64) -> float64x2x3_t;
    }
    _vld1q_f64_x3(a)
}
#[doc = "Load multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x4)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld1))]
pub unsafe fn vld1q_f64_x4(a: *const f64) -> float64x2x4_t {
    // 128-bit (v2f64) variant of the `ld1x4` binding.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld1x4.v2f64.p0"
        )]
        fn _vld1q_f64_x4(a: *const f64) -> float64x2x4_t;
    }
    _vld1q_f64_x4(a)
}
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2_dup_f64(a: *const f64) -> float64x1x2_t {
    // Direct binding to the LLVM `ld2r` (load-and-replicate) intrinsic; the
    // pointer is passed through with an `as _` cast to the declared type.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2r.v1f64.p0"
        )]
        fn _vld2_dup_f64(ptr: *const f64) -> float64x1x2_t;
    }
    _vld2_dup_f64(a as _)
}
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2q_dup_f64(a: *const f64) -> float64x2x2_t {
    // 128-bit (v2f64) variant of the `ld2r` binding.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2r.v2f64.p0"
        )]
        fn _vld2q_dup_f64(ptr: *const f64) -> float64x2x2_t;
    }
    _vld2q_dup_f64(a as _)
}
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2q_dup_s64(a: *const i64) -> int64x2x2_t {
    // i64 (v2i64) variant of the `ld2r` binding; the u64/p64 intrinsics
    // later in this file delegate to this one via transmute.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2r.v2i64.p0"
        )]
        fn _vld2q_dup_s64(ptr: *const i64) -> int64x2x2_t;
    }
    _vld2q_dup_s64(a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub unsafe fn vld2_f64(a: *const f64) -> float64x1x2_t {
    // Direct binding to the LLVM `ld2.v1f64` intrinsic.
    // NOTE(review): assert_instr checks `nop` rather than `ld2` — presumably
    // the one-lane form needs no dedicated ld2 instruction; confirm against
    // generated codegen if this ever changes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2.v1f64.p0"
        )]
        fn _vld2_f64(ptr: *const float64x1_t) -> float64x1x2_t;
    }
    _vld2_f64(a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_lane_f64<const LANE: i32>(a: *const f64, b: float64x1x2_t) -> float64x1x2_t {
    // The x1 vector has a single lane, so LANE must be 0 (compile-time check).
    static_assert!(LANE == 0);
    // The LLVM lane-load intrinsic takes the two vectors of the tuple as
    // separate arguments, the lane index as i64, and an untyped byte pointer.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2lane.v1f64.p0"
        )]
        fn _vld2_lane_f64(a: float64x1_t, b: float64x1_t, n: i64, ptr: *const i8) -> float64x1x2_t;
    }
    _vld2_lane_f64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_lane_s64<const LANE: i32>(a: *const i64, b: int64x1x2_t) -> int64x1x2_t {
    // Same shape as vld2_lane_f64, for i64 elements; the u64/p64 intrinsics
    // below delegate here via transmute.
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2lane.v1i64.p0"
        )]
        fn _vld2_lane_s64(a: int64x1_t, b: int64x1_t, n: i64, ptr: *const i8) -> int64x1x2_t;
    }
    _vld2_lane_s64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_lane_p64<const LANE: i32>(a: *const p64, b: poly64x1x2_t) -> poly64x1x2_t {
    // Single-lane vector: LANE must be 0 (compile-time check).
    static_assert!(LANE == 0);
    // Delegates to the s64 implementation via transmute; p64 shares the same
    // 64-bit element representation. Note the extra `aes` feature gate above.
    transmute(vld2_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2_lane_u64<const LANE: i32>(a: *const u64, b: uint64x1x2_t) -> uint64x1x2_t {
    // Same delegation pattern as vld2_lane_p64, for u64 elements.
    static_assert!(LANE == 0);
    transmute(vld2_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2q_dup_p64(a: *const p64) -> poly64x2x2_t {
    // Little-endian: the s64 result can be reinterpreted as-is.
    transmute(vld2q_dup_s64(transmute(a)))
}
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2q_dup_p64(a: *const p64) -> poly64x2x2_t {
    // Big-endian: after reinterpreting, swap the two lanes of each returned
    // vector ([1, 0] shuffle) to account for big-endian lane ordering.
    let mut ret_val: poly64x2x2_t = transmute(vld2q_dup_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val
}
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2q_dup_u64(a: *const u64) -> uint64x2x2_t {
    // Little-endian u64 variant; plain reinterpretation of the s64 result.
    transmute(vld2q_dup_s64(transmute(a)))
}
#[doc = "Load single 2-element structure and replicate to all lanes of two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2r))]
pub unsafe fn vld2q_dup_u64(a: *const u64) -> uint64x2x2_t {
    // Big-endian u64 variant; same [1, 0] lane swap as the p64 version above.
    let mut ret_val: uint64x2x2_t = transmute(vld2q_dup_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2))]
pub unsafe fn vld2q_f64(a: *const f64) -> float64x2x2_t {
    // Direct binding to the LLVM `ld2.v2f64` intrinsic; deinterleaves two
    // registers' worth of 2-element structures from memory.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2.v2f64.p0"
        )]
        fn _vld2q_f64(ptr: *const float64x2_t) -> float64x2x2_t;
    }
    _vld2q_f64(a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2))]
pub unsafe fn vld2q_s64(a: *const i64) -> int64x2x2_t {
    // i64 (v2i64) variant of the `ld2` binding.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2.v2i64.p0"
        )]
        fn _vld2q_s64(ptr: *const int64x2_t) -> int64x2x2_t;
    }
    _vld2q_s64(a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_f64<const LANE: i32>(a: *const f64, b: float64x2x2_t) -> float64x2x2_t {
    // Two-lane vector: LANE must fit in 1 bit, i.e. 0 or 1 (compile-time check).
    static_assert_uimm_bits!(LANE, 1);
    // The LLVM lane-load intrinsic takes the two tuple vectors separately,
    // the lane index as i64, and an untyped byte pointer.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2lane.v2f64.p0"
        )]
        fn _vld2q_lane_f64(a: float64x2_t, b: float64x2_t, n: i64, ptr: *const i8)
            -> float64x2x2_t;
    }
    _vld2q_lane_f64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_s8<const LANE: i32>(a: *const i8, b: int8x16x2_t) -> int8x16x2_t {
    // int8x16_t has 16 lanes, so LANE must fit in 4 bits (0..=15).
    static_assert_uimm_bits!(LANE, 4);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2lane.v16i8.p0"
        )]
        fn _vld2q_lane_s8(a: int8x16_t, b: int8x16_t, n: i64, ptr: *const i8) -> int8x16x2_t;
    }
    // Unpack the two source vectors, pass the lane index and data pointer through.
    _vld2q_lane_s8(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_s64<const LANE: i32>(a: *const i64, b: int64x2x2_t) -> int64x2x2_t {
    // int64x2_t has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld2lane.v2i64.p0"
        )]
        fn _vld2q_lane_s64(a: int64x2_t, b: int64x2_t, n: i64, ptr: *const i8) -> int64x2x2_t;
    }
    _vld2q_lane_s64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_p64<const LANE: i32>(a: *const p64, b: poly64x2x2_t) -> poly64x2x2_t {
    static_assert_uimm_bits!(LANE, 1);
    // Polynomial variant: bit-reinterpret to the signed intrinsic, which does
    // the actual load, then reinterpret the result back.
    transmute(vld2q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_u8<const LANE: i32>(a: *const u8, b: uint8x16x2_t) -> uint8x16x2_t {
    static_assert_uimm_bits!(LANE, 4);
    // Unsigned variant: delegate to the signed intrinsic via bit-reinterpretation.
    transmute(vld2q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_u64<const LANE: i32>(a: *const u64, b: uint64x2x2_t) -> uint64x2x2_t {
    static_assert_uimm_bits!(LANE, 1);
    // Unsigned variant: delegate to the signed intrinsic via bit-reinterpretation.
    transmute(vld2q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld2q_lane_p8<const LANE: i32>(a: *const p8, b: poly8x16x2_t) -> poly8x16x2_t {
    static_assert_uimm_bits!(LANE, 4);
    // Polynomial variant: delegate to the signed intrinsic via bit-reinterpretation.
    transmute(vld2q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2))]
pub unsafe fn vld2q_p64(a: *const p64) -> poly64x2x2_t {
    // Little-endian: the signed load already yields the right lane order,
    // so only a bit-reinterpretation is needed.
    transmute(vld2q_s64(transmute(a)))
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "big")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2))]
pub unsafe fn vld2q_p64(a: *const p64) -> poly64x2x2_t {
    let mut ret_val: poly64x2x2_t = transmute(vld2q_s64(transmute(a)));
    // Big-endian build: reverse each 2-lane vector with a [1, 0] shuffle so
    // the result matches the little-endian lane numbering.
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2))]
pub unsafe fn vld2q_u64(a: *const u64) -> uint64x2x2_t {
    // Little-endian: plain bit-reinterpretation of the signed load.
    transmute(vld2q_s64(transmute(a)))
}
#[doc = "Load multiple 2-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld2))]
pub unsafe fn vld2q_u64(a: *const u64) -> uint64x2x2_t {
    let mut ret_val: uint64x2x2_t = transmute(vld2q_s64(transmute(a)));
    // Big-endian build: reverse each 2-lane vector with a [1, 0] shuffle so
    // the result matches the little-endian lane numbering.
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val
}
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3_dup_f64(a: *const f64) -> float64x1x3_t {
    // Thin wrapper over the LLVM `ld3r` (load-and-replicate) intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3r.v1f64.p0"
        )]
        fn _vld3_dup_f64(ptr: *const f64) -> float64x1x3_t;
    }
    _vld3_dup_f64(a as _)
}
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3q_dup_f64(a: *const f64) -> float64x2x3_t {
    // Thin wrapper over the LLVM `ld3r` (load-and-replicate) intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3r.v2f64.p0"
        )]
        fn _vld3q_dup_f64(ptr: *const f64) -> float64x2x3_t;
    }
    _vld3q_dup_f64(a as _)
}
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3q_dup_s64(a: *const i64) -> int64x2x3_t {
    // Thin wrapper over the LLVM `ld3r` (load-and-replicate) intrinsic; this is
    // also the load the u64/p64 dup variants delegate to via transmute.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3r.v2i64.p0"
        )]
        fn _vld3q_dup_s64(ptr: *const i64) -> int64x2x3_t;
    }
    _vld3q_dup_s64(a as _)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub unsafe fn vld3_f64(a: *const f64) -> float64x1x3_t {
    // Single-lane (v1f64) case; note the test asserts `nop`, not `ld3` —
    // with one lane per vector no interleaving instruction is required.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3.v1f64.p0"
        )]
        fn _vld3_f64(ptr: *const float64x1_t) -> float64x1x3_t;
    }
    _vld3_f64(a as _)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_lane_f64<const LANE: i32>(a: *const f64, b: float64x1x3_t) -> float64x1x3_t {
    // float64x1_t has a single lane, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3lane.v1f64.p0"
        )]
        fn _vld3_lane_f64(
            a: float64x1_t,
            b: float64x1_t,
            c: float64x1_t,
            n: i64,
            ptr: *const i8,
        ) -> float64x1x3_t;
    }
    // Unpack the three source vectors, pass the lane index and data pointer through.
    _vld3_lane_f64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_lane_p64<const LANE: i32>(a: *const p64, b: poly64x1x3_t) -> poly64x1x3_t {
    static_assert!(LANE == 0);
    // Polynomial variant: delegate to the signed intrinsic via bit-reinterpretation.
    transmute(vld3_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 3-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_lane_s64<const LANE: i32>(a: *const i64, b: int64x1x3_t) -> int64x1x3_t {
    // int64x1_t has a single lane, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3lane.v1i64.p0"
        )]
        fn _vld3_lane_s64(
            a: int64x1_t,
            b: int64x1_t,
            c: int64x1_t,
            n: i64,
            ptr: *const i8,
        ) -> int64x1x3_t;
    }
    // The u64/p64 lane variants delegate here via transmute.
    _vld3_lane_s64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3_lane_u64<const LANE: i32>(a: *const u64, b: uint64x1x3_t) -> uint64x1x3_t {
    static_assert!(LANE == 0);
    // Unsigned variant: delegate to the signed intrinsic via bit-reinterpretation.
    transmute(vld3_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3q_dup_p64(a: *const p64) -> poly64x2x3_t {
    // Little-endian: plain bit-reinterpretation of the signed load-and-replicate.
    transmute(vld3q_dup_s64(transmute(a)))
}
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3q_dup_p64(a: *const p64) -> poly64x2x3_t {
    let mut ret_val: poly64x2x3_t = transmute(vld3q_dup_s64(transmute(a)));
    // Big-endian build: reverse each 2-lane vector with a [1, 0] shuffle so
    // the result matches the little-endian lane numbering.
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val
}
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3q_dup_u64(a: *const u64) -> uint64x2x3_t {
    // Little-endian: plain bit-reinterpretation of the signed load-and-replicate.
    transmute(vld3q_dup_s64(transmute(a)))
}
#[doc = "Load single 3-element structure and replicate to all lanes of three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3r))]
pub unsafe fn vld3q_dup_u64(a: *const u64) -> uint64x2x3_t {
    let mut ret_val: uint64x2x3_t = transmute(vld3q_dup_s64(transmute(a)));
    // Big-endian build: reverse each 2-lane vector with a [1, 0] shuffle so
    // the result matches the little-endian lane numbering.
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3))]
pub unsafe fn vld3q_f64(a: *const f64) -> float64x2x3_t {
    // Thin wrapper over the LLVM `ld3` intrinsic; `a as _` recasts the element
    // pointer to the vector-pointer type the intrinsic declaration expects.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3.v2f64.p0"
        )]
        fn _vld3q_f64(ptr: *const float64x2_t) -> float64x2x3_t;
    }
    _vld3q_f64(a as _)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3))]
pub unsafe fn vld3q_s64(a: *const i64) -> int64x2x3_t {
    // Thin wrapper over the LLVM `ld3` intrinsic; this is also the load the
    // u64/p64 variants delegate to via transmute.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3.v2i64.p0"
        )]
        fn _vld3q_s64(ptr: *const int64x2_t) -> int64x2x3_t;
    }
    _vld3q_s64(a as _)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_f64<const LANE: i32>(a: *const f64, b: float64x2x3_t) -> float64x2x3_t {
    // float64x2_t has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3lane.v2f64.p0"
        )]
        fn _vld3q_lane_f64(
            a: float64x2_t,
            b: float64x2_t,
            c: float64x2_t,
            n: i64,
            ptr: *const i8,
        ) -> float64x2x3_t;
    }
    // Unpack the three source vectors, pass the lane index and data pointer through.
    _vld3q_lane_f64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_p64<const LANE: i32>(a: *const p64, b: poly64x2x3_t) -> poly64x2x3_t {
    static_assert_uimm_bits!(LANE, 1);
    // Polynomial variant: delegate to the signed intrinsic via bit-reinterpretation.
    transmute(vld3q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
12268#[doc = "Load multiple 3-element structures to two registers"]
12269#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s8)"]
12270#[doc = "## Safety"]
12271#[doc = "  * Neon intrinsic unsafe"]
12272#[inline(always)]
12273#[target_feature(enable = "neon")]
12274#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
12275#[rustc_legacy_const_generics(2)]
12276#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12277pub unsafe fn vld3q_lane_s8<const LANE: i32>(a: *const i8, b: int8x16x3_t) -> int8x16x3_t {
12278    static_assert_uimm_bits!(LANE, 3);
12279    unsafe extern "unadjusted" {
12280        #[cfg_attr(
12281            any(target_arch = "aarch64", target_arch = "arm64ec"),
12282            link_name = "llvm.aarch64.neon.ld3lane.v16i8.p0"
12283        )]
12284        fn _vld3q_lane_s8(
12285            a: int8x16_t,
12286            b: int8x16_t,
12287            c: int8x16_t,
12288            n: i64,
12289            ptr: *const i8,
12290        ) -> int8x16x3_t;
12291    }
12292    _vld3q_lane_s8(b.0, b.1, b.2, LANE as i64, a as _)
12293}
#[doc = "Load multiple 3-element structures to two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_s64<const LANE: i32>(a: *const i64, b: int64x2x3_t) -> int64x2x3_t {
    // int64x2_t has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld3lane.v2i64.p0"
        )]
        fn _vld3q_lane_s64(
            a: int64x2_t,
            b: int64x2_t,
            c: int64x2_t,
            n: i64,
            ptr: *const i8,
        ) -> int64x2x3_t;
    }
    // The u64/p64 lane variants delegate here via transmute.
    _vld3q_lane_s64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_u8<const LANE: i32>(a: *const u8, b: uint8x16x3_t) -> uint8x16x3_t {
    // 16 lanes: LANE must fit in 4 bits (0..=15).
    static_assert_uimm_bits!(LANE, 4);
    // Unsigned variant: delegate to the signed intrinsic via bit-reinterpretation.
    transmute(vld3q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_u64<const LANE: i32>(a: *const u64, b: uint64x2x3_t) -> uint64x2x3_t {
    static_assert_uimm_bits!(LANE, 1);
    // Unsigned variant: delegate to the signed intrinsic via bit-reinterpretation.
    transmute(vld3q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld3q_lane_p8<const LANE: i32>(a: *const p8, b: poly8x16x3_t) -> poly8x16x3_t {
    // 16 lanes: LANE must fit in 4 bits (0..=15).
    static_assert_uimm_bits!(LANE, 4);
    // Polynomial variant: delegate to the signed intrinsic via bit-reinterpretation.
    transmute(vld3q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3))]
pub unsafe fn vld3q_p64(a: *const p64) -> poly64x2x3_t {
    // Little-endian: plain bit-reinterpretation of the signed load.
    transmute(vld3q_s64(transmute(a)))
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3))]
pub unsafe fn vld3q_p64(a: *const p64) -> poly64x2x3_t {
    let mut ret_val: poly64x2x3_t = transmute(vld3q_s64(transmute(a)));
    // Big-endian build: reverse each 2-lane vector with a [1, 0] shuffle so
    // the result matches the little-endian lane numbering.
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3))]
pub unsafe fn vld3q_u64(a: *const u64) -> uint64x2x3_t {
    // Little-endian: plain bit-reinterpretation of the signed load.
    transmute(vld3q_s64(transmute(a)))
}
#[doc = "Load multiple 3-element structures to three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld3))]
pub unsafe fn vld3q_u64(a: *const u64) -> uint64x2x3_t {
    let mut ret_val: uint64x2x3_t = transmute(vld3q_s64(transmute(a)));
    // Big-endian build: reverse each 2-lane vector with a [1, 0] shuffle so
    // the result matches the little-endian lane numbering.
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_dup_f64(a: *const f64) -> float64x1x4_t {
    // Thin wrapper over the LLVM `ld4r` (load-and-replicate) intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4r.v1f64.p0"
        )]
        fn _vld4_dup_f64(ptr: *const f64) -> float64x1x4_t;
    }
    _vld4_dup_f64(a as _)
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_f64(a: *const f64) -> float64x2x4_t {
    // Thin wrapper over the LLVM `ld4r` (load-and-replicate) intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4r.v2f64.p0"
        )]
        fn _vld4q_dup_f64(ptr: *const f64) -> float64x2x4_t;
    }
    _vld4q_dup_f64(a as _)
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_s64(a: *const i64) -> int64x2x4_t {
    // Thin wrapper over the LLVM `ld4r` (load-and-replicate) intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4r.v2i64.p0"
        )]
        fn _vld4q_dup_s64(ptr: *const i64) -> int64x2x4_t;
    }
    _vld4q_dup_s64(a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub unsafe fn vld4_f64(a: *const f64) -> float64x1x4_t {
    // Single-lane (v1f64) case; note the test asserts `nop`, not `ld4` —
    // with one lane per vector no interleaving instruction is required.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4.v1f64.p0"
        )]
        fn _vld4_f64(ptr: *const float64x1_t) -> float64x1x4_t;
    }
    _vld4_f64(a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_lane_f64<const LANE: i32>(a: *const f64, b: float64x1x4_t) -> float64x1x4_t {
    // float64x1_t has a single lane, so the only valid lane index is 0.
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4lane.v1f64.p0"
        )]
        fn _vld4_lane_f64(
            a: float64x1_t,
            b: float64x1_t,
            c: float64x1_t,
            d: float64x1_t,
            n: i64,
            ptr: *const i8,
        ) -> float64x1x4_t;
    }
    // The LLVM intrinsic takes the four source vectors unbundled, the lane
    // index as i64, and an untyped (i8) pointer.
    _vld4_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_lane_s64<const LANE: i32>(a: *const i64, b: int64x1x4_t) -> int64x1x4_t {
    // int64x1_t has a single lane, so the only valid lane index is 0.
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4lane.v1i64.p0"
        )]
        fn _vld4_lane_s64(
            a: int64x1_t,
            b: int64x1_t,
            c: int64x1_t,
            d: int64x1_t,
            n: i64,
            ptr: *const i8,
        ) -> int64x1x4_t;
    }
    // Four source vectors passed unbundled, lane as i64, untyped pointer.
    _vld4_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_lane_p64<const LANE: i32>(a: *const p64, b: poly64x1x4_t) -> poly64x1x4_t {
    // Single-lane vector: only lane 0 is valid.
    static_assert!(LANE == 0);
    // poly64 and i64 share a bit layout, so reuse the s64 implementation
    // via transmute; same instruction is emitted.
    transmute(vld4_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4_lane_u64<const LANE: i32>(a: *const u64, b: uint64x1x4_t) -> uint64x1x4_t {
    // Single-lane vector: only lane 0 is valid.
    static_assert!(LANE == 0);
    // The load is sign-agnostic, so delegate to the s64 variant via transmute.
    transmute(vld4_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_p64(a: *const p64) -> poly64x2x4_t {
    // Little-endian: poly64 and i64 share a layout, so the s64 implementation
    // can be reused directly via transmute.
    transmute(vld4q_dup_s64(transmute(a)))
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_p64(a: *const p64) -> poly64x2x4_t {
    // Big-endian: delegate to the s64 variant, then reverse the two lanes of
    // each result vector to restore the expected lane order.
    let mut ret_val: poly64x2x4_t = transmute(vld4q_dup_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [1, 0]) };
    ret_val
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_u64(a: *const u64) -> uint64x2x4_t {
    // Little-endian: sign-agnostic load, reuse the s64 implementation.
    transmute(vld4q_dup_s64(transmute(a)))
}
#[doc = "Load single 4-element structure and replicate to all lanes of four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4r))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_dup_u64(a: *const u64) -> uint64x2x4_t {
    // Big-endian: delegate to the s64 variant, then reverse the two lanes of
    // each result vector to restore the expected lane order.
    let mut ret_val: uint64x2x4_t = transmute(vld4q_dup_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [1, 0]) };
    ret_val
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld4))]
pub unsafe fn vld4q_f64(a: *const f64) -> float64x2x4_t {
    // Binding to the LLVM LD4 intrinsic: de-interleaves eight consecutive
    // f64 values from `a` into four two-lane vectors.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4.v2f64.p0"
        )]
        fn _vld4q_f64(ptr: *const float64x2_t) -> float64x2x4_t;
    }
    // Cast: the LLVM declaration takes a vector pointer, callers pass *const f64.
    _vld4q_f64(a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld4))]
pub unsafe fn vld4q_s64(a: *const i64) -> int64x2x4_t {
    // Binding to the LLVM LD4 intrinsic: de-interleaves eight consecutive
    // i64 values from `a` into four two-lane vectors.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4.v2i64.p0"
        )]
        fn _vld4q_s64(ptr: *const int64x2_t) -> int64x2x4_t;
    }
    // Cast: the LLVM declaration takes a vector pointer, callers pass *const i64.
    _vld4q_s64(a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_f64<const LANE: i32>(a: *const f64, b: float64x2x4_t) -> float64x2x4_t {
    // float64x2_t has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4lane.v2f64.p0"
        )]
        fn _vld4q_lane_f64(
            a: float64x2_t,
            b: float64x2_t,
            c: float64x2_t,
            d: float64x2_t,
            n: i64,
            ptr: *const i8,
        ) -> float64x2x4_t;
    }
    // Four source vectors passed unbundled, lane as i64, untyped pointer.
    _vld4q_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
12688#[doc = "Load multiple 4-element structures to four registers"]
12689#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s8)"]
12690#[doc = "## Safety"]
12691#[doc = "  * Neon intrinsic unsafe"]
12692#[inline(always)]
12693#[target_feature(enable = "neon")]
12694#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
12695#[rustc_legacy_const_generics(2)]
12696#[stable(feature = "neon_intrinsics", since = "1.59.0")]
12697pub unsafe fn vld4q_lane_s8<const LANE: i32>(a: *const i8, b: int8x16x4_t) -> int8x16x4_t {
12698    static_assert_uimm_bits!(LANE, 3);
12699    unsafe extern "unadjusted" {
12700        #[cfg_attr(
12701            any(target_arch = "aarch64", target_arch = "arm64ec"),
12702            link_name = "llvm.aarch64.neon.ld4lane.v16i8.p0"
12703        )]
12704        fn _vld4q_lane_s8(
12705            a: int8x16_t,
12706            b: int8x16_t,
12707            c: int8x16_t,
12708            d: int8x16_t,
12709            n: i64,
12710            ptr: *const i8,
12711        ) -> int8x16x4_t;
12712    }
12713    _vld4q_lane_s8(b.0, b.1, b.2, b.3, LANE as i64, a as _)
12714}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_s64<const LANE: i32>(a: *const i64, b: int64x2x4_t) -> int64x2x4_t {
    // int64x2_t has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.ld4lane.v2i64.p0"
        )]
        fn _vld4q_lane_s64(
            a: int64x2_t,
            b: int64x2_t,
            c: int64x2_t,
            d: int64x2_t,
            n: i64,
            ptr: *const i8,
        ) -> int64x2x4_t;
    }
    // Four source vectors passed unbundled, lane as i64, untyped pointer.
    _vld4q_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_p64<const LANE: i32>(a: *const p64, b: poly64x2x4_t) -> poly64x2x4_t {
    // poly64x2_t has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // poly64 and i64 share a bit layout; reuse the s64 implementation.
    transmute(vld4q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_u8<const LANE: i32>(a: *const u8, b: uint8x16x4_t) -> uint8x16x4_t {
    // uint8x16_t has 16 lanes, so LANE must fit in 4 bits (0..=15).
    static_assert_uimm_bits!(LANE, 4);
    // The load is sign-agnostic; reuse the s8 implementation via transmute.
    transmute(vld4q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_u64<const LANE: i32>(a: *const u64, b: uint64x2x4_t) -> uint64x2x4_t {
    // uint64x2_t has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // The load is sign-agnostic; reuse the s64 implementation via transmute.
    transmute(vld4q_lane_s64::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vld4q_lane_p8<const LANE: i32>(a: *const p8, b: poly8x16x4_t) -> poly8x16x4_t {
    // poly8x16_t has 16 lanes, so LANE must fit in 4 bits (0..=15).
    static_assert_uimm_bits!(LANE, 4);
    // poly8 and i8 share a bit layout; reuse the s8 implementation.
    transmute(vld4q_lane_s8::<LANE>(transmute(a), transmute(b)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4))]
pub unsafe fn vld4q_p64(a: *const p64) -> poly64x2x4_t {
    // Little-endian: poly64 and i64 share a layout; reuse the s64 implementation.
    transmute(vld4q_s64(transmute(a)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(ld4))]
pub unsafe fn vld4q_p64(a: *const p64) -> poly64x2x4_t {
    // Big-endian: delegate to the s64 variant, then reverse the two lanes of
    // each result vector to restore the expected lane order.
    let mut ret_val: poly64x2x4_t = transmute(vld4q_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [1, 0]) };
    ret_val
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld4))]
pub unsafe fn vld4q_u64(a: *const u64) -> uint64x2x4_t {
    // Little-endian: sign-agnostic load, reuse the s64 implementation.
    transmute(vld4q_s64(transmute(a)))
}
#[doc = "Load multiple 4-element structures to four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ld4))]
pub unsafe fn vld4q_u64(a: *const u64) -> uint64x2x4_t {
    // Big-endian: delegate to the s64 variant, then reverse the two lanes of
    // each result vector to restore the expected lane order.
    let mut ret_val: uint64x2x4_t = transmute(vld4q_s64(transmute(a)));
    ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) };
    ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) };
    ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) };
    ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [1, 0]) };
    ret_val
}
#[doc = "Load-acquire RCpc one single-element structure to one lane of one register"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vldap1_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(ldap1, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
#[cfg(target_has_atomic = "64")]
pub unsafe fn vldap1_lane_s64<const LANE: i32>(ptr: *const i64, src: int64x1_t) -> int64x1_t {
    // int64x1_t has a single lane, so the only valid lane index is 0.
    static_assert!(LANE == 0);
    // Model the load-acquire semantics with an atomic Acquire load, then
    // insert the loaded value into the requested lane of `src`.
    let atomic_src = crate::sync::atomic::AtomicI64::from_ptr(ptr as *mut i64);
    simd_insert!(
        src,
        LANE as u32,
        atomic_src.load(crate::sync::atomic::Ordering::Acquire)
    )
}
#[doc = "Load-acquire RCpc one single-element structure to one lane of one register"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vldap1q_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(ldap1, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
#[cfg(target_has_atomic = "64")]
pub unsafe fn vldap1q_lane_s64<const LANE: i32>(ptr: *const i64, src: int64x2_t) -> int64x2_t {
    // int64x2_t has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // Atomic Acquire load models the load-acquire semantics; the loaded
    // value replaces the requested lane of `src`.
    let atomic_src = crate::sync::atomic::AtomicI64::from_ptr(ptr as *mut i64);
    simd_insert!(
        src,
        LANE as u32,
        atomic_src.load(crate::sync::atomic::Ordering::Acquire)
    )
}
#[doc = "Load-acquire RCpc one single-element structure to one lane of one register"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vldap1q_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(ldap1, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
#[cfg(target_has_atomic = "64")]
pub unsafe fn vldap1q_lane_f64<const LANE: i32>(ptr: *const f64, src: float64x2_t) -> float64x2_t {
    // float64x2_t has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // f64 and i64 have the same size; reuse the s64 implementation via
    // transmute so the atomic-acquire load logic lives in one place.
    transmute(vldap1q_lane_s64::<LANE>(ptr as *mut i64, transmute(src)))
}
#[doc = "Load-acquire RCpc one single-element structure to one lane of one register"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vldap1_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(ldap1, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
#[cfg(target_has_atomic = "64")]
pub unsafe fn vldap1_lane_u64<const LANE: i32>(ptr: *const u64, src: uint64x1_t) -> uint64x1_t {
    // Single-lane vector: only lane 0 is valid.
    static_assert!(LANE == 0);
    // Sign-agnostic load; reuse the s64 implementation via transmute.
    transmute(vldap1_lane_s64::<LANE>(ptr as *mut i64, transmute(src)))
}
#[doc = "Load-acquire RCpc one single-element structure to one lane of one register"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vldap1q_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(ldap1, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
#[cfg(target_has_atomic = "64")]
pub unsafe fn vldap1q_lane_u64<const LANE: i32>(ptr: *const u64, src: uint64x2_t) -> uint64x2_t {
    // uint64x2_t has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // Sign-agnostic load; reuse the s64 implementation via transmute.
    transmute(vldap1q_lane_s64::<LANE>(ptr as *mut i64, transmute(src)))
}
#[doc = "Load-acquire RCpc one single-element structure to one lane of one register"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vldap1_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(ldap1, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
#[cfg(target_has_atomic = "64")]
pub unsafe fn vldap1_lane_p64<const LANE: i32>(ptr: *const p64, src: poly64x1_t) -> poly64x1_t {
    // Single-lane vector: only lane 0 is valid.
    static_assert!(LANE == 0);
    // poly64 and i64 share a bit layout; reuse the s64 implementation.
    transmute(vldap1_lane_s64::<LANE>(ptr as *mut i64, transmute(src)))
}
#[doc = "Load-acquire RCpc one single-element structure to one lane of one register"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vldap1q_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(ldap1, LANE = 0))]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
#[cfg(target_has_atomic = "64")]
pub unsafe fn vldap1q_lane_p64<const LANE: i32>(ptr: *const p64, src: poly64x2_t) -> poly64x2_t {
    // poly64x2_t has 2 lanes, so LANE must fit in 1 bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // poly64 and i64 share a bit layout; reuse the s64 implementation.
    transmute(vldap1q_lane_s64::<LANE>(ptr as *mut i64, transmute(src)))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_f16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_lane_f16<const INDEX: i32>(a: float16x4_t, b: uint8x8_t) -> float16x8_t {
    // 16-bit-element table: the index-segment selector ranges over 0..=3.
    static_assert!(INDEX >= 0 && INDEX <= 3);
    // f16 lookups are bit-pattern copies; reuse the s16 implementation.
    transmute(vluti2_lane_s16::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_f16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_lane_f16<const INDEX: i32>(a: float16x8_t, b: uint8x8_t) -> float16x8_t {
    // 16-bit-element table: the index-segment selector ranges over 0..=3.
    static_assert!(INDEX >= 0 && INDEX <= 3);
    // f16 lookups are bit-pattern copies; reuse the s16 implementation.
    transmute(vluti2q_lane_s16::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_lane_u8<const INDEX: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x16_t {
    // 8-bit-element table: the index-segment selector ranges over 0..=1.
    static_assert!(INDEX >= 0 && INDEX <= 1);
    // Sign-agnostic lookup; reuse the s8 implementation via transmute.
    transmute(vluti2_lane_s8::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_lane_u8<const INDEX: i32>(a: uint8x16_t, b: uint8x8_t) -> uint8x16_t {
    // 8-bit-element table: the index-segment selector ranges over 0..=1.
    static_assert!(INDEX >= 0 && INDEX <= 1);
    // Sign-agnostic lookup; reuse the s8 implementation via transmute.
    transmute(vluti2q_lane_s8::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_u16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_lane_u16<const INDEX: i32>(a: uint16x4_t, b: uint8x8_t) -> uint16x8_t {
    // 16-bit-element table: the index-segment selector ranges over 0..=3.
    static_assert!(INDEX >= 0 && INDEX <= 3);
    // Sign-agnostic lookup; reuse the s16 implementation via transmute.
    transmute(vluti2_lane_s16::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_u16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_lane_u16<const INDEX: i32>(a: uint16x8_t, b: uint8x8_t) -> uint16x8_t {
    // 16-bit-element table: the index-segment selector ranges over 0..=3.
    static_assert!(INDEX >= 0 && INDEX <= 3);
    // Sign-agnostic lookup; reuse the s16 implementation via transmute.
    transmute(vluti2q_lane_s16::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_lane_p8<const INDEX: i32>(a: poly8x8_t, b: uint8x8_t) -> poly8x16_t {
    // 8-bit-element table: the index-segment selector ranges over 0..=1.
    static_assert!(INDEX >= 0 && INDEX <= 1);
    // poly8 and i8 share a bit layout; reuse the s8 implementation.
    transmute(vluti2_lane_s8::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_lane_p8<const INDEX: i32>(a: poly8x16_t, b: uint8x8_t) -> poly8x16_t {
    // 8-bit-element table: the index-segment selector ranges over 0..=1.
    static_assert!(INDEX >= 0 && INDEX <= 1);
    // poly8 and i8 share a bit layout; reuse the s8 implementation.
    transmute(vluti2q_lane_s8::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_p16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_lane_p16<const INDEX: i32>(a: poly16x4_t, b: uint8x8_t) -> poly16x8_t {
    // 16-bit-element table: the index-segment selector ranges over 0..=3.
    static_assert!(INDEX >= 0 && INDEX <= 3);
    // poly16 and i16 share a bit layout; reuse the s16 implementation.
    transmute(vluti2_lane_s16::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_p16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_lane_p16<const INDEX: i32>(a: poly16x8_t, b: uint8x8_t) -> poly16x8_t {
    // 16-bit-element table: the index-segment selector ranges over 0..=3.
    static_assert!(INDEX >= 0 && INDEX <= 3);
    // poly16 and i16 share a bit layout; reuse the s16 implementation.
    transmute(vluti2q_lane_s16::<INDEX>(transmute(a), b))
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2_lane_s8<const LANE: i32>(a: int8x8_t, b: uint8x8_t) -> int8x16_t {
    // 8-bit-element table: the index-segment selector ranges over 0..=1.
    static_assert!(LANE >= 0 && LANE <= 1);
    // Direct binding to the LLVM VLUTI2 lane intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti2.lane.v16i8.v8i8"
        )]
        fn _vluti2_lane_s8(a: int8x8_t, b: uint8x8_t, n: i32) -> int8x16_t;
    }
    _vluti2_lane_s8(a, b, LANE)
}
#[doc = "Lookup table read with 2-bit indices"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,lut")]
#[cfg_attr(test, assert_instr(nop, LANE = 1))]
#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vluti2q_lane_s8<const LANE: i32>(a: int8x16_t, b: uint8x8_t) -> int8x16_t {
    // 8-bit-element table: the index-segment selector ranges over 0..=1.
    static_assert!(LANE >= 0 && LANE <= 1);
    // Direct binding to the LLVM VLUTI2 lane intrinsic (128-bit table).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vluti2.lane.v16i8.v16i8"
        )]
        fn _vluti2q_lane_s8(a: int8x16_t, b: uint8x8_t, n: i32) -> int8x16_t;
    }
    _vluti2q_lane_s8(a, b, LANE)
}
13130#[doc = "Lookup table read with 2-bit indices"]
13131#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_s16)"]
13132#[doc = "## Safety"]
13133#[doc = "  * Neon intrinsic unsafe"]
13134#[inline(always)]
13135#[target_feature(enable = "neon,lut")]
13136#[cfg_attr(test, assert_instr(nop, LANE = 1))]
13137#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13138#[rustc_legacy_const_generics(2)]
13139pub unsafe fn vluti2_lane_s16<const LANE: i32>(a: int16x4_t, b: uint8x8_t) -> int16x8_t {
13140    static_assert!(LANE >= 0 && LANE <= 3);
13141    unsafe extern "unadjusted" {
13142        #[cfg_attr(
13143            any(target_arch = "aarch64", target_arch = "arm64ec"),
13144            link_name = "llvm.aarch64.neon.vluti2.lane.v8i16.v4i16"
13145        )]
13146        fn _vluti2_lane_s16(a: int16x4_t, b: uint8x8_t, n: i32) -> int16x8_t;
13147    }
13148    _vluti2_lane_s16(a, b, LANE)
13149}
13150#[doc = "Lookup table read with 2-bit indices"]
13151#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_s16)"]
13152#[doc = "## Safety"]
13153#[doc = "  * Neon intrinsic unsafe"]
13154#[inline(always)]
13155#[target_feature(enable = "neon,lut")]
13156#[cfg_attr(test, assert_instr(nop, LANE = 1))]
13157#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13158#[rustc_legacy_const_generics(2)]
13159pub unsafe fn vluti2q_lane_s16<const LANE: i32>(a: int16x8_t, b: uint8x8_t) -> int16x8_t {
13160    static_assert!(LANE >= 0 && LANE <= 3);
13161    unsafe extern "unadjusted" {
13162        #[cfg_attr(
13163            any(target_arch = "aarch64", target_arch = "arm64ec"),
13164            link_name = "llvm.aarch64.neon.vluti2.lane.v8i16.v8i16"
13165        )]
13166        fn _vluti2q_lane_s16(a: int16x8_t, b: uint8x8_t, n: i32) -> int16x8_t;
13167    }
13168    _vluti2q_lane_s16(a, b, LANE)
13169}
13170#[doc = "Lookup table read with 2-bit indices"]
13171#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_laneq_f16)"]
13172#[doc = "## Safety"]
13173#[doc = "  * Neon intrinsic unsafe"]
13174#[inline(always)]
13175#[target_feature(enable = "neon,lut")]
13176#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
13177#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13178#[rustc_legacy_const_generics(2)]
13179pub unsafe fn vluti2_laneq_f16<const INDEX: i32>(a: float16x4_t, b: uint8x16_t) -> float16x8_t {
13180    static_assert!(INDEX >= 0 && INDEX <= 7);
13181    transmute(vluti2_laneq_s16::<INDEX>(transmute(a), b))
13182}
13183#[doc = "Lookup table read with 2-bit indices"]
13184#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_laneq_f16)"]
13185#[doc = "## Safety"]
13186#[doc = "  * Neon intrinsic unsafe"]
13187#[inline(always)]
13188#[target_feature(enable = "neon,lut")]
13189#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
13190#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13191#[rustc_legacy_const_generics(2)]
13192pub unsafe fn vluti2q_laneq_f16<const INDEX: i32>(a: float16x8_t, b: uint8x16_t) -> float16x8_t {
13193    static_assert!(INDEX >= 0 && INDEX <= 7);
13194    transmute(vluti2q_laneq_s16::<INDEX>(transmute(a), b))
13195}
13196#[doc = "Lookup table read with 2-bit indices"]
13197#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_laneq_u8)"]
13198#[doc = "## Safety"]
13199#[doc = "  * Neon intrinsic unsafe"]
13200#[inline(always)]
13201#[target_feature(enable = "neon,lut")]
13202#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
13203#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13204#[rustc_legacy_const_generics(2)]
13205pub unsafe fn vluti2_laneq_u8<const INDEX: i32>(a: uint8x8_t, b: uint8x16_t) -> uint8x16_t {
13206    static_assert!(INDEX >= 0 && INDEX <= 3);
13207    transmute(vluti2_laneq_s8::<INDEX>(transmute(a), b))
13208}
13209#[doc = "Lookup table read with 2-bit indices"]
13210#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_laneq_u8)"]
13211#[doc = "## Safety"]
13212#[doc = "  * Neon intrinsic unsafe"]
13213#[inline(always)]
13214#[target_feature(enable = "neon,lut")]
13215#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
13216#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13217#[rustc_legacy_const_generics(2)]
13218pub unsafe fn vluti2q_laneq_u8<const INDEX: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
13219    static_assert!(INDEX >= 0 && INDEX <= 3);
13220    transmute(vluti2q_laneq_s8::<INDEX>(transmute(a), b))
13221}
13222#[doc = "Lookup table read with 2-bit indices"]
13223#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_laneq_u16)"]
13224#[doc = "## Safety"]
13225#[doc = "  * Neon intrinsic unsafe"]
13226#[inline(always)]
13227#[target_feature(enable = "neon,lut")]
13228#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
13229#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13230#[rustc_legacy_const_generics(2)]
13231pub unsafe fn vluti2_laneq_u16<const INDEX: i32>(a: uint16x4_t, b: uint8x16_t) -> uint16x8_t {
13232    static_assert!(INDEX >= 0 && INDEX <= 7);
13233    transmute(vluti2_laneq_s16::<INDEX>(transmute(a), b))
13234}
13235#[doc = "Lookup table read with 2-bit indices"]
13236#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_laneq_u16)"]
13237#[doc = "## Safety"]
13238#[doc = "  * Neon intrinsic unsafe"]
13239#[inline(always)]
13240#[target_feature(enable = "neon,lut")]
13241#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
13242#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13243#[rustc_legacy_const_generics(2)]
13244pub unsafe fn vluti2q_laneq_u16<const INDEX: i32>(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t {
13245    static_assert!(INDEX >= 0 && INDEX <= 7);
13246    transmute(vluti2q_laneq_s16::<INDEX>(transmute(a), b))
13247}
13248#[doc = "Lookup table read with 2-bit indices"]
13249#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_laneq_p8)"]
13250#[doc = "## Safety"]
13251#[doc = "  * Neon intrinsic unsafe"]
13252#[inline(always)]
13253#[target_feature(enable = "neon,lut")]
13254#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
13255#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13256#[rustc_legacy_const_generics(2)]
13257pub unsafe fn vluti2_laneq_p8<const INDEX: i32>(a: poly8x8_t, b: uint8x16_t) -> poly8x16_t {
13258    static_assert!(INDEX >= 0 && INDEX <= 3);
13259    transmute(vluti2_laneq_s8::<INDEX>(transmute(a), b))
13260}
13261#[doc = "Lookup table read with 2-bit indices"]
13262#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_laneq_p8)"]
13263#[doc = "## Safety"]
13264#[doc = "  * Neon intrinsic unsafe"]
13265#[inline(always)]
13266#[target_feature(enable = "neon,lut")]
13267#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
13268#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13269#[rustc_legacy_const_generics(2)]
13270pub unsafe fn vluti2q_laneq_p8<const INDEX: i32>(a: poly8x16_t, b: uint8x16_t) -> poly8x16_t {
13271    static_assert!(INDEX >= 0 && INDEX <= 3);
13272    transmute(vluti2q_laneq_s8::<INDEX>(transmute(a), b))
13273}
13274#[doc = "Lookup table read with 2-bit indices"]
13275#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_laneq_p16)"]
13276#[doc = "## Safety"]
13277#[doc = "  * Neon intrinsic unsafe"]
13278#[inline(always)]
13279#[target_feature(enable = "neon,lut")]
13280#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
13281#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13282#[rustc_legacy_const_generics(2)]
13283pub unsafe fn vluti2_laneq_p16<const INDEX: i32>(a: poly16x4_t, b: uint8x16_t) -> poly16x8_t {
13284    static_assert!(INDEX >= 0 && INDEX <= 7);
13285    transmute(vluti2_laneq_s16::<INDEX>(transmute(a), b))
13286}
13287#[doc = "Lookup table read with 2-bit indices"]
13288#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_laneq_p16)"]
13289#[doc = "## Safety"]
13290#[doc = "  * Neon intrinsic unsafe"]
13291#[inline(always)]
13292#[target_feature(enable = "neon,lut")]
13293#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
13294#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13295#[rustc_legacy_const_generics(2)]
13296pub unsafe fn vluti2q_laneq_p16<const INDEX: i32>(a: poly16x8_t, b: uint8x16_t) -> poly16x8_t {
13297    static_assert!(INDEX >= 0 && INDEX <= 7);
13298    transmute(vluti2q_laneq_s16::<INDEX>(transmute(a), b))
13299}
13300#[doc = "Lookup table read with 2-bit indices"]
13301#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_laneq_s8)"]
13302#[doc = "## Safety"]
13303#[doc = "  * Neon intrinsic unsafe"]
13304#[inline(always)]
13305#[target_feature(enable = "neon,lut")]
13306#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
13307#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13308#[rustc_legacy_const_generics(2)]
13309pub unsafe fn vluti2_laneq_s8<const INDEX: i32>(a: int8x8_t, b: uint8x16_t) -> int8x16_t {
13310    static_assert!(INDEX >= 0 && INDEX <= 3);
13311    unsafe extern "unadjusted" {
13312        #[cfg_attr(
13313            any(target_arch = "aarch64", target_arch = "arm64ec"),
13314            link_name = "llvm.aarch64.neon.vluti2.laneq.v16i8.v8i8"
13315        )]
13316        fn _vluti2_laneq_s8(a: int8x8_t, b: uint8x16_t, n: i32) -> int8x16_t;
13317    }
13318    _vluti2_laneq_s8(a, b, INDEX)
13319}
13320#[doc = "Lookup table read with 2-bit indices"]
13321#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_laneq_s8)"]
13322#[doc = "## Safety"]
13323#[doc = "  * Neon intrinsic unsafe"]
13324#[inline(always)]
13325#[target_feature(enable = "neon,lut")]
13326#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
13327#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13328#[rustc_legacy_const_generics(2)]
13329pub unsafe fn vluti2q_laneq_s8<const INDEX: i32>(a: int8x16_t, b: uint8x16_t) -> int8x16_t {
13330    static_assert!(INDEX >= 0 && INDEX <= 3);
13331    unsafe extern "unadjusted" {
13332        #[cfg_attr(
13333            any(target_arch = "aarch64", target_arch = "arm64ec"),
13334            link_name = "llvm.aarch64.neon.vluti2.laneq.v16i8.v16i8"
13335        )]
13336        fn _vluti2q_laneq_s8(a: int8x16_t, b: uint8x16_t, n: i32) -> int8x16_t;
13337    }
13338    _vluti2q_laneq_s8(a, b, INDEX)
13339}
13340#[doc = "Lookup table read with 2-bit indices"]
13341#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_laneq_s16)"]
13342#[doc = "## Safety"]
13343#[doc = "  * Neon intrinsic unsafe"]
13344#[inline(always)]
13345#[target_feature(enable = "neon,lut")]
13346#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
13347#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13348#[rustc_legacy_const_generics(2)]
13349pub unsafe fn vluti2_laneq_s16<const INDEX: i32>(a: int16x4_t, b: uint8x16_t) -> int16x8_t {
13350    static_assert!(INDEX >= 0 && INDEX <= 7);
13351    unsafe extern "unadjusted" {
13352        #[cfg_attr(
13353            any(target_arch = "aarch64", target_arch = "arm64ec"),
13354            link_name = "llvm.aarch64.neon.vluti2.laneq.v8i16.v4i16"
13355        )]
13356        fn _vluti2_laneq_s16(a: int16x4_t, b: uint8x16_t, n: i32) -> int16x8_t;
13357    }
13358    _vluti2_laneq_s16(a, b, INDEX)
13359}
13360#[doc = "Lookup table read with 2-bit indices"]
13361#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_laneq_s16)"]
13362#[doc = "## Safety"]
13363#[doc = "  * Neon intrinsic unsafe"]
13364#[inline(always)]
13365#[target_feature(enable = "neon,lut")]
13366#[cfg_attr(test, assert_instr(nop, INDEX = 1))]
13367#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13368#[rustc_legacy_const_generics(2)]
13369pub unsafe fn vluti2q_laneq_s16<const INDEX: i32>(a: int16x8_t, b: uint8x16_t) -> int16x8_t {
13370    static_assert!(INDEX >= 0 && INDEX <= 7);
13371    unsafe extern "unadjusted" {
13372        #[cfg_attr(
13373            any(target_arch = "aarch64", target_arch = "arm64ec"),
13374            link_name = "llvm.aarch64.neon.vluti2.laneq.v8i16.v8i16"
13375        )]
13376        fn _vluti2q_laneq_s16(a: int16x8_t, b: uint8x16_t, n: i32) -> int16x8_t;
13377    }
13378    _vluti2q_laneq_s16(a, b, INDEX)
13379}
13380#[doc = "Lookup table read with 4-bit indices"]
13381#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_f16_x2)"]
13382#[doc = "## Safety"]
13383#[doc = "  * Neon intrinsic unsafe"]
13384#[inline(always)]
13385#[target_feature(enable = "neon,lut,fp16")]
13386#[cfg_attr(test, assert_instr(nop, LANE = 0))]
13387#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13388#[rustc_legacy_const_generics(2)]
13389pub unsafe fn vluti4q_lane_f16_x2<const LANE: i32>(a: float16x8x2_t, b: uint8x8_t) -> float16x8_t {
13390    static_assert!(LANE >= 0 && LANE <= 1);
13391    transmute(vluti4q_lane_s16_x2::<LANE>(transmute(a), b))
13392}
13393#[doc = "Lookup table read with 4-bit indices"]
13394#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_u16_x2)"]
13395#[doc = "## Safety"]
13396#[doc = "  * Neon intrinsic unsafe"]
13397#[inline(always)]
13398#[target_feature(enable = "neon,lut")]
13399#[cfg_attr(test, assert_instr(nop, LANE = 0))]
13400#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13401#[rustc_legacy_const_generics(2)]
13402pub unsafe fn vluti4q_lane_u16_x2<const LANE: i32>(a: uint16x8x2_t, b: uint8x8_t) -> uint16x8_t {
13403    static_assert!(LANE >= 0 && LANE <= 1);
13404    transmute(vluti4q_lane_s16_x2::<LANE>(transmute(a), b))
13405}
13406#[doc = "Lookup table read with 4-bit indices"]
13407#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_p16_x2)"]
13408#[doc = "## Safety"]
13409#[doc = "  * Neon intrinsic unsafe"]
13410#[inline(always)]
13411#[target_feature(enable = "neon,lut")]
13412#[cfg_attr(test, assert_instr(nop, LANE = 0))]
13413#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13414#[rustc_legacy_const_generics(2)]
13415pub unsafe fn vluti4q_lane_p16_x2<const LANE: i32>(a: poly16x8x2_t, b: uint8x8_t) -> poly16x8_t {
13416    static_assert!(LANE >= 0 && LANE <= 1);
13417    transmute(vluti4q_lane_s16_x2::<LANE>(transmute(a), b))
13418}
13419#[doc = "Lookup table read with 4-bit indices"]
13420#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_s16_x2)"]
13421#[doc = "## Safety"]
13422#[doc = "  * Neon intrinsic unsafe"]
13423#[inline(always)]
13424#[target_feature(enable = "neon,lut")]
13425#[cfg_attr(test, assert_instr(nop, LANE = 0))]
13426#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13427#[rustc_legacy_const_generics(2)]
13428pub unsafe fn vluti4q_lane_s16_x2<const LANE: i32>(a: int16x8x2_t, b: uint8x8_t) -> int16x8_t {
13429    static_assert!(LANE >= 0 && LANE <= 1);
13430    unsafe extern "unadjusted" {
13431        #[cfg_attr(
13432            any(target_arch = "aarch64", target_arch = "arm64ec"),
13433            link_name = "llvm.aarch64.neon.vluti4q.lane.x2.v8i16"
13434        )]
13435        fn _vluti4q_lane_s16_x2(a: int16x8_t, a: int16x8_t, b: uint8x8_t, n: i32) -> int16x8_t;
13436    }
13437    _vluti4q_lane_s16_x2(a.0, a.1, b, LANE)
13438}
13439#[doc = "Lookup table read with 4-bit indices"]
13440#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_s8)"]
13441#[doc = "## Safety"]
13442#[doc = "  * Neon intrinsic unsafe"]
13443#[inline(always)]
13444#[target_feature(enable = "neon,lut")]
13445#[cfg_attr(test, assert_instr(nop, LANE = 0))]
13446#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13447#[rustc_legacy_const_generics(2)]
13448pub unsafe fn vluti4q_lane_s8<const LANE: i32>(a: int8x16_t, b: uint8x8_t) -> int8x16_t {
13449    static_assert!(LANE == 0);
13450    unsafe extern "unadjusted" {
13451        #[cfg_attr(
13452            any(target_arch = "aarch64", target_arch = "arm64ec"),
13453            link_name = "llvm.aarch64.neon.vluti4q.lane.v8i8"
13454        )]
13455        fn _vluti4q_lane_s8(a: int8x16_t, b: uint8x8_t, n: i32) -> int8x16_t;
13456    }
13457    _vluti4q_lane_s8(a, b, LANE)
13458}
13459#[doc = "Lookup table read with 4-bit indices"]
13460#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_u8)"]
13461#[doc = "## Safety"]
13462#[doc = "  * Neon intrinsic unsafe"]
13463#[inline(always)]
13464#[target_feature(enable = "neon,lut")]
13465#[cfg_attr(test, assert_instr(nop, LANE = 0))]
13466#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13467#[rustc_legacy_const_generics(2)]
13468pub unsafe fn vluti4q_lane_u8<const LANE: i32>(a: uint8x16_t, b: uint8x8_t) -> uint8x16_t {
13469    static_assert!(LANE == 0);
13470    transmute(vluti4q_lane_s8::<LANE>(transmute(a), b))
13471}
13472#[doc = "Lookup table read with 4-bit indices"]
13473#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_p8)"]
13474#[doc = "## Safety"]
13475#[doc = "  * Neon intrinsic unsafe"]
13476#[inline(always)]
13477#[target_feature(enable = "neon,lut")]
13478#[cfg_attr(test, assert_instr(nop, LANE = 0))]
13479#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13480#[rustc_legacy_const_generics(2)]
13481pub unsafe fn vluti4q_lane_p8<const LANE: i32>(a: poly8x16_t, b: uint8x8_t) -> poly8x16_t {
13482    static_assert!(LANE == 0);
13483    transmute(vluti4q_lane_s8::<LANE>(transmute(a), b))
13484}
13485#[doc = "Lookup table read with 4-bit indices"]
13486#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_f16_x2)"]
13487#[doc = "## Safety"]
13488#[doc = "  * Neon intrinsic unsafe"]
13489#[inline(always)]
13490#[target_feature(enable = "neon,lut,fp16")]
13491#[cfg_attr(test, assert_instr(nop, LANE = 3))]
13492#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13493#[rustc_legacy_const_generics(2)]
13494pub unsafe fn vluti4q_laneq_f16_x2<const LANE: i32>(
13495    a: float16x8x2_t,
13496    b: uint8x16_t,
13497) -> float16x8_t {
13498    static_assert!(LANE >= 0 && LANE <= 3);
13499    transmute(vluti4q_laneq_s16_x2::<LANE>(transmute(a), b))
13500}
13501#[doc = "Lookup table read with 4-bit indices"]
13502#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_u16_x2)"]
13503#[doc = "## Safety"]
13504#[doc = "  * Neon intrinsic unsafe"]
13505#[inline(always)]
13506#[target_feature(enable = "neon,lut")]
13507#[cfg_attr(test, assert_instr(nop, LANE = 3))]
13508#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13509#[rustc_legacy_const_generics(2)]
13510pub unsafe fn vluti4q_laneq_u16_x2<const LANE: i32>(a: uint16x8x2_t, b: uint8x16_t) -> uint16x8_t {
13511    static_assert!(LANE >= 0 && LANE <= 3);
13512    transmute(vluti4q_laneq_s16_x2::<LANE>(transmute(a), b))
13513}
13514#[doc = "Lookup table read with 4-bit indices"]
13515#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_p16_x2)"]
13516#[doc = "## Safety"]
13517#[doc = "  * Neon intrinsic unsafe"]
13518#[inline(always)]
13519#[target_feature(enable = "neon,lut")]
13520#[cfg_attr(test, assert_instr(nop, LANE = 3))]
13521#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13522#[rustc_legacy_const_generics(2)]
13523pub unsafe fn vluti4q_laneq_p16_x2<const LANE: i32>(a: poly16x8x2_t, b: uint8x16_t) -> poly16x8_t {
13524    static_assert!(LANE >= 0 && LANE <= 3);
13525    transmute(vluti4q_laneq_s16_x2::<LANE>(transmute(a), b))
13526}
13527#[doc = "Lookup table read with 4-bit indices"]
13528#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_s16_x2)"]
13529#[doc = "## Safety"]
13530#[doc = "  * Neon intrinsic unsafe"]
13531#[inline(always)]
13532#[target_feature(enable = "neon,lut")]
13533#[cfg_attr(test, assert_instr(nop, LANE = 3))]
13534#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13535#[rustc_legacy_const_generics(2)]
13536pub unsafe fn vluti4q_laneq_s16_x2<const LANE: i32>(a: int16x8x2_t, b: uint8x16_t) -> int16x8_t {
13537    static_assert!(LANE >= 0 && LANE <= 3);
13538    unsafe extern "unadjusted" {
13539        #[cfg_attr(
13540            any(target_arch = "aarch64", target_arch = "arm64ec"),
13541            link_name = "llvm.aarch64.neon.vluti4q.laneq.x2.v8i16"
13542        )]
13543        fn _vluti4q_laneq_s16_x2(a: int16x8_t, b: int16x8_t, c: uint8x16_t, n: i32) -> int16x8_t;
13544    }
13545    _vluti4q_laneq_s16_x2(a.0, a.1, b, LANE)
13546}
13547#[doc = "Lookup table read with 4-bit indices"]
13548#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_s8)"]
13549#[doc = "## Safety"]
13550#[doc = "  * Neon intrinsic unsafe"]
13551#[inline(always)]
13552#[target_feature(enable = "neon,lut")]
13553#[cfg_attr(test, assert_instr(nop, LANE = 0))]
13554#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13555#[rustc_legacy_const_generics(2)]
13556pub unsafe fn vluti4q_laneq_s8<const LANE: i32>(a: int8x16_t, b: uint8x16_t) -> int8x16_t {
13557    static_assert!(LANE >= 0 && LANE <= 1);
13558    unsafe extern "unadjusted" {
13559        #[cfg_attr(
13560            any(target_arch = "aarch64", target_arch = "arm64ec"),
13561            link_name = "llvm.aarch64.neon.vluti4q.laneq.v16i8"
13562        )]
13563        fn _vluti4q_laneq_s8(a: int8x16_t, b: uint8x16_t, n: i32) -> int8x16_t;
13564    }
13565    _vluti4q_laneq_s8(a, b, LANE)
13566}
13567#[doc = "Lookup table read with 4-bit indices"]
13568#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_u8)"]
13569#[doc = "## Safety"]
13570#[doc = "  * Neon intrinsic unsafe"]
13571#[inline(always)]
13572#[target_feature(enable = "neon,lut")]
13573#[cfg_attr(test, assert_instr(nop, LANE = 0))]
13574#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13575#[rustc_legacy_const_generics(2)]
13576pub unsafe fn vluti4q_laneq_u8<const LANE: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
13577    static_assert!(LANE >= 0 && LANE <= 1);
13578    transmute(vluti4q_laneq_s8::<LANE>(transmute(a), b))
13579}
13580#[doc = "Lookup table read with 4-bit indices"]
13581#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_p8)"]
13582#[doc = "## Safety"]
13583#[doc = "  * Neon intrinsic unsafe"]
13584#[inline(always)]
13585#[target_feature(enable = "neon,lut")]
13586#[cfg_attr(test, assert_instr(nop, LANE = 0))]
13587#[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")]
13588#[rustc_legacy_const_generics(2)]
13589pub unsafe fn vluti4q_laneq_p8<const LANE: i32>(a: poly8x16_t, b: uint8x16_t) -> poly8x16_t {
13590    static_assert!(LANE >= 0 && LANE <= 1);
13591    transmute(vluti4q_laneq_s8::<LANE>(transmute(a), b))
13592}
13593#[doc = "Maximum (vector)"]
13594#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_f64)"]
13595#[inline(always)]
13596#[target_feature(enable = "neon")]
13597#[stable(feature = "neon_intrinsics", since = "1.59.0")]
13598#[cfg_attr(test, assert_instr(fmax))]
13599pub fn vmax_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
13600    unsafe extern "unadjusted" {
13601        #[cfg_attr(
13602            any(target_arch = "aarch64", target_arch = "arm64ec"),
13603            link_name = "llvm.aarch64.neon.fmax.v1f64"
13604        )]
13605        fn _vmax_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
13606    }
13607    unsafe { _vmax_f64(a, b) }
13608}
13609#[doc = "Maximum (vector)"]
13610#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_f64)"]
13611#[inline(always)]
13612#[target_feature(enable = "neon")]
13613#[stable(feature = "neon_intrinsics", since = "1.59.0")]
13614#[cfg_attr(test, assert_instr(fmax))]
13615pub fn vmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
13616    unsafe extern "unadjusted" {
13617        #[cfg_attr(
13618            any(target_arch = "aarch64", target_arch = "arm64ec"),
13619            link_name = "llvm.aarch64.neon.fmax.v2f64"
13620        )]
13621        fn _vmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
13622    }
13623    unsafe { _vmaxq_f64(a, b) }
13624}
13625#[doc = "Maximum (vector)"]
13626#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxh_f16)"]
13627#[inline(always)]
13628#[target_feature(enable = "neon,fp16")]
13629#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
13630#[cfg(not(target_arch = "arm64ec"))]
13631#[cfg_attr(test, assert_instr(fmax))]
13632pub fn vmaxh_f16(a: f16, b: f16) -> f16 {
13633    unsafe extern "unadjusted" {
13634        #[cfg_attr(
13635            any(target_arch = "aarch64", target_arch = "arm64ec"),
13636            link_name = "llvm.aarch64.neon.fmax.f16"
13637        )]
13638        fn _vmaxh_f16(a: f16, b: f16) -> f16;
13639    }
13640    unsafe { _vmaxh_f16(a, b) }
13641}
13642#[doc = "Floating-point Maximum Number (vector)"]
13643#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnm_f64)"]
13644#[inline(always)]
13645#[target_feature(enable = "neon")]
13646#[stable(feature = "neon_intrinsics", since = "1.59.0")]
13647#[cfg_attr(test, assert_instr(fmaxnm))]
13648pub fn vmaxnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
13649    unsafe { simd_fmax(a, b) }
13650}
13651#[doc = "Floating-point Maximum Number (vector)"]
13652#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmq_f64)"]
13653#[inline(always)]
13654#[target_feature(enable = "neon")]
13655#[stable(feature = "neon_intrinsics", since = "1.59.0")]
13656#[cfg_attr(test, assert_instr(fmaxnm))]
13657pub fn vmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
13658    unsafe { simd_fmax(a, b) }
13659}
13660#[doc = "Floating-point Maximum Number"]
13661#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmh_f16)"]
13662#[inline(always)]
13663#[target_feature(enable = "neon,fp16")]
13664#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
13665#[cfg(not(target_arch = "arm64ec"))]
13666#[cfg_attr(test, assert_instr(fmaxnm))]
13667pub fn vmaxnmh_f16(a: f16, b: f16) -> f16 {
13668    f16::max(a, b)
13669}
13670#[doc = "Floating-point maximum number across vector"]
13671#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmv_f16)"]
13672#[inline(always)]
13673#[target_feature(enable = "neon,fp16")]
13674#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
13675#[cfg(not(target_arch = "arm64ec"))]
13676#[cfg_attr(test, assert_instr(fmaxnmv))]
13677pub fn vmaxnmv_f16(a: float16x4_t) -> f16 {
13678    unsafe { simd_reduce_max(a) }
13679}
13680#[doc = "Floating-point maximum number across vector"]
13681#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmvq_f16)"]
13682#[inline(always)]
13683#[target_feature(enable = "neon,fp16")]
13684#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
13685#[cfg(not(target_arch = "arm64ec"))]
13686#[cfg_attr(test, assert_instr(fmaxnmv))]
13687pub fn vmaxnmvq_f16(a: float16x8_t) -> f16 {
13688    unsafe { simd_reduce_max(a) }
13689}
13690#[doc = "Floating-point maximum number across vector"]
13691#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmv_f32)"]
13692#[inline(always)]
13693#[target_feature(enable = "neon")]
13694#[stable(feature = "neon_intrinsics", since = "1.59.0")]
13695#[cfg_attr(test, assert_instr(fmaxnmp))]
13696pub fn vmaxnmv_f32(a: float32x2_t) -> f32 {
13697    unsafe { simd_reduce_max(a) }
13698}
13699#[doc = "Floating-point maximum number across vector"]
13700#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmvq_f64)"]
13701#[inline(always)]
13702#[target_feature(enable = "neon")]
13703#[stable(feature = "neon_intrinsics", since = "1.59.0")]
13704#[cfg_attr(test, assert_instr(fmaxnmp))]
13705pub fn vmaxnmvq_f64(a: float64x2_t) -> f64 {
13706    unsafe { simd_reduce_max(a) }
13707}
13708#[doc = "Floating-point maximum number across vector"]
13709#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmvq_f32)"]
13710#[inline(always)]
13711#[target_feature(enable = "neon")]
13712#[stable(feature = "neon_intrinsics", since = "1.59.0")]
13713#[cfg_attr(test, assert_instr(fmaxnmv))]
13714pub fn vmaxnmvq_f32(a: float32x4_t) -> f32 {
13715    unsafe { simd_reduce_max(a) }
13716}
// f16 horizontal-max reductions. Unlike the f32/f64 `vmaxnm*` reductions,
// these bind the AArch64 LLVM intrinsic directly rather than going through
// `simd_reduce_max` — presumably a limitation of the generic reduction path
// for f16 vectors (TODO confirm against crates/stdarch-gen-arm/spec).
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmaxv))]
pub fn vmaxv_f16(a: float16x4_t) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f16.v4f16"
        )]
        fn _vmaxv_f16(a: float16x4_t) -> f16;
    }
    // SAFETY: the intrinsic takes and returns plain values; no pointers involved.
    unsafe { _vmaxv_f16(a) }
}
#[doc = "Floating-point maximum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmaxv))]
pub fn vmaxvq_f16(a: float16x8_t) -> f16 {
    // Eight-lane (quad-register) variant of the reduction above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f16.v8f16"
        )]
        fn _vmaxvq_f16(a: float16x8_t) -> f16;
    }
    unsafe { _vmaxvq_f16(a) }
}
// f32/f64 horizontal-max reductions, bound directly to the AArch64 LLVM
// `fmaxv` intrinsics. Note the two-lane variants (vmaxv_f32, vmaxvq_f64)
// assert the pairwise FMAXP instruction, while the four-lane vmaxvq_f32
// asserts FMAXV.
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn vmaxv_f32(a: float32x2_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f32.v2f32"
        )]
        fn _vmaxv_f32(a: float32x2_t) -> f32;
    }
    // SAFETY: value-only intrinsic call; no memory is accessed.
    unsafe { _vmaxv_f32(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxv))]
pub fn vmaxvq_f32(a: float32x4_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f32.v4f32"
        )]
        fn _vmaxvq_f32(a: float32x4_t) -> f32;
    }
    unsafe { _vmaxvq_f32(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn vmaxvq_f64(a: float64x2_t) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f64.v2f64"
        )]
        fn _vmaxvq_f64(a: float64x2_t) -> f64;
    }
    unsafe { _vmaxvq_f64(a) }
}
// Integer horizontal-max reductions over signed and unsigned 8/16/32-bit
// lanes. All are expressed through the generic `simd_reduce_max` intrinsic;
// the assert_instr attributes document the expected lowering: SMAXV/UMAXV
// for most widths, but the two-lane 32-bit variants (vmaxv_s32, vmaxv_u32)
// lower to the pairwise SMAXP/UMAXP instead.
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxv))]
pub fn vmaxv_s8(a: int8x8_t) -> i8 {
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxv))]
pub fn vmaxvq_s8(a: int8x16_t) -> i8 {
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxv))]
pub fn vmaxv_s16(a: int16x4_t) -> i16 {
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxv))]
pub fn vmaxvq_s16(a: int16x8_t) -> i16 {
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxp))]
pub fn vmaxv_s32(a: int32x2_t) -> i32 {
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxv))]
pub fn vmaxvq_s32(a: int32x4_t) -> i32 {
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxv))]
pub fn vmaxv_u8(a: uint8x8_t) -> u8 {
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxv))]
pub fn vmaxvq_u8(a: uint8x16_t) -> u8 {
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxv))]
pub fn vmaxv_u16(a: uint16x4_t) -> u16 {
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxv))]
pub fn vmaxvq_u16(a: uint16x8_t) -> u16 {
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxp))]
pub fn vmaxv_u32(a: uint32x2_t) -> u32 {
    unsafe { simd_reduce_max(a) }
}
#[doc = "Horizontal vector max."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxv))]
pub fn vmaxvq_u32(a: uint32x4_t) -> u32 {
    unsafe { simd_reduce_max(a) }
}
// Element-wise f64 minimum, bound directly to the AArch64 LLVM `fmin`
// intrinsics (one- and two-lane variants).
#[doc = "Minimum (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmin))]
pub fn vmin_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmin.v1f64"
        )]
        fn _vmin_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    // SAFETY: value-only intrinsic call; no memory is accessed.
    unsafe { _vmin_f64(a, b) }
}
#[doc = "Minimum (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmin))]
pub fn vminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmin.v2f64"
        )]
        fn _vminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    unsafe { _vminq_f64(a, b) }
}
#[doc = "Minimum (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmin))]
pub fn vminh_f16(a: f16, b: f16) -> f16 {
    // Scalar f16 minimum via the AArch64 LLVM `fmin.f16` intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmin.f16"
        )]
        fn _vminh_f16(a: f16, b: f16) -> f16;
    }
    unsafe { _vminh_f16(a, b) }
}
// FMINNM ("minimum number") f64 variants, expressed via the generic
// `simd_fmin` intrinsic rather than an LLVM link_name binding.
#[doc = "Floating-point Minimum Number (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnm_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminnm))]
pub fn vminnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    unsafe { simd_fmin(a, b) }
}
#[doc = "Floating-point Minimum Number (vector)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminnm))]
pub fn vminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    unsafe { simd_fmin(a, b) }
}
#[doc = "Floating-point Minimum Number"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fminnm))]
pub fn vminnmh_f16(a: f16, b: f16) -> f16 {
    // Scalar variant implemented with `f16::min`; expected to lower to
    // FMINNM (see assert_instr above).
    f16::min(a, b)
}
// "Minimum number across vector" reductions, all via `simd_reduce_min`.
// The two-lane variants (vminnmv_f32, vminnmvq_f64) assert the pairwise
// FMINNMP instruction; the wider variants assert FMINNMV.
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmv_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fminnmv))]
pub fn vminnmv_f16(a: float16x4_t) -> f16 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmvq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fminnmv))]
pub fn vminnmvq_f16(a: float16x8_t) -> f16 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmv_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vminnmv_f32(a: float32x2_t) -> f32 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmvq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vminnmvq_f64(a: float64x2_t) -> f64 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmvq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmv))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vminnmvq_f32(a: float32x4_t) -> f32 {
    unsafe { simd_reduce_min(a) }
}
// Floating-point horizontal-min reductions bound directly to the AArch64
// LLVM `fminv` intrinsics (mirror of the `fmaxv` family above). Two-lane
// variants assert pairwise FMINP; wider ones assert FMINV.
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fminv))]
pub fn vminv_f16(a: float16x4_t) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminv.f16.v4f16"
        )]
        fn _vminv_f16(a: float16x4_t) -> f16;
    }
    // SAFETY: value-only intrinsic call; no memory is accessed.
    unsafe { _vminv_f16(a) }
}
#[doc = "Floating-point minimum number across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fminv))]
pub fn vminvq_f16(a: float16x8_t) -> f16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminv.f16.v8f16"
        )]
        fn _vminvq_f16(a: float16x8_t) -> f16;
    }
    unsafe { _vminvq_f16(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminp))]
pub fn vminv_f32(a: float32x2_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminv.f32.v2f32"
        )]
        fn _vminv_f32(a: float32x2_t) -> f32;
    }
    unsafe { _vminv_f32(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminv))]
pub fn vminvq_f32(a: float32x4_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminv.f32.v4f32"
        )]
        fn _vminvq_f32(a: float32x4_t) -> f32;
    }
    unsafe { _vminvq_f32(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminp))]
pub fn vminvq_f64(a: float64x2_t) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminv.f64.v2f64"
        )]
        fn _vminvq_f64(a: float64x2_t) -> f64;
    }
    unsafe { _vminvq_f64(a) }
}
// Integer horizontal-min reductions (mirror of the max family above).
// All use `simd_reduce_min`; the two-lane 32-bit variants assert pairwise
// SMINP/UMINP, the rest SMINV/UMINV.
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminv))]
pub fn vminv_s8(a: int8x8_t) -> i8 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminv))]
pub fn vminvq_s8(a: int8x16_t) -> i8 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminv))]
pub fn vminv_s16(a: int16x4_t) -> i16 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminv))]
pub fn vminvq_s16(a: int16x8_t) -> i16 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminp))]
pub fn vminv_s32(a: int32x2_t) -> i32 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminv))]
pub fn vminvq_s32(a: int32x4_t) -> i32 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminv))]
pub fn vminv_u8(a: uint8x8_t) -> u8 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminv))]
pub fn vminvq_u8(a: uint8x16_t) -> u8 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminv))]
pub fn vminv_u16(a: uint16x4_t) -> u16 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminv))]
pub fn vminvq_u16(a: uint16x8_t) -> u16 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminp))]
pub fn vminv_u32(a: uint32x2_t) -> u32 {
    unsafe { simd_reduce_min(a) }
}
#[doc = "Horizontal vector min."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminv))]
pub fn vminvq_u32(a: uint32x4_t) -> u32 {
    unsafe { simd_reduce_min(a) }
}
// Floating-point multiply-add to accumulator: a + (b * c), computed as an
// explicit multiply followed by an add — NOT a fused FMA; the test asserts
// a plain FMUL instruction.
#[doc = "Floating-point multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmla_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t {
    unsafe { simd_add(a, simd_mul(b, c)) }
}
#[doc = "Floating-point multiply-add to accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    unsafe { simd_add(a, simd_mul(b, c)) }
}
// Lane variants of signed multiply-add long (high half): the selected lane
// of `c` is broadcast to all eight 16-bit lanes with `simd_shuffle!`, then
// the splatted vector is handed to vmlal_high_s16. `LANE` is bounds-checked
// at compile time (2 bits for the 4-lane `c`, 3 bits for the 8-lane `c`).
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_lane_s16<const LANE: i32>(a: int32x4_t, b: int16x8_t, c: int16x4_t) -> int32x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        vmlal_high_s16(
            a,
            b,
            simd_shuffle!(
                c,
                c,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_laneq_s16<const LANE: i32>(
    a: int32x4_t,
    b: int16x8_t,
    c: int16x8_t,
) -> int32x4_t {
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        vmlal_high_s16(
            a,
            b,
            simd_shuffle!(
                c,
                c,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
// 32-bit lane variants: broadcast lane LANE of `c` to all four 32-bit lanes
// and delegate to vmlal_high_s32. LANE is compile-time bounds-checked
// (1 bit for the 2-lane `c`, 2 bits for the 4-lane `c`).
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_lane_s32<const LANE: i32>(a: int64x2_t, b: int32x4_t, c: int32x2_t) -> int64x2_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        vmlal_high_s32(
            a,
            b,
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_laneq_s32<const LANE: i32>(
    a: int64x2_t,
    b: int32x4_t,
    c: int32x4_t,
) -> int64x2_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        vmlal_high_s32(
            a,
            b,
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
// Unsigned counterparts of the 16-bit lane variants above: splat lane LANE
// of `c` across eight lanes, then delegate to vmlal_high_u16.
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_lane_u16<const LANE: i32>(
    a: uint32x4_t,
    b: uint16x8_t,
    c: uint16x4_t,
) -> uint32x4_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        vmlal_high_u16(
            a,
            b,
            simd_shuffle!(
                c,
                c,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_laneq_u16<const LANE: i32>(
    a: uint32x4_t,
    b: uint16x8_t,
    c: uint16x8_t,
) -> uint32x4_t {
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        vmlal_high_u16(
            a,
            b,
            simd_shuffle!(
                c,
                c,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
// Unsigned counterparts of the 32-bit lane variants: splat lane LANE of `c`
// across four lanes, then delegate to vmlal_high_u32.
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_lane_u32<const LANE: i32>(
    a: uint64x2_t,
    b: uint32x4_t,
    c: uint32x2_t,
) -> uint64x2_t {
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        vmlal_high_u32(
            a,
            b,
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_laneq_u32<const LANE: i32>(
    a: uint64x2_t,
    b: uint32x4_t,
    c: uint32x4_t,
) -> uint64x2_t {
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        vmlal_high_u32(
            a,
            b,
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
// Scalar ("_n_") variants: duplicate the scalar `c` into every lane with the
// matching vdupq_n_* and delegate to the vector form.
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t {
    vmlal_high_s16(a, b, vdupq_n_s16(c))
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t {
    vmlal_high_s32(a, b, vdupq_n_s32(c))
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_n_u16(a: uint32x4_t, b: uint16x8_t, c: u16) -> uint32x4_t {
    vmlal_high_u16(a, b, vdupq_n_u16(c))
}
#[doc = "Multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_n_u32(a: uint64x2_t, b: uint32x4_t, c: u32) -> uint64x2_t {
    vmlal_high_u32(a, b, vdupq_n_u32(c))
}
#[doc = "Signed multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t {
    // SAFETY: `simd_shuffle!` uses compile-time constant indices 8..=15,
    // all in range for a 16-lane vector; they select the high half.
    unsafe {
        let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let c: int8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
        // Widening multiply-accumulate on the extracted high halves.
        vmlal_s8(a, b, c)
    }
}
#[doc = "Signed multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    // SAFETY: constant shuffle indices 4..=7 are in range for an 8-lane
    // vector; they select the high half of each input.
    unsafe {
        let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let c: int16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
        // Widening multiply-accumulate on the extracted high halves.
        vmlal_s16(a, b, c)
    }
}
#[doc = "Signed multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    // SAFETY: constant shuffle indices 2..=3 are in range for a 4-lane
    // vector; they select the high half of each input.
    unsafe {
        let b: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        let c: int32x2_t = simd_shuffle!(c, c, [2, 3]);
        // Widening multiply-accumulate on the extracted high halves.
        vmlal_s32(a, b, c)
    }
}
#[doc = "Unsigned multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t {
    // SAFETY: constant shuffle indices 8..=15 are in range for a 16-lane
    // vector; they select the high half of each input.
    unsafe {
        let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let c: uint8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
        // Widening multiply-accumulate on the extracted high halves.
        vmlal_u8(a, b, c)
    }
}
#[doc = "Unsigned multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t {
    // SAFETY: constant shuffle indices 4..=7 are in range for an 8-lane
    // vector; they select the high half of each input.
    unsafe {
        let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let c: uint16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
        // Widening multiply-accumulate on the extracted high halves.
        vmlal_u16(a, b, c)
    }
}
#[doc = "Unsigned multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlal_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t {
    // SAFETY: constant shuffle indices 2..=3 are in range for a 4-lane
    // vector; they select the high half of each input.
    unsafe {
        let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
        let c: uint32x2_t = simd_shuffle!(c, c, [2, 3]);
        // Widening multiply-accumulate on the extracted high halves.
        vmlal_u32(a, b, c)
    }
}
#[doc = "Floating-point multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmls_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t {
    // Computes a - (b * c) as a separate multiply and subtract (the codegen
    // test asserts `fmul`, not a fused multiply-subtract instruction).
    // SAFETY: `simd_mul`/`simd_sub` on matching vector types are sound.
    unsafe { simd_sub(a, simd_mul(b, c)) }
}
#[doc = "Floating-point multiply-subtract from accumulator"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t {
    // Computes a - (b * c) per lane as a separate multiply and subtract (the
    // codegen test asserts `fmul`, not a fused multiply-subtract instruction).
    // SAFETY: `simd_mul`/`simd_sub` on matching vector types are sound.
    unsafe { simd_sub(a, simd_mul(b, c)) }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_lane_s16<const LANE: i32>(a: int32x4_t, b: int16x8_t, c: int16x4_t) -> int32x4_t {
    // LANE must fit in 2 bits (0..=3): `c` has 4 lanes.
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: LANE is compile-time checked above, so every shuffle index is
    // in range; the shuffle broadcasts lane LANE of `c` to all 8 lanes.
    unsafe {
        vmlsl_high_s16(
            a,
            b,
            simd_shuffle!(
                c,
                c,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_laneq_s16<const LANE: i32>(
    a: int32x4_t,
    b: int16x8_t,
    c: int16x8_t,
) -> int32x4_t {
    // LANE must fit in 3 bits (0..=7): `c` has 8 lanes.
    static_assert_uimm_bits!(LANE, 3);
    // SAFETY: LANE is compile-time checked above, so every shuffle index is
    // in range; the shuffle broadcasts lane LANE of `c` to all 8 lanes.
    unsafe {
        vmlsl_high_s16(
            a,
            b,
            simd_shuffle!(
                c,
                c,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_lane_s32<const LANE: i32>(a: int64x2_t, b: int32x4_t, c: int32x2_t) -> int64x2_t {
    // LANE must fit in 1 bit (0..=1): `c` has 2 lanes.
    static_assert_uimm_bits!(LANE, 1);
    // SAFETY: LANE is compile-time checked above, so every shuffle index is
    // in range; the shuffle broadcasts lane LANE of `c` to all 4 lanes.
    unsafe {
        vmlsl_high_s32(
            a,
            b,
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_laneq_s32<const LANE: i32>(
    a: int64x2_t,
    b: int32x4_t,
    c: int32x4_t,
) -> int64x2_t {
    // LANE must fit in 2 bits (0..=3): `c` has 4 lanes.
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: LANE is compile-time checked above, so every shuffle index is
    // in range; the shuffle broadcasts lane LANE of `c` to all 4 lanes.
    unsafe {
        vmlsl_high_s32(
            a,
            b,
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_lane_u16<const LANE: i32>(
    a: uint32x4_t,
    b: uint16x8_t,
    c: uint16x4_t,
) -> uint32x4_t {
    // LANE must fit in 2 bits (0..=3): `c` has 4 lanes.
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: LANE is compile-time checked above, so every shuffle index is
    // in range; the shuffle broadcasts lane LANE of `c` to all 8 lanes.
    unsafe {
        vmlsl_high_u16(
            a,
            b,
            simd_shuffle!(
                c,
                c,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_laneq_u16<const LANE: i32>(
    a: uint32x4_t,
    b: uint16x8_t,
    c: uint16x8_t,
) -> uint32x4_t {
    // LANE must fit in 3 bits (0..=7): `c` has 8 lanes.
    static_assert_uimm_bits!(LANE, 3);
    // SAFETY: LANE is compile-time checked above, so every shuffle index is
    // in range; the shuffle broadcasts lane LANE of `c` to all 8 lanes.
    unsafe {
        vmlsl_high_u16(
            a,
            b,
            simd_shuffle!(
                c,
                c,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_lane_u32<const LANE: i32>(
    a: uint64x2_t,
    b: uint32x4_t,
    c: uint32x2_t,
) -> uint64x2_t {
    // LANE must fit in 1 bit (0..=1): `c` has 2 lanes.
    static_assert_uimm_bits!(LANE, 1);
    // SAFETY: LANE is compile-time checked above, so every shuffle index is
    // in range; the shuffle broadcasts lane LANE of `c` to all 4 lanes.
    unsafe {
        vmlsl_high_u32(
            a,
            b,
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_laneq_u32<const LANE: i32>(
    a: uint64x2_t,
    b: uint32x4_t,
    c: uint32x4_t,
) -> uint64x2_t {
    // LANE must fit in 2 bits (0..=3): `c` has 4 lanes.
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: LANE is compile-time checked above, so every shuffle index is
    // in range; the shuffle broadcasts lane LANE of `c` to all 4 lanes.
    unsafe {
        vmlsl_high_u32(
            a,
            b,
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t {
    // Broadcast the scalar `c` to all 8 lanes, then delegate to the
    // vector-by-vector widening multiply-subtract on the high half of `b`.
    vmlsl_high_s16(a, b, vdupq_n_s16(c))
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t {
    // Broadcast the scalar `c` to all 4 lanes, then delegate to the
    // vector-by-vector widening multiply-subtract on the high half of `b`.
    vmlsl_high_s32(a, b, vdupq_n_s32(c))
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_n_u16(a: uint32x4_t, b: uint16x8_t, c: u16) -> uint32x4_t {
    // Broadcast the scalar `c` to all 8 lanes, then delegate to the
    // vector-by-vector widening multiply-subtract on the high half of `b`.
    vmlsl_high_u16(a, b, vdupq_n_u16(c))
}
#[doc = "Multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_n_u32(a: uint64x2_t, b: uint32x4_t, c: u32) -> uint64x2_t {
    // Broadcast the scalar `c` to all 4 lanes, then delegate to the
    // vector-by-vector widening multiply-subtract on the high half of `b`.
    vmlsl_high_u32(a, b, vdupq_n_u32(c))
}
#[doc = "Signed multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t {
    // SAFETY: constant shuffle indices 8..=15 are in range for a 16-lane
    // vector; they select the high half of each input.
    unsafe {
        let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let c: int8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
        // Widening multiply-subtract on the extracted high halves.
        vmlsl_s8(a, b, c)
    }
}
#[doc = "Signed multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    // SAFETY: constant shuffle indices 4..=7 are in range for an 8-lane
    // vector; they select the high half of each input.
    unsafe {
        let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let c: int16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
        // Widening multiply-subtract on the extracted high halves.
        vmlsl_s16(a, b, c)
    }
}
#[doc = "Signed multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    // SAFETY: constant shuffle indices 2..=3 are in range for a 4-lane
    // vector; they select the high half of each input.
    unsafe {
        let b: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        let c: int32x2_t = simd_shuffle!(c, c, [2, 3]);
        // Widening multiply-subtract on the extracted high halves.
        vmlsl_s32(a, b, c)
    }
}
#[doc = "Unsigned multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t {
    // SAFETY: constant shuffle indices 8..=15 are in range for a 16-lane
    // vector; they select the high half of each input.
    unsafe {
        let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let c: uint8x8_t = simd_shuffle!(c, c, [8, 9, 10, 11, 12, 13, 14, 15]);
        // Widening multiply-subtract on the extracted high halves.
        vmlsl_u8(a, b, c)
    }
}
#[doc = "Unsigned multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t {
    // SAFETY: constant shuffle indices 4..=7 are in range for an 8-lane
    // vector; they select the high half of each input.
    unsafe {
        let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let c: uint16x4_t = simd_shuffle!(c, c, [4, 5, 6, 7]);
        // Widening multiply-subtract on the extracted high halves.
        vmlsl_u16(a, b, c)
    }
}
#[doc = "Unsigned multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmlsl_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t {
    // SAFETY: constant shuffle indices 2..=3 are in range for a 4-lane
    // vector; they select the high half of each input.
    unsafe {
        let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
        let c: uint32x2_t = simd_shuffle!(c, c, [2, 3]);
        // Widening multiply-subtract on the extracted high halves.
        vmlsl_u32(a, b, c)
    }
}
#[doc = "Vector move"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sxtl2))]
pub fn vmovl_high_s8(a: int8x16_t) -> int16x8_t {
    // SAFETY: constant shuffle indices 8..=15 are in range for a 16-lane
    // vector; they select the high half, which `vmovl_s8` then widens.
    unsafe {
        let a: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmovl_s8(a)
    }
}
#[doc = "Vector move"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sxtl2))]
pub fn vmovl_high_s16(a: int16x8_t) -> int32x4_t {
    // SAFETY: constant shuffle indices 4..=7 are in range for an 8-lane
    // vector; they select the high half, which `vmovl_s16` then widens.
    unsafe {
        let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        vmovl_s16(a)
    }
}
#[doc = "Vector move"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sxtl2))]
pub fn vmovl_high_s32(a: int32x4_t) -> int64x2_t {
    // SAFETY: constant shuffle indices 2..=3 are in range for a 4-lane
    // vector; they select the high half, which `vmovl_s32` then widens.
    unsafe {
        let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        vmovl_s32(a)
    }
}
#[doc = "Vector move"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uxtl2))]
pub fn vmovl_high_u8(a: uint8x16_t) -> uint16x8_t {
    // SAFETY: constant shuffle indices 8..=15 are in range for a 16-lane
    // vector; they select the high half, which `vmovl_u8` then widens.
    unsafe {
        let a: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmovl_u8(a)
    }
}
#[doc = "Vector move"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uxtl2))]
pub fn vmovl_high_u16(a: uint16x8_t) -> uint32x4_t {
    // SAFETY: constant shuffle indices 4..=7 are in range for an 8-lane
    // vector; they select the high half, which `vmovl_u16` then widens.
    unsafe {
        let a: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        vmovl_u16(a)
    }
}
#[doc = "Vector move"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uxtl2))]
pub fn vmovl_high_u32(a: uint32x4_t) -> uint64x2_t {
    // SAFETY: constant shuffle indices 2..=3 are in range for a 4-lane
    // vector; they select the high half, which `vmovl_u32` then widens.
    unsafe {
        let a: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
        vmovl_u32(a)
    }
}
#[doc = "Extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(xtn2))]
pub fn vmovn_high_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    // SAFETY: `simd_cast` narrows each lane of `b`; the shuffle indices
    // 0..=15 are in range and concatenate `a` (low half) with `c` (high half).
    unsafe {
        let c: int8x8_t = simd_cast(b);
        simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
    }
}
#[doc = "Extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(xtn2))]
pub fn vmovn_high_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    // SAFETY: `simd_cast` narrows each lane of `b`; the shuffle indices
    // 0..=7 are in range and concatenate `a` (low half) with `c` (high half).
    unsafe {
        let c: int16x4_t = simd_cast(b);
        simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7])
    }
}
#[doc = "Extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(xtn2))]
pub fn vmovn_high_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    // SAFETY: `simd_cast` narrows each lane of `b`; the shuffle indices
    // 0..=3 are in range and concatenate `a` (low half) with `c` (high half).
    unsafe {
        let c: int32x2_t = simd_cast(b);
        simd_shuffle!(a, c, [0, 1, 2, 3])
    }
}
#[doc = "Extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(xtn2))]
pub fn vmovn_high_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
    // SAFETY: `simd_cast` narrows each lane of `b`; the shuffle indices
    // 0..=15 are in range and concatenate `a` (low half) with `c` (high half).
    unsafe {
        let c: uint8x8_t = simd_cast(b);
        simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
    }
}
#[doc = "Extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(xtn2))]
pub fn vmovn_high_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    // SAFETY: `simd_cast` narrows each lane of `b`; the shuffle indices
    // 0..=7 are in range and concatenate `a` (low half) with `c` (high half).
    unsafe {
        let c: uint16x4_t = simd_cast(b);
        simd_shuffle!(a, c, [0, 1, 2, 3, 4, 5, 6, 7])
    }
}
#[doc = "Extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(xtn2))]
pub fn vmovn_high_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    // SAFETY: `simd_cast` narrows each lane of `b`; the shuffle indices
    // 0..=3 are in range and concatenate `a` (low half) with `c` (high half).
    unsafe {
        let c: uint32x2_t = simd_cast(b);
        simd_shuffle!(a, c, [0, 1, 2, 3])
    }
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmul))]
pub fn vmul_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // Lane-wise multiply.
    // SAFETY: `simd_mul` on two vectors of identical type is sound.
    unsafe { simd_mul(a, b) }
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmul))]
pub fn vmulq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Lane-wise multiply.
    // SAFETY: `simd_mul` on two vectors of identical type is sound.
    unsafe { simd_mul(a, b) }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmul_lane_f64<const LANE: i32>(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // `b` is a single-lane vector, so the only valid lane index is 0.
    static_assert!(LANE == 0);
    // SAFETY: LANE is compile-time checked to be 0, a valid lane of `b`;
    // the extracted f64 is transmuted back into a 1-lane vector.
    unsafe { simd_mul(a, transmute::<f64, _>(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmul_laneq_f16<const LANE: i32>(a: float16x4_t, b: float16x8_t) -> float16x4_t {
    // LANE must fit in 3 bits (0..=7): `b` has 8 lanes.
    static_assert_uimm_bits!(LANE, 3);
    // SAFETY: LANE is compile-time checked above; the shuffle broadcasts
    // lane LANE of `b` to all 4 result lanes before the multiply.
    unsafe {
        simd_mul(
            a,
            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulq_laneq_f16<const LANE: i32>(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // LANE must fit in 3 bits (0..=7): `b` has 8 lanes.
    static_assert_uimm_bits!(LANE, 3);
    // SAFETY: LANE is compile-time checked above; the shuffle broadcasts
    // lane LANE of `b` to all 8 result lanes before the multiply.
    unsafe {
        simd_mul(
            a,
            simd_shuffle!(
                b,
                b,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmul_laneq_f64<const LANE: i32>(a: float64x1_t, b: float64x2_t) -> float64x1_t {
    // LANE must fit in 1 bit (0..=1): `b` has 2 lanes.
    static_assert_uimm_bits!(LANE, 1);
    // SAFETY: LANE is compile-time checked above; the extracted f64 is
    // transmuted back into a 1-lane vector for the multiply.
    unsafe { simd_mul(a, transmute::<f64, _>(simd_extract!(b, LANE as u32))) }
}
#[doc = "Vector multiply by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmul_n_f64(a: float64x1_t, b: f64) -> float64x1_t {
    // Broadcast the scalar `b`, then multiply lane-wise.
    // SAFETY: `simd_mul` on two vectors of identical type is sound.
    unsafe { simd_mul(a, vdup_n_f64(b)) }
}
#[doc = "Vector multiply by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulq_n_f64(a: float64x2_t, b: f64) -> float64x2_t {
    // Broadcast the scalar `b` to both lanes, then multiply lane-wise.
    // SAFETY: `simd_mul` on two vectors of identical type is sound.
    unsafe { simd_mul(a, vdupq_n_f64(b)) }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuld_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmuld_lane_f64<const LANE: i32>(a: f64, b: float64x1_t) -> f64 {
    // `b` is a single-lane vector, so the only valid lane index is 0.
    static_assert!(LANE == 0);
    // SAFETY: LANE is compile-time checked to be 0, a valid lane of `b`.
    unsafe {
        let b: f64 = simd_extract!(b, LANE as u32);
        // Scalar multiply of `a` by the extracted lane.
        a * b
    }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmul))]
pub fn vmulh_f16(a: f16, b: f16) -> f16 {
    // Scalar half-precision multiply. (The previous doc string said "Add",
    // but the body multiplies and the codegen test asserts `fmul`.)
    a * b
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulh_lane_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulh_lane_f16<const LANE: i32>(a: f16, b: float16x4_t) -> f16 {
    // LANE must fit in 2 bits (0..=3): `b` has 4 lanes.
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: LANE is compile-time checked above, so the extract is in range.
    unsafe {
        let b: f16 = simd_extract!(b, LANE as u32);
        // Scalar multiply of `a` by the extracted lane.
        a * b
    }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulh_laneq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulh_laneq_f16<const LANE: i32>(a: f16, b: float16x8_t) -> f16 {
    // LANE must fit in 3 bits (0..=7): `b` has 8 lanes.
    static_assert_uimm_bits!(LANE, 3);
    // SAFETY: LANE is compile-time checked above, so the extract is in range.
    unsafe {
        let b: f16 = simd_extract!(b, LANE as u32);
        // Scalar multiply of `a` by the extracted lane.
        a * b
    }
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x4_t) -> int32x4_t {
    // LANE must fit in 2 bits (0..=3): `b` has 4 lanes.
    static_assert_uimm_bits!(LANE, 2);
    // SAFETY: LANE is compile-time checked above, so every shuffle index is
    // in range; the shuffle broadcasts lane LANE of `b` to all 8 lanes.
    unsafe {
        vmull_high_s16(
            a,
            simd_shuffle!(
                b,
                b,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t) -> int32x4_t {
    // LANE must fit in 3 bits (0..=7) — `b` has 8 lanes.
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        // Broadcast the selected lane of `b` to all 8 lanes, then
        // widening-multiply with the high half of `a`.
        vmull_high_s16(
            a,
            simd_shuffle!(
                b,
                b,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x2_t) -> int64x2_t {
    // LANE must fit in 1 bit (0..=1) — `b` has 2 lanes.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast the selected lane of `b` to all 4 lanes, then
        // widening-multiply with the high half of `a`.
        vmull_high_s32(
            a,
            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t) -> int64x2_t {
    // LANE must fit in 2 bits (0..=3) — `b` has 4 lanes.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast the selected lane of `b` to all 4 lanes, then
        // widening-multiply with the high half of `a`.
        vmull_high_s32(
            a,
            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_lane_u16<const LANE: i32>(a: uint16x8_t, b: uint16x4_t) -> uint32x4_t {
    // LANE must fit in 2 bits (0..=3) — `b` has 4 lanes.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast the selected lane of `b` to all 8 lanes, then
        // widening-multiply with the high half of `a`.
        vmull_high_u16(
            a,
            simd_shuffle!(
                b,
                b,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_laneq_u16<const LANE: i32>(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
    // LANE must fit in 3 bits (0..=7) — `b` has 8 lanes.
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        // Broadcast the selected lane of `b` to all 8 lanes, then
        // widening-multiply with the high half of `a`.
        vmull_high_u16(
            a,
            simd_shuffle!(
                b,
                b,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_lane_u32<const LANE: i32>(a: uint32x4_t, b: uint32x2_t) -> uint64x2_t {
    // LANE must fit in 1 bit (0..=1) — `b` has 2 lanes.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast the selected lane of `b` to all 4 lanes, then
        // widening-multiply with the high half of `a`.
        vmull_high_u32(
            a,
            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umull2, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_laneq_u32<const LANE: i32>(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
    // LANE must fit in 2 bits (0..=3) — `b` has 4 lanes.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast the selected lane of `b` to all 4 lanes, then
        // widening-multiply with the high half of `a`.
        vmull_high_u32(
            a,
            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_n_s16(a: int16x8_t, b: i16) -> int32x4_t {
    // Duplicate scalar `b` across all lanes, then widening-multiply
    // with the high half of `a`.
    vmull_high_s16(a, vdupq_n_s16(b))
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(smull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_n_s32(a: int32x4_t, b: i32) -> int64x2_t {
    // Duplicate scalar `b` across all lanes, then widening-multiply
    // with the high half of `a`.
    vmull_high_s32(a, vdupq_n_s32(b))
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_n_u16(a: uint16x8_t, b: u16) -> uint32x4_t {
    // Duplicate scalar `b` across all lanes, then widening-multiply
    // with the high half of `a`.
    vmull_high_u16(a, vdupq_n_u16(b))
}
#[doc = "Multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(umull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmull_high_n_u32(a: uint32x4_t, b: u32) -> uint64x2_t {
    // Duplicate scalar `b` across all lanes, then widening-multiply
    // with the high half of `a`.
    vmull_high_u32(a, vdupq_n_u32(b))
}
#[doc = "Polynomial multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_p64)"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(pmull2))]
pub fn vmull_high_p64(a: poly64x2_t, b: poly64x2_t) -> p128 {
    // Polynomial (carry-less) multiply of the upper (lane 1) 64-bit
    // elements of `a` and `b`, producing a 128-bit result.
    unsafe { vmull_p64(simd_extract!(a, 1), simd_extract!(b, 1)) }
}
#[doc = "Polynomial multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(pmull2))]
pub fn vmull_high_p8(a: poly8x16_t, b: poly8x16_t) -> poly16x8_t {
    unsafe {
        // Select the high halves (lanes 8..=15) of both inputs, then
        // polynomial-multiply-long the two 8-lane halves.
        let a: poly8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        let b: poly8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmull_p8(a, b)
    }
}
#[doc = "Signed multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smull2))]
pub fn vmull_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t {
    unsafe {
        // Select the high halves (lanes 8..=15) of both inputs, then
        // widening-multiply the two 8-lane halves.
        let a: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        let b: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmull_s8(a, b)
    }
}
#[doc = "Signed multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smull2))]
pub fn vmull_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
    unsafe {
        // Select the high halves (lanes 4..=7) of both inputs, then
        // widening-multiply the two 4-lane halves.
        let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        vmull_s16(a, b)
    }
}
#[doc = "Signed multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smull2))]
pub fn vmull_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
    unsafe {
        // Select the high halves (lanes 2..=3) of both inputs, then
        // widening-multiply the two 2-lane halves.
        let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        let b: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        vmull_s32(a, b)
    }
}
#[doc = "Unsigned multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umull2))]
pub fn vmull_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t {
    unsafe {
        // Select the high halves (lanes 8..=15) of both inputs, then
        // widening-multiply the two 8-lane halves.
        let a: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        let b: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        vmull_u8(a, b)
    }
}
#[doc = "Unsigned multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umull2))]
pub fn vmull_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
    unsafe {
        // Select the high halves (lanes 4..=7) of both inputs, then
        // widening-multiply the two 4-lane halves.
        let a: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let b: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        vmull_u16(a, b)
    }
}
#[doc = "Unsigned multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umull2))]
pub fn vmull_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
    unsafe {
        // Select the high halves (lanes 2..=3) of both inputs, then
        // widening-multiply the two 2-lane halves.
        let a: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
        let b: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
        vmull_u32(a, b)
    }
}
#[doc = "Polynomial multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_p64)"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(pmull))]
pub fn vmull_p64(a: p64, b: p64) -> p128 {
    // Forwarded to the LLVM PMULL intrinsic, which returns its 128-bit
    // result as an int8x16_t; reinterpret it as p128 for the caller.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.pmull64"
        )]
        fn _vmull_p64(a: p64, b: p64) -> int8x16_t;
    }
    unsafe { transmute(_vmull_p64(a, b)) }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulq_lane_f64<const LANE: i32>(a: float64x2_t, b: float64x1_t) -> float64x2_t {
    // `b` is a 1-lane vector, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    // Broadcast the lane across both result lanes and multiply element-wise.
    unsafe { simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulq_laneq_f64<const LANE: i32>(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // LANE must fit in 1 bit (0..=1) — `b` has 2 lanes.
    static_assert_uimm_bits!(LANE, 1);
    // Broadcast the selected lane across both result lanes and multiply element-wise.
    unsafe { simd_mul(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuls_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmuls_lane_f32<const LANE: i32>(a: f32, b: float32x2_t) -> f32 {
    // LANE must fit in 1 bit (0..=1) — `b` has 2 lanes.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Extract the selected lane of `b` and multiply it with the scalar `a`.
        let b: f32 = simd_extract!(b, LANE as u32);
        a * b
    }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuls_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmuls_laneq_f32<const LANE: i32>(a: f32, b: float32x4_t) -> f32 {
    // LANE must fit in 2 bits (0..=3) — `b` has 4 lanes.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Extract the selected lane of `b` and multiply it with the scalar `a`.
        let b: f32 = simd_extract!(b, LANE as u32);
        a * b
    }
}
#[doc = "Floating-point multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuld_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmul, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmuld_laneq_f64<const LANE: i32>(a: f64, b: float64x2_t) -> f64 {
    // LANE must fit in 1 bit (0..=1) — `b` has 2 lanes.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Extract the selected lane of `b` and multiply it with the scalar `a`.
        let b: f64 = simd_extract!(b, LANE as u32);
        a * b
    }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulx_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // Thin wrapper over LLVM's FMULX intrinsic for 4 x f16.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.v4f16"
        )]
        fn _vmulx_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
    }
    unsafe { _vmulx_f16(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // Thin wrapper over LLVM's FMULX intrinsic for 8 x f16.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.v8f16"
        )]
        fn _vmulxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    unsafe { _vmulxq_f16(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulx_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // Thin wrapper over LLVM's FMULX intrinsic for 2 x f32.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.v2f32"
        )]
        fn _vmulx_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    unsafe { _vmulx_f32(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // Thin wrapper over LLVM's FMULX intrinsic for 4 x f32.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.v4f32"
        )]
        fn _vmulxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    unsafe { _vmulxq_f32(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulx_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // Thin wrapper over LLVM's FMULX intrinsic for 1 x f64.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.v1f64"
        )]
        fn _vmulx_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    unsafe { _vmulx_f64(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Thin wrapper over LLVM's FMULX intrinsic for 2 x f64.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.v2f64"
        )]
        fn _vmulxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    unsafe { _vmulxq_f64(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_lane_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulx_lane_f16<const LANE: i32>(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // LANE must fit in 2 bits (0..=3) — `b` has 4 lanes.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast the selected lane of `b` to all 4 lanes, then FMULX.
        vmulx_f16(
            a,
            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_laneq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulx_laneq_f16<const LANE: i32>(a: float16x4_t, b: float16x8_t) -> float16x4_t {
    // LANE must fit in 3 bits (0..=7) — `b` has 8 lanes.
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        // Broadcast the selected lane of `b` into a 4-lane vector, then FMULX.
        vmulx_f16(
            a,
            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_lane_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulxq_lane_f16<const LANE: i32>(a: float16x8_t, b: float16x4_t) -> float16x8_t {
    // LANE must fit in 2 bits (0..=3) — `b` has 4 lanes.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast the selected lane of `b` into an 8-lane vector, then FMULX.
        vmulxq_f16(
            a,
            simd_shuffle!(
                b,
                b,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_laneq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulxq_laneq_f16<const LANE: i32>(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // LANE must fit in 3 bits (0..=7) — `b` has 8 lanes.
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        // Broadcast the selected lane of `b` to all 8 lanes, then FMULX.
        vmulxq_f16(
            a,
            simd_shuffle!(
                b,
                b,
                [
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32,
                    LANE as u32
                ]
            ),
        )
    }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulx_lane_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // LANE must fit in 1 bit (0..=1) — `b` has 2 lanes.
    static_assert_uimm_bits!(LANE, 1);
    // Broadcast the selected lane of `b` to both lanes, then FMULX.
    unsafe { vmulx_f32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulx_laneq_f32<const LANE: i32>(a: float32x2_t, b: float32x4_t) -> float32x2_t {
    // LANE must fit in 2 bits (0..=3) — `b` has 4 lanes.
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast the selected lane of `b` into a 2-lane vector, then FMULX.
    unsafe { vmulx_f32(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxq_lane_f32<const LANE: i32>(a: float32x4_t, b: float32x2_t) -> float32x4_t {
    // LANE must fit in 1 bit (0..=1) — `b` has 2 lanes.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast the selected lane of `b` into a 4-lane vector, then FMULX.
        vmulxq_f32(
            a,
            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxq_laneq_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // LANE must fit in 2 bits (0..=3) — `b` has 4 lanes.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast the selected lane of `b` to all 4 lanes, then FMULX.
        vmulxq_f32(
            a,
            simd_shuffle!(b, b, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]),
        )
    }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxq_laneq_f64<const LANE: i32>(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // LANE must fit in 1 bit (0..=1) — `b` has 2 lanes.
    static_assert_uimm_bits!(LANE, 1);
    // Broadcast the selected lane of `b` to both lanes, then FMULX.
    unsafe { vmulxq_f64(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulx_lane_f64<const LANE: i32>(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // `b` is a 1-lane vector, so only LANE == 0 is valid.
    static_assert!(LANE == 0);
    // Extract the f64 lane, rebuild a 1-lane vector via transmute, then FMULX.
    unsafe { vmulx_f64(a, transmute::<f64, _>(simd_extract!(b, LANE as u32))) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulx_laneq_f64<const LANE: i32>(a: float64x1_t, b: float64x2_t) -> float64x1_t {
    // LANE must fit in 1 bit (0..=1) — `b` has 2 lanes.
    static_assert_uimm_bits!(LANE, 1);
    // Extract the f64 lane, rebuild a 1-lane vector via transmute, then FMULX.
    unsafe { vmulx_f64(a, transmute::<f64, _>(simd_extract!(b, LANE as u32))) }
}
#[doc = "Vector multiply by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_n_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmulx))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulx_n_f16(a: float16x4_t, b: f16) -> float16x4_t {
    // Duplicate scalar `b` across all 4 lanes, then FMULX.
    vmulx_f16(a, vdup_n_f16(b))
}
#[doc = "Vector multiply by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_n_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmulx))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulxq_n_f16(a: float16x8_t, b: f16) -> float16x8_t {
    // Duplicate scalar `b` across all 8 lanes, then FMULX.
    vmulxq_f16(a, vdupq_n_f16(b))
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulxd_f64(a: f64, b: f64) -> f64 {
    // Thin wrapper over LLVM's scalar FMULX intrinsic for f64.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.f64"
        )]
        fn _vmulxd_f64(a: f64, b: f64) -> f64;
    }
    unsafe { _vmulxd_f64(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxs_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulxs_f32(a: f32, b: f32) -> f32 {
    // Thin wrapper over LLVM's scalar FMULX intrinsic for f32.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.f32"
        )]
        fn _vmulxs_f32(a: f32, b: f32) -> f32;
    }
    unsafe { _vmulxs_f32(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxd_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxd_lane_f64<const LANE: i32>(a: f64, b: float64x1_t) -> f64 {
    // A 1-lane vector has only lane 0; reject any other LANE at compile time.
    static_assert!(LANE == 0);
    // Extract that lane and defer to the scalar FMULX wrapper.
    unsafe { vmulxd_f64(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxd_laneq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxd_laneq_f64<const LANE: i32>(a: f64, b: float64x2_t) -> f64 {
    // LANE must fit in 1 bit (lanes 0..=1 of the 2-lane vector).
    static_assert_uimm_bits!(LANE, 1);
    // Extract the selected lane and defer to the scalar FMULX wrapper.
    unsafe { vmulxd_f64(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxs_lane_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxs_lane_f32<const LANE: i32>(a: f32, b: float32x2_t) -> f32 {
    // LANE must fit in 1 bit (lanes 0..=1 of the 2-lane vector).
    static_assert_uimm_bits!(LANE, 1);
    // Extract the selected lane and defer to the scalar FMULX wrapper.
    unsafe { vmulxs_f32(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxs_laneq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxs_laneq_f32<const LANE: i32>(a: f32, b: float32x4_t) -> f32 {
    // LANE must fit in 2 bits (lanes 0..=3 of the 4-lane vector).
    static_assert_uimm_bits!(LANE, 2);
    // Extract the selected lane and defer to the scalar FMULX wrapper.
    unsafe { vmulxs_f32(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn vmulxh_f16(a: f16, b: f16) -> f16 {
    // FFI binding to the scalar half-precision LLVM FMULX intrinsic; the
    // "unadjusted" ABI preserves LLVM's own argument/return lowering.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmulx.f16"
        )]
        fn _vmulxh_f16(a: f16, b: f16) -> f16;
    }
    // SAFETY: the required `neon,fp16` features are enabled via #[target_feature].
    unsafe { _vmulxh_f16(a, b) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxh_lane_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulxh_lane_f16<const LANE: i32>(a: f16, b: float16x4_t) -> f16 {
    // LANE must fit in 2 bits (lanes 0..=3 of the 4-lane vector).
    static_assert_uimm_bits!(LANE, 2);
    // Extract the selected lane and defer to the scalar FMULX wrapper.
    unsafe { vmulxh_f16(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxh_laneq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vmulxh_laneq_f16<const LANE: i32>(a: f16, b: float16x8_t) -> f16 {
    // LANE must fit in 3 bits (lanes 0..=7 of the 8-lane vector).
    static_assert_uimm_bits!(LANE, 3);
    // Extract the selected lane and defer to the scalar FMULX wrapper.
    unsafe { vmulxh_f16(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Floating-point multiply extended"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmulx, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vmulxq_lane_f64<const LANE: i32>(a: float64x2_t, b: float64x1_t) -> float64x2_t {
    // A 1-lane vector has only lane 0; reject any other LANE at compile time.
    static_assert!(LANE == 0);
    // Broadcast the selected lane of `b` to both lanes, then do a vector FMULX.
    unsafe { vmulxq_f64(a, simd_shuffle!(b, b, [LANE as u32, LANE as u32])) }
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fneg))]
pub fn vneg_f64(a: float64x1_t) -> float64x1_t {
    // Lane-wise negation via the portable simd_neg intrinsic (lowers to FNEG).
    unsafe { simd_neg(a) }
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fneg))]
pub fn vnegq_f64(a: float64x2_t) -> float64x2_t {
    // Lane-wise negation via the portable simd_neg intrinsic (lowers to FNEG).
    unsafe { simd_neg(a) }
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(neg))]
pub fn vneg_s64(a: int64x1_t) -> int64x1_t {
    // Lane-wise integer negation via the portable simd_neg intrinsic
    // (lowers to NEG; see vnegd_s64 for the scalar wrapping semantics).
    unsafe { simd_neg(a) }
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(neg))]
pub fn vnegq_s64(a: int64x2_t) -> int64x2_t {
    // Lane-wise integer negation via the portable simd_neg intrinsic
    // (lowers to NEG; see vnegd_s64 for the scalar wrapping semantics).
    unsafe { simd_neg(a) }
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(neg))]
pub fn vnegd_s64(a: i64) -> i64 {
    // wrapping_neg matches the NEG instruction's two's-complement behavior:
    // i64::MIN negates to itself rather than panicking on overflow.
    a.wrapping_neg()
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fneg))]
pub fn vnegh_f16(a: f16) -> f16 {
    // Plain scalar negation; the codegen test above expects this to lower to FNEG.
    -a
}
#[doc = "Floating-point add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vpaddd_f64(a: float64x2_t) -> f64 {
    // Pairwise add of a 2-lane vector is simply the sum of its two lanes.
    unsafe {
        let a1: f64 = simd_extract!(a, 0);
        let a2: f64 = simd_extract!(a, 1);
        a1 + a2
    }
}
#[doc = "Floating-point add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadds_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vpadds_f32(a: float32x2_t) -> f32 {
    // Pairwise add of a 2-lane vector is simply the sum of its two lanes.
    unsafe {
        let a1: f32 = simd_extract!(a, 0);
        let a2: f32 = simd_extract!(a, 1);
        a1 + a2
    }
}
#[doc = "Add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddd_s64(a: int64x2_t) -> i64 {
    // Ordered lane-sum starting from 0: for a 2-lane vector this is the
    // pairwise add of both lanes (lowers to ADDP).
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddd_u64(a: uint64x2_t) -> u64 {
    // Ordered lane-sum starting from 0: for a 2-lane vector this is the
    // pairwise add of both lanes (lowers to ADDP).
    unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Floating-point add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(faddp))]
pub fn vpaddq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // FFI binding to the LLVM FADDP intrinsic; the "unadjusted" ABI preserves
    // LLVM's own argument/return lowering for SIMD types.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.faddp.v8f16"
        )]
        fn _vpaddq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    // SAFETY: the required `neon,fp16` features are enabled via #[target_feature].
    unsafe { _vpaddq_f16(a, b) }
}
#[doc = "Floating-point add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(faddp))]
pub fn vpaddq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // FFI binding to the LLVM FADDP intrinsic; the "unadjusted" ABI preserves
    // LLVM's own argument/return lowering for SIMD types.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.faddp.v4f32"
        )]
        fn _vpaddq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    // SAFETY: the required `neon` feature is enabled via #[target_feature].
    unsafe { _vpaddq_f32(a, b) }
}
#[doc = "Floating-point add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(faddp))]
pub fn vpaddq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // FFI binding to the LLVM FADDP intrinsic; the "unadjusted" ABI preserves
    // LLVM's own argument/return lowering for SIMD types.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.faddp.v2f64"
        )]
        fn _vpaddq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: the required `neon` feature is enabled via #[target_feature].
    unsafe { _vpaddq_f64(a, b) }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    // FFI binding to the LLVM ADDP intrinsic; the "unadjusted" ABI preserves
    // LLVM's own argument/return lowering for SIMD types.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.addp.v16i8"
        )]
        fn _vpaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t;
    }
    // SAFETY: the required `neon` feature is enabled via #[target_feature].
    unsafe { _vpaddq_s8(a, b) }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    // FFI binding to the LLVM ADDP intrinsic; the "unadjusted" ABI preserves
    // LLVM's own argument/return lowering for SIMD types.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.addp.v8i16"
        )]
        fn _vpaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t;
    }
    // SAFETY: the required `neon` feature is enabled via #[target_feature].
    unsafe { _vpaddq_s16(a, b) }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    // FFI binding to the LLVM ADDP intrinsic; the "unadjusted" ABI preserves
    // LLVM's own argument/return lowering for SIMD types.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.addp.v4i32"
        )]
        fn _vpaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t;
    }
    // SAFETY: the required `neon` feature is enabled via #[target_feature].
    unsafe { _vpaddq_s32(a, b) }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    // FFI binding to the LLVM ADDP intrinsic; the "unadjusted" ABI preserves
    // LLVM's own argument/return lowering for SIMD types.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.addp.v2i64"
        )]
        fn _vpaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t;
    }
    // SAFETY: the required `neon` feature is enabled via #[target_feature].
    unsafe { _vpaddq_s64(a, b) }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // Unsigned variant delegates to the signed one: pairwise add is the same
    // bit-level operation, so a plain transmute of inputs/output suffices.
    unsafe { transmute(vpaddq_s8(transmute(a), transmute(b))) }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // Big-endian variant: reverse lane order on the inputs, delegate through
    // the signed implementation via transmute, then reverse the result back,
    // so the observable lane numbering matches the little-endian version.
    let a: uint8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    let b: uint8x16_t =
        unsafe { simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x16_t = transmute(vpaddq_s8(transmute(a), transmute(b)));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u16)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    // Unsigned variant delegates to the signed one: pairwise add is the same
    // bit-level operation, so a plain transmute of inputs/output suffices.
    unsafe { transmute(vpaddq_s16(transmute(a), transmute(b))) }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u16)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    // Big-endian variant: reverse lane order on the inputs, delegate through
    // the signed implementation via transmute, then reverse the result back,
    // so the observable lane numbering matches the little-endian version.
    let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let b: uint16x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint16x8_t = transmute(vpaddq_s16(transmute(a), transmute(b)));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u32)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    // Unsigned variant delegates to the signed one: pairwise add is the same
    // bit-level operation, so a plain transmute of inputs/output suffices.
    unsafe { transmute(vpaddq_s32(transmute(a), transmute(b))) }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u32)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    // Big-endian variant: reverse lane order on the inputs, delegate through
    // the signed implementation via transmute, then reverse the result back,
    // so the observable lane numbering matches the little-endian version.
    let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    let b: uint32x4_t = unsafe { simd_shuffle!(b, b, [3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint32x4_t = transmute(vpaddq_s32(transmute(a), transmute(b)));
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // Unsigned variant delegates to the signed one: pairwise add is the same
    // bit-level operation, so a plain transmute of inputs/output suffices.
    unsafe { transmute(vpaddq_s64(transmute(a), transmute(b))) }
}
#[doc = "Add Pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // Big-endian variant: reverse lane order on the inputs, delegate through
    // the signed implementation via transmute, then reverse the result back,
    // so the observable lane numbering matches the little-endian version.
    let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    let b: uint64x2_t = unsafe { simd_shuffle!(b, b, [1, 0]) };
    unsafe {
        let ret_val: uint64x2_t = transmute(vpaddq_s64(transmute(a), transmute(b)));
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
16353#[doc = "Floating-point add pairwise"]
16354#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_f16)"]
16355#[inline(always)]
16356#[target_feature(enable = "neon,fp16")]
16357#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
16358#[cfg(not(target_arch = "arm64ec"))]
16359#[cfg_attr(test, assert_instr(fmaxp))]
16360pub fn vpmax_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
16361    unsafe extern "unadjusted" {
16362        #[cfg_attr(
16363            any(target_arch = "aarch64", target_arch = "arm64ec"),
16364            link_name = "llvm.aarch64.neon.fmaxp.v4f16"
16365        )]
16366        fn _vpmax_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
16367    }
16368    unsafe { _vpmax_f16(a, b) }
16369}
16370#[doc = "Floating-point add pairwise"]
16371#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_f16)"]
16372#[inline(always)]
16373#[target_feature(enable = "neon,fp16")]
16374#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
16375#[cfg(not(target_arch = "arm64ec"))]
16376#[cfg_attr(test, assert_instr(fmaxp))]
16377pub fn vpmaxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
16378    unsafe extern "unadjusted" {
16379        #[cfg_attr(
16380            any(target_arch = "aarch64", target_arch = "arm64ec"),
16381            link_name = "llvm.aarch64.neon.fmaxp.v8f16"
16382        )]
16383        fn _vpmaxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
16384    }
16385    unsafe { _vpmaxq_f16(a, b) }
16386}
16387#[doc = "Floating-point add pairwise"]
16388#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnm_f16)"]
16389#[inline(always)]
16390#[target_feature(enable = "neon,fp16")]
16391#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
16392#[cfg(not(target_arch = "arm64ec"))]
16393#[cfg_attr(test, assert_instr(fmaxnmp))]
16394pub fn vpmaxnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
16395    unsafe extern "unadjusted" {
16396        #[cfg_attr(
16397            any(target_arch = "aarch64", target_arch = "arm64ec"),
16398            link_name = "llvm.aarch64.neon.fmaxnmp.v4f16"
16399        )]
16400        fn _vpmaxnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
16401    }
16402    unsafe { _vpmaxnm_f16(a, b) }
16403}
16404#[doc = "Floating-point add pairwise"]
16405#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmq_f16)"]
16406#[inline(always)]
16407#[target_feature(enable = "neon,fp16")]
16408#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
16409#[cfg(not(target_arch = "arm64ec"))]
16410#[cfg_attr(test, assert_instr(fmaxnmp))]
16411pub fn vpmaxnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
16412    unsafe extern "unadjusted" {
16413        #[cfg_attr(
16414            any(target_arch = "aarch64", target_arch = "arm64ec"),
16415            link_name = "llvm.aarch64.neon.fmaxnmp.v8f16"
16416        )]
16417        fn _vpmaxnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
16418    }
16419    unsafe { _vpmaxnmq_f16(a, b) }
16420}
#[doc = "Floating-point Maximum Number Pairwise (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnm_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // FFI binding to the LLVM FMAXNMP intrinsic; the "unadjusted" ABI
    // preserves LLVM's own argument/return lowering for SIMD types.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmp.v2f32"
        )]
        fn _vpmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    // SAFETY: the required `neon` feature is enabled via #[target_feature].
    unsafe { _vpmaxnm_f32(a, b) }
}
#[doc = "Floating-point Maximum Number Pairwise (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // FFI binding to the LLVM FMAXNMP intrinsic; the "unadjusted" ABI
    // preserves LLVM's own argument/return lowering for SIMD types.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmp.v4f32"
        )]
        fn _vpmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    // SAFETY: the required `neon` feature is enabled via #[target_feature].
    unsafe { _vpmaxnmq_f32(a, b) }
}
#[doc = "Floating-point Maximum Number Pairwise (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // FFI binding to the LLVM FMAXNMP intrinsic; the "unadjusted" ABI
    // preserves LLVM's own argument/return lowering for SIMD types.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmp.v2f64"
        )]
        fn _vpmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: the required `neon` feature is enabled via #[target_feature].
    unsafe { _vpmaxnmq_f64(a, b) }
}
#[doc = "Floating-point maximum number pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmqd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpmaxnmqd_f64(a: float64x2_t) -> f64 {
    // Implemented via the horizontal-reduction intrinsic (fmaxnmv): for a
    // 2-lane vector the reduction equals the pairwise maximum of both lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmv.f64.v2f64"
        )]
        fn _vpmaxnmqd_f64(a: float64x2_t) -> f64;
    }
    // SAFETY: the required `neon` feature is enabled via #[target_feature].
    unsafe { _vpmaxnmqd_f64(a) }
}
#[doc = "Floating-point maximum number pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnms_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fmaxnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpmaxnms_f32(a: float32x2_t) -> f32 {
    // Implemented via the horizontal-reduction intrinsic (fmaxnmv): for a
    // 2-lane vector the reduction equals the pairwise maximum of both lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxnmv.f32.v2f32"
        )]
        fn _vpmaxnms_f32(a: float32x2_t) -> f32;
    }
    // SAFETY: the required `neon` feature is enabled via #[target_feature].
    unsafe { _vpmaxnms_f32(a) }
}
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn vpmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // FFI binding to the LLVM FMAXP intrinsic; the "unadjusted" ABI preserves
    // LLVM's own argument/return lowering for SIMD types.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxp.v4f32"
        )]
        fn _vpmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    // SAFETY: the required `neon` feature is enabled via #[target_feature].
    unsafe { _vpmaxq_f32(a, b) }
}
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn vpmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // FFI binding to the LLVM FMAXP intrinsic; the "unadjusted" ABI preserves
    // LLVM's own argument/return lowering for SIMD types.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxp.v2f64"
        )]
        fn _vpmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: the required `neon` feature is enabled via #[target_feature].
    unsafe { _vpmaxq_f64(a, b) }
}
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxp))]
pub fn vpmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    // FFI binding to the LLVM SMAXP (signed pairwise maximum) intrinsic; the
    // "unadjusted" ABI preserves LLVM's own argument/return lowering.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.smaxp.v16i8"
        )]
        fn _vpmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t;
    }
    // SAFETY: the required `neon` feature is enabled via #[target_feature].
    unsafe { _vpmaxq_s8(a, b) }
}
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxp))]
pub fn vpmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    // FFI binding to the LLVM SMAXP (signed pairwise maximum) intrinsic; the
    // "unadjusted" ABI preserves LLVM's own argument/return lowering.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.smaxp.v8i16"
        )]
        fn _vpmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t;
    }
    // SAFETY: the required `neon` feature is enabled via #[target_feature].
    unsafe { _vpmaxq_s16(a, b) }
}
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(smaxp))]
pub fn vpmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    // FFI binding to the LLVM SMAXP (signed pairwise maximum) intrinsic; the
    // "unadjusted" ABI preserves LLVM's own argument/return lowering.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.smaxp.v4i32"
        )]
        fn _vpmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t;
    }
    // SAFETY: the required `neon` feature is enabled via #[target_feature].
    unsafe { _vpmaxq_s32(a, b) }
}
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxp))]
pub fn vpmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // FFI binding to the LLVM UMAXP (unsigned pairwise maximum) intrinsic; the
    // "unadjusted" ABI preserves LLVM's own argument/return lowering.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.umaxp.v16i8"
        )]
        fn _vpmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t;
    }
    // SAFETY: the required `neon` feature is enabled via #[target_feature].
    unsafe { _vpmaxq_u8(a, b) }
}
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxp))]
pub fn vpmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    // FFI binding to the LLVM UMAXP (unsigned pairwise maximum) intrinsic; the
    // "unadjusted" ABI preserves LLVM's own argument/return lowering.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.umaxp.v8i16"
        )]
        fn _vpmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t;
    }
    // SAFETY: the required `neon` feature is enabled via #[target_feature].
    unsafe { _vpmaxq_u16(a, b) }
}
#[doc = "Folding maximum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(umaxp))]
pub fn vpmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    // FFI binding to the LLVM UMAXP (unsigned pairwise maximum) intrinsic; the
    // "unadjusted" ABI preserves LLVM's own argument/return lowering.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.umaxp.v4i32"
        )]
        fn _vpmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t;
    }
    // SAFETY: the required `neon` feature is enabled via #[target_feature].
    unsafe { _vpmaxq_u32(a, b) }
}
#[doc = "Floating-point maximum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxqd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn vpmaxqd_f64(a: float64x2_t) -> f64 {
    // Reduces the two f64 lanes to a single scalar via the LLVM `fmaxv` intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f64.v2f64"
        )]
        fn _vpmaxqd_f64(a: float64x2_t) -> f64;
    }
    // SAFETY: `neon` is enabled via #[target_feature] on this function.
    unsafe { _vpmaxqd_f64(a) }
}
#[doc = "Floating-point maximum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxs_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fmaxp))]
pub fn vpmaxs_f32(a: float32x2_t) -> f32 {
    // Reduces the two f32 lanes to a single scalar via the LLVM `fmaxv` intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fmaxv.f32.v2f32"
        )]
        fn _vpmaxs_f32(a: float32x2_t) -> f32;
    }
    // SAFETY: `neon` is enabled via #[target_feature] on this function.
    unsafe { _vpmaxs_f32(a) }
}
#[doc = "Floating-point minimum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fminp))]
pub fn vpmin_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // NOTE(review): the doc summary previously said "add pairwise"; this binds
    // `fminp` (minimum pairwise), so the summary above has been corrected.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminp.v4f16"
        )]
        fn _vpmin_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
    }
    // SAFETY: `neon,fp16` are enabled via #[target_feature] on this function.
    unsafe { _vpmin_f16(a, b) }
}
#[doc = "Floating-point minimum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fminp))]
pub fn vpminq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // NOTE(review): the doc summary previously said "add pairwise"; this binds
    // `fminp` (minimum pairwise), so the summary above has been corrected.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminp.v8f16"
        )]
        fn _vpminq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    // SAFETY: `neon,fp16` are enabled via #[target_feature] on this function.
    unsafe { _vpminq_f16(a, b) }
}
#[doc = "Floating-point Minimum Number Pairwise (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnm_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fminnmp))]
pub fn vpminnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // NOTE(review): the doc summary previously said "add pairwise"; this binds
    // `fminnmp` (minimum number pairwise), matching the f32/f64 variants.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmp.v4f16"
        )]
        fn _vpminnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t;
    }
    // SAFETY: `neon,fp16` are enabled via #[target_feature] on this function.
    unsafe { _vpminnm_f16(a, b) }
}
#[doc = "Floating-point Minimum Number Pairwise (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fminnmp))]
pub fn vpminnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // NOTE(review): the doc summary previously said "add pairwise"; this binds
    // `fminnmp` (minimum number pairwise), matching the f32/f64 variants.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmp.v8f16"
        )]
        fn _vpminnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t;
    }
    // SAFETY: `neon,fp16` are enabled via #[target_feature] on this function.
    unsafe { _vpminnmq_f16(a, b) }
}
#[doc = "Floating-point Minimum Number Pairwise (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnm_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // FFI declaration of the LLVM `fminnmp` intrinsic for 2 f32 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmp.v2f32"
        )]
        fn _vpminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t;
    }
    // SAFETY: `neon` is enabled via #[target_feature] on this function.
    unsafe { _vpminnm_f32(a, b) }
}
#[doc = "Floating-point Minimum Number Pairwise (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // FFI declaration of the LLVM `fminnmp` intrinsic for 4 f32 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmp.v4f32"
        )]
        fn _vpminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    // SAFETY: `neon` is enabled via #[target_feature] on this function.
    unsafe { _vpminnmq_f32(a, b) }
}
#[doc = "Floating-point Minimum Number Pairwise (vector)."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // FFI declaration of the LLVM `fminnmp` intrinsic for 2 f64 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmp.v2f64"
        )]
        fn _vpminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: `neon` is enabled via #[target_feature] on this function.
    unsafe { _vpminnmq_f64(a, b) }
}
#[doc = "Floating-point minimum number pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmqd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpminnmqd_f64(a: float64x2_t) -> f64 {
    // Reduces the two f64 lanes to a scalar via the LLVM `fminnmv` intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmv.f64.v2f64"
        )]
        fn _vpminnmqd_f64(a: float64x2_t) -> f64;
    }
    // SAFETY: `neon` is enabled via #[target_feature] on this function.
    unsafe { _vpminnmqd_f64(a) }
}
#[doc = "Floating-point minimum number pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnms_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fminnmp))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vpminnms_f32(a: float32x2_t) -> f32 {
    // Reduces the two f32 lanes to a scalar via the LLVM `fminnmv` intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminnmv.f32.v2f32"
        )]
        fn _vpminnms_f32(a: float32x2_t) -> f32;
    }
    // SAFETY: `neon` is enabled via #[target_feature] on this function.
    unsafe { _vpminnms_f32(a) }
}
#[doc = "Folding minimum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminp))]
pub fn vpminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // FFI declaration of the LLVM `fminp` intrinsic for 4 f32 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminp.v4f32"
        )]
        fn _vpminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t;
    }
    // SAFETY: `neon` is enabled via #[target_feature] on this function.
    unsafe { _vpminq_f32(a, b) }
}
#[doc = "Folding minimum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminp))]
pub fn vpminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // FFI declaration of the LLVM `fminp` intrinsic for 2 f64 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminp.v2f64"
        )]
        fn _vpminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: `neon` is enabled via #[target_feature] on this function.
    unsafe { _vpminq_f64(a, b) }
}
#[doc = "Folding minimum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminp))]
pub fn vpminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    // FFI declaration of the LLVM `sminp` intrinsic for 16 i8 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sminp.v16i8"
        )]
        fn _vpminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t;
    }
    // SAFETY: `neon` is enabled via #[target_feature] on this function.
    unsafe { _vpminq_s8(a, b) }
}
#[doc = "Folding minimum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminp))]
pub fn vpminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    // FFI declaration of the LLVM `sminp` intrinsic for 8 i16 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sminp.v8i16"
        )]
        fn _vpminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t;
    }
    // SAFETY: `neon` is enabled via #[target_feature] on this function.
    unsafe { _vpminq_s16(a, b) }
}
#[doc = "Folding minimum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sminp))]
pub fn vpminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    // FFI declaration of the LLVM `sminp` intrinsic for 4 i32 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sminp.v4i32"
        )]
        fn _vpminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t;
    }
    // SAFETY: `neon` is enabled via #[target_feature] on this function.
    unsafe { _vpminq_s32(a, b) }
}
#[doc = "Folding minimum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminp))]
pub fn vpminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // FFI declaration of the LLVM `uminp` intrinsic for 16 u8 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uminp.v16i8"
        )]
        fn _vpminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t;
    }
    // SAFETY: `neon` is enabled via #[target_feature] on this function.
    unsafe { _vpminq_u8(a, b) }
}
#[doc = "Folding minimum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminp))]
pub fn vpminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    // FFI declaration of the LLVM `uminp` intrinsic for 8 u16 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uminp.v8i16"
        )]
        fn _vpminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t;
    }
    // SAFETY: `neon` is enabled via #[target_feature] on this function.
    unsafe { _vpminq_u16(a, b) }
}
#[doc = "Folding minimum of adjacent pairs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uminp))]
pub fn vpminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    // FFI declaration of the LLVM `uminp` intrinsic for 4 u32 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uminp.v4i32"
        )]
        fn _vpminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t;
    }
    // SAFETY: `neon` is enabled via #[target_feature] on this function.
    unsafe { _vpminq_u32(a, b) }
}
#[doc = "Floating-point minimum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminqd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminp))]
pub fn vpminqd_f64(a: float64x2_t) -> f64 {
    // Reduces the two f64 lanes to a scalar via the LLVM `fminv` intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminv.f64.v2f64"
        )]
        fn _vpminqd_f64(a: float64x2_t) -> f64;
    }
    // SAFETY: `neon` is enabled via #[target_feature] on this function.
    unsafe { _vpminqd_f64(a) }
}
#[doc = "Floating-point minimum pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmins_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fminp))]
pub fn vpmins_f32(a: float32x2_t) -> f32 {
    // Reduces the two f32 lanes to a scalar via the LLVM `fminv` intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fminv.f32.v2f32"
        )]
        fn _vpmins_f32(a: float32x2_t) -> f32;
    }
    // SAFETY: `neon` is enabled via #[target_feature] on this function.
    unsafe { _vpmins_f32(a) }
}
#[doc = "Signed saturating Absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabs_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
pub fn vqabs_s64(a: int64x1_t) -> int64x1_t {
    // FFI declaration of the LLVM `sqabs` intrinsic for a single i64 lane.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqabs.v1i64"
        )]
        fn _vqabs_s64(a: int64x1_t) -> int64x1_t;
    }
    // SAFETY: `neon` is enabled via #[target_feature] on this function.
    unsafe { _vqabs_s64(a) }
}
#[doc = "Signed saturating Absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
pub fn vqabsq_s64(a: int64x2_t) -> int64x2_t {
    // FFI declaration of the LLVM `sqabs` intrinsic for 2 i64 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqabs.v2i64"
        )]
        fn _vqabsq_s64(a: int64x2_t) -> int64x2_t;
    }
    // SAFETY: `neon` is enabled via #[target_feature] on this function.
    unsafe { _vqabsq_s64(a) }
}
#[doc = "Signed saturating absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsb_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
pub fn vqabsb_s8(a: i8) -> i8 {
    // Scalar form: splat into a vector, apply the vector sqabs, extract lane 0.
    unsafe { simd_extract!(vqabs_s8(vdup_n_s8(a)), 0) }
}
#[doc = "Signed saturating absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsh_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
pub fn vqabsh_s16(a: i16) -> i16 {
    // Scalar form: splat into a vector, apply the vector sqabs, extract lane 0.
    unsafe { simd_extract!(vqabs_s16(vdup_n_s16(a)), 0) }
}
#[doc = "Signed saturating absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabss_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
pub fn vqabss_s32(a: i32) -> i32 {
    // Scalar (i32) form binds directly to the scalar LLVM `sqabs` intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqabs.i32"
        )]
        fn _vqabss_s32(a: i32) -> i32;
    }
    // SAFETY: `neon` is enabled via #[target_feature] on this function.
    unsafe { _vqabss_s32(a) }
}
#[doc = "Signed saturating absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))]
pub fn vqabsd_s64(a: i64) -> i64 {
    // Scalar (i64) form binds directly to the scalar LLVM `sqabs` intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqabs.i64"
        )]
        fn _vqabsd_s64(a: i64) -> i64;
    }
    // SAFETY: `neon` is enabled via #[target_feature] on this function.
    unsafe { _vqabsd_s64(a) }
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddb_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqadd))]
pub fn vqaddb_s8(a: i8, b: i8) -> i8 {
    // Scalar form: splat both operands, use the vector sqadd, extract lane 0.
    let a: int8x8_t = vdup_n_s8(a);
    let b: int8x8_t = vdup_n_s8(b);
    unsafe { simd_extract!(vqadd_s8(a, b), 0) }
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddh_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqadd))]
pub fn vqaddh_s16(a: i16, b: i16) -> i16 {
    // Scalar form: splat both operands, use the vector sqadd, extract lane 0.
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    unsafe { simd_extract!(vqadd_s16(a, b), 0) }
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddb_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqadd))]
pub fn vqaddb_u8(a: u8, b: u8) -> u8 {
    // Scalar form: splat both operands, use the vector uqadd, extract lane 0.
    let a: uint8x8_t = vdup_n_u8(a);
    let b: uint8x8_t = vdup_n_u8(b);
    unsafe { simd_extract!(vqadd_u8(a, b), 0) }
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddh_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqadd))]
pub fn vqaddh_u16(a: u16, b: u16) -> u16 {
    // Scalar form: splat both operands, use the vector uqadd, extract lane 0.
    let a: uint16x4_t = vdup_n_u16(a);
    let b: uint16x4_t = vdup_n_u16(b);
    unsafe { simd_extract!(vqadd_u16(a, b), 0) }
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadds_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqadd))]
pub fn vqadds_s32(a: i32, b: i32) -> i32 {
    // Scalar (i32) form binds directly to the scalar LLVM `sqadd` intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqadd.i32"
        )]
        fn _vqadds_s32(a: i32, b: i32) -> i32;
    }
    // SAFETY: `neon` is enabled via #[target_feature] on this function.
    unsafe { _vqadds_s32(a, b) }
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqadd))]
pub fn vqaddd_s64(a: i64, b: i64) -> i64 {
    // Scalar (i64) form binds directly to the scalar LLVM `sqadd` intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqadd.i64"
        )]
        fn _vqaddd_s64(a: i64, b: i64) -> i64;
    }
    // SAFETY: `neon` is enabled via #[target_feature] on this function.
    unsafe { _vqaddd_s64(a, b) }
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadds_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqadd))]
pub fn vqadds_u32(a: u32, b: u32) -> u32 {
    // Scalar (u32) form binds directly to the scalar LLVM `uqadd` intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqadd.i32"
        )]
        fn _vqadds_u32(a: u32, b: u32) -> u32;
    }
    // SAFETY: `neon` is enabled via #[target_feature] on this function.
    unsafe { _vqadds_u32(a, b) }
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddd_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqadd))]
pub fn vqaddd_u64(a: u64, b: u64) -> u64 {
    // Scalar (u64) form binds directly to the scalar LLVM `uqadd` intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqadd.i64"
        )]
        fn _vqaddd_u64(a: u64, b: u64) -> u64;
    }
    // SAFETY: `neon` is enabled via #[target_feature] on this function.
    unsafe { _vqaddd_u64(a, b) }
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_lane_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c: int16x4_t) -> int32x4_t {
    // N must index a lane of the 4-lane `c` (checked at compile time), then the
    // high-half widening doubling multiply is saturating-added into `a`.
    static_assert_uimm_bits!(N, 2);
    vqaddq_s32(a, vqdmull_high_lane_s16::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_laneq_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    // N must index a lane of the 8-lane `c` (checked at compile time), then the
    // high-half widening doubling multiply is saturating-added into `a`.
    static_assert_uimm_bits!(N, 3);
    vqaddq_s32(a, vqdmull_high_laneq_s16::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_lane_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c: int32x2_t) -> int64x2_t {
    // N must index a lane of the 2-lane `c` (checked at compile time), then the
    // high-half widening doubling multiply is saturating-added into `a`.
    static_assert_uimm_bits!(N, 1);
    vqaddq_s64(a, vqdmull_high_lane_s32::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_laneq_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    // N must index a lane of the 4-lane `c` (checked at compile time), then the
    // high-half widening doubling multiply is saturating-added into `a`.
    static_assert_uimm_bits!(N, 2);
    vqaddq_s64(a, vqdmull_high_laneq_s32::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t {
    // Saturating-add the high-half widening doubling multiply by scalar `c` into `a`.
    vqaddq_s32(a, vqdmull_high_n_s16(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    // Saturating-add the high-half widening doubling multiply of `b` and `c` into `a`.
    vqaddq_s32(a, vqdmull_high_s16(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t {
    // Saturating-add the high-half widening doubling multiply by scalar `c` into `a`.
    vqaddq_s64(a, vqdmull_high_n_s32(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    // Saturating-add the high-half widening doubling multiply of `b` and `c` into `a`.
    vqaddq_s64(a, vqdmull_high_s32(b, c))
}
#[doc = "Vector widening saturating doubling multiply accumulate with scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, N = 2))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_laneq_s16<const N: i32>(a: int32x4_t, b: int16x4_t, c: int16x8_t) -> int32x4_t {
    // N must index a lane of the 8-lane `c` (checked at compile time), then the
    // widening doubling multiply is saturating-added into `a`.
    static_assert_uimm_bits!(N, 3);
    vqaddq_s32(a, vqdmull_laneq_s16::<N>(b, c))
}
#[doc = "Vector widening saturating doubling multiply accumulate with scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlal_laneq_s32<const N: i32>(a: int64x2_t, b: int32x2_t, c: int32x4_t) -> int64x2_t {
    // N must index a lane of the 4-lane `c` (checked at compile time), then the
    // widening doubling multiply is saturating-added into `a`.
    static_assert_uimm_bits!(N, 2);
    vqaddq_s64(a, vqdmull_laneq_s32::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlalh_lane_s16<const LANE: i32>(a: i32, b: i16, c: int16x4_t) -> i32 {
    // LANE must index the 4-lane `c` (checked at compile time); the selected
    // element is extracted and fed to the scalar form.
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vqdmlalh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlalh_laneq_s16<const LANE: i32>(a: i32, b: i16, c: int16x8_t) -> i32 {
    // LANE must index the 8-lane `c` (checked at compile time); the selected
    // element is extracted and fed to the scalar form.
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vqdmlalh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
17283#[doc = "Signed saturating doubling multiply-add long"]
17284#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_lane_s32)"]
17285#[inline(always)]
17286#[target_feature(enable = "neon")]
17287#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))]
17288#[rustc_legacy_const_generics(3)]
17289#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17290pub fn vqdmlals_lane_s32<const LANE: i32>(a: i64, b: i32, c: int32x2_t) -> i64 {
17291    static_assert_uimm_bits!(LANE, 1);
17292    unsafe { vqdmlals_s32(a, b, simd_extract!(c, LANE as u32)) }
17293}
17294#[doc = "Signed saturating doubling multiply-add long"]
17295#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_laneq_s32)"]
17296#[inline(always)]
17297#[target_feature(enable = "neon")]
17298#[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))]
17299#[rustc_legacy_const_generics(3)]
17300#[stable(feature = "neon_intrinsics", since = "1.59.0")]
17301pub fn vqdmlals_laneq_s32<const LANE: i32>(a: i64, b: i32, c: int32x4_t) -> i64 {
17302    static_assert_uimm_bits!(LANE, 2);
17303    unsafe { vqdmlals_s32(a, b, simd_extract!(c, LANE as u32)) }
17304}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlalh_s16(a: i32, b: i16, c: i16) -> i32 {
    // No scalar sqdmull helper exists for i16, so broadcast `b` and `c`
    // into vectors, take the doubling product, and use lane 0.
    let x: int32x4_t = vqdmull_s16(vdup_n_s16(b), vdup_n_s16(c));
    // Saturating add of the widened product into the accumulator `a`.
    unsafe { vqadds_s32(a, simd_extract!(x, 0)) }
}
#[doc = "Signed saturating doubling multiply-add long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlal))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlals_s32(a: i64, b: i32, c: i32) -> i64 {
    // Widening saturating doubling product b * c, then saturating add to `a`.
    let x: i64 = vqaddd_s64(a, vqdmulls_s32(b, c));
    x
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_lane_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c: int16x4_t) -> int32x4_t {
    // N is a 2-bit immediate selecting one of the 4 lanes of `c`.
    static_assert_uimm_bits!(N, 2);
    // Doubling product of the high half of `b` by lane N of `c`,
    // then saturating subtraction from the accumulator `a`.
    vqsubq_s32(a, vqdmull_high_lane_s16::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_laneq_s16<const N: i32>(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    // N is a 3-bit immediate selecting one of the 8 lanes of `c`.
    static_assert_uimm_bits!(N, 3);
    vqsubq_s32(a, vqdmull_high_laneq_s16::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_lane_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c: int32x2_t) -> int64x2_t {
    // N is a 1-bit immediate selecting one of the 2 lanes of `c`.
    static_assert_uimm_bits!(N, 1);
    vqsubq_s64(a, vqdmull_high_lane_s32::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_laneq_s32<const N: i32>(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    // N is a 2-bit immediate selecting one of the 4 lanes of `c`.
    static_assert_uimm_bits!(N, 2);
    vqsubq_s64(a, vqdmull_high_laneq_s32::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t {
    // Doubling product of the high half of `b` by the scalar `c`,
    // then saturating subtraction from the accumulator `a`.
    vqsubq_s32(a, vqdmull_high_n_s16(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t {
    // Element-wise doubling product of the high halves of `b` and `c`,
    // subtracted (with saturation) from `a`.
    vqsubq_s32(a, vqdmull_high_s16(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t {
    // 32-bit analogue of vqdmlsl_high_n_s16.
    vqsubq_s64(a, vqdmull_high_n_s32(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t {
    // 32-bit analogue of vqdmlsl_high_s16.
    vqsubq_s64(a, vqdmull_high_s32(b, c))
}
#[doc = "Vector widening saturating doubling multiply subtract with scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, N = 2))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_laneq_s16<const N: i32>(a: int32x4_t, b: int16x4_t, c: int16x8_t) -> int32x4_t {
    // N is a 3-bit immediate selecting one of the 8 lanes of `c`.
    static_assert_uimm_bits!(N, 3);
    // Widening doubling product of `b` by lane N of `c`, then
    // saturating subtraction from the accumulator `a`.
    vqsubq_s32(a, vqdmull_laneq_s16::<N>(b, c))
}
#[doc = "Vector widening saturating doubling multiply subtract with scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, N = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsl_laneq_s32<const N: i32>(a: int64x2_t, b: int32x2_t, c: int32x4_t) -> int64x2_t {
    // N is a 2-bit immediate selecting one of the 4 lanes of `c`.
    static_assert_uimm_bits!(N, 2);
    vqsubq_s64(a, vqdmull_laneq_s32::<N>(b, c))
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlslh_lane_s16<const LANE: i32>(a: i32, b: i16, c: int16x4_t) -> i32 {
    // LANE is a 2-bit immediate selecting one of the 4 lanes of `c`.
    static_assert_uimm_bits!(LANE, 2);
    // Extract the chosen lane and defer to the all-scalar form.
    unsafe { vqdmlslh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlslh_laneq_s16<const LANE: i32>(a: i32, b: i16, c: int16x8_t) -> i32 {
    // LANE is a 3-bit immediate selecting one of the 8 lanes of `c`.
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vqdmlslh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsls_lane_s32<const LANE: i32>(a: i64, b: i32, c: int32x2_t) -> i64 {
    // LANE is a 1-bit immediate selecting one of the 2 lanes of `c`.
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vqdmlsls_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsls_laneq_s32<const LANE: i32>(a: i64, b: i32, c: int32x4_t) -> i64 {
    // LANE is a 2-bit immediate selecting one of the 4 lanes of `c`.
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vqdmlsls_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlslh_s16(a: i32, b: i16, c: i16) -> i32 {
    // No scalar sqdmull helper exists for i16, so broadcast `b` and `c`
    // into vectors, take the doubling product, and use lane 0.
    let x: int32x4_t = vqdmull_s16(vdup_n_s16(b), vdup_n_s16(c));
    // Saturating subtraction of the widened product from the accumulator `a`.
    unsafe { vqsubs_s32(a, simd_extract!(x, 0)) }
}
#[doc = "Signed saturating doubling multiply-subtract long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmlsl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmlsls_s32(a: i64, b: i32, c: i32) -> i64 {
    // Widening saturating doubling product b * c, subtracted from `a`.
    let x: i64 = vqsubd_s64(a, vqdmulls_s32(b, c));
    x
}
#[doc = "Vector saturating doubling multiply high by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulh_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    // LANE is a 2-bit immediate selecting one of the 4 lanes of `b`.
    static_assert_uimm_bits!(LANE, 2);
    // Broadcast the chosen lane of `b` and take the element-wise
    // saturating doubling multiply-high against `a`.
    unsafe { vqdmulh_s16(a, vdup_n_s16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Vector saturating doubling multiply high by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x4_t) -> int16x8_t {
    // LANE is a 2-bit immediate selecting one of the 4 lanes of `b`.
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vqdmulhq_s16(a, vdupq_n_s16(simd_extract!(b, LANE as u32))) }
}
#[doc = "Vector saturating doubling multiply high by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulh_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    // LANE is a 1-bit immediate selecting one of the 2 lanes of `b`.
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vqdmulh_s32(a, vdup_n_s32(simd_extract!(b, LANE as u32))) }
}
#[doc = "Vector saturating doubling multiply high by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x2_t) -> int32x4_t {
    // LANE is a 1-bit immediate selecting one of the 2 lanes of `b`.
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vqdmulhq_s32(a, vdupq_n_s32(simd_extract!(b, LANE as u32))) }
}
#[doc = "Signed saturating doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhh_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhh_lane_s16<const N: i32>(a: i16, b: int16x4_t) -> i16 {
    // N is a 2-bit immediate selecting one of the 4 lanes of `b`.
    static_assert_uimm_bits!(N, 2);
    unsafe {
        // Extract the chosen lane and defer to the scalar form.
        let b: i16 = simd_extract!(b, N as u32);
        vqdmulhh_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhh_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhh_laneq_s16<const N: i32>(a: i16, b: int16x8_t) -> i16 {
    // N is a 3-bit immediate selecting one of the 8 lanes of `b`.
    static_assert_uimm_bits!(N, 3);
    unsafe {
        let b: i16 = simd_extract!(b, N as u32);
        vqdmulhh_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhh_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhh_s16(a: i16, b: i16) -> i16 {
    // No scalar form of the intrinsic exists: broadcast both operands,
    // run the vector multiply-high, and take lane 0 of the result.
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    unsafe { simd_extract!(vqdmulh_s16(a, b), 0) }
}
#[doc = "Signed saturating doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhs_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhs_s32(a: i32, b: i32) -> i32 {
    // Same broadcast-then-extract-lane-0 trick as vqdmulhh_s16, for i32.
    let a: int32x2_t = vdup_n_s32(a);
    let b: int32x2_t = vdup_n_s32(b);
    unsafe { simd_extract!(vqdmulh_s32(a, b), 0) }
}
#[doc = "Signed saturating doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhs_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhs_lane_s32<const N: i32>(a: i32, b: int32x2_t) -> i32 {
    // N is a 1-bit immediate selecting one of the 2 lanes of `b`.
    static_assert_uimm_bits!(N, 1);
    unsafe {
        // Extract the chosen lane and defer to the scalar form.
        let b: i32 = simd_extract!(b, N as u32);
        vqdmulhs_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhs_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmulh, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulhs_laneq_s32<const N: i32>(a: i32, b: int32x4_t) -> i32 {
    // N is a 2-bit immediate selecting one of the 4 lanes of `b`.
    static_assert_uimm_bits!(N, 2);
    unsafe {
        let b: i32 = simd_extract!(b, N as u32);
        vqdmulhs_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_lane_s16<const N: i32>(a: int16x8_t, b: int16x4_t) -> int32x4_t {
    // N is a 2-bit immediate selecting one of the 4 lanes of `b`.
    static_assert_uimm_bits!(N, 2);
    unsafe {
        // Take the upper half (lanes 4..=7) of `a`.
        let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        // Broadcast lane N of `b` to all 4 positions.
        let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]);
        vqdmull_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_laneq_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int64x2_t {
    // N is a 2-bit immediate selecting one of the 4 lanes of `b`.
    static_assert_uimm_bits!(N, 2);
    unsafe {
        // Take the upper half (lanes 2..=3) of `a`.
        let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        // Broadcast lane N of `b` to both positions.
        let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]);
        vqdmull_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_lane_s32<const N: i32>(a: int32x4_t, b: int32x2_t) -> int64x2_t {
    // N is a 1-bit immediate selecting one of the 2 lanes of `b`.
    static_assert_uimm_bits!(N, 1);
    unsafe {
        let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]);
        vqdmull_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2, N = 4))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_laneq_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int32x4_t {
    // N is a 3-bit immediate selecting one of the 8 lanes of `b`.
    static_assert_uimm_bits!(N, 3);
    unsafe {
        let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]);
        vqdmull_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_n_s16(a: int16x8_t, b: i16) -> int32x4_t {
    unsafe {
        // Take the upper half (lanes 4..=7) of `a`.
        let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        // Broadcast the scalar `b` across all 4 lanes.
        let b: int16x4_t = vdup_n_s16(b);
        vqdmull_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_n_s32(a: int32x4_t, b: i32) -> int64x2_t {
    unsafe {
        // Take the upper half (lanes 2..=3) of `a`.
        let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        // Broadcast the scalar `b` across both lanes.
        let b: int32x2_t = vdup_n_s32(b);
        vqdmull_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
    unsafe {
        // Widening doubling multiply of the upper halves of both inputs.
        let a: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let b: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        vqdmull_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
    unsafe {
        // Widening doubling multiply of the upper halves of both inputs.
        let a: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        let b: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        vqdmull_s32(a, b)
    }
}
#[doc = "Vector saturating doubling long multiply by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 4))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_laneq_s16<const N: i32>(a: int16x4_t, b: int16x8_t) -> int32x4_t {
    // N is a 3-bit immediate selecting one of the 8 lanes of `b`.
    static_assert_uimm_bits!(N, 3);
    unsafe {
        // Broadcast lane N of `b` across 4 lanes, then widening
        // saturating doubling multiply against `a`.
        let b: int16x4_t = simd_shuffle!(b, b, [N as u32, N as u32, N as u32, N as u32]);
        vqdmull_s16(a, b)
    }
}
#[doc = "Vector saturating doubling long multiply by scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmull_laneq_s32<const N: i32>(a: int32x2_t, b: int32x4_t) -> int64x2_t {
    // N is a 2-bit immediate selecting one of the 4 lanes of `b`.
    static_assert_uimm_bits!(N, 2);
    unsafe {
        let b: int32x2_t = simd_shuffle!(b, b, [N as u32, N as u32]);
        vqdmull_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmullh_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmullh_lane_s16<const N: i32>(a: i16, b: int16x4_t) -> i32 {
    // N is a 2-bit immediate selecting one of the 4 lanes of `b`.
    static_assert_uimm_bits!(N, 2);
    unsafe {
        // Extract the chosen lane and defer to the scalar form.
        let b: i16 = simd_extract!(b, N as u32);
        vqdmullh_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulls_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulls_laneq_s32<const N: i32>(a: i32, b: int32x4_t) -> i64 {
    // N is a 2-bit immediate selecting one of the 4 lanes of `b`.
    static_assert_uimm_bits!(N, 2);
    unsafe {
        let b: i32 = simd_extract!(b, N as u32);
        vqdmulls_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmullh_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 4))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmullh_laneq_s16<const N: i32>(a: i16, b: int16x8_t) -> i32 {
    // N is a 3-bit immediate selecting one of the 8 lanes of `b`.
    static_assert_uimm_bits!(N, 3);
    unsafe {
        let b: i16 = simd_extract!(b, N as u32);
        vqdmullh_s16(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmullh_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmullh_s16(a: i16, b: i16) -> i32 {
    // No scalar form of sqdmull exists for i16: broadcast both
    // operands, run the vector widening multiply, and take lane 0.
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    unsafe { simd_extract!(vqdmull_s16(a, b), 0) }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulls_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulls_lane_s32<const N: i32>(a: i32, b: int32x2_t) -> i64 {
    // N is a 1-bit immediate selecting one of the 2 lanes of `b`.
    static_assert_uimm_bits!(N, 1);
    unsafe {
        // Extract the chosen lane and defer to the scalar form.
        let b: i32 = simd_extract!(b, N as u32);
        vqdmulls_s32(a, b)
    }
}
#[doc = "Signed saturating doubling multiply long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulls_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqdmull))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqdmulls_s32(a: i32, b: i32) -> i64 {
    // A dedicated scalar LLVM intrinsic exists for this operation,
    // so no broadcast/extract dance is needed here.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqdmulls.scalar"
        )]
        fn _vqdmulls_s32(a: i32, b: i32) -> i64;
    }
    unsafe { _vqdmulls_s32(a, b) }
}
#[doc = "Signed saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovn_high_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    // Narrow `b` with signed saturation, then concatenate `a` (low half)
    // with the narrowed result (high half) via a full-width shuffle.
    unsafe {
        simd_shuffle!(
            a,
            vqmovn_s16(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Signed saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovn_high_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    // Concatenate `a` with the saturating-narrowed `b`.
    unsafe { simd_shuffle!(a, vqmovn_s32(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Signed saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovn_high_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    // Concatenate `a` with the saturating-narrowed `b`.
    unsafe { simd_shuffle!(a, vqmovn_s64(b), [0, 1, 2, 3]) }
}
#[doc = "Unsigned saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovn_high_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
    // Narrow `b` with unsigned saturation, then concatenate `a` (low half)
    // with the narrowed result (high half) via a full-width shuffle.
    unsafe {
        simd_shuffle!(
            a,
            vqmovn_u16(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Unsigned saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovn_high_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    // Concatenate `a` with the saturating-narrowed `b`.
    unsafe { simd_shuffle!(a, vqmovn_u32(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Unsigned saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovn_high_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    // Concatenate `a` with the saturating-narrowed `b`.
    unsafe { simd_shuffle!(a, vqmovn_u64(b), [0, 1, 2, 3]) }
}
#[doc = "Saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovnd_s64(a: i64) -> i32 {
    // Scalar form: delegates directly to the dedicated LLVM scalar intrinsic
    // (no vector round-trip needed).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.scalar.sqxtn.i32.i64"
        )]
        fn _vqmovnd_s64(a: i64) -> i32;
    }
    unsafe { _vqmovnd_s64(a) }
}
#[doc = "Saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnd_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovnd_u64(a: u64) -> u32 {
    // Scalar form: delegates directly to the dedicated LLVM scalar intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.scalar.uqxtn.i32.i64"
        )]
        fn _vqmovnd_u64(a: u64) -> u32;
    }
    unsafe { _vqmovnd_u64(a) }
}
#[doc = "Saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnh_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovnh_s16(a: i16) -> i8 {
    // Splat the scalar into a vector, run the vector narrow, take lane 0.
    unsafe { simd_extract!(vqmovn_s16(vdupq_n_s16(a)), 0) }
}
#[doc = "Saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovns_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovns_s32(a: i32) -> i16 {
    // Splat the scalar into a vector, run the vector narrow, take lane 0.
    unsafe { simd_extract!(vqmovn_s32(vdupq_n_s32(a)), 0) }
}
#[doc = "Saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnh_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovnh_u16(a: u16) -> u8 {
    // Splat the scalar into a vector, run the vector narrow, take lane 0.
    unsafe { simd_extract!(vqmovn_u16(vdupq_n_u16(a)), 0) }
}
#[doc = "Saturating extract narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovns_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqxtn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovns_u32(a: u32) -> u16 {
    // Splat the scalar into a vector, run the vector narrow, take lane 0.
    unsafe { simd_extract!(vqmovn_u32(vdupq_n_u32(a)), 0) }
}
#[doc = "Signed saturating extract unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovun_high_s16(a: uint8x8_t, b: int16x8_t) -> uint8x16_t {
    // Indices 0-7 keep `a` as the low half; 8-15 select signed `b` narrowed
    // to unsigned 8-bit lanes with saturation.
    unsafe {
        simd_shuffle!(
            a,
            vqmovun_s16(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Signed saturating extract unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovun_high_s32(a: uint16x4_t, b: int32x4_t) -> uint16x8_t {
    // Low half from `a`; high half is signed `b` narrowed to unsigned lanes.
    unsafe { simd_shuffle!(a, vqmovun_s32(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Signed saturating extract unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovun_high_s64(a: uint32x2_t, b: int64x2_t) -> uint32x4_t {
    // Low half from `a`; high half is signed `b` narrowed to unsigned lanes.
    unsafe { simd_shuffle!(a, vqmovun_s64(b), [0, 1, 2, 3]) }
}
#[doc = "Signed saturating extract unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovunh_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovunh_s16(a: i16) -> u8 {
    // Splat the scalar into a vector, run the vector form, take lane 0.
    unsafe { simd_extract!(vqmovun_s16(vdupq_n_s16(a)), 0) }
}
#[doc = "Signed saturating extract unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovuns_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovuns_s32(a: i32) -> u16 {
    // Splat the scalar into a vector, run the vector form, take lane 0.
    unsafe { simd_extract!(vqmovun_s32(vdupq_n_s32(a)), 0) }
}
#[doc = "Signed saturating extract unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovund_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqxtun))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqmovund_s64(a: i64) -> u32 {
    // Splat the scalar into a vector, run the vector form, take lane 0.
    unsafe { simd_extract!(vqmovun_s64(vdupq_n_s64(a)), 0) }
}
#[doc = "Signed saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqneg_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn vqneg_s64(a: int64x1_t) -> int64x1_t {
    // Binds directly to the LLVM AArch64 saturating-negate intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqneg.v1i64"
        )]
        fn _vqneg_s64(a: int64x1_t) -> int64x1_t;
    }
    unsafe { _vqneg_s64(a) }
}
#[doc = "Signed saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn vqnegq_s64(a: int64x2_t) -> int64x2_t {
    // Binds directly to the LLVM AArch64 saturating-negate intrinsic (2-lane form).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqneg.v2i64"
        )]
        fn _vqnegq_s64(a: int64x2_t) -> int64x2_t;
    }
    unsafe { _vqnegq_s64(a) }
}
#[doc = "Signed saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegb_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn vqnegb_s8(a: i8) -> i8 {
    // Splat the scalar, run the vector saturating negate, take lane 0.
    unsafe { simd_extract!(vqneg_s8(vdup_n_s8(a)), 0) }
}
#[doc = "Signed saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegh_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn vqnegh_s16(a: i16) -> i16 {
    // Splat the scalar, run the vector saturating negate, take lane 0.
    unsafe { simd_extract!(vqneg_s16(vdup_n_s16(a)), 0) }
}
#[doc = "Signed saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegs_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn vqnegs_s32(a: i32) -> i32 {
    // Splat the scalar, run the vector saturating negate, take lane 0.
    unsafe { simd_extract!(vqneg_s32(vdup_n_s32(a)), 0) }
}
#[doc = "Signed saturating negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqneg))]
pub fn vqnegd_s64(a: i64) -> i64 {
    // Splat the scalar, run the vector saturating negate, take lane 0.
    unsafe { simd_extract!(vqneg_s64(vdup_n_s64(a)), 0) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlah_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
    // LANE must be 0..=3 (2 bits) for a 4-lane source.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast element LANE of `c` to all lanes, then reuse the
        // all-vector form of the intrinsic.
        let c: int16x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vqrdmlah_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlah_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
    // LANE must be 0..=1 (1 bit) for a 2-lane source.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast element LANE of `c`, then reuse the all-vector form.
        let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
        vqrdmlah_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlah_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x8_t) -> int16x4_t {
    // LANE must be 0..=7 (3 bits) for the 8-lane source vector.
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        // Broadcast element LANE of the q-register `c` into a 4-lane vector.
        let c: int16x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vqrdmlah_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlah_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x4_t) -> int32x2_t {
    // LANE must be 0..=3 (2 bits) for the 4-lane source vector.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast element LANE of the q-register `c` into a 2-lane vector.
        let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
        vqrdmlah_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x4_t) -> int16x8_t {
    // LANE must be 0..=3 (2 bits) for the 4-lane source `c`.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast element LANE of `c` across all 8 lanes, then reuse the
        // all-vector form.
        let c: int16x8_t = simd_shuffle!(
            c,
            c,
            [
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32
            ]
        );
        vqrdmlahq_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x2_t) -> int32x4_t {
    // LANE must be 0..=1 (1 bit) for the 2-lane source `c`.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast element LANE of `c` across all 4 lanes.
        let c: int32x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vqrdmlahq_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    // LANE must be 0..=7 (3 bits) for the 8-lane source `c`.
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        // Broadcast element LANE of `c` across all 8 lanes.
        let c: int16x8_t = simd_shuffle!(
            c,
            c,
            [
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32
            ]
        );
        vqrdmlahq_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    // LANE must be 0..=3 (2 bits) for the 4-lane source `c`.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast element LANE of `c` across all 4 lanes.
        let c: int32x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vqrdmlahq_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlah_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
    // Binds directly to the LLVM AArch64 sqrdmlah intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlah.v4i16"
        )]
        fn _vqrdmlah_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t;
    }
    unsafe { _vqrdmlah_s16(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    // Binds directly to the LLVM AArch64 sqrdmlah intrinsic (8-lane form).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlah.v8i16"
        )]
        fn _vqrdmlahq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t;
    }
    unsafe { _vqrdmlahq_s16(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlah_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
    // Binds directly to the LLVM AArch64 sqrdmlah intrinsic (2-lane form).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlah.v2i32"
        )]
        fn _vqrdmlah_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t;
    }
    unsafe { _vqrdmlah_s32(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    // Binds directly to the LLVM AArch64 sqrdmlah intrinsic (4-lane form).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlah.v4i32"
        )]
        fn _vqrdmlahq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t;
    }
    unsafe { _vqrdmlahq_s32(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahh_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahh_lane_s16<const LANE: i32>(a: i16, b: i16, c: int16x4_t) -> i16 {
    // LANE must be 0..=3 (2 bits); extract that element of `c` and
    // delegate to the scalar form.
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vqrdmlahh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahh_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahh_laneq_s16<const LANE: i32>(a: i16, b: i16, c: int16x8_t) -> i16 {
    // LANE must be 0..=7 (3 bits); extract that element of `c` and
    // delegate to the scalar form.
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vqrdmlahh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahs_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahs_lane_s32<const LANE: i32>(a: i32, b: i32, c: int32x2_t) -> i32 {
    // LANE must be 0..=1 (1 bit); extract that element of `c` and
    // delegate to the scalar form.
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vqrdmlahs_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahs_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahs_laneq_s32<const LANE: i32>(a: i32, b: i32, c: int32x4_t) -> i32 {
    // LANE must be 0..=3 (2 bits); extract that element of `c` and
    // delegate to the scalar form.
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vqrdmlahs_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahh_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahh_s16(a: i16, b: i16, c: i16) -> i16 {
    // Splat all three scalars into vectors, run the vector form, take lane 0.
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    let c: int16x4_t = vdup_n_s16(c);
    unsafe { simd_extract!(vqrdmlah_s16(a, b, c), 0) }
}
#[doc = "Signed saturating rounding doubling multiply accumulate returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahs_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlah))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlahs_s32(a: i32, b: i32, c: i32) -> i32 {
    // Splat all three scalars into vectors, run the vector form, take lane 0.
    let a: int32x2_t = vdup_n_s32(a);
    let b: int32x2_t = vdup_n_s32(b);
    let c: int32x2_t = vdup_n_s32(c);
    unsafe { simd_extract!(vqrdmlah_s32(a, b, c), 0) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlsh_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
    // LANE must be 0..=3 (2 bits) for a 4-lane source.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast element LANE of `c`, then reuse the all-vector form.
        let c: int16x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vqrdmlsh_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlsh_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
    // LANE must be 0..=1 (1 bit) for a 2-lane source.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast element LANE of `c`, then reuse the all-vector form.
        let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
        vqrdmlsh_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlsh_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x8_t) -> int16x4_t {
    // LANE must be 0..=7 (3 bits) for the 8-lane source vector.
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        // Broadcast element LANE of the q-register `c` into a 4-lane vector.
        let c: int16x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vqrdmlsh_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlsh_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x4_t) -> int32x2_t {
    // LANE must be 0..=3 (2 bits) for the 4-lane source vector.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast element LANE of the q-register `c` into a 2-lane vector.
        let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]);
        vqrdmlsh_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x4_t) -> int16x8_t {
    // LANE must be 0..=3 (2 bits) for the 4-lane source `c`.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast element LANE of `c` across all 8 lanes.
        let c: int16x8_t = simd_shuffle!(
            c,
            c,
            [
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32
            ]
        );
        vqrdmlshq_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x2_t) -> int32x4_t {
    // LANE must be 0..=1 (1 bit) for the 2-lane source `c`.
    static_assert_uimm_bits!(LANE, 1);
    unsafe {
        // Broadcast element LANE of `c` across all 4 lanes.
        let c: int32x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vqrdmlshq_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    // LANE must be 0..=7 (3 bits) for the 8-lane source `c`.
    static_assert_uimm_bits!(LANE, 3);
    unsafe {
        // Broadcast element LANE of `c` across all 8 lanes.
        let c: int16x8_t = simd_shuffle!(
            c,
            c,
            [
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32,
                LANE as u32
            ]
        );
        vqrdmlshq_s16(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    // LANE must be 0..=3 (2 bits) for the 4-lane source `c`.
    static_assert_uimm_bits!(LANE, 2);
    unsafe {
        // Broadcast element LANE of `c` across all 4 lanes.
        let c: int32x4_t =
            simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
        vqrdmlshq_s32(a, b, c)
    }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlsh_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
    // Binds directly to the LLVM AArch64 sqrdmlsh intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlsh.v4i16"
        )]
        fn _vqrdmlsh_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t;
    }
    unsafe { _vqrdmlsh_s16(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    // Binds directly to the LLVM AArch64 sqrdmlsh intrinsic (8-lane form).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlsh.v8i16"
        )]
        fn _vqrdmlshq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t;
    }
    unsafe { _vqrdmlshq_s16(a, b, c) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlsh_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
    // Binds directly to the LLVM AArch64 sqrdmlsh intrinsic (2-lane form).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrdmlsh.v2i32"
        )]
        fn _vqrdmlsh_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t;
    }
    unsafe { _vqrdmlsh_s32(a, b, c) }
}
18562#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
18563#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_s32)"]
18564#[inline(always)]
18565#[target_feature(enable = "rdm")]
18566#[cfg_attr(test, assert_instr(sqrdmlsh))]
18567#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
18568pub fn vqrdmlshq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
18569    unsafe extern "unadjusted" {
18570        #[cfg_attr(
18571            any(target_arch = "aarch64", target_arch = "arm64ec"),
18572            link_name = "llvm.aarch64.neon.sqrdmlsh.v4i32"
18573        )]
18574        fn _vqrdmlshq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t;
18575    }
18576    unsafe { _vqrdmlshq_s32(a, b, c) }
18577}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshh_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshh_lane_s16<const LANE: i32>(a: i16, b: i16, c: int16x4_t) -> i16 {
    // LANE is checked at compile time: 2 bits, i.e. lanes 0..=3 of the 4-lane vector.
    static_assert_uimm_bits!(LANE, 2);
    // Extract the selected lane of `c` and defer to the all-scalar form.
    unsafe { vqrdmlshh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshh_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshh_laneq_s16<const LANE: i32>(a: i16, b: i16, c: int16x8_t) -> i16 {
    // 3 bits: lanes 0..=7 of the 8-lane (quad) vector.
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vqrdmlshh_s16(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshs_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshs_lane_s32<const LANE: i32>(a: i32, b: i32, c: int32x2_t) -> i32 {
    // 1 bit: lanes 0..=1 of the 2-lane vector.
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vqrdmlshs_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshs_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))]
#[rustc_legacy_const_generics(3)]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshs_laneq_s32<const LANE: i32>(a: i32, b: i32, c: int32x4_t) -> i32 {
    // 2 bits: lanes 0..=3 of the 4-lane (quad) vector.
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vqrdmlshs_s32(a, b, simd_extract!(c, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshh_s16)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshh_s16(a: i16, b: i16, c: i16) -> i16 {
    // Scalar form: splat all three operands into 4-lane vectors, run the
    // vector operation, then take lane 0 of the result.
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    let c: int16x4_t = vdup_n_s16(c);
    unsafe { simd_extract!(vqrdmlsh_s16(a, b, c), 0) }
}
#[doc = "Signed saturating rounding doubling multiply subtract returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshs_s32)"]
#[inline(always)]
#[target_feature(enable = "rdm")]
#[cfg_attr(test, assert_instr(sqrdmlsh))]
#[stable(feature = "rdm_intrinsics", since = "1.62.0")]
pub fn vqrdmlshs_s32(a: i32, b: i32, c: i32) -> i32 {
    // Same splat/operate/extract pattern for the i32 scalar form.
    let a: int32x2_t = vdup_n_s32(a);
    let b: int32x2_t = vdup_n_s32(b);
    let c: int32x2_t = vdup_n_s32(c);
    unsafe { simd_extract!(vqrdmlsh_s32(a, b, c), 0) }
}
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhh_lane_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhh_lane_s16<const LANE: i32>(a: i16, b: int16x4_t) -> i16 {
    // LANE must fit in 2 bits (lanes 0..=3 of the 4-lane vector).
    static_assert_uimm_bits!(LANE, 2);
    // Extract the selected lane of `b`, then use the all-scalar form.
    unsafe { vqrdmulhh_s16(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhh_laneq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhh_laneq_s16<const LANE: i32>(a: i16, b: int16x8_t) -> i16 {
    // 3 bits: lanes 0..=7 of the 8-lane (quad) vector.
    static_assert_uimm_bits!(LANE, 3);
    unsafe { vqrdmulhh_s16(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhs_lane_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhs_lane_s32<const LANE: i32>(a: i32, b: int32x2_t) -> i32 {
    // 1 bit: lanes 0..=1 of the 2-lane vector.
    static_assert_uimm_bits!(LANE, 1);
    unsafe { vqrdmulhs_s32(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhs_laneq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhs_laneq_s32<const LANE: i32>(a: i32, b: int32x4_t) -> i32 {
    // 2 bits: lanes 0..=3 of the 4-lane (quad) vector.
    static_assert_uimm_bits!(LANE, 2);
    unsafe { vqrdmulhs_s32(a, simd_extract!(b, LANE as u32)) }
}
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhh_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhh_s16(a: i16, b: i16) -> i16 {
    // Scalar form: splat both operands, run the vector op, take lane 0.
    unsafe { simd_extract!(vqrdmulh_s16(vdup_n_s16(a), vdup_n_s16(b)), 0) }
}
#[doc = "Signed saturating rounding doubling multiply returning high half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhs_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrdmulh))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrdmulhs_s32(a: i32, b: i32) -> i32 {
    // Same splat/operate/extract pattern for the i32 scalar form.
    unsafe { simd_extract!(vqrdmulh_s32(vdup_n_s32(a), vdup_n_s32(b)), 0) }
}
#[doc = "Signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlb_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshlb_s8(a: i8, b: i8) -> i8 {
    // Scalar form implemented by splatting both operands into 8-lane vectors,
    // running the vector shift, and extracting lane 0.
    let a: int8x8_t = vdup_n_s8(a);
    let b: int8x8_t = vdup_n_s8(b);
    unsafe { simd_extract!(vqrshl_s8(a, b), 0) }
}
#[doc = "Signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlh_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshlh_s16(a: i16, b: i16) -> i16 {
    // Same splat/operate/extract pattern, 16-bit element variant.
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    unsafe { simd_extract!(vqrshl_s16(a, b), 0) }
}
#[doc = "Unsigned signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlb_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshlb_u8(a: u8, b: i8) -> u8 {
    // Unsigned value `a` shifted by signed amount `b` (negative = right shift
    // per the instruction's definition); splat, operate, extract lane 0.
    let a: uint8x8_t = vdup_n_u8(a);
    let b: int8x8_t = vdup_n_s8(b);
    unsafe { simd_extract!(vqrshl_u8(a, b), 0) }
}
#[doc = "Unsigned signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlh_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshlh_u16(a: u16, b: i16) -> u16 {
    // Same unsigned splat/operate/extract pattern, 16-bit element variant.
    let a: uint16x4_t = vdup_n_u16(a);
    let b: int16x4_t = vdup_n_s16(b);
    unsafe { simd_extract!(vqrshl_u16(a, b), 0) }
}
#[doc = "Signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshld_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshld_s64(a: i64, b: i64) -> i64 {
    // 64-bit scalar form binds straight to the scalar LLVM intrinsic
    // (`.i64` suffix) instead of going through a vector splat.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrshl.i64"
        )]
        fn _vqrshld_s64(a: i64, b: i64) -> i64;
    }
    // SAFETY: direct forward to the matching LLVM intrinsic.
    unsafe { _vqrshld_s64(a, b) }
}
#[doc = "Signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshls_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshls_s32(a: i32, b: i32) -> i32 {
    // 32-bit scalar variant of the same direct-intrinsic pattern.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqrshl.i32"
        )]
        fn _vqrshls_s32(a: i32, b: i32) -> i32;
    }
    // SAFETY: direct forward to the matching LLVM intrinsic.
    unsafe { _vqrshls_s32(a, b) }
}
#[doc = "Unsigned signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshls_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshls_u32(a: u32, b: i32) -> u32 {
    // Unsigned 32-bit value, signed shift amount; maps to `uqrshl`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqrshl.i32"
        )]
        fn _vqrshls_u32(a: u32, b: i32) -> u32;
    }
    // SAFETY: direct forward to the matching LLVM intrinsic.
    unsafe { _vqrshls_u32(a, b) }
}
#[doc = "Unsigned signed saturating rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshld_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshld_u64(a: u64, b: i64) -> u64 {
    // Unsigned 64-bit variant of the same direct-intrinsic pattern.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqrshl.i64"
        )]
        fn _vqrshld_u64(a: u64, b: i64) -> u64;
    }
    // SAFETY: direct forward to the matching LLVM intrinsic.
    unsafe { _vqrshld_u64(a, b) }
}
#[doc = "Signed saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrn_high_n_s16<const N: i32>(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    // Shift amount must be in the legal range for a 16->8 bit narrow.
    static_assert!(N >= 1 && N <= 8);
    // "high" variant: narrow `b` to 8 lanes, then concatenate `a` (low half)
    // with the narrowed result (high half) into a 16-lane vector.
    unsafe {
        simd_shuffle!(
            a,
            vqrshrn_n_s16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Signed saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrn_high_n_s32<const N: i32>(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    // Legal range for a 32->16 bit narrow.
    static_assert!(N >= 1 && N <= 16);
    // Concatenate `a` (low) with the narrowed `b` (high).
    unsafe { simd_shuffle!(a, vqrshrn_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Signed saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrn_high_n_s64<const N: i32>(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    // Legal range for a 64->32 bit narrow.
    static_assert!(N >= 1 && N <= 32);
    unsafe { simd_shuffle!(a, vqrshrn_n_s64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Unsigned saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrn_high_n_u16<const N: i32>(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
    // Unsigned 16->8 narrow; same concatenation pattern as the signed form.
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        simd_shuffle!(
            a,
            vqrshrn_n_u16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Unsigned saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrn_high_n_u32<const N: i32>(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    // Unsigned 32->16 narrow.
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_shuffle!(a, vqrshrn_n_u32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Unsigned saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrn_high_n_u64<const N: i32>(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    // Unsigned 64->32 narrow.
    static_assert!(N >= 1 && N <= 32);
    unsafe { simd_shuffle!(a, vqrshrn_n_u64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Unsigned saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnd_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrnd_n_u64<const N: i32>(a: u64) -> u32 {
    // Shift must be in the legal range for a 64->32 bit narrow.
    static_assert!(N >= 1 && N <= 32);
    // Scalar form: splat to a quad vector, narrow, take lane 0.
    let a: uint64x2_t = vdupq_n_u64(a);
    unsafe { simd_extract!(vqrshrn_n_u64::<N>(a), 0) }
}
#[doc = "Unsigned saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnh_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrnh_n_u16<const N: i32>(a: u16) -> u8 {
    // 16->8 bit narrow: shift range 1..=8.
    static_assert!(N >= 1 && N <= 8);
    let a: uint16x8_t = vdupq_n_u16(a);
    unsafe { simd_extract!(vqrshrn_n_u16::<N>(a), 0) }
}
#[doc = "Unsigned saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrns_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrns_n_u32<const N: i32>(a: u32) -> u16 {
    // 32->16 bit narrow: shift range 1..=16.
    static_assert!(N >= 1 && N <= 16);
    let a: uint32x4_t = vdupq_n_u32(a);
    unsafe { simd_extract!(vqrshrn_n_u32::<N>(a), 0) }
}
#[doc = "Signed saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnh_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrnh_n_s16<const N: i32>(a: i16) -> i8 {
    // Signed 16->8 bit narrow, same splat/narrow/extract pattern.
    static_assert!(N >= 1 && N <= 8);
    let a: int16x8_t = vdupq_n_s16(a);
    unsafe { simd_extract!(vqrshrn_n_s16::<N>(a), 0) }
}
#[doc = "Signed saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrns_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrns_n_s32<const N: i32>(a: i32) -> i16 {
    // Signed 32->16 bit narrow.
    static_assert!(N >= 1 && N <= 16);
    let a: int32x4_t = vdupq_n_s32(a);
    unsafe { simd_extract!(vqrshrn_n_s32::<N>(a), 0) }
}
#[doc = "Signed saturating rounded shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnd_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrnd_n_s64<const N: i32>(a: i64) -> i32 {
    // Signed 64->32 bit narrow.
    static_assert!(N >= 1 && N <= 32);
    let a: int64x2_t = vdupq_n_s64(a);
    unsafe { simd_extract!(vqrshrn_n_s64::<N>(a), 0) }
}
#[doc = "Signed saturating rounded shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrun_high_n_s16<const N: i32>(a: uint8x8_t, b: int16x8_t) -> uint8x16_t {
    // Shift range for a 16->8 bit narrow; signed input, unsigned output.
    static_assert!(N >= 1 && N <= 8);
    // "high" variant: `a` supplies the low 8 lanes, the narrowed `b` the high 8.
    unsafe {
        simd_shuffle!(
            a,
            vqrshrun_n_s16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Signed saturating rounded shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrun_high_n_s32<const N: i32>(a: uint16x4_t, b: int32x4_t) -> uint16x8_t {
    // 32->16 bit variant of the same low/high concatenation pattern.
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_shuffle!(a, vqrshrun_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Signed saturating rounded shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrun_high_n_s64<const N: i32>(a: uint32x2_t, b: int64x2_t) -> uint32x4_t {
    // 64->32 bit variant.
    static_assert!(N >= 1 && N <= 32);
    unsafe { simd_shuffle!(a, vqrshrun_n_s64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Signed saturating rounded shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrund_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrund_n_s64<const N: i32>(a: i64) -> u32 {
    // 64->32 bit narrow: shift range 1..=32; signed input, unsigned output.
    static_assert!(N >= 1 && N <= 32);
    // Scalar form: splat to a quad vector, narrow, take lane 0.
    let a: int64x2_t = vdupq_n_s64(a);
    unsafe { simd_extract!(vqrshrun_n_s64::<N>(a), 0) }
}
#[doc = "Signed saturating rounded shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrunh_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshrunh_n_s16<const N: i32>(a: i16) -> u8 {
    // 16->8 bit narrow: shift range 1..=8.
    static_assert!(N >= 1 && N <= 8);
    let a: int16x8_t = vdupq_n_s16(a);
    unsafe { simd_extract!(vqrshrun_n_s16::<N>(a), 0) }
}
#[doc = "Signed saturating rounded shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshruns_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqrshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqrshruns_n_s32<const N: i32>(a: i32) -> u16 {
    // 32->16 bit narrow: shift range 1..=16.
    static_assert!(N >= 1 && N <= 16);
    let a: int32x4_t = vdupq_n_s32(a);
    unsafe { simd_extract!(vqrshrun_n_s32::<N>(a), 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_n_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlb_n_s8<const N: i32>(a: i8) -> i8 {
    // Immediate shift: 3 bits, i.e. 0..=7 for an 8-bit element.
    static_assert_uimm_bits!(N, 3);
    // Splat to a vector, run the vector shift, take lane 0.
    unsafe { simd_extract!(vqshl_n_s8::<N>(vdup_n_s8(a)), 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshld_n_s64<const N: i32>(a: i64) -> i64 {
    // 6 bits: shift 0..=63 for a 64-bit element.
    static_assert_uimm_bits!(N, 6);
    unsafe { simd_extract!(vqshl_n_s64::<N>(vdup_n_s64(a)), 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlh_n_s16<const N: i32>(a: i16) -> i16 {
    // 4 bits: shift 0..=15 for a 16-bit element.
    static_assert_uimm_bits!(N, 4);
    unsafe { simd_extract!(vqshl_n_s16::<N>(vdup_n_s16(a)), 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshls_n_s32<const N: i32>(a: i32) -> i32 {
    // 5 bits: shift 0..=31 for a 32-bit element.
    static_assert_uimm_bits!(N, 5);
    unsafe { simd_extract!(vqshl_n_s32::<N>(vdup_n_s32(a)), 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_n_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlb_n_u8<const N: i32>(a: u8) -> u8 {
    // Unsigned counterpart of vqshlb_n_s8; same 3-bit immediate range.
    static_assert_uimm_bits!(N, 3);
    unsafe { simd_extract!(vqshl_n_u8::<N>(vdup_n_u8(a)), 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshld_n_u64<const N: i32>(a: u64) -> u64 {
    // Unsigned 64-bit: 6-bit immediate range.
    static_assert_uimm_bits!(N, 6);
    unsafe { simd_extract!(vqshl_n_u64::<N>(vdup_n_u64(a)), 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlh_n_u16<const N: i32>(a: u16) -> u16 {
    // Unsigned 16-bit: 4-bit immediate range.
    static_assert_uimm_bits!(N, 4);
    unsafe { simd_extract!(vqshl_n_u16::<N>(vdup_n_u16(a)), 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshls_n_u32<const N: i32>(a: u32) -> u32 {
    // Unsigned 32-bit: 5-bit immediate range.
    static_assert_uimm_bits!(N, 5);
    unsafe { simd_extract!(vqshl_n_u32::<N>(vdup_n_u32(a)), 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlb_s8(a: i8, b: i8) -> i8 {
    // Register-shift scalar form: splat both operands, shift as vectors,
    // return lane 0.
    let c: int8x8_t = vqshl_s8(vdup_n_s8(a), vdup_n_s8(b));
    unsafe { simd_extract!(c, 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlh_s16(a: i16, b: i16) -> i16 {
    // Same splat/shift/extract pattern, 16-bit element variant.
    let c: int16x4_t = vqshl_s16(vdup_n_s16(a), vdup_n_s16(b));
    unsafe { simd_extract!(c, 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshls_s32(a: i32, b: i32) -> i32 {
    // Same pattern, 32-bit element variant.
    let c: int32x2_t = vqshl_s32(vdup_n_s32(a), vdup_n_s32(b));
    unsafe { simd_extract!(c, 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlb_u8(a: u8, b: i8) -> u8 {
    // Unsigned value, signed shift amount; splat, shift, extract lane 0.
    let c: uint8x8_t = vqshl_u8(vdup_n_u8(a), vdup_n_s8(b));
    unsafe { simd_extract!(c, 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlh_u16(a: u16, b: i16) -> u16 {
    // Unsigned 16-bit variant of the same pattern.
    let c: uint16x4_t = vqshl_u16(vdup_n_u16(a), vdup_n_s16(b));
    unsafe { simd_extract!(c, 0) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshls_u32(a: u32, b: i32) -> u32 {
    // Unsigned 32-bit variant of the same pattern.
    let c: uint32x2_t = vqshl_u32(vdup_n_u32(a), vdup_n_s32(b));
    unsafe { simd_extract!(c, 0) }
}
#[doc = "Signed saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshld_s64(a: i64, b: i64) -> i64 {
    // Forwarded to the scalar (i64) form of the LLVM saturating-shift
    // intrinsic rather than going through a vector round-trip.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqshl.i64"
        )]
        fn _vqshld_s64(a: i64, b: i64) -> i64;
    }
    // SAFETY: plain value arguments; the `neon` feature is enabled above.
    unsafe { _vqshld_s64(a, b) }
}
#[doc = "Unsigned saturating shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshld_u64(a: u64, b: i64) -> u64 {
    // Forwarded to the scalar (i64) form of the LLVM unsigned
    // saturating-shift intrinsic; the shift amount `b` stays signed.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqshl.i64"
        )]
        fn _vqshld_u64(a: u64, b: i64) -> u64;
    }
    // SAFETY: plain value arguments; the `neon` feature is enabled above.
    unsafe { _vqshld_u64(a, b) }
}
#[doc = "Signed saturating shift left unsigned"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlub_n_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlub_n_s8<const N: i32>(a: i8) -> u8 {
    // Compile-time check: the shift amount fits in 3 bits (0..=7).
    static_assert_uimm_bits!(N, 3);
    // Broadcast, run the vector shift-left-unsigned, extract lane 0.
    // SAFETY: lane 0 is in bounds for an 8-lane vector.
    unsafe { simd_extract!(vqshlu_n_s8::<N>(vdup_n_s8(a)), 0) }
}
#[doc = "Signed saturating shift left unsigned"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlud_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlud_n_s64<const N: i32>(a: i64) -> u64 {
    // Compile-time check: the shift amount fits in 6 bits (0..=63).
    static_assert_uimm_bits!(N, 6);
    // Broadcast, run the vector shift-left-unsigned, extract lane 0.
    // SAFETY: lane 0 is in bounds.
    unsafe { simd_extract!(vqshlu_n_s64::<N>(vdup_n_s64(a)), 0) }
}
#[doc = "Signed saturating shift left unsigned"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluh_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshluh_n_s16<const N: i32>(a: i16) -> u16 {
    // Compile-time check: the shift amount fits in 4 bits (0..=15).
    static_assert_uimm_bits!(N, 4);
    // Broadcast, run the vector shift-left-unsigned, extract lane 0.
    // SAFETY: lane 0 is in bounds for a 4-lane vector.
    unsafe { simd_extract!(vqshlu_n_s16::<N>(vdup_n_s16(a)), 0) }
}
#[doc = "Signed saturating shift left unsigned"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlus_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshlu, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshlus_n_s32<const N: i32>(a: i32) -> u32 {
    // Compile-time check: the shift amount fits in 5 bits (0..=31).
    static_assert_uimm_bits!(N, 5);
    // Broadcast, run the vector shift-left-unsigned, extract lane 0.
    // SAFETY: lane 0 is in bounds for a 2-lane vector.
    unsafe { simd_extract!(vqshlu_n_s32::<N>(vdup_n_s32(a)), 0) }
}
#[doc = "Signed saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrn_high_n_s16<const N: i32>(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    // Shift amount must be 1..=8 for a 16-bit source lane.
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        // Narrow `b`, then concatenate: `a` becomes the low 8 lanes and the
        // narrowed result the high 8 lanes of the 16-lane output.
        simd_shuffle!(
            a,
            vqshrn_n_s16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Signed saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrn_high_n_s32<const N: i32>(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    // Shift amount must be 1..=16 for a 32-bit source lane.
    static_assert!(N >= 1 && N <= 16);
    // Narrow `b` and concatenate with `a` (low half) into an 8-lane result.
    unsafe { simd_shuffle!(a, vqshrn_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Signed saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrn_high_n_s64<const N: i32>(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    // Shift amount must be 1..=32 for a 64-bit source lane.
    static_assert!(N >= 1 && N <= 32);
    // Narrow `b` and concatenate with `a` (low half) into a 4-lane result.
    unsafe { simd_shuffle!(a, vqshrn_n_s64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Unsigned saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrn_high_n_u16<const N: i32>(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
    // Shift amount must be 1..=8 for a 16-bit source lane.
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        // Narrow `b`, then concatenate: `a` low 8 lanes, narrowed `b` high 8.
        simd_shuffle!(
            a,
            vqshrn_n_u16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Unsigned saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrn_high_n_u32<const N: i32>(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    // Shift amount must be 1..=16 for a 32-bit source lane.
    static_assert!(N >= 1 && N <= 16);
    // Narrow `b` and concatenate with `a` (low half) into an 8-lane result.
    unsafe { simd_shuffle!(a, vqshrn_n_u32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Unsigned saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrn_high_n_u64<const N: i32>(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    // Shift amount must be 1..=32 for a 64-bit source lane.
    static_assert!(N >= 1 && N <= 32);
    // Narrow `b` and concatenate with `a` (low half) into a 4-lane result.
    unsafe { simd_shuffle!(a, vqshrn_n_u64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Signed saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnd_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrnd_n_s64<const N: i32>(a: i64) -> i32 {
    // Shift amount must be 1..=32 for a 64-bit source narrowed to 32 bits.
    static_assert!(N >= 1 && N <= 32);
    // Scalar LLVM intrinsic; the const shift amount is passed as a runtime
    // argument after the compile-time range check above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqshrn.i32"
        )]
        fn _vqshrnd_n_s64(a: i64, n: i32) -> i32;
    }
    // SAFETY: N is range-checked above; plain value arguments.
    unsafe { _vqshrnd_n_s64(a, N) }
}
#[doc = "Unsigned saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnd_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrnd_n_u64<const N: i32>(a: u64) -> u32 {
    // Shift amount must be 1..=32 for a 64-bit source narrowed to 32 bits.
    static_assert!(N >= 1 && N <= 32);
    // Scalar LLVM intrinsic; the const shift amount is passed as a runtime
    // argument after the compile-time range check above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqshrn.i32"
        )]
        fn _vqshrnd_n_u64(a: u64, n: i32) -> u32;
    }
    // SAFETY: N is range-checked above; plain value arguments.
    unsafe { _vqshrnd_n_u64(a, N) }
}
#[doc = "Signed saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnh_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrnh_n_s16<const N: i32>(a: i16) -> i8 {
    // Shift amount must be 1..=8 for a 16-bit source narrowed to 8 bits.
    static_assert!(N >= 1 && N <= 8);
    // Broadcast to a q-vector, narrow it, and return lane 0.
    // SAFETY: lane 0 is in bounds.
    unsafe { simd_extract!(vqshrn_n_s16::<N>(vdupq_n_s16(a)), 0) }
}
#[doc = "Signed saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrns_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrns_n_s32<const N: i32>(a: i32) -> i16 {
    // Shift amount must be 1..=16 for a 32-bit source narrowed to 16 bits.
    static_assert!(N >= 1 && N <= 16);
    // Broadcast to a q-vector, narrow it, and return lane 0.
    // SAFETY: lane 0 is in bounds.
    unsafe { simd_extract!(vqshrn_n_s32::<N>(vdupq_n_s32(a)), 0) }
}
#[doc = "Unsigned saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnh_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrnh_n_u16<const N: i32>(a: u16) -> u8 {
    // Shift amount must be 1..=8 for a 16-bit source narrowed to 8 bits.
    static_assert!(N >= 1 && N <= 8);
    // Broadcast to a q-vector, narrow it, and return lane 0.
    // SAFETY: lane 0 is in bounds.
    unsafe { simd_extract!(vqshrn_n_u16::<N>(vdupq_n_u16(a)), 0) }
}
#[doc = "Unsigned saturating shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrns_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(uqshrn, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrns_n_u32<const N: i32>(a: u32) -> u16 {
    // Shift amount must be 1..=16 for a 32-bit source narrowed to 16 bits.
    static_assert!(N >= 1 && N <= 16);
    // Broadcast to a q-vector, narrow it, and return lane 0.
    // SAFETY: lane 0 is in bounds.
    unsafe { simd_extract!(vqshrn_n_u32::<N>(vdupq_n_u32(a)), 0) }
}
#[doc = "Signed saturating shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrun_high_n_s16<const N: i32>(a: uint8x8_t, b: int16x8_t) -> uint8x16_t {
    // Shift amount must be 1..=8 for a 16-bit source lane.
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        // Narrow `b` (signed→unsigned), then concatenate with `a` (low half).
        simd_shuffle!(
            a,
            vqshrun_n_s16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Signed saturating shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrun_high_n_s32<const N: i32>(a: uint16x4_t, b: int32x4_t) -> uint16x8_t {
    // Shift amount must be 1..=16 for a 32-bit source lane.
    static_assert!(N >= 1 && N <= 16);
    // Narrow `b` (signed→unsigned) and concatenate with `a` (low half).
    unsafe { simd_shuffle!(a, vqshrun_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Signed saturating shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrun_high_n_s64<const N: i32>(a: uint32x2_t, b: int64x2_t) -> uint32x4_t {
    // Shift amount must be 1..=32 for a 64-bit source lane.
    static_assert!(N >= 1 && N <= 32);
    // Narrow `b` (signed→unsigned) and concatenate with `a` (low half).
    unsafe { simd_shuffle!(a, vqshrun_n_s64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Signed saturating shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrund_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrund_n_s64<const N: i32>(a: i64) -> u32 {
    // Shift amount must be 1..=32 for a 64-bit source narrowed to 32 bits.
    static_assert!(N >= 1 && N <= 32);
    // Broadcast to a q-vector, narrow it (signed→unsigned), return lane 0.
    // SAFETY: lane 0 is in bounds.
    unsafe { simd_extract!(vqshrun_n_s64::<N>(vdupq_n_s64(a)), 0) }
}
#[doc = "Signed saturating shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrunh_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshrunh_n_s16<const N: i32>(a: i16) -> u8 {
    // Shift amount must be 1..=8 for a 16-bit source narrowed to 8 bits.
    static_assert!(N >= 1 && N <= 8);
    // Broadcast to a q-vector, narrow it (signed→unsigned), return lane 0.
    // SAFETY: lane 0 is in bounds.
    unsafe { simd_extract!(vqshrun_n_s16::<N>(vdupq_n_s16(a)), 0) }
}
#[doc = "Signed saturating shift right unsigned narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshruns_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sqshrun, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqshruns_n_s32<const N: i32>(a: i32) -> u16 {
    // Shift amount must be 1..=16 for a 32-bit source narrowed to 16 bits.
    static_assert!(N >= 1 && N <= 16);
    // Broadcast to a q-vector, narrow it (signed→unsigned), return lane 0.
    // SAFETY: lane 0 is in bounds.
    unsafe { simd_extract!(vqshrun_n_s32::<N>(vdupq_n_s32(a)), 0) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubb_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn vqsubb_s8(a: i8, b: i8) -> i8 {
    // Broadcast both scalars, do the vector saturating subtract, take lane 0.
    let a: int8x8_t = vdup_n_s8(a);
    let b: int8x8_t = vdup_n_s8(b);
    // SAFETY: lane 0 is in bounds for an 8-lane vector.
    unsafe { simd_extract!(vqsub_s8(a, b), 0) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubh_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn vqsubh_s16(a: i16, b: i16) -> i16 {
    // Broadcast both scalars, do the vector saturating subtract, take lane 0.
    let a: int16x4_t = vdup_n_s16(a);
    let b: int16x4_t = vdup_n_s16(b);
    // SAFETY: lane 0 is in bounds for a 4-lane vector.
    unsafe { simd_extract!(vqsub_s16(a, b), 0) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubb_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn vqsubb_u8(a: u8, b: u8) -> u8 {
    // Broadcast both scalars, do the vector saturating subtract, take lane 0.
    let a: uint8x8_t = vdup_n_u8(a);
    let b: uint8x8_t = vdup_n_u8(b);
    // SAFETY: lane 0 is in bounds for an 8-lane vector.
    unsafe { simd_extract!(vqsub_u8(a, b), 0) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubh_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn vqsubh_u16(a: u16, b: u16) -> u16 {
    // Broadcast both scalars, do the vector saturating subtract, take lane 0.
    let a: uint16x4_t = vdup_n_u16(a);
    let b: uint16x4_t = vdup_n_u16(b);
    // SAFETY: lane 0 is in bounds for a 4-lane vector.
    unsafe { simd_extract!(vqsub_u16(a, b), 0) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubs_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn vqsubs_s32(a: i32, b: i32) -> i32 {
    // Forwarded to the scalar (i32) form of the LLVM saturating-subtract
    // intrinsic instead of a vector round-trip.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqsub.i32"
        )]
        fn _vqsubs_s32(a: i32, b: i32) -> i32;
    }
    // SAFETY: plain value arguments; the `neon` feature is enabled above.
    unsafe { _vqsubs_s32(a, b) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn vqsubd_s64(a: i64, b: i64) -> i64 {
    // Forwarded to the scalar (i64) form of the LLVM saturating-subtract
    // intrinsic instead of a vector round-trip.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.sqsub.i64"
        )]
        fn _vqsubd_s64(a: i64, b: i64) -> i64;
    }
    // SAFETY: plain value arguments; the `neon` feature is enabled above.
    unsafe { _vqsubd_s64(a, b) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubs_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn vqsubs_u32(a: u32, b: u32) -> u32 {
    // Forwarded to the scalar (i32) form of the LLVM unsigned
    // saturating-subtract intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqsub.i32"
        )]
        fn _vqsubs_u32(a: u32, b: u32) -> u32;
    }
    // SAFETY: plain value arguments; the `neon` feature is enabled above.
    unsafe { _vqsubs_u32(a, b) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubd_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn vqsubd_u64(a: u64, b: u64) -> u64 {
    // Forwarded to the scalar (i64) form of the LLVM unsigned
    // saturating-subtract intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.uqsub.i64"
        )]
        fn _vqsubd_u64(a: u64, b: u64) -> u64;
    }
    // SAFETY: plain value arguments; the `neon` feature is enabled above.
    unsafe { _vqsubd_u64(a, b) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Private helper: raw single-table TBL (8-lane index/result) that the public
// `vqtbl1*` wrappers funnel through.
fn vqtbl1(a: int8x16_t, b: uint8x8_t) -> int8x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl1.v8i8"
        )]
        fn _vqtbl1(a: int8x16_t, b: uint8x8_t) -> int8x8_t;
    }
    // SAFETY: plain value arguments; the `neon` feature is enabled above.
    unsafe { _vqtbl1(a, b) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Private helper: raw single-table TBL (16-lane index/result) that the public
// `vqtbl1q*` wrappers funnel through.
fn vqtbl1q(a: int8x16_t, b: uint8x16_t) -> int8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl1.v16i8"
        )]
        fn _vqtbl1q(a: int8x16_t, b: uint8x16_t) -> int8x16_t;
    }
    // SAFETY: plain value arguments; the `neon` feature is enabled above.
    unsafe { _vqtbl1q(a, b) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl1_s8(a: int8x16_t, b: uint8x8_t) -> int8x8_t {
    // Direct pass-through: types already match the private helper.
    vqtbl1(a, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl1q_s8(a: int8x16_t, b: uint8x16_t) -> int8x16_t {
    // Direct pass-through: types already match the private helper.
    vqtbl1q(a, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl1_u8(a: uint8x16_t, b: uint8x8_t) -> uint8x8_t {
    // SAFETY: uint8x16_t and int8x16_t (and the 8-lane results) have the
    // same size/layout, so the transmutes only reinterpret the bytes.
    unsafe { transmute(vqtbl1(transmute(a), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // SAFETY: same-size vector reinterpretations between u8 and i8 lanes.
    unsafe { transmute(vqtbl1q(transmute(a), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl1_p8(a: poly8x16_t, b: uint8x8_t) -> poly8x8_t {
    // SAFETY: same-size vector reinterpretations between poly8 and i8 lanes.
    unsafe { transmute(vqtbl1(transmute(a), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl1q_p8(a: poly8x16_t, b: uint8x16_t) -> poly8x16_t {
    // SAFETY: same-size vector reinterpretations between poly8 and i8 lanes.
    unsafe { transmute(vqtbl1q(transmute(a), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Private helper: raw two-table TBL (8-lane index/result); the public
// `vqtbl2*` wrappers split their `x2` tuple into the two table arguments.
fn vqtbl2(a: int8x16_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl2.v8i8"
        )]
        fn _vqtbl2(a: int8x16_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t;
    }
    // SAFETY: plain value arguments; the `neon` feature is enabled above.
    unsafe { _vqtbl2(a, b, c) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Private helper: raw two-table TBL (16-lane index/result); the public
// `vqtbl2q*` wrappers split their `x2` tuple into the two table arguments.
fn vqtbl2q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl2.v16i8"
        )]
        fn _vqtbl2q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t;
    }
    // SAFETY: plain value arguments; the `neon` feature is enabled above.
    unsafe { _vqtbl2q(a, b, c) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2_s8(a: int8x16x2_t, b: uint8x8_t) -> int8x8_t {
    // Unpack the two-table tuple into the helper's separate table arguments.
    vqtbl2(a.0, a.1, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2q_s8(a: int8x16x2_t, b: uint8x16_t) -> int8x16_t {
    // Unpack the two-table tuple into the helper's separate table arguments.
    vqtbl2q(a.0, a.1, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_u8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2_u8(a: uint8x16x2_t, b: uint8x8_t) -> uint8x8_t {
    // Little-endian path: reinterpret u8 tables as i8 and call the helper.
    // SAFETY: same-size vector reinterpretations between u8 and i8 lanes.
    unsafe { transmute(vqtbl2(transmute(a.0), transmute(a.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_u8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2_u8(a: uint8x16x2_t, b: uint8x8_t) -> uint8x8_t {
    // Big-endian path: reverse the lane order of both table halves and the
    // index vector before the lookup, and reverse the result back, so the
    // observable lane semantics match the little-endian definition.
    let mut a: uint8x16x2_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        // SAFETY: same-size vector reinterpretations between u8 and i8 lanes.
        let ret_val: uint8x8_t = transmute(vqtbl2(transmute(a.0), transmute(a.1), b));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_u8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2q_u8(a: uint8x16x2_t, b: uint8x16_t) -> uint8x16_t {
    // Little-endian path: reinterpret u8 tables as i8 and call the helper.
    // SAFETY: same-size vector reinterpretations between u8 and i8 lanes.
    unsafe { transmute(vqtbl2q(transmute(a.0), transmute(a.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_u8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2q_u8(a: uint8x16x2_t, b: uint8x16_t) -> uint8x16_t {
    // Big-endian path: reverse the lane order of both table halves and the
    // index vector before the lookup, and reverse the result back, so the
    // observable lane semantics match the little-endian definition.
    let mut a: uint8x16x2_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x16_t =
        unsafe { simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        // SAFETY: same-size vector reinterpretations between u8 and i8 lanes.
        let ret_val: uint8x16_t = transmute(vqtbl2q(transmute(a.0), transmute(a.1), b));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_p8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2_p8(a: poly8x16x2_t, b: uint8x8_t) -> poly8x8_t {
    // Little-endian path: reinterpret poly8 tables as i8 and call the helper.
    // SAFETY: same-size vector reinterpretations between poly8 and i8 lanes.
    unsafe { transmute(vqtbl2(transmute(a.0), transmute(a.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_p8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2_p8(a: poly8x16x2_t, b: uint8x8_t) -> poly8x8_t {
    // Big-endian path: reverse the lane order of both table halves and the
    // index vector before the lookup, and reverse the result back, so the
    // observable lane semantics match the little-endian definition.
    let mut a: poly8x16x2_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        // SAFETY: same-size vector reinterpretations between poly8 and i8 lanes.
        let ret_val: poly8x8_t = transmute(vqtbl2(transmute(a.0), transmute(a.1), b));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_p8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2q_p8(a: poly8x16x2_t, b: uint8x16_t) -> poly8x16_t {
    // Little-endian path: reinterpret poly8 tables as i8 and call the helper.
    // SAFETY: same-size vector reinterpretations between poly8 and i8 lanes.
    unsafe { transmute(vqtbl2q(transmute(a.0), transmute(a.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_p8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl2q_p8(a: poly8x16x2_t, b: uint8x16_t) -> poly8x16_t {
    // Big-endian variant: reverse the lane order of all 16-lane inputs,
    // perform the transmute-based lookup, then reverse the result back.
    let mut a: poly8x16x2_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x16_t =
        unsafe { simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x16_t = transmute(vqtbl2q(transmute(a.0), transmute(a.1), b));
        // Undo the lane reversal on the returned vector.
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbl3(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8x8_t {
    // Private binding to the raw LLVM intrinsic `llvm.aarch64.neon.tbl3.v8i8`:
    // three 128-bit table vectors (a, b, c) indexed by the 8-lane vector `d`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl3.v8i8"
        )]
        fn _vqtbl3(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8x8_t;
    }
    unsafe { _vqtbl3(a, b, c, d) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbl3q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> int8x16_t {
    // Private binding to `llvm.aarch64.neon.tbl3.v16i8`: the 128-bit (q)
    // counterpart of `vqtbl3`, with a 16-lane index vector.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl3.v16i8"
        )]
        fn _vqtbl3q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> int8x16_t;
    }
    unsafe { _vqtbl3q(a, b, c, d) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3_s8(a: int8x16x3_t, b: uint8x8_t) -> int8x8_t {
    // No transmutes needed for the signed form, hence also no
    // `cfg(target_endian)` split as in the `_u8`/`_p8` variants.
    vqtbl3(a.0, a.1, a.2, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3q_s8(a: int8x16x3_t, b: uint8x16_t) -> int8x16_t {
    // 128-bit (q) signed form; direct forward, no transmutes required.
    vqtbl3q(a.0, a.1, a.2, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_u8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3_u8(a: uint8x16x3_t, b: uint8x8_t) -> uint8x8_t {
    // Unsigned vectors reinterpreted as the signed vectors `vqtbl3` takes.
    unsafe { transmute(vqtbl3(transmute(a.0), transmute(a.1), transmute(a.2), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_u8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3_u8(a: uint8x16x3_t, b: uint8x8_t) -> uint8x8_t {
    // Big-endian variant: reverse lane order of the three table vectors and
    // the index vector, call the transmute-based helper, then reverse back.
    let mut a: uint8x16x3_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.2 = unsafe {
        simd_shuffle!(
            a.2,
            a.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x8_t =
            transmute(vqtbl3(transmute(a.0), transmute(a.1), transmute(a.2), b));
        // Undo the lane reversal on the returned vector.
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_u8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3q_u8(a: uint8x16x3_t, b: uint8x16_t) -> uint8x16_t {
    // 128-bit (q) unsigned form; transmute round-trip through `vqtbl3q`.
    unsafe { transmute(vqtbl3q(transmute(a.0), transmute(a.1), transmute(a.2), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_u8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3q_u8(a: uint8x16x3_t, b: uint8x16_t) -> uint8x16_t {
    // Big-endian variant: reverse lane order of all inputs, run the
    // transmute-based lookup, then reverse the result back.
    let mut a: uint8x16x3_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.2 = unsafe {
        simd_shuffle!(
            a.2,
            a.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x16_t =
        unsafe { simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x16_t =
            transmute(vqtbl3q(transmute(a.0), transmute(a.1), transmute(a.2), b));
        // Undo the lane reversal on the returned vector.
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_p8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3_p8(a: poly8x16x3_t, b: uint8x8_t) -> poly8x8_t {
    // Polynomial vectors reinterpreted as the signed vectors `vqtbl3` takes.
    unsafe { transmute(vqtbl3(transmute(a.0), transmute(a.1), transmute(a.2), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_p8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3_p8(a: poly8x16x3_t, b: uint8x8_t) -> poly8x8_t {
    // Big-endian variant: reverse lane order of all inputs around the
    // transmute-based lookup, then reverse the result back.
    let mut a: poly8x16x3_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.2 = unsafe {
        simd_shuffle!(
            a.2,
            a.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x8_t =
            transmute(vqtbl3(transmute(a.0), transmute(a.1), transmute(a.2), b));
        // Undo the lane reversal on the returned vector.
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_p8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3q_p8(a: poly8x16x3_t, b: uint8x16_t) -> poly8x16_t {
    // 128-bit (q) polynomial form; transmute round-trip through `vqtbl3q`.
    unsafe { transmute(vqtbl3q(transmute(a.0), transmute(a.1), transmute(a.2), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_p8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl3q_p8(a: poly8x16x3_t, b: uint8x16_t) -> poly8x16_t {
    // Big-endian variant: reverse lane order of all inputs around the
    // transmute-based lookup, then reverse the result back.
    let mut a: poly8x16x3_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.2 = unsafe {
        simd_shuffle!(
            a.2,
            a.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x16_t =
        unsafe { simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x16_t =
            transmute(vqtbl3q(transmute(a.0), transmute(a.1), transmute(a.2), b));
        // Undo the lane reversal on the returned vector.
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbl4(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x8_t) -> int8x8_t {
    // Private binding to `llvm.aarch64.neon.tbl4.v8i8`: four 128-bit table
    // vectors (a..d) indexed by the 8-lane vector `e`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl4.v8i8"
        )]
        fn _vqtbl4(
            a: int8x16_t,
            b: int8x16_t,
            c: int8x16_t,
            d: int8x16_t,
            e: uint8x8_t,
        ) -> int8x8_t;
    }
    unsafe { _vqtbl4(a, b, c, d, e) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbl4q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x16_t) -> int8x16_t {
    // Private binding to `llvm.aarch64.neon.tbl4.v16i8`: the 128-bit (q)
    // counterpart of `vqtbl4`, with a 16-lane index vector.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbl4.v16i8"
        )]
        fn _vqtbl4q(
            a: int8x16_t,
            b: int8x16_t,
            c: int8x16_t,
            d: int8x16_t,
            e: uint8x16_t,
        ) -> int8x16_t;
    }
    unsafe { _vqtbl4q(a, b, c, d, e) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4_s8(a: int8x16x4_t, b: uint8x8_t) -> int8x8_t {
    // Signed form: direct forward, no transmutes and no endian split needed.
    vqtbl4(a.0, a.1, a.2, a.3, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4q_s8(a: int8x16x4_t, b: uint8x16_t) -> int8x16_t {
    // 128-bit (q) signed form; direct forward, no transmutes required.
    vqtbl4q(a.0, a.1, a.2, a.3, b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_u8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4_u8(a: uint8x16x4_t, b: uint8x8_t) -> uint8x8_t {
    // Unsigned vectors reinterpreted as the signed vectors `vqtbl4` takes.
    unsafe {
        transmute(vqtbl4(
            transmute(a.0),
            transmute(a.1),
            transmute(a.2),
            transmute(a.3),
            b,
        ))
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_u8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4_u8(a: uint8x16x4_t, b: uint8x8_t) -> uint8x8_t {
    // Big-endian variant: reverse lane order of the four table vectors and
    // the index vector, call the transmute-based helper, then reverse back.
    let mut a: uint8x16x4_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.2 = unsafe {
        simd_shuffle!(
            a.2,
            a.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.3 = unsafe {
        simd_shuffle!(
            a.3,
            a.3,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x8_t = transmute(vqtbl4(
            transmute(a.0),
            transmute(a.1),
            transmute(a.2),
            transmute(a.3),
            b,
        ));
        // Undo the lane reversal on the returned vector.
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_u8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4q_u8(a: uint8x16x4_t, b: uint8x16_t) -> uint8x16_t {
    // 128-bit (q) unsigned form; transmute round-trip through `vqtbl4q`.
    unsafe {
        transmute(vqtbl4q(
            transmute(a.0),
            transmute(a.1),
            transmute(a.2),
            transmute(a.3),
            b,
        ))
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_u8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4q_u8(a: uint8x16x4_t, b: uint8x16_t) -> uint8x16_t {
    // Big-endian variant: reverse lane order of all inputs, run the
    // transmute-based lookup, then reverse the result back.
    let mut a: uint8x16x4_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.2 = unsafe {
        simd_shuffle!(
            a.2,
            a.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.3 = unsafe {
        simd_shuffle!(
            a.3,
            a.3,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x16_t =
        unsafe { simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x16_t = transmute(vqtbl4q(
            transmute(a.0),
            transmute(a.1),
            transmute(a.2),
            transmute(a.3),
            b,
        ));
        // Undo the lane reversal on the returned vector.
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_p8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4_p8(a: poly8x16x4_t, b: uint8x8_t) -> poly8x8_t {
    // Polynomial vectors reinterpreted as the signed vectors `vqtbl4` takes.
    unsafe {
        transmute(vqtbl4(
            transmute(a.0),
            transmute(a.1),
            transmute(a.2),
            transmute(a.3),
            b,
        ))
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_p8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4_p8(a: poly8x16x4_t, b: uint8x8_t) -> poly8x8_t {
    // Big-endian variant: reverse lane order of all inputs around the
    // transmute-based lookup, then reverse the result back.
    let mut a: poly8x16x4_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.2 = unsafe {
        simd_shuffle!(
            a.2,
            a.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.3 = unsafe {
        simd_shuffle!(
            a.3,
            a.3,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x8_t = transmute(vqtbl4(
            transmute(a.0),
            transmute(a.1),
            transmute(a.2),
            transmute(a.3),
            b,
        ));
        // Undo the lane reversal on the returned vector.
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_p8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4q_p8(a: poly8x16x4_t, b: uint8x16_t) -> poly8x16_t {
    // 128-bit (q) polynomial form; transmute round-trip through `vqtbl4q`.
    unsafe {
        transmute(vqtbl4q(
            transmute(a.0),
            transmute(a.1),
            transmute(a.2),
            transmute(a.3),
            b,
        ))
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_p8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbl4q_p8(a: poly8x16x4_t, b: uint8x16_t) -> poly8x16_t {
    // Big-endian variant: reverse lane order of all inputs around the
    // transmute-based lookup, then reverse the result back.
    let mut a: poly8x16x4_t = a;
    a.0 = unsafe {
        simd_shuffle!(
            a.0,
            a.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.1 = unsafe {
        simd_shuffle!(
            a.1,
            a.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.2 = unsafe {
        simd_shuffle!(
            a.2,
            a.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    a.3 = unsafe {
        simd_shuffle!(
            a.3,
            a.3,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let b: uint8x16_t =
        unsafe { simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x16_t = transmute(vqtbl4q(
            transmute(a.0),
            transmute(a.1),
            transmute(a.2),
            transmute(a.3),
            b,
        ));
        // Undo the lane reversal on the returned vector.
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbx1(a: int8x8_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t {
    // Private binding to `llvm.aarch64.neon.tbx1.v8i8`: extended lookup where
    // `a` supplies the fallback lanes, `b` is the table, `c` the indices.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx1.v8i8"
        )]
        fn _vqtbx1(a: int8x8_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t;
    }
    unsafe { _vqtbx1(a, b, c) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbx1q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t {
    // Private binding to `llvm.aarch64.neon.tbx1.v16i8`: the 128-bit (q)
    // counterpart of `vqtbx1`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx1.v16i8"
        )]
        fn _vqtbx1q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t;
    }
    unsafe { _vqtbx1q(a, b, c) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx1_s8(a: int8x8_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t {
    // Signed form: direct forward to the private binding.
    vqtbx1(a, b, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx1q_s8(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t {
    // 128-bit (q) signed form: direct forward to the private binding.
    vqtbx1q(a, b, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx1_u8(a: uint8x8_t, b: uint8x16_t, c: uint8x8_t) -> uint8x8_t {
    // Unsigned vectors reinterpreted as the signed vectors `vqtbx1` takes.
    unsafe { transmute(vqtbx1(transmute(a), transmute(b), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx1q_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t {
    // 128-bit (q) unsigned form; transmute round-trip through `vqtbx1q`.
    unsafe { transmute(vqtbx1q(transmute(a), transmute(b), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx1_p8(a: poly8x8_t, b: poly8x16_t, c: uint8x8_t) -> poly8x8_t {
    // Polynomial vectors reinterpreted as the signed vectors `vqtbx1` takes.
    unsafe { transmute(vqtbx1(transmute(a), transmute(b), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx1q_p8(a: poly8x16_t, b: poly8x16_t, c: uint8x16_t) -> poly8x16_t {
    // 128-bit (q) polynomial form; transmute round-trip through `vqtbx1q`.
    unsafe { transmute(vqtbx1q(transmute(a), transmute(b), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbx2(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8x8_t {
    // Private binding to `llvm.aarch64.neon.tbx2.v8i8`: extended lookup with
    // fallback `a`, two table vectors (b, c), and index vector `d`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx2.v8i8"
        )]
        fn _vqtbx2(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8x8_t;
    }
    unsafe { _vqtbx2(a, b, c, d) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
fn vqtbx2q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> int8x16_t {
    // Private binding to `llvm.aarch64.neon.tbx2.v16i8`: the 128-bit (q)
    // counterpart of `vqtbx2`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx2.v16i8"
        )]
        fn _vqtbx2q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> int8x16_t;
    }
    unsafe { _vqtbx2q(a, b, c, d) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2_s8(a: int8x8_t, b: int8x16x2_t, c: uint8x8_t) -> int8x8_t {
    // Signed form: unpack the two-vector table and forward directly.
    vqtbx2(a, b.0, b.1, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2q_s8(a: int8x16_t, b: int8x16x2_t, c: uint8x16_t) -> int8x16_t {
    // 128-bit (q) signed form: unpack the table pair and forward directly.
    vqtbx2q(a, b.0, b.1, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_u8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2_u8(a: uint8x8_t, b: uint8x16x2_t, c: uint8x8_t) -> uint8x8_t {
    // Unsigned vectors reinterpreted as the signed vectors `vqtbx2` takes.
    unsafe { transmute(vqtbx2(transmute(a), transmute(b.0), transmute(b.1), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_u8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vqtbx2_u8(a: uint8x8_t, b: uint8x16x2_t, c: uint8x8_t) -> uint8x8_t {
    // Big-endian variant: reverse lane order of the fallback vector, both
    // table vectors, and the index vector, then reverse the result back.
    let mut b: uint8x16x2_t = b;
    let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x8_t = transmute(vqtbx2(transmute(a), transmute(b.0), transmute(b.1), c));
        // Undo the lane reversal on the returned vector.
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_u8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Little-endian: reinterpret the unsigned vectors as signed via `transmute`
// and forward to the signed helper `vqtbx2q`; no lane reordering needed.
pub fn vqtbx2q_u8(a: uint8x16_t, b: uint8x16x2_t, c: uint8x16_t) -> uint8x16_t {
    unsafe { transmute(vqtbx2q(transmute(a), transmute(b.0), transmute(b.1), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_u8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Big-endian: reverse the lane order of every input with `simd_shuffle!`,
// perform the look-up via the signed helper, then reverse the result back.
pub fn vqtbx2q_u8(a: uint8x16_t, b: uint8x16x2_t, c: uint8x16_t) -> uint8x16_t {
    let mut b: uint8x16x2_t = b;
    let a: uint8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x16_t =
        unsafe { simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x16_t =
            transmute(vqtbx2q(transmute(a), transmute(b.0), transmute(b.1), c));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_p8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Little-endian: reinterpret the polynomial vectors as signed via `transmute`
// and forward to the signed helper `vqtbx2`; no lane reordering needed.
pub fn vqtbx2_p8(a: poly8x8_t, b: poly8x16x2_t, c: uint8x8_t) -> poly8x8_t {
    unsafe { transmute(vqtbx2(transmute(a), transmute(b.0), transmute(b.1), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_p8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Big-endian: reverse the lane order of every input with `simd_shuffle!`,
// perform the look-up via the signed helper, then reverse the result back.
pub fn vqtbx2_p8(a: poly8x8_t, b: poly8x16x2_t, c: uint8x8_t) -> poly8x8_t {
    let mut b: poly8x16x2_t = b;
    let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x8_t = transmute(vqtbx2(transmute(a), transmute(b.0), transmute(b.1), c));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_p8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Little-endian: reinterpret the polynomial vectors as signed via `transmute`
// and forward to the signed helper `vqtbx2q`; no lane reordering needed.
pub fn vqtbx2q_p8(a: poly8x16_t, b: poly8x16x2_t, c: uint8x16_t) -> poly8x16_t {
    unsafe { transmute(vqtbx2q(transmute(a), transmute(b.0), transmute(b.1), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_p8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Big-endian: reverse the lane order of every input with `simd_shuffle!`,
// perform the look-up via the signed helper, then reverse the result back.
pub fn vqtbx2q_p8(a: poly8x16_t, b: poly8x16x2_t, c: uint8x16_t) -> poly8x16_t {
    let mut b: poly8x16x2_t = b;
    let a: poly8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x16_t =
        unsafe { simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x16_t =
            transmute(vqtbx2q(transmute(a), transmute(b.0), transmute(b.1), c));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Private three-table TBX binding (8-lane result): declares and calls the
// `llvm.aarch64.neon.tbx3.v8i8` intrinsic directly via the "unadjusted" ABI.
fn vqtbx3(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x8_t) -> int8x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx3.v8i8"
        )]
        fn _vqtbx3(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x8_t)
            -> int8x8_t;
    }
    unsafe { _vqtbx3(a, b, c, d, e) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Private three-table TBX binding (16-lane result): declares and calls the
// `llvm.aarch64.neon.tbx3.v16i8` intrinsic directly via the "unadjusted" ABI.
fn vqtbx3q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x16_t) -> int8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx3.v16i8"
        )]
        fn _vqtbx3q(
            a: int8x16_t,
            b: int8x16_t,
            c: int8x16_t,
            d: int8x16_t,
            e: uint8x16_t,
        ) -> int8x16_t;
    }
    unsafe { _vqtbx3q(a, b, c, d, e) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Public signed 64-bit variant: unpacks the three-table tuple and forwards to
// the private `vqtbx3` binding.
pub fn vqtbx3_s8(a: int8x8_t, b: int8x16x3_t, c: uint8x8_t) -> int8x8_t {
    vqtbx3(a, b.0, b.1, b.2, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Public signed 128-bit variant: unpacks the three-table tuple and forwards to
// the private `vqtbx3q` binding.
pub fn vqtbx3q_s8(a: int8x16_t, b: int8x16x3_t, c: uint8x16_t) -> int8x16_t {
    vqtbx3q(a, b.0, b.1, b.2, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_u8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Little-endian: reinterpret the unsigned vectors as signed via `transmute`
// and forward to the signed helper `vqtbx3`; no lane reordering needed.
pub fn vqtbx3_u8(a: uint8x8_t, b: uint8x16x3_t, c: uint8x8_t) -> uint8x8_t {
    unsafe {
        transmute(vqtbx3(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_u8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Big-endian: reverse the lane order of every input with `simd_shuffle!`,
// perform the look-up via the signed helper, then reverse the result back.
pub fn vqtbx3_u8(a: uint8x8_t, b: uint8x16x3_t, c: uint8x8_t) -> uint8x8_t {
    let mut b: uint8x16x3_t = b;
    let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.2 = unsafe {
        simd_shuffle!(
            b.2,
            b.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x8_t = transmute(vqtbx3(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            c,
        ));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_u8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Little-endian: reinterpret the unsigned vectors as signed via `transmute`
// and forward to the signed helper `vqtbx3q`; no lane reordering needed.
pub fn vqtbx3q_u8(a: uint8x16_t, b: uint8x16x3_t, c: uint8x16_t) -> uint8x16_t {
    unsafe {
        transmute(vqtbx3q(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_u8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Big-endian: reverse the lane order of every input with `simd_shuffle!`,
// perform the look-up via the signed helper, then reverse the result back.
pub fn vqtbx3q_u8(a: uint8x16_t, b: uint8x16x3_t, c: uint8x16_t) -> uint8x16_t {
    let mut b: uint8x16x3_t = b;
    let a: uint8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.2 = unsafe {
        simd_shuffle!(
            b.2,
            b.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x16_t =
        unsafe { simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x16_t = transmute(vqtbx3q(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            c,
        ));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_p8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Little-endian: reinterpret the polynomial vectors as signed via `transmute`
// and forward to the signed helper `vqtbx3`; no lane reordering needed.
pub fn vqtbx3_p8(a: poly8x8_t, b: poly8x16x3_t, c: uint8x8_t) -> poly8x8_t {
    unsafe {
        transmute(vqtbx3(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_p8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Big-endian: reverse the lane order of every input with `simd_shuffle!`,
// perform the look-up via the signed helper, then reverse the result back.
pub fn vqtbx3_p8(a: poly8x8_t, b: poly8x16x3_t, c: uint8x8_t) -> poly8x8_t {
    let mut b: poly8x16x3_t = b;
    let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.2 = unsafe {
        simd_shuffle!(
            b.2,
            b.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x8_t = transmute(vqtbx3(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            c,
        ));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_p8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Little-endian: reinterpret the polynomial vectors as signed via `transmute`
// and forward to the signed helper `vqtbx3q`; no lane reordering needed.
pub fn vqtbx3q_p8(a: poly8x16_t, b: poly8x16x3_t, c: uint8x16_t) -> poly8x16_t {
    unsafe {
        transmute(vqtbx3q(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_p8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Big-endian: reverse the lane order of every input with `simd_shuffle!`,
// perform the look-up via the signed helper, then reverse the result back.
pub fn vqtbx3q_p8(a: poly8x16_t, b: poly8x16x3_t, c: uint8x16_t) -> poly8x16_t {
    let mut b: poly8x16x3_t = b;
    let a: poly8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.2 = unsafe {
        simd_shuffle!(
            b.2,
            b.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x16_t =
        unsafe { simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x16_t = transmute(vqtbx3q(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            c,
        ));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Private four-table TBX binding (8-lane result): declares and calls the
// `llvm.aarch64.neon.tbx4.v8i8` intrinsic directly via the "unadjusted" ABI.
fn vqtbx4(
    a: int8x8_t,
    b: int8x16_t,
    c: int8x16_t,
    d: int8x16_t,
    e: int8x16_t,
    f: uint8x8_t,
) -> int8x8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx4.v8i8"
        )]
        fn _vqtbx4(
            a: int8x8_t,
            b: int8x16_t,
            c: int8x16_t,
            d: int8x16_t,
            e: int8x16_t,
            f: uint8x8_t,
        ) -> int8x8_t;
    }
    unsafe { _vqtbx4(a, b, c, d, e, f) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Private four-table TBX binding (16-lane result): declares and calls the
// `llvm.aarch64.neon.tbx4.v16i8` intrinsic directly via the "unadjusted" ABI.
fn vqtbx4q(
    a: int8x16_t,
    b: int8x16_t,
    c: int8x16_t,
    d: int8x16_t,
    e: int8x16_t,
    f: uint8x16_t,
) -> int8x16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.tbx4.v16i8"
        )]
        fn _vqtbx4q(
            a: int8x16_t,
            b: int8x16_t,
            c: int8x16_t,
            d: int8x16_t,
            e: int8x16_t,
            f: uint8x16_t,
        ) -> int8x16_t;
    }
    unsafe { _vqtbx4q(a, b, c, d, e, f) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Public signed 64-bit variant: unpacks the four-table tuple and forwards to
// the private `vqtbx4` binding.
pub fn vqtbx4_s8(a: int8x8_t, b: int8x16x4_t, c: uint8x8_t) -> int8x8_t {
    vqtbx4(a, b.0, b.1, b.2, b.3, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Public signed 128-bit variant: unpacks the four-table tuple and forwards to
// the private `vqtbx4q` binding.
pub fn vqtbx4q_s8(a: int8x16_t, b: int8x16x4_t, c: uint8x16_t) -> int8x16_t {
    vqtbx4q(a, b.0, b.1, b.2, b.3, c)
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_u8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Little-endian: reinterpret the unsigned vectors as signed via `transmute`
// and forward to the signed helper `vqtbx4`; no lane reordering needed.
pub fn vqtbx4_u8(a: uint8x8_t, b: uint8x16x4_t, c: uint8x8_t) -> uint8x8_t {
    unsafe {
        transmute(vqtbx4(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            transmute(b.3),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_u8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Big-endian: reverse the lane order of every input with `simd_shuffle!`,
// perform the look-up via the signed helper, then reverse the result back.
pub fn vqtbx4_u8(a: uint8x8_t, b: uint8x16x4_t, c: uint8x8_t) -> uint8x8_t {
    let mut b: uint8x16x4_t = b;
    let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.2 = unsafe {
        simd_shuffle!(
            b.2,
            b.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.3 = unsafe {
        simd_shuffle!(
            b.3,
            b.3,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x8_t = transmute(vqtbx4(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            transmute(b.3),
            c,
        ));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_u8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Little-endian: reinterpret the unsigned vectors as signed via `transmute`
// and forward to the signed helper `vqtbx4q`; no lane reordering needed.
pub fn vqtbx4q_u8(a: uint8x16_t, b: uint8x16x4_t, c: uint8x16_t) -> uint8x16_t {
    unsafe {
        transmute(vqtbx4q(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            transmute(b.3),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_u8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Big-endian: reverse the lane order of every input with `simd_shuffle!`,
// perform the look-up via the signed helper, then reverse the result back.
pub fn vqtbx4q_u8(a: uint8x16_t, b: uint8x16x4_t, c: uint8x16_t) -> uint8x16_t {
    let mut b: uint8x16x4_t = b;
    let a: uint8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.2 = unsafe {
        simd_shuffle!(
            b.2,
            b.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.3 = unsafe {
        simd_shuffle!(
            b.3,
            b.3,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x16_t =
        unsafe { simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x16_t = transmute(vqtbx4q(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            transmute(b.3),
            c,
        ));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_p8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Little-endian: reinterpret the polynomial vectors as signed via `transmute`
// and forward to the signed helper `vqtbx4`; no lane reordering needed.
pub fn vqtbx4_p8(a: poly8x8_t, b: poly8x16x4_t, c: uint8x8_t) -> poly8x8_t {
    unsafe {
        transmute(vqtbx4(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            transmute(b.3),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_p8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Big-endian: reverse the lane order of every input with `simd_shuffle!`,
// perform the look-up via the signed helper, then reverse the result back.
pub fn vqtbx4_p8(a: poly8x8_t, b: poly8x16x4_t, c: uint8x8_t) -> poly8x8_t {
    let mut b: poly8x16x4_t = b;
    let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.2 = unsafe {
        simd_shuffle!(
            b.2,
            b.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.3 = unsafe {
        simd_shuffle!(
            b.3,
            b.3,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x8_t = transmute(vqtbx4(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            transmute(b.3),
            c,
        ));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_p8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Little-endian: reinterpret the polynomial vectors as signed via `transmute`
// and forward to the signed helper `vqtbx4q`; no lane reordering needed.
pub fn vqtbx4q_p8(a: poly8x16_t, b: poly8x16x4_t, c: uint8x16_t) -> poly8x16_t {
    unsafe {
        transmute(vqtbx4q(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            transmute(b.3),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_p8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
// Big-endian: reverse the lane order of every input with `simd_shuffle!`,
// perform the look-up via the signed helper, then reverse the result back.
pub fn vqtbx4q_p8(a: poly8x16_t, b: poly8x16x4_t, c: uint8x16_t) -> poly8x16_t {
    let mut b: poly8x16x4_t = b;
    let a: poly8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe {
        simd_shuffle!(
            b.0,
            b.0,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.1 = unsafe {
        simd_shuffle!(
            b.1,
            b.1,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.2 = unsafe {
        simd_shuffle!(
            b.2,
            b.2,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    b.3 = unsafe {
        simd_shuffle!(
            b.3,
            b.3,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    };
    let c: uint8x16_t =
        unsafe { simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x16_t = transmute(vqtbx4q(
            transmute(a),
            transmute(b.0),
            transmute(b.1),
            transmute(b.2),
            transmute(b.3),
            c,
        ));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Rotate and exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrax1q_u64)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(rax1))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
// SHA3 RAX1: binds directly to `llvm.aarch64.crypto.rax1`; requires both the
// `neon` and `sha3` target features (see attribute above).
pub fn vrax1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.rax1"
        )]
        fn _vrax1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t;
    }
    unsafe { _vrax1q_u64(a, b) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbit_s8(a: int8x8_t) -> int8x8_t {
    // Reverse the bits within each byte lane via the generic simd intrinsic
    // (asserted above to lower to a single RBIT).
    unsafe { simd_bitreverse(a) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbitq_s8(a: int8x16_t) -> int8x16_t {
    // 128-bit variant of vrbit_s8: reverse the bits within each byte lane.
    unsafe { simd_bitreverse(a) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_u8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbit_u8(a: uint8x8_t) -> uint8x8_t {
    // Unsigned variant implemented by bit-casting through the signed intrinsic.
    unsafe { transmute(vrbit_s8(transmute(a))) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_u8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbit_u8(a: uint8x8_t) -> uint8x8_t {
    // Big-endian: reverse the lane order around the transmute so lane
    // numbering matches the little-endian semantics of the intrinsic.
    let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x8_t = transmute(vrbit_s8(transmute(a)));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_u8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbitq_u8(a: uint8x16_t) -> uint8x16_t {
    // Unsigned 128-bit variant implemented by bit-casting through the signed intrinsic.
    unsafe { transmute(vrbitq_s8(transmute(a))) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_u8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbitq_u8(a: uint8x16_t) -> uint8x16_t {
    // Big-endian: reverse all 16 lanes around the transmute so lane
    // numbering matches the little-endian semantics of the intrinsic.
    let a: uint8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x16_t = transmute(vrbitq_s8(transmute(a)));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_p8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbit_p8(a: poly8x8_t) -> poly8x8_t {
    // Polynomial variant implemented by bit-casting through the signed intrinsic.
    unsafe { transmute(vrbit_s8(transmute(a))) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_p8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbit_p8(a: poly8x8_t) -> poly8x8_t {
    // Big-endian: reverse the lane order around the transmute so lane
    // numbering matches the little-endian semantics of the intrinsic.
    let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x8_t = transmute(vrbit_s8(transmute(a)));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_p8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbitq_p8(a: poly8x16_t) -> poly8x16_t {
    // Polynomial 128-bit variant implemented by bit-casting through the signed intrinsic.
    unsafe { transmute(vrbitq_s8(transmute(a))) }
}
#[doc = "Reverse bit order"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_p8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn vrbitq_p8(a: poly8x16_t) -> poly8x16_t {
    // Big-endian: reverse all 16 lanes around the transmute so lane
    // numbering matches the little-endian semantics of the intrinsic.
    let a: poly8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x16_t = transmute(vrbitq_s8(transmute(a)));
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Reciprocal estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpe_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpe))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpe_f64(a: float64x1_t) -> float64x1_t {
    // Thin wrapper over the LLVM FRECPE intrinsic for a 1-lane f64 vector.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpe.v1f64"
        )]
        fn _vrecpe_f64(a: float64x1_t) -> float64x1_t;
    }
    // SAFETY: callers are gated on the `neon` target feature by the attribute above.
    unsafe { _vrecpe_f64(a) }
}
#[doc = "Reciprocal estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpe))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpeq_f64(a: float64x2_t) -> float64x2_t {
    // Thin wrapper over the LLVM FRECPE intrinsic for a 2-lane f64 vector.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpe.v2f64"
        )]
        fn _vrecpeq_f64(a: float64x2_t) -> float64x2_t;
    }
    // SAFETY: callers are gated on the `neon` target feature by the attribute above.
    unsafe { _vrecpeq_f64(a) }
}
#[doc = "Reciprocal estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecped_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpe))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecped_f64(a: f64) -> f64 {
    // Scalar f64 form of the FRECPE reciprocal-estimate intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpe.f64"
        )]
        fn _vrecped_f64(a: f64) -> f64;
    }
    // SAFETY: callers are gated on the `neon` target feature by the attribute above.
    unsafe { _vrecped_f64(a) }
}
#[doc = "Reciprocal estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpes_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpe))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpes_f32(a: f32) -> f32 {
    // Scalar f32 form of the FRECPE reciprocal-estimate intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpe.f32"
        )]
        fn _vrecpes_f32(a: f32) -> f32;
    }
    // SAFETY: callers are gated on the `neon` target feature by the attribute above.
    unsafe { _vrecpes_f32(a) }
}
#[doc = "Reciprocal estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(frecpe))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vrecpeh_f16(a: f16) -> f16 {
    // Scalar f16 form of the FRECPE reciprocal-estimate intrinsic
    // (compiled out on arm64ec by the cfg above).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpe.f16"
        )]
        fn _vrecpeh_f16(a: f16) -> f16;
    }
    // SAFETY: callers are gated on the `neon,fp16` target features by the attribute above.
    unsafe { _vrecpeh_f16(a) }
}
#[doc = "Floating-point reciprocal step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecps_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecps_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // Thin wrapper over the LLVM FRECPS intrinsic for a 1-lane f64 vector.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecps.v1f64"
        )]
        fn _vrecps_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    // SAFETY: callers are gated on the `neon` target feature by the attribute above.
    unsafe { _vrecps_f64(a, b) }
}
#[doc = "Floating-point reciprocal step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Thin wrapper over the LLVM FRECPS intrinsic for a 2-lane f64 vector.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecps.v2f64"
        )]
        fn _vrecpsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    // SAFETY: callers are gated on the `neon` target feature by the attribute above.
    unsafe { _vrecpsq_f64(a, b) }
}
#[doc = "Floating-point reciprocal step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpsd_f64(a: f64, b: f64) -> f64 {
    // Scalar f64 form of the FRECPS reciprocal-step intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecps.f64"
        )]
        fn _vrecpsd_f64(a: f64, b: f64) -> f64;
    }
    // SAFETY: callers are gated on the `neon` target feature by the attribute above.
    unsafe { _vrecpsd_f64(a, b) }
}
#[doc = "Floating-point reciprocal step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpss_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecps))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpss_f32(a: f32, b: f32) -> f32 {
    // Scalar f32 form of the FRECPS reciprocal-step intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecps.f32"
        )]
        fn _vrecpss_f32(a: f32, b: f32) -> f32;
    }
    // SAFETY: callers are gated on the `neon` target feature by the attribute above.
    unsafe { _vrecpss_f32(a, b) }
}
#[doc = "Floating-point reciprocal step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(frecps))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vrecpsh_f16(a: f16, b: f16) -> f16 {
    // Scalar f16 form of the FRECPS reciprocal-step intrinsic
    // (compiled out on arm64ec by the cfg above).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecps.f16"
        )]
        fn _vrecpsh_f16(a: f16, b: f16) -> f16;
    }
    // SAFETY: callers are gated on the `neon,fp16` target features by the attribute above.
    unsafe { _vrecpsh_f16(a, b) }
}
#[doc = "Floating-point reciprocal exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpxd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpxd_f64(a: f64) -> f64 {
    // Scalar f64 form of the FRECPX reciprocal-exponent intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpx.f64"
        )]
        fn _vrecpxd_f64(a: f64) -> f64;
    }
    // SAFETY: callers are gated on the `neon` target feature by the attribute above.
    unsafe { _vrecpxd_f64(a) }
}
#[doc = "Floating-point reciprocal exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpxs_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frecpx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrecpxs_f32(a: f32) -> f32 {
    // Scalar f32 form of the FRECPX reciprocal-exponent intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpx.f32"
        )]
        fn _vrecpxs_f32(a: f32) -> f32;
    }
    // SAFETY: callers are gated on the `neon` target feature by the attribute above.
    unsafe { _vrecpxs_f32(a) }
}
#[doc = "Floating-point reciprocal exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpxh_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(frecpx))]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vrecpxh_f16(a: f16) -> f16 {
    // Scalar f16 form of the FRECPX reciprocal-exponent intrinsic
    // (compiled out on arm64ec by the cfg above).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frecpx.f16"
        )]
        fn _vrecpxh_f16(a: f16) -> f16;
    }
    // SAFETY: callers are gated on the `neon,fp16` target features by the attribute above.
    unsafe { _vrecpxh_f16(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_f16)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_f16(a: float16x4_t) -> float64x1_t {
    // Pure bit-cast: no data is converted (asserted above to compile to a nop).
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_f16)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_f16(a: float16x4_t) -> float64x1_t {
    // Big-endian: reverse the f16 lanes before the bit-cast; the 1-lane
    // f64 result needs no output shuffle.
    let a: float16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f16)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_f16(a: float16x8_t) -> float64x2_t {
    // Pure bit-cast: no data is converted (asserted above to compile to a nop).
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f16)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_f16(a: float16x8_t) -> float64x2_t {
    // Big-endian: reverse lane order on both sides of the bit-cast so lane
    // numbering matches the little-endian layout.
    let a: float16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f16_f64(a: float64x1_t) -> float16x4_t {
    // Pure bit-cast: no data is converted (asserted above to compile to a nop).
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f16_f64(a: float64x1_t) -> float16x4_t {
    // Big-endian: the 1-lane f64 input needs no shuffle; only the 4-lane
    // f16 result is lane-reversed after the bit-cast.
    unsafe {
        let ret_val: float16x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f16_f64(a: float64x2_t) -> float16x8_t {
    // Pure bit-cast: no data is converted (asserted above to compile to a nop).
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f16_f64(a: float64x2_t) -> float16x8_t {
    // Big-endian: reverse lane order on both sides of the bit-cast so lane
    // numbering matches the little-endian layout.
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe {
        let ret_val: float16x8_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p128)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p128(a: p128) -> float64x2_t {
    // Pure bit-cast: no data is converted (asserted above to compile to a nop).
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p128)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p128(a: p128) -> float64x2_t {
    // Big-endian: p128 is a scalar, so only the 2-lane vector result is
    // lane-reversed after the bit-cast.
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_f32)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_f32(a: float32x2_t) -> float64x1_t {
    // Pure bit-cast: no data is converted (asserted above to compile to a nop).
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_f32)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_f32(a: float32x2_t) -> float64x1_t {
    // Big-endian: reverse the input lanes; the 1-lane result needs no shuffle.
    let a: float32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f32)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p64_f32(a: float32x2_t) -> poly64x1_t {
    // Pure bit-cast: no data is converted (asserted above to compile to a nop).
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f32)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p64_f32(a: float32x2_t) -> poly64x1_t {
    // Big-endian: reverse the input lanes; the 1-lane result needs no shuffle.
    let a: float32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f32)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_f32(a: float32x4_t) -> float64x2_t {
    // Pure bit-cast: no data is converted (asserted above to compile to a nop).
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f32)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_f32(a: float32x4_t) -> float64x2_t {
    // Big-endian: reverse lane order on both sides of the bit-cast so lane
    // numbering matches the little-endian layout.
    let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f32)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_f32(a: float32x4_t) -> poly64x2_t {
    // Pure bit-cast: no data is converted (asserted above to compile to a nop).
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f32)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_f32(a: float32x4_t) -> poly64x2_t {
    // Big-endian: reverse lane order on both sides of the bit-cast so lane
    // numbering matches the little-endian layout.
    let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly64x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f32_f64(a: float64x1_t) -> float32x2_t {
    // Pure bit-cast: no data is converted (asserted above to compile to a nop).
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f32_f64(a: float64x1_t) -> float32x2_t {
    // Big-endian: the 1-lane input needs no shuffle; only the result is
    // lane-reversed after the bit-cast.
    unsafe {
        let ret_val: float32x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s8_f64(a: float64x1_t) -> int8x8_t {
    // Pure bit-cast: no data is converted (asserted above to compile to a nop).
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s8_f64(a: float64x1_t) -> int8x8_t {
    // Big-endian: the 1-lane input needs no shuffle; only the result is
    // lane-reversed after the bit-cast.
    unsafe {
        let ret_val: int8x8_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s16_f64(a: float64x1_t) -> int16x4_t {
    // Pure bit-cast: no data is converted (asserted above to compile to a nop).
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s16_f64(a: float64x1_t) -> int16x4_t {
    // Big-endian: the 1-lane input needs no shuffle; only the result is
    // lane-reversed after the bit-cast.
    unsafe {
        let ret_val: int16x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s32_f64(a: float64x1_t) -> int32x2_t {
    // Pure bit-cast: no data is converted (asserted above to compile to a nop).
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s32_f64(a: float64x1_t) -> int32x2_t {
    // Big-endian: the 1-lane input needs no shuffle; only the result is
    // lane-reversed after the bit-cast.
    unsafe {
        let ret_val: int32x2_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_s64_f64(a: float64x1_t) -> int64x1_t {
    // 1-lane to 1-lane bit-cast: identical on both endiannesses, so no
    // cfg-gated variant is needed.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u8_f64(a: float64x1_t) -> uint8x8_t {
    // Pure bit-cast: no data is converted (asserted above to compile to a nop).
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u8_f64(a: float64x1_t) -> uint8x8_t {
    // Big-endian: the 1-lane input needs no shuffle; only the result is
    // lane-reversed after the bit-cast.
    unsafe {
        let ret_val: uint8x8_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u16_f64(a: float64x1_t) -> uint16x4_t {
    // Pure bit-cast: no data is converted (asserted above to compile to a nop).
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u16_f64(a: float64x1_t) -> uint16x4_t {
    // Big-endian: the 1-lane input needs no shuffle; only the result is
    // lane-reversed after the bit-cast.
    unsafe {
        let ret_val: uint16x4_t = transmute(a);
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u32_f64(a: float64x1_t) -> uint32x2_t {
    // SAFETY: float64x1_t and uint32x2_t are same-size (64-bit) vector types;
    // the bit pattern is reinterpreted unchanged.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u32_f64(a: float64x1_t) -> uint32x2_t {
    // SAFETY: same-size (64-bit) vector reinterpretation.
    unsafe {
        let ret_val: uint32x2_t = transmute(a);
        // Big-endian: reverse the lane order of the reinterpreted vector.
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_u64_f64(a: float64x1_t) -> uint64x1_t {
    // SAFETY: same-size (64-bit) reinterpretation. Both types have a single
    // lane, so no big-endian lane correction is needed and one definition
    // serves both endiannesses (hence no target_endian cfg here).
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p8_f64(a: float64x1_t) -> poly8x8_t {
    // SAFETY: float64x1_t and poly8x8_t are same-size (64-bit) vector types;
    // the bit pattern is reinterpreted unchanged.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p8_f64(a: float64x1_t) -> poly8x8_t {
    // SAFETY: same-size (64-bit) vector reinterpretation.
    unsafe {
        let ret_val: poly8x8_t = transmute(a);
        // Big-endian: reverse the lane order of the reinterpreted vector.
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p16_f64(a: float64x1_t) -> poly16x4_t {
    // SAFETY: float64x1_t and poly16x4_t are same-size (64-bit) vector types;
    // the bit pattern is reinterpreted unchanged.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p16_f64(a: float64x1_t) -> poly16x4_t {
    // SAFETY: same-size (64-bit) vector reinterpretation.
    unsafe {
        let ret_val: poly16x4_t = transmute(a);
        // Big-endian: reverse the lane order of the reinterpreted vector.
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p64_f64(a: float64x1_t) -> poly64x1_t {
    // SAFETY: same-size (64-bit) reinterpretation. Both types are single-lane,
    // so no big-endian lane correction is needed and one definition serves
    // both endiannesses.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p128_f64(a: float64x2_t) -> p128 {
    // SAFETY: float64x2_t and p128 are both 128 bits wide; the bit pattern is
    // reinterpreted unchanged.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p128_f64(a: float64x2_t) -> p128 {
    // Big-endian: reverse the input lanes before reinterpreting; the p128
    // result is a scalar, so no output lane correction applies.
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    // SAFETY: float64x2_t and p128 are both 128 bits wide.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f32_f64(a: float64x2_t) -> float32x4_t {
    // SAFETY: float64x2_t and float32x4_t are same-size (128-bit) vector
    // types; the bit pattern is reinterpreted unchanged.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f32_f64(a: float64x2_t) -> float32x4_t {
    // Big-endian: reverse the input lanes before reinterpreting.
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    // SAFETY: same-size (128-bit) vector reinterpretation.
    unsafe {
        let ret_val: float32x4_t = transmute(a);
        // ...then reverse the lanes of the reinterpreted result as well.
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s8_f64(a: float64x2_t) -> int8x16_t {
    // SAFETY: float64x2_t and int8x16_t are same-size (128-bit) vector types;
    // the bit pattern is reinterpreted unchanged.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s8_f64(a: float64x2_t) -> int8x16_t {
    // Big-endian: reverse the input lanes before reinterpreting.
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    // SAFETY: same-size (128-bit) vector reinterpretation.
    unsafe {
        let ret_val: int8x16_t = transmute(a);
        // ...then reverse the lanes of the reinterpreted result as well.
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s16_f64(a: float64x2_t) -> int16x8_t {
    // SAFETY: float64x2_t and int16x8_t are same-size (128-bit) vector types;
    // the bit pattern is reinterpreted unchanged.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s16_f64(a: float64x2_t) -> int16x8_t {
    // Big-endian: reverse the input lanes before reinterpreting.
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    // SAFETY: same-size (128-bit) vector reinterpretation.
    unsafe {
        let ret_val: int16x8_t = transmute(a);
        // ...then reverse the lanes of the reinterpreted result as well.
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s32_f64(a: float64x2_t) -> int32x4_t {
    // SAFETY: float64x2_t and int32x4_t are same-size (128-bit) vector types;
    // the bit pattern is reinterpreted unchanged.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s32_f64(a: float64x2_t) -> int32x4_t {
    // Big-endian: reverse the input lanes before reinterpreting.
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    // SAFETY: same-size (128-bit) vector reinterpretation.
    unsafe {
        let ret_val: int32x4_t = transmute(a);
        // ...then reverse the lanes of the reinterpreted result as well.
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s64_f64(a: float64x2_t) -> int64x2_t {
    // SAFETY: float64x2_t and int64x2_t are same-size (128-bit) vector types;
    // the bit pattern is reinterpreted unchanged.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s64_f64(a: float64x2_t) -> int64x2_t {
    // Big-endian: reverse the input lanes before reinterpreting.
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    // SAFETY: same-size (128-bit) vector reinterpretation.
    unsafe {
        let ret_val: int64x2_t = transmute(a);
        // ...then reverse the lanes of the reinterpreted result as well.
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u8_f64(a: float64x2_t) -> uint8x16_t {
    // SAFETY: float64x2_t and uint8x16_t are same-size (128-bit) vector types;
    // the bit pattern is reinterpreted unchanged.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u8_f64(a: float64x2_t) -> uint8x16_t {
    // Big-endian: reverse the input lanes before reinterpreting.
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    // SAFETY: same-size (128-bit) vector reinterpretation.
    unsafe {
        let ret_val: uint8x16_t = transmute(a);
        // ...then reverse the lanes of the reinterpreted result as well.
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u16_f64(a: float64x2_t) -> uint16x8_t {
    // SAFETY: float64x2_t and uint16x8_t are same-size (128-bit) vector types;
    // the bit pattern is reinterpreted unchanged.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u16_f64(a: float64x2_t) -> uint16x8_t {
    // Big-endian: reverse the input lanes before reinterpreting.
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    // SAFETY: same-size (128-bit) vector reinterpretation.
    unsafe {
        let ret_val: uint16x8_t = transmute(a);
        // ...then reverse the lanes of the reinterpreted result as well.
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u32_f64(a: float64x2_t) -> uint32x4_t {
    // SAFETY: float64x2_t and uint32x4_t are same-size (128-bit) vector types;
    // the bit pattern is reinterpreted unchanged.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u32_f64(a: float64x2_t) -> uint32x4_t {
    // Big-endian: reverse the input lanes before reinterpreting.
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    // SAFETY: same-size (128-bit) vector reinterpretation.
    unsafe {
        let ret_val: uint32x4_t = transmute(a);
        // ...then reverse the lanes of the reinterpreted result as well.
        simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u64_f64(a: float64x2_t) -> uint64x2_t {
    // SAFETY: float64x2_t and uint64x2_t are same-size (128-bit) vector types;
    // the bit pattern is reinterpreted unchanged.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u64_f64(a: float64x2_t) -> uint64x2_t {
    // Big-endian: reverse the input lanes before reinterpreting.
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    // SAFETY: same-size (128-bit) vector reinterpretation.
    unsafe {
        let ret_val: uint64x2_t = transmute(a);
        // ...then reverse the lanes of the reinterpreted result as well.
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p8_f64(a: float64x2_t) -> poly8x16_t {
    // SAFETY: float64x2_t and poly8x16_t are same-size (128-bit) vector types;
    // the bit pattern is reinterpreted unchanged.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p8_f64(a: float64x2_t) -> poly8x16_t {
    // Big-endian: reverse the input lanes before reinterpreting.
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    // SAFETY: same-size (128-bit) vector reinterpretation.
    unsafe {
        let ret_val: poly8x16_t = transmute(a);
        // ...then reverse the lanes of the reinterpreted result as well.
        simd_shuffle!(
            ret_val,
            ret_val,
            [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
        )
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p16_f64(a: float64x2_t) -> poly16x8_t {
    // SAFETY: float64x2_t and poly16x8_t are same-size (128-bit) vector types;
    // the bit pattern is reinterpreted unchanged.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p16_f64(a: float64x2_t) -> poly16x8_t {
    // Big-endian: reverse the input lanes before reinterpreting.
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    // SAFETY: same-size (128-bit) vector reinterpretation.
    unsafe {
        let ret_val: poly16x8_t = transmute(a);
        // ...then reverse the lanes of the reinterpreted result as well.
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_f64(a: float64x2_t) -> poly64x2_t {
    // SAFETY: float64x2_t and poly64x2_t are same-size (128-bit) vector types;
    // the bit pattern is reinterpreted unchanged.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_f64(a: float64x2_t) -> poly64x2_t {
    // Big-endian: reverse the input lanes before reinterpreting.
    let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    // SAFETY: same-size (128-bit) vector reinterpretation.
    unsafe {
        let ret_val: poly64x2_t = transmute(a);
        // ...then reverse the lanes of the reinterpreted result as well.
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_s8(a: int8x8_t) -> float64x1_t {
    // SAFETY: int8x8_t and float64x1_t are same-size (64-bit) vector types;
    // the bit pattern is reinterpreted unchanged.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_s8(a: int8x8_t) -> float64x1_t {
    // Big-endian: reverse the input lanes before reinterpreting; the
    // single-lane f64 result needs no output correction.
    let a: int8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    // SAFETY: same-size (64-bit) vector reinterpretation.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_s8(a: int8x16_t) -> float64x2_t {
    // SAFETY: int8x16_t and float64x2_t are same-size (128-bit) vector types;
    // the bit pattern is reinterpreted unchanged.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_s8(a: int8x16_t) -> float64x2_t {
    // Big-endian: reverse the input lanes before reinterpreting.
    let a: int8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    // SAFETY: same-size (128-bit) vector reinterpretation.
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        // ...then reverse the lanes of the reinterpreted result as well.
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s16)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_s16(a: int16x4_t) -> float64x1_t {
    // SAFETY: int16x4_t and float64x1_t are same-size (64-bit) vector types;
    // the bit pattern is reinterpreted unchanged.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s16)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_s16(a: int16x4_t) -> float64x1_t {
    // Big-endian: reverse the input lanes before reinterpreting; the
    // single-lane f64 result needs no output correction.
    let a: int16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    // SAFETY: same-size (64-bit) vector reinterpretation.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s16)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_s16(a: int16x8_t) -> float64x2_t {
    // SAFETY: int16x8_t and float64x2_t are same-size (128-bit) vector types;
    // the bit pattern is reinterpreted unchanged.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s16)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_s16(a: int16x8_t) -> float64x2_t {
    // Big-endian: reverse the input lanes before reinterpreting.
    let a: int16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    // SAFETY: same-size (128-bit) vector reinterpretation.
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        // ...then reverse the lanes of the reinterpreted result as well.
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s32)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_s32(a: int32x2_t) -> float64x1_t {
    // SAFETY: int32x2_t and float64x1_t are same-size (64-bit) vector types;
    // the bit pattern is reinterpreted unchanged.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s32)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_s32(a: int32x2_t) -> float64x1_t {
    // Big-endian: reverse the input lanes before reinterpreting; the
    // single-lane f64 result needs no output correction.
    let a: int32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    // SAFETY: same-size (64-bit) vector reinterpretation.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s32)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_s32(a: int32x4_t) -> float64x2_t {
    // SAFETY: int32x4_t and float64x2_t are same-size (128-bit) vector types;
    // the bit pattern is reinterpreted unchanged.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s32)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_s32(a: int32x4_t) -> float64x2_t {
    // Big-endian: reverse the input lanes before reinterpreting.
    let a: int32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) };
    // SAFETY: same-size (128-bit) vector reinterpretation.
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        // ...then reverse the lanes of the reinterpreted result as well.
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_s64(a: int64x1_t) -> float64x1_t {
    // SAFETY: same-size (64-bit) reinterpretation. Both types are single-lane,
    // so no big-endian lane correction is needed and one definition serves
    // both endiannesses.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_p64_s64(a: int64x1_t) -> poly64x1_t {
    // SAFETY: same-size (64-bit) reinterpretation. Both types are single-lane,
    // so no big-endian lane correction is needed and one definition serves
    // both endiannesses.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_s64(a: int64x2_t) -> float64x2_t {
    // SAFETY: int64x2_t and float64x2_t are same-size (128-bit) vector types;
    // the bit pattern is reinterpreted unchanged.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_s64(a: int64x2_t) -> float64x2_t {
    // Big-endian: reverse the input lanes before reinterpreting.
    let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    // SAFETY: same-size (128-bit) vector reinterpretation.
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        // ...then reverse the lanes of the reinterpreted result as well.
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s64)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_s64(a: int64x2_t) -> poly64x2_t {
    // SAFETY: int64x2_t and poly64x2_t are same-size (128-bit) vector types;
    // the bit pattern is reinterpreted unchanged.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_s64(a: int64x2_t) -> poly64x2_t {
    // Big-endian: reverse the input lanes before reinterpreting.
    let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) };
    // SAFETY: same-size (128-bit) vector reinterpretation.
    unsafe {
        let ret_val: poly64x2_t = transmute(a);
        // ...then reverse the lanes of the reinterpreted result as well.
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u8(a: uint8x8_t) -> float64x1_t {
    // SAFETY: uint8x8_t and float64x1_t are same-size (64-bit) vector types;
    // the bit pattern is reinterpreted unchanged.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u8(a: uint8x8_t) -> float64x1_t {
    // Big-endian: reverse the input lanes before reinterpreting; the
    // single-lane f64 result needs no output correction.
    let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    // SAFETY: same-size (64-bit) vector reinterpretation.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u8(a: uint8x16_t) -> float64x2_t {
    // SAFETY: uint8x16_t and float64x2_t are same-size (128-bit) vector types;
    // the bit pattern is reinterpreted unchanged.
    unsafe { transmute(a) }
}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u8(a: uint8x16_t) -> float64x2_t {
    // Big-endian: reverse the input lanes before reinterpreting.
    let a: uint8x16_t =
        unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) };
    // SAFETY: same-size (128-bit) vector reinterpretation.
    unsafe {
        let ret_val: float64x2_t = transmute(a);
        // ...then reverse the lanes of the reinterpreted result as well.
        simd_shuffle!(ret_val, ret_val, [1, 0])
    }
}
22853#[doc = "Vector reinterpret cast operation"]
22854#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u16)"]
22855#[inline(always)]
22856#[cfg(target_endian = "little")]
22857#[target_feature(enable = "neon")]
22858#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22859#[cfg_attr(test, assert_instr(nop))]
22860pub fn vreinterpret_f64_u16(a: uint16x4_t) -> float64x1_t {
22861    unsafe { transmute(a) }
22862}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u16)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u16(a: uint16x4_t) -> float64x1_t {
    // Big-endian: reverse the source lanes before the bit cast; the
    // single-lane result needs no reordering.
    unsafe {
        let rev: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]);
        transmute(rev)
    }
}
22874#[doc = "Vector reinterpret cast operation"]
22875#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u16)"]
22876#[inline(always)]
22877#[cfg(target_endian = "little")]
22878#[target_feature(enable = "neon")]
22879#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22880#[cfg_attr(test, assert_instr(nop))]
22881pub fn vreinterpretq_f64_u16(a: uint16x8_t) -> float64x2_t {
22882    unsafe { transmute(a) }
22883}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u16)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u16(a: uint16x8_t) -> float64x2_t {
    // Big-endian: flip lanes before and after the bit cast so the result
    // matches the little-endian lane ordering callers expect.
    unsafe {
        let rev: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]);
        let cast: float64x2_t = transmute(rev);
        simd_shuffle!(cast, cast, [1, 0])
    }
}
22898#[doc = "Vector reinterpret cast operation"]
22899#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u32)"]
22900#[inline(always)]
22901#[cfg(target_endian = "little")]
22902#[target_feature(enable = "neon")]
22903#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22904#[cfg_attr(test, assert_instr(nop))]
22905pub fn vreinterpret_f64_u32(a: uint32x2_t) -> float64x1_t {
22906    unsafe { transmute(a) }
22907}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u32)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_u32(a: uint32x2_t) -> float64x1_t {
    // Big-endian: reverse the source lanes before the bit cast; the
    // single-lane result needs no reordering.
    unsafe {
        let rev: uint32x2_t = simd_shuffle!(a, a, [1, 0]);
        transmute(rev)
    }
}
22919#[doc = "Vector reinterpret cast operation"]
22920#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u32)"]
22921#[inline(always)]
22922#[cfg(target_endian = "little")]
22923#[target_feature(enable = "neon")]
22924#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22925#[cfg_attr(test, assert_instr(nop))]
22926pub fn vreinterpretq_f64_u32(a: uint32x4_t) -> float64x2_t {
22927    unsafe { transmute(a) }
22928}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u32)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u32(a: uint32x4_t) -> float64x2_t {
    // Big-endian: flip lanes before and after the bit cast so the result
    // matches the little-endian lane ordering callers expect.
    unsafe {
        let rev: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]);
        let cast: float64x2_t = transmute(rev);
        simd_shuffle!(cast, cast, [1, 0])
    }
}
22943#[doc = "Vector reinterpret cast operation"]
22944#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u64)"]
22945#[inline(always)]
22946#[target_feature(enable = "neon")]
22947#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22948#[cfg_attr(test, assert_instr(nop))]
22949pub fn vreinterpret_f64_u64(a: uint64x1_t) -> float64x1_t {
22950    unsafe { transmute(a) }
22951}
22952#[doc = "Vector reinterpret cast operation"]
22953#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u64)"]
22954#[inline(always)]
22955#[target_feature(enable = "neon")]
22956#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22957#[cfg_attr(test, assert_instr(nop))]
22958pub fn vreinterpret_p64_u64(a: uint64x1_t) -> poly64x1_t {
22959    unsafe { transmute(a) }
22960}
22961#[doc = "Vector reinterpret cast operation"]
22962#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u64)"]
22963#[inline(always)]
22964#[cfg(target_endian = "little")]
22965#[target_feature(enable = "neon")]
22966#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22967#[cfg_attr(test, assert_instr(nop))]
22968pub fn vreinterpretq_f64_u64(a: uint64x2_t) -> float64x2_t {
22969    unsafe { transmute(a) }
22970}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_u64(a: uint64x2_t) -> float64x2_t {
    // Big-endian: flip the two lanes before and after the bit cast so the
    // result matches the little-endian lane ordering callers expect.
    unsafe {
        let rev: uint64x2_t = simd_shuffle!(a, a, [1, 0]);
        let cast: float64x2_t = transmute(rev);
        simd_shuffle!(cast, cast, [1, 0])
    }
}
22985#[doc = "Vector reinterpret cast operation"]
22986#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u64)"]
22987#[inline(always)]
22988#[cfg(target_endian = "little")]
22989#[target_feature(enable = "neon")]
22990#[stable(feature = "neon_intrinsics", since = "1.59.0")]
22991#[cfg_attr(test, assert_instr(nop))]
22992pub fn vreinterpretq_p64_u64(a: uint64x2_t) -> poly64x2_t {
22993    unsafe { transmute(a) }
22994}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_p64_u64(a: uint64x2_t) -> poly64x2_t {
    // Big-endian: flip the two lanes before and after the bit cast so the
    // result matches the little-endian lane ordering callers expect.
    unsafe {
        let rev: uint64x2_t = simd_shuffle!(a, a, [1, 0]);
        let cast: poly64x2_t = transmute(rev);
        simd_shuffle!(cast, cast, [1, 0])
    }
}
23009#[doc = "Vector reinterpret cast operation"]
23010#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p8)"]
23011#[inline(always)]
23012#[cfg(target_endian = "little")]
23013#[target_feature(enable = "neon")]
23014#[stable(feature = "neon_intrinsics", since = "1.59.0")]
23015#[cfg_attr(test, assert_instr(nop))]
23016pub fn vreinterpret_f64_p8(a: poly8x8_t) -> float64x1_t {
23017    unsafe { transmute(a) }
23018}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_p8(a: poly8x8_t) -> float64x1_t {
    // Big-endian: reverse the source lanes before the bit cast; the
    // single-lane result needs no reordering.
    unsafe {
        let rev: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]);
        transmute(rev)
    }
}
23030#[doc = "Vector reinterpret cast operation"]
23031#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p8)"]
23032#[inline(always)]
23033#[cfg(target_endian = "little")]
23034#[target_feature(enable = "neon")]
23035#[stable(feature = "neon_intrinsics", since = "1.59.0")]
23036#[cfg_attr(test, assert_instr(nop))]
23037pub fn vreinterpretq_f64_p8(a: poly8x16_t) -> float64x2_t {
23038    unsafe { transmute(a) }
23039}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p8(a: poly8x16_t) -> float64x2_t {
    // Big-endian: flip lanes before and after the bit cast so the result
    // matches the little-endian lane ordering callers expect.
    unsafe {
        let rev: poly8x16_t =
            simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]);
        let cast: float64x2_t = transmute(rev);
        simd_shuffle!(cast, cast, [1, 0])
    }
}
23055#[doc = "Vector reinterpret cast operation"]
23056#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p16)"]
23057#[inline(always)]
23058#[cfg(target_endian = "little")]
23059#[target_feature(enable = "neon")]
23060#[stable(feature = "neon_intrinsics", since = "1.59.0")]
23061#[cfg_attr(test, assert_instr(nop))]
23062pub fn vreinterpret_f64_p16(a: poly16x4_t) -> float64x1_t {
23063    unsafe { transmute(a) }
23064}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p16)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f64_p16(a: poly16x4_t) -> float64x1_t {
    // Big-endian: reverse the source lanes before the bit cast; the
    // single-lane result needs no reordering.
    unsafe {
        let rev: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]);
        transmute(rev)
    }
}
23076#[doc = "Vector reinterpret cast operation"]
23077#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p16)"]
23078#[inline(always)]
23079#[cfg(target_endian = "little")]
23080#[target_feature(enable = "neon")]
23081#[stable(feature = "neon_intrinsics", since = "1.59.0")]
23082#[cfg_attr(test, assert_instr(nop))]
23083pub fn vreinterpretq_f64_p16(a: poly16x8_t) -> float64x2_t {
23084    unsafe { transmute(a) }
23085}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p16)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p16(a: poly16x8_t) -> float64x2_t {
    // Big-endian: flip lanes before and after the bit cast so the result
    // matches the little-endian lane ordering callers expect.
    unsafe {
        let rev: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]);
        let cast: float64x2_t = transmute(rev);
        simd_shuffle!(cast, cast, [1, 0])
    }
}
23100#[doc = "Vector reinterpret cast operation"]
23101#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p64)"]
23102#[inline(always)]
23103#[cfg(target_endian = "little")]
23104#[target_feature(enable = "neon")]
23105#[stable(feature = "neon_intrinsics", since = "1.59.0")]
23106#[cfg_attr(test, assert_instr(nop))]
23107pub fn vreinterpret_f32_p64(a: poly64x1_t) -> float32x2_t {
23108    unsafe { transmute(a) }
23109}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpret_f32_p64(a: poly64x1_t) -> float32x2_t {
    // Big-endian: the single-lane source needs no reordering; flip the two
    // lanes of the widened result to match little-endian ordering.
    let cast: float32x2_t = unsafe { transmute(a) };
    unsafe { simd_shuffle!(cast, cast, [1, 0]) }
}
23123#[doc = "Vector reinterpret cast operation"]
23124#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p64)"]
23125#[inline(always)]
23126#[target_feature(enable = "neon")]
23127#[stable(feature = "neon_intrinsics", since = "1.59.0")]
23128#[cfg_attr(test, assert_instr(nop))]
23129pub fn vreinterpret_f64_p64(a: poly64x1_t) -> float64x1_t {
23130    unsafe { transmute(a) }
23131}
23132#[doc = "Vector reinterpret cast operation"]
23133#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_p64)"]
23134#[inline(always)]
23135#[target_feature(enable = "neon")]
23136#[stable(feature = "neon_intrinsics", since = "1.59.0")]
23137#[cfg_attr(test, assert_instr(nop))]
23138pub fn vreinterpret_s64_p64(a: poly64x1_t) -> int64x1_t {
23139    unsafe { transmute(a) }
23140}
23141#[doc = "Vector reinterpret cast operation"]
23142#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_p64)"]
23143#[inline(always)]
23144#[target_feature(enable = "neon")]
23145#[stable(feature = "neon_intrinsics", since = "1.59.0")]
23146#[cfg_attr(test, assert_instr(nop))]
23147pub fn vreinterpret_u64_p64(a: poly64x1_t) -> uint64x1_t {
23148    unsafe { transmute(a) }
23149}
23150#[doc = "Vector reinterpret cast operation"]
23151#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p64)"]
23152#[inline(always)]
23153#[cfg(target_endian = "little")]
23154#[target_feature(enable = "neon")]
23155#[stable(feature = "neon_intrinsics", since = "1.59.0")]
23156#[cfg_attr(test, assert_instr(nop))]
23157pub fn vreinterpretq_f32_p64(a: poly64x2_t) -> float32x4_t {
23158    unsafe { transmute(a) }
23159}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f32_p64(a: poly64x2_t) -> float32x4_t {
    // Big-endian: reverse the two source lanes, reinterpret, then reverse
    // the four result lanes to restore little-endian lane ordering.
    unsafe {
        let rev: poly64x2_t = simd_shuffle!(a, a, [1, 0]);
        let cast: float32x4_t = transmute(rev);
        simd_shuffle!(cast, cast, [3, 2, 1, 0])
    }
}
23174#[doc = "Vector reinterpret cast operation"]
23175#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p64)"]
23176#[inline(always)]
23177#[cfg(target_endian = "little")]
23178#[target_feature(enable = "neon")]
23179#[stable(feature = "neon_intrinsics", since = "1.59.0")]
23180#[cfg_attr(test, assert_instr(nop))]
23181pub fn vreinterpretq_f64_p64(a: poly64x2_t) -> float64x2_t {
23182    unsafe { transmute(a) }
23183}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_f64_p64(a: poly64x2_t) -> float64x2_t {
    // Big-endian: flip the two lanes before and after the bit cast so the
    // result matches the little-endian lane ordering callers expect.
    unsafe {
        let rev: poly64x2_t = simd_shuffle!(a, a, [1, 0]);
        let cast: float64x2_t = transmute(rev);
        simd_shuffle!(cast, cast, [1, 0])
    }
}
23198#[doc = "Vector reinterpret cast operation"]
23199#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p64)"]
23200#[inline(always)]
23201#[cfg(target_endian = "little")]
23202#[target_feature(enable = "neon")]
23203#[stable(feature = "neon_intrinsics", since = "1.59.0")]
23204#[cfg_attr(test, assert_instr(nop))]
23205pub fn vreinterpretq_s64_p64(a: poly64x2_t) -> int64x2_t {
23206    unsafe { transmute(a) }
23207}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_s64_p64(a: poly64x2_t) -> int64x2_t {
    // Big-endian: flip the two lanes before and after the bit cast so the
    // result matches the little-endian lane ordering callers expect.
    unsafe {
        let rev: poly64x2_t = simd_shuffle!(a, a, [1, 0]);
        let cast: int64x2_t = transmute(rev);
        simd_shuffle!(cast, cast, [1, 0])
    }
}
23222#[doc = "Vector reinterpret cast operation"]
23223#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p64)"]
23224#[inline(always)]
23225#[cfg(target_endian = "little")]
23226#[target_feature(enable = "neon")]
23227#[stable(feature = "neon_intrinsics", since = "1.59.0")]
23228#[cfg_attr(test, assert_instr(nop))]
23229pub fn vreinterpretq_u64_p64(a: poly64x2_t) -> uint64x2_t {
23230    unsafe { transmute(a) }
23231}
#[doc = "Vector reinterpret cast operation"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p64)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub fn vreinterpretq_u64_p64(a: poly64x2_t) -> uint64x2_t {
    // Big-endian: flip the two lanes before and after the bit cast so the
    // result matches the little-endian lane ordering callers expect.
    unsafe {
        let rev: poly64x2_t = simd_shuffle!(a, a, [1, 0]);
        let cast: uint64x2_t = transmute(rev);
        simd_shuffle!(cast, cast, [1, 0])
    }
}
#[doc = "Floating-point round to 32-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32x_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))]
pub fn vrnd32x_f32(a: float32x2_t) -> float32x2_t {
    // Declaration of the LLVM intrinsic that lowers to the FRINT32X instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint32x.v2f32"
        )]
        fn _vrnd32x_f32(a: float32x2_t) -> float32x2_t;
    }
    // SAFETY: the required `frintts` feature is enabled via `#[target_feature]`.
    unsafe { _vrnd32x_f32(a) }
}
#[doc = "Floating-point round to 32-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32xq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))]
pub fn vrnd32xq_f32(a: float32x4_t) -> float32x4_t {
    // Declaration of the LLVM intrinsic that lowers to the FRINT32X instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint32x.v4f32"
        )]
        fn _vrnd32xq_f32(a: float32x4_t) -> float32x4_t;
    }
    // SAFETY: the required `frintts` feature is enabled via `#[target_feature]`.
    unsafe { _vrnd32xq_f32(a) }
}
#[doc = "Floating-point round to 32-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32xq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))]
pub fn vrnd32xq_f64(a: float64x2_t) -> float64x2_t {
    // Declaration of the LLVM intrinsic that lowers to the FRINT32X instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint32x.v2f64"
        )]
        fn _vrnd32xq_f64(a: float64x2_t) -> float64x2_t;
    }
    // SAFETY: the required `frintts` feature is enabled via `#[target_feature]`.
    unsafe { _vrnd32xq_f64(a) }
}
#[doc = "Floating-point round to 32-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32x_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))]
pub fn vrnd32x_f64(a: float64x1_t) -> float64x1_t {
    // Note this LLVM intrinsic is the scalar `f64` form (link_name has no
    // vector suffix), so the single lane is extracted, rounded, and re-wrapped.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.frint32x.f64"
        )]
        fn _vrnd32x_f64(a: f64) -> f64;
    }
    // SAFETY: `frintts` is enabled via `#[target_feature]`; transmuting the
    // scalar result back into a 1-lane f64 vector is a same-size bit cast.
    unsafe { transmute(_vrnd32x_f64(simd_extract!(a, 0))) }
}
#[doc = "Floating-point round to 32-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32z_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))]
pub fn vrnd32z_f32(a: float32x2_t) -> float32x2_t {
    // Declaration of the LLVM intrinsic that lowers to the FRINT32Z instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint32z.v2f32"
        )]
        fn _vrnd32z_f32(a: float32x2_t) -> float32x2_t;
    }
    // SAFETY: the required `frintts` feature is enabled via `#[target_feature]`.
    unsafe { _vrnd32z_f32(a) }
}
#[doc = "Floating-point round to 32-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32zq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))]
pub fn vrnd32zq_f32(a: float32x4_t) -> float32x4_t {
    // Declaration of the LLVM intrinsic that lowers to the FRINT32Z instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint32z.v4f32"
        )]
        fn _vrnd32zq_f32(a: float32x4_t) -> float32x4_t;
    }
    // SAFETY: the required `frintts` feature is enabled via `#[target_feature]`.
    unsafe { _vrnd32zq_f32(a) }
}
#[doc = "Floating-point round to 32-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32zq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))]
pub fn vrnd32zq_f64(a: float64x2_t) -> float64x2_t {
    // Declaration of the LLVM intrinsic that lowers to the FRINT32Z instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint32z.v2f64"
        )]
        fn _vrnd32zq_f64(a: float64x2_t) -> float64x2_t;
    }
    // SAFETY: the required `frintts` feature is enabled via `#[target_feature]`.
    unsafe { _vrnd32zq_f64(a) }
}
#[doc = "Floating-point round to 32-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32z_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))]
pub fn vrnd32z_f64(a: float64x1_t) -> float64x1_t {
    // Note this LLVM intrinsic is the scalar `f64` form (link_name has no
    // vector suffix), so the single lane is extracted, rounded, and re-wrapped.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.frint32z.f64"
        )]
        fn _vrnd32z_f64(a: f64) -> f64;
    }
    // SAFETY: `frintts` is enabled via `#[target_feature]`; transmuting the
    // scalar result back into a 1-lane f64 vector is a same-size bit cast.
    unsafe { transmute(_vrnd32z_f64(simd_extract!(a, 0))) }
}
#[doc = "Floating-point round to 64-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64x_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))]
pub fn vrnd64x_f32(a: float32x2_t) -> float32x2_t {
    // Declaration of the LLVM intrinsic that lowers to the FRINT64X instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint64x.v2f32"
        )]
        fn _vrnd64x_f32(a: float32x2_t) -> float32x2_t;
    }
    // SAFETY: the required `frintts` feature is enabled via `#[target_feature]`.
    unsafe { _vrnd64x_f32(a) }
}
#[doc = "Floating-point round to 64-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64xq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))]
pub fn vrnd64xq_f32(a: float32x4_t) -> float32x4_t {
    // Declaration of the LLVM intrinsic that lowers to the FRINT64X instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint64x.v4f32"
        )]
        fn _vrnd64xq_f32(a: float32x4_t) -> float32x4_t;
    }
    // SAFETY: the required `frintts` feature is enabled via `#[target_feature]`.
    unsafe { _vrnd64xq_f32(a) }
}
#[doc = "Floating-point round to 64-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64xq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))]
pub fn vrnd64xq_f64(a: float64x2_t) -> float64x2_t {
    // Declaration of the LLVM intrinsic that lowers to the FRINT64X instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint64x.v2f64"
        )]
        fn _vrnd64xq_f64(a: float64x2_t) -> float64x2_t;
    }
    // SAFETY: the required `frintts` feature is enabled via `#[target_feature]`.
    unsafe { _vrnd64xq_f64(a) }
}
#[doc = "Floating-point round to 64-bit integer, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64x_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))]
pub fn vrnd64x_f64(a: float64x1_t) -> float64x1_t {
    // Note this LLVM intrinsic is the scalar `f64` form (link_name has no
    // vector suffix), so the single lane is extracted, rounded, and re-wrapped.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.frint64x.f64"
        )]
        fn _vrnd64x_f64(a: f64) -> f64;
    }
    // SAFETY: `frintts` is enabled via `#[target_feature]`; transmuting the
    // scalar result back into a 1-lane f64 vector is a same-size bit cast.
    unsafe { transmute(_vrnd64x_f64(simd_extract!(a, 0))) }
}
#[doc = "Floating-point round to 64-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64z_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))]
pub fn vrnd64z_f32(a: float32x2_t) -> float32x2_t {
    // Declaration of the LLVM intrinsic that lowers to the FRINT64Z instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint64z.v2f32"
        )]
        fn _vrnd64z_f32(a: float32x2_t) -> float32x2_t;
    }
    // SAFETY: the required `frintts` feature is enabled via `#[target_feature]`.
    unsafe { _vrnd64z_f32(a) }
}
#[doc = "Floating-point round to 64-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64zq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))]
pub fn vrnd64zq_f32(a: float32x4_t) -> float32x4_t {
    // Declaration of the LLVM intrinsic that lowers to the FRINT64Z instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint64z.v4f32"
        )]
        fn _vrnd64zq_f32(a: float32x4_t) -> float32x4_t;
    }
    // SAFETY: the required `frintts` feature is enabled via `#[target_feature]`.
    unsafe { _vrnd64zq_f32(a) }
}
#[doc = "Floating-point round to 64-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64zq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))]
pub fn vrnd64zq_f64(a: float64x2_t) -> float64x2_t {
    // Declaration of the LLVM intrinsic that lowers to the FRINT64Z instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frint64z.v2f64"
        )]
        fn _vrnd64zq_f64(a: float64x2_t) -> float64x2_t;
    }
    // SAFETY: the required `frintts` feature is enabled via `#[target_feature]`.
    unsafe { _vrnd64zq_f64(a) }
}
#[doc = "Floating-point round to 64-bit integer toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64z_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,frintts")]
#[unstable(feature = "stdarch_neon_ftts", issue = "117227")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))]
pub fn vrnd64z_f64(a: float64x1_t) -> float64x1_t {
    // Note this LLVM intrinsic is the scalar `f64` form (link_name has no
    // vector suffix), so the single lane is extracted, rounded, and re-wrapped.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.frint64z.f64"
        )]
        fn _vrnd64z_f64(a: f64) -> f64;
    }
    // SAFETY: `frintts` is enabled via `#[target_feature]`; transmuting the
    // scalar result back into a 1-lane f64 vector is a same-size bit cast.
    unsafe { transmute(_vrnd64z_f64(simd_extract!(a, 0))) }
}
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintz))]
pub fn vrnd_f16(a: float16x4_t) -> float16x4_t {
    // SAFETY: lane-wise truncation toward zero (FRINTZ) via the portable
    // `simd_trunc` intrinsic; `fp16` is enabled via `#[target_feature]`.
    unsafe { simd_trunc(a) }
}
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintz))]
pub fn vrndq_f16(a: float16x8_t) -> float16x8_t {
    // SAFETY: element-wise truncation (round toward zero); pure vector op.
    unsafe { simd_trunc(a) }
}
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintz))]
pub fn vrnd_f32(a: float32x2_t) -> float32x2_t {
    // SAFETY: element-wise truncation (round toward zero); pure vector op.
    unsafe { simd_trunc(a) }
}
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintz))]
pub fn vrndq_f32(a: float32x4_t) -> float32x4_t {
    // SAFETY: element-wise truncation (round toward zero); pure vector op.
    unsafe { simd_trunc(a) }
}
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintz))]
pub fn vrnd_f64(a: float64x1_t) -> float64x1_t {
    // SAFETY: element-wise truncation (round toward zero); pure vector op.
    unsafe { simd_trunc(a) }
}
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintz))]
pub fn vrndq_f64(a: float64x2_t) -> float64x2_t {
    // SAFETY: element-wise truncation (round toward zero); pure vector op.
    unsafe { simd_trunc(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnda_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrnda_f16(a: float16x4_t) -> float16x4_t {
    // SAFETY: element-wise round-to-nearest, ties away; pure vector op.
    unsafe { simd_round(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndaq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrndaq_f16(a: float16x8_t) -> float16x8_t {
    // SAFETY: element-wise round-to-nearest, ties away; pure vector op.
    unsafe { simd_round(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnda_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrnda_f32(a: float32x2_t) -> float32x2_t {
    // SAFETY: element-wise round-to-nearest, ties away; pure vector op.
    unsafe { simd_round(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndaq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrndaq_f32(a: float32x4_t) -> float32x4_t {
    // SAFETY: element-wise round-to-nearest, ties away; pure vector op.
    unsafe { simd_round(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnda_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrnda_f64(a: float64x1_t) -> float64x1_t {
    // SAFETY: element-wise round-to-nearest, ties away; pure vector op.
    unsafe { simd_round(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndaq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrndaq_f64(a: float64x2_t) -> float64x2_t {
    // SAFETY: element-wise round-to-nearest, ties away; pure vector op.
    unsafe { simd_round(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to away"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndah_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frinta))]
pub fn vrndah_f16(a: f16) -> f16 {
    // Scalar form: delegates to the shared `f16` rounding helper
    // (nearest, ties away — matching the FRINTA assertion above).
    roundf16(a)
}
#[doc = "Floating-point round to integral, toward zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintz))]
pub fn vrndh_f16(a: f16) -> f16 {
    // Scalar form: delegates to the shared `f16` truncation helper
    // (round toward zero — matching the FRINTZ assertion above).
    truncf16(a)
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndi_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndi_f16(a: float16x4_t) -> float16x4_t {
    // Binds `llvm.nearbyint`, which rounds using the current FP rounding
    // mode and lowers to FRINTI per the assertion above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v4f16"
        )]
        fn _vrndi_f16(a: float16x4_t) -> float16x4_t;
    }
    // SAFETY: pure element-wise rounding; required features enabled above.
    unsafe { _vrndi_f16(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndiq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndiq_f16(a: float16x8_t) -> float16x8_t {
    // Binds `llvm.nearbyint`, which rounds using the current FP rounding
    // mode and lowers to FRINTI per the assertion above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v8f16"
        )]
        fn _vrndiq_f16(a: float16x8_t) -> float16x8_t;
    }
    // SAFETY: pure element-wise rounding; required features enabled above.
    unsafe { _vrndiq_f16(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndi_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndi_f32(a: float32x2_t) -> float32x2_t {
    // Binds `llvm.nearbyint`, which rounds using the current FP rounding
    // mode and lowers to FRINTI per the assertion above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v2f32"
        )]
        fn _vrndi_f32(a: float32x2_t) -> float32x2_t;
    }
    // SAFETY: pure element-wise rounding; required feature enabled above.
    unsafe { _vrndi_f32(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndiq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndiq_f32(a: float32x4_t) -> float32x4_t {
    // Binds `llvm.nearbyint`, which rounds using the current FP rounding
    // mode and lowers to FRINTI per the assertion above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v4f32"
        )]
        fn _vrndiq_f32(a: float32x4_t) -> float32x4_t;
    }
    // SAFETY: pure element-wise rounding; required feature enabled above.
    unsafe { _vrndiq_f32(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndi_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndi_f64(a: float64x1_t) -> float64x1_t {
    // Binds `llvm.nearbyint`, which rounds using the current FP rounding
    // mode and lowers to FRINTI per the assertion above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v1f64"
        )]
        fn _vrndi_f64(a: float64x1_t) -> float64x1_t;
    }
    // SAFETY: pure element-wise rounding; required feature enabled above.
    unsafe { _vrndi_f64(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndiq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndiq_f64(a: float64x2_t) -> float64x2_t {
    // Binds `llvm.nearbyint`, which rounds using the current FP rounding
    // mode and lowers to FRINTI per the assertion above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.v2f64"
        )]
        fn _vrndiq_f64(a: float64x2_t) -> float64x2_t;
    }
    // SAFETY: pure element-wise rounding; required feature enabled above.
    unsafe { _vrndiq_f64(a) }
}
#[doc = "Floating-point round to integral, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndih_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frinti))]
pub fn vrndih_f16(a: f16) -> f16 {
    // Scalar form of vrndi: binds the scalar `llvm.nearbyint.f16` intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.nearbyint.f16"
        )]
        fn _vrndih_f16(a: f16) -> f16;
    }
    // SAFETY: pure scalar rounding; required features enabled above.
    unsafe { _vrndih_f16(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndm_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndm_f16(a: float16x4_t) -> float16x4_t {
    // SAFETY: element-wise floor (round toward -inf); pure vector op.
    unsafe { simd_floor(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndmq_f16(a: float16x8_t) -> float16x8_t {
    // SAFETY: element-wise floor (round toward -inf); pure vector op.
    unsafe { simd_floor(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndm_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndm_f32(a: float32x2_t) -> float32x2_t {
    // SAFETY: element-wise floor (round toward -inf); pure vector op.
    unsafe { simd_floor(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndmq_f32(a: float32x4_t) -> float32x4_t {
    // SAFETY: element-wise floor (round toward -inf); pure vector op.
    unsafe { simd_floor(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndm_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndm_f64(a: float64x1_t) -> float64x1_t {
    // SAFETY: element-wise floor (round toward -inf); pure vector op.
    unsafe { simd_floor(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndmq_f64(a: float64x2_t) -> float64x2_t {
    // SAFETY: element-wise floor (round toward -inf); pure vector op.
    unsafe { simd_floor(a) }
}
#[doc = "Floating-point round to integral, toward minus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintm))]
pub fn vrndmh_f16(a: f16) -> f16 {
    // Scalar form: delegates to the shared `f16` floor helper
    // (round toward -inf — matching the FRINTM assertion above).
    floorf16(a)
}
#[doc = "Floating-point round to integral, to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndn_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintn))]
pub fn vrndn_f64(a: float64x1_t) -> float64x1_t {
    // Binds `llvm.roundeven` (nearest, ties to even), lowering to FRINTN.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.roundeven.v1f64"
        )]
        fn _vrndn_f64(a: float64x1_t) -> float64x1_t;
    }
    // SAFETY: pure element-wise rounding; required feature enabled above.
    unsafe { _vrndn_f64(a) }
}
#[doc = "Floating-point round to integral, to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndnq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintn))]
pub fn vrndnq_f64(a: float64x2_t) -> float64x2_t {
    // Binds `llvm.roundeven` (nearest, ties to even), lowering to FRINTN.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.roundeven.v2f64"
        )]
        fn _vrndnq_f64(a: float64x2_t) -> float64x2_t;
    }
    // SAFETY: pure element-wise rounding; required feature enabled above.
    unsafe { _vrndnq_f64(a) }
}
23847#[doc = "Floating-point round to integral, toward minus infinity"]
23848#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndnh_f16)"]
23849#[inline(always)]
23850#[target_feature(enable = "neon,fp16")]
23851#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
23852#[cfg(not(target_arch = "arm64ec"))]
23853#[cfg_attr(test, assert_instr(frintn))]
23854pub fn vrndnh_f16(a: f16) -> f16 {
23855    unsafe extern "unadjusted" {
23856        #[cfg_attr(
23857            any(target_arch = "aarch64", target_arch = "arm64ec"),
23858            link_name = "llvm.roundeven.f16"
23859        )]
23860        fn _vrndnh_f16(a: f16) -> f16;
23861    }
23862    unsafe { _vrndnh_f16(a) }
23863}
#[doc = "Floating-point round to integral, to nearest with ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndns_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintn))]
pub fn vrndns_f32(a: f32) -> f32 {
    // Scalar form: binds `llvm.roundeven.f32` (nearest, ties to even).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.roundeven.f32"
        )]
        fn _vrndns_f32(a: f32) -> f32;
    }
    // SAFETY: pure scalar rounding; required feature enabled above.
    unsafe { _vrndns_f32(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndp_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndp_f16(a: float16x4_t) -> float16x4_t {
    // SAFETY: element-wise ceil (round toward +inf); pure vector op.
    unsafe { simd_ceil(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndpq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndpq_f16(a: float16x8_t) -> float16x8_t {
    // SAFETY: element-wise ceil (round toward +inf); pure vector op.
    unsafe { simd_ceil(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndp_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndp_f32(a: float32x2_t) -> float32x2_t {
    // SAFETY: element-wise ceil (round toward +inf); pure vector op.
    unsafe { simd_ceil(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndpq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndpq_f32(a: float32x4_t) -> float32x4_t {
    // SAFETY: element-wise ceil (round toward +inf); pure vector op.
    unsafe { simd_ceil(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndp_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndp_f64(a: float64x1_t) -> float64x1_t {
    // SAFETY: element-wise ceil (round toward +inf); pure vector op.
    unsafe { simd_ceil(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndpq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndpq_f64(a: float64x2_t) -> float64x2_t {
    // SAFETY: element-wise ceil (round toward +inf); pure vector op.
    unsafe { simd_ceil(a) }
}
#[doc = "Floating-point round to integral, toward plus infinity"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndph_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintp))]
pub fn vrndph_f16(a: f16) -> f16 {
    // Scalar form: delegates to the shared `f16` ceil helper
    // (round toward +inf — matching the FRINTP assertion above).
    ceilf16(a)
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndx_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndx_f16(a: float16x4_t) -> float16x4_t {
    // SAFETY: element-wise round-to-nearest, ties to even; pure vector op.
    unsafe { simd_round_ties_even(a) }
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxq_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndxq_f16(a: float16x8_t) -> float16x8_t {
    // SAFETY: element-wise round-to-nearest, ties to even; pure vector op.
    unsafe { simd_round_ties_even(a) }
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndx_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndx_f32(a: float32x2_t) -> float32x2_t {
    // SAFETY: element-wise round-to-nearest, ties to even; pure vector op.
    unsafe { simd_round_ties_even(a) }
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndxq_f32(a: float32x4_t) -> float32x4_t {
    // SAFETY: element-wise round-to-nearest, ties to even; pure vector op.
    unsafe { simd_round_ties_even(a) }
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndx_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndx_f64(a: float64x1_t) -> float64x1_t {
    // SAFETY: element-wise round-to-nearest, ties to even; pure vector op.
    unsafe { simd_round_ties_even(a) }
}
#[doc = "Floating-point round to integral exact, using current rounding mode"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(frintx))]
pub fn vrndxq_f64(a: float64x2_t) -> float64x2_t {
    // SAFETY: element-wise round-to-nearest, ties to even; pure vector op.
    unsafe { simd_round_ties_even(a) }
}
24002#[doc = "Floating-point round to integral, using current rounding mode"]
24003#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxh_f16)"]
24004#[inline(always)]
24005#[target_feature(enable = "neon,fp16")]
24006#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
24007#[cfg(not(target_arch = "arm64ec"))]
24008#[cfg_attr(test, assert_instr(frintx))]
24009pub fn vrndxh_f16(a: f16) -> f16 {
24010    round_ties_even_f16(a)
24011}
#[doc = "Signed rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshld_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(srshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshld_s64(a: i64, b: i64) -> i64 {
    // Binds the scalar SRSHL intrinsic; a negative `b` shifts right with
    // rounding (used by vrshrd_n_s64 below).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.srshl.i64"
        )]
        fn _vrshld_s64(a: i64, b: i64) -> i64;
    }
    // SAFETY: pure scalar integer operation; required feature enabled above.
    unsafe { _vrshld_s64(a, b) }
}
#[doc = "Unsigned rounding shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshld_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(urshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshld_u64(a: u64, b: i64) -> u64 {
    // Binds the scalar URSHL intrinsic; a negative `b` shifts right with
    // rounding (used by vrshrd_n_u64 below).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.urshl.i64"
        )]
        fn _vrshld_u64(a: u64, b: i64) -> u64;
    }
    // SAFETY: pure scalar integer operation; required feature enabled above.
    unsafe { _vrshld_u64(a, b) }
}
#[doc = "Signed rounding shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrd_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(srshr, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrd_n_s64<const N: i32>(a: i64) -> i64 {
    // N is the immediate shift amount; a 64-bit lane allows 1..=64.
    static_assert!(N >= 1 && N <= 64);
    // A rounding right shift by N is a rounding left shift by -N.
    vrshld_s64(a, -N as i64)
}
#[doc = "Unsigned rounding shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrd_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(urshr, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrd_n_u64<const N: i32>(a: u64) -> u64 {
    // N is the immediate shift amount; a 64-bit lane allows 1..=64.
    static_assert!(N >= 1 && N <= 64);
    // A rounding right shift by N is a rounding left shift by -N.
    vrshld_u64(a, -N as i64)
}
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_s16<const N: i32>(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    // N is the immediate shift; narrowing 16->8 bits allows 1..=8.
    static_assert!(N >= 1 && N <= 8);
    // SAFETY: narrows `b` via the rounding shift, then concatenates `a`
    // (low half, indices 0..8) with the narrowed result (high half).
    unsafe {
        simd_shuffle!(
            a,
            vrshrn_n_s16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_s32<const N: i32>(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    // N is the immediate shift; narrowing 32->16 bits allows 1..=16.
    static_assert!(N >= 1 && N <= 16);
    // SAFETY: concatenates `a` (low half) with the narrowed `b` (high half).
    unsafe { simd_shuffle!(a, vrshrn_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_s64<const N: i32>(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    // N is the immediate shift; narrowing 64->32 bits allows 1..=32.
    static_assert!(N >= 1 && N <= 32);
    // SAFETY: concatenates `a` (low half) with the narrowed `b` (high half).
    unsafe { simd_shuffle!(a, vrshrn_n_s64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_u16<const N: i32>(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
    // N is the immediate shift; narrowing 16->8 bits allows 1..=8.
    static_assert!(N >= 1 && N <= 8);
    // SAFETY: narrows `b` via the rounding shift, then concatenates `a`
    // (low half, indices 0..8) with the narrowed result (high half).
    unsafe {
        simd_shuffle!(
            a,
            vrshrn_n_u16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_u32<const N: i32>(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    // N is the immediate shift; narrowing 32->16 bits allows 1..=16.
    static_assert!(N >= 1 && N <= 16);
    // SAFETY: concatenates `a` (low half) with the narrowed `b` (high half).
    unsafe { simd_shuffle!(a, vrshrn_n_u32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Rounding shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(rshrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrshrn_high_n_u64<const N: i32>(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    // N is the immediate shift; narrowing 64->32 bits allows 1..=32.
    static_assert!(N >= 1 && N <= 32);
    // SAFETY: concatenates `a` (low half) with the narrowed `b` (high half).
    unsafe { simd_shuffle!(a, vrshrn_n_u64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Reciprocal square-root estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrte_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrte))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrte_f64(a: float64x1_t) -> float64x1_t {
    // Binds the FRSQRTE intrinsic (estimate only — not a full-precision
    // 1/sqrt; refinement is done via separate step intrinsics).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrte.v1f64"
        )]
        fn _vrsqrte_f64(a: float64x1_t) -> float64x1_t;
    }
    // SAFETY: pure element-wise estimate; required feature enabled above.
    unsafe { _vrsqrte_f64(a) }
}
#[doc = "Reciprocal square-root estimate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrteq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrte))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrteq_f64(a: float64x2_t) -> float64x2_t {
    // Binds the FRSQRTE intrinsic (estimate only — not a full-precision
    // 1/sqrt; refinement is done via separate step intrinsics).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrte.v2f64"
        )]
        fn _vrsqrteq_f64(a: float64x2_t) -> float64x2_t;
    }
    // SAFETY: pure element-wise estimate; required feature enabled above.
    unsafe { _vrsqrteq_f64(a) }
}
24176#[doc = "Reciprocal square-root estimate."]
24177#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrted_f64)"]
24178#[inline(always)]
24179#[target_feature(enable = "neon")]
24180#[cfg_attr(test, assert_instr(frsqrte))]
24181#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24182pub fn vrsqrted_f64(a: f64) -> f64 {
24183    unsafe extern "unadjusted" {
24184        #[cfg_attr(
24185            any(target_arch = "aarch64", target_arch = "arm64ec"),
24186            link_name = "llvm.aarch64.neon.frsqrte.f64"
24187        )]
24188        fn _vrsqrted_f64(a: f64) -> f64;
24189    }
24190    unsafe { _vrsqrted_f64(a) }
24191}
24192#[doc = "Reciprocal square-root estimate."]
24193#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtes_f32)"]
24194#[inline(always)]
24195#[target_feature(enable = "neon")]
24196#[cfg_attr(test, assert_instr(frsqrte))]
24197#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24198pub fn vrsqrtes_f32(a: f32) -> f32 {
24199    unsafe extern "unadjusted" {
24200        #[cfg_attr(
24201            any(target_arch = "aarch64", target_arch = "arm64ec"),
24202            link_name = "llvm.aarch64.neon.frsqrte.f32"
24203        )]
24204        fn _vrsqrtes_f32(a: f32) -> f32;
24205    }
24206    unsafe { _vrsqrtes_f32(a) }
24207}
24208#[doc = "Reciprocal square-root estimate."]
24209#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrteh_f16)"]
24210#[inline(always)]
24211#[cfg_attr(test, assert_instr(frsqrte))]
24212#[target_feature(enable = "neon,fp16")]
24213#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
24214#[cfg(not(target_arch = "arm64ec"))]
24215pub fn vrsqrteh_f16(a: f16) -> f16 {
24216    unsafe extern "unadjusted" {
24217        #[cfg_attr(
24218            any(target_arch = "aarch64", target_arch = "arm64ec"),
24219            link_name = "llvm.aarch64.neon.frsqrte.f16"
24220        )]
24221        fn _vrsqrteh_f16(a: f16) -> f16;
24222    }
24223    unsafe { _vrsqrteh_f16(a) }
24224}
#[doc = "Floating-point reciprocal square root step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrts_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrts))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrts_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // Direct binding to the LLVM intrinsic; lowers to a single FRSQRTS
    // instruction (the Newton-Raphson refinement step paired with FRSQRTE).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrts.v1f64"
        )]
        fn _vrsqrts_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t;
    }
    // SAFETY: `neon` is guaranteed by the `target_feature` attribute above.
    unsafe { _vrsqrts_f64(a, b) }
}
#[doc = "Floating-point reciprocal square root step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrts))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrtsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // 128-bit (2 x f64) form of the step above.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrts.v2f64"
        )]
        fn _vrsqrtsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t;
    }
    unsafe { _vrsqrtsq_f64(a, b) }
}
#[doc = "Floating-point reciprocal square root step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsd_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrts))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrtsd_f64(a: f64, b: f64) -> f64 {
    // Scalar (double-precision) form.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrts.f64"
        )]
        fn _vrsqrtsd_f64(a: f64, b: f64) -> f64;
    }
    unsafe { _vrsqrtsd_f64(a, b) }
}
#[doc = "Floating-point reciprocal square root step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtss_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(frsqrts))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsqrtss_f32(a: f32, b: f32) -> f32 {
    // Scalar (single-precision) form.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrts.f32"
        )]
        fn _vrsqrtss_f32(a: f32, b: f32) -> f32;
    }
    unsafe { _vrsqrtss_f32(a, b) }
}
#[doc = "Floating-point reciprocal square root step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(test, assert_instr(frsqrts))]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vrsqrtsh_f16(a: f16, b: f16) -> f16 {
    // Scalar half-precision form. Requires `fp16`; still unstable, and not
    // compiled for arm64ec (see the `cfg` attribute above).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.frsqrts.f16"
        )]
        fn _vrsqrtsh_f16(a: f16, b: f16) -> f16;
    }
    unsafe { _vrsqrtsh_f16(a, b) }
}
#[doc = "Signed rounding shift right and accumulate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsrad_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(srshr, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsrad_n_s64<const N: i32>(a: i64, b: i64) -> i64 {
    // Immediate shift must be 1..=64 (the full element width).
    static_assert!(N >= 1 && N <= 64);
    // Rounding-shift `b` right by N, then accumulate into `a`.
    let b: i64 = vrshrd_n_s64::<N>(b);
    // Wrapping add: the accumulate wraps on overflow rather than panicking.
    a.wrapping_add(b)
}
#[doc = "Unsigned rounding shift right and accumulate."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsrad_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(urshr, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsrad_n_u64<const N: i32>(a: u64, b: u64) -> u64 {
    // Unsigned counterpart of `vrsrad_n_s64`; same 1..=64 shift range.
    static_assert!(N >= 1 && N <= 64);
    let b: u64 = vrshrd_n_u64::<N>(b);
    a.wrapping_add(b)
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "little")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x16_t {
    // Rounding-subtract `c` from `b` and keep the narrowed high halves,
    // then concatenate: lanes 0-7 of the result are `a`, lanes 8-15 are `x`.
    let x: int8x8_t = vrsubhn_s16(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "little")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16x8_t {
    // Same pattern as the s16 variant, at 32 -> 16 bit narrowing.
    let x: int16x4_t = vrsubhn_s32(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "little")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32x4_t {
    // Same pattern, at 64 -> 32 bit narrowing.
    let x: int32x2_t = vrsubhn_s64(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "little")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uint8x16_t {
    // Unsigned counterpart of the s16 variant.
    let x: uint8x8_t = vrsubhn_u16(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "little")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> uint16x8_t {
    // Unsigned counterpart of the s32 variant.
    let x: uint16x4_t = vrsubhn_u32(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "little")]
#[cfg_attr(test, assert_instr(rsubhn2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> uint32x4_t {
    // Unsigned counterpart of the s64 variant.
    let x: uint32x2_t = vrsubhn_u64(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "big")]
#[cfg_attr(test, assert_instr(rsubhn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x16_t {
    // Big-endian definitions: the body is identical to the little-endian set
    // above; only the instruction asserted in tests differs (rsubhn, not
    // rsubhn2). NOTE(review): presumably big-endian shuffle lowering selects a
    // different instruction sequence — per the generator; confirm against
    // stdarch-gen-arm spec before relying on this.
    let x: int8x8_t = vrsubhn_s16(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "big")]
#[cfg_attr(test, assert_instr(rsubhn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16x8_t {
    // Big-endian twin of the little-endian s32 variant above.
    let x: int16x4_t = vrsubhn_s32(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "big")]
#[cfg_attr(test, assert_instr(rsubhn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32x4_t {
    // Big-endian twin of the little-endian s64 variant above.
    let x: int32x2_t = vrsubhn_s64(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "big")]
#[cfg_attr(test, assert_instr(rsubhn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uint8x16_t {
    // Big-endian twin of the little-endian u16 variant above.
    let x: uint8x8_t = vrsubhn_u16(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "big")]
#[cfg_attr(test, assert_instr(rsubhn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> uint16x8_t {
    // Big-endian twin of the little-endian u32 variant above.
    let x: uint16x4_t = vrsubhn_u32(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Rounding subtract returning high narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg(target_endian = "big")]
#[cfg_attr(test, assert_instr(rsubhn))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vrsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> uint32x4_t {
    // Big-endian twin of the little-endian u64 variant above.
    let x: uint32x2_t = vrsubhn_u64(b, c);
    unsafe { simd_shuffle!(a, x, [0, 1, 2, 3]) }
}
#[doc = "Multi-vector floating-point adjust exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vscale_f16)"]
#[inline(always)]
#[unstable(feature = "stdarch_neon_fp8", issue = "none")]
#[target_feature(enable = "neon,fp8")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(fscale))]
pub fn vscale_f16(vn: float16x4_t, vm: int16x4_t) -> float16x4_t {
    // Binding to the LLVM FSCALE intrinsic; requires the `fp8` target feature
    // and is gated as unstable. The assert_instr check is skipped on MSVC
    // (see the cfg above).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fp8.fscale.v4f16"
        )]
        fn _vscale_f16(vn: float16x4_t, vm: int16x4_t) -> float16x4_t;
    }
    // SAFETY: required target features are enabled by the attribute above.
    unsafe { _vscale_f16(vn, vm) }
}
#[doc = "Multi-vector floating-point adjust exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vscaleq_f16)"]
#[inline(always)]
#[unstable(feature = "stdarch_neon_fp8", issue = "none")]
#[target_feature(enable = "neon,fp8")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(fscale))]
pub fn vscaleq_f16(vn: float16x8_t, vm: int16x8_t) -> float16x8_t {
    // 128-bit (8 x f16) form.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fp8.fscale.v8f16"
        )]
        fn _vscaleq_f16(vn: float16x8_t, vm: int16x8_t) -> float16x8_t;
    }
    unsafe { _vscaleq_f16(vn, vm) }
}
#[doc = "Multi-vector floating-point adjust exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vscale_f32)"]
#[inline(always)]
#[unstable(feature = "stdarch_neon_fp8", issue = "none")]
#[target_feature(enable = "neon,fp8")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(fscale))]
pub fn vscale_f32(vn: float32x2_t, vm: int32x2_t) -> float32x2_t {
    // 64-bit (2 x f32) form.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fp8.fscale.v2f32"
        )]
        fn _vscale_f32(vn: float32x2_t, vm: int32x2_t) -> float32x2_t;
    }
    unsafe { _vscale_f32(vn, vm) }
}
#[doc = "Multi-vector floating-point adjust exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vscaleq_f32)"]
#[inline(always)]
#[unstable(feature = "stdarch_neon_fp8", issue = "none")]
#[target_feature(enable = "neon,fp8")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(fscale))]
pub fn vscaleq_f32(vn: float32x4_t, vm: int32x4_t) -> float32x4_t {
    // 128-bit (4 x f32) form.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fp8.fscale.v4f32"
        )]
        fn _vscaleq_f32(vn: float32x4_t, vm: int32x4_t) -> float32x4_t;
    }
    unsafe { _vscaleq_f32(vn, vm) }
}
#[doc = "Multi-vector floating-point adjust exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vscaleq_f64)"]
#[inline(always)]
#[unstable(feature = "stdarch_neon_fp8", issue = "none")]
#[target_feature(enable = "neon,fp8")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(fscale))]
pub fn vscaleq_f64(vn: float64x2_t, vm: int64x2_t) -> float64x2_t {
    // 128-bit (2 x f64) form.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.fp8.fscale.v2f64"
        )]
        fn _vscaleq_f64(vn: float64x2_t, vm: int64x2_t) -> float64x2_t;
    }
    unsafe { _vscaleq_f64(vn, vm) }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vset_lane_f64<const LANE: i32>(a: f64, b: float64x1_t) -> float64x1_t {
    // A 1-element vector has only lane 0.
    static_assert!(LANE == 0);
    unsafe { simd_insert!(b, LANE as u32, a) }
}
#[doc = "Insert vector element from another vector element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsetq_lane_f64<const LANE: i32>(a: f64, b: float64x2_t) -> float64x2_t {
    // LANE must fit in 1 unsigned bit, i.e. lane 0 or 1 of the 2-element vector.
    static_assert_uimm_bits!(LANE, 1);
    unsafe { simd_insert!(b, LANE as u32, a) }
}
#[doc = "SHA512 hash update part 2"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512h2q_u64)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(sha512h2))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub fn vsha512h2q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
    // Binding to the crypto intrinsic; lowers to the SHA512H2 instruction.
    // Requires both `neon` and `sha3` target features.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sha512h2"
        )]
        fn _vsha512h2q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
    }
    // SAFETY: required target features are enabled by the attribute above.
    unsafe { _vsha512h2q_u64(a, b, c) }
}
#[doc = "SHA512 hash update part 1"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512hq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(sha512h))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub fn vsha512hq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
    // Lowers to the SHA512H instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sha512h"
        )]
        fn _vsha512hq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
    }
    unsafe { _vsha512hq_u64(a, b, c) }
}
#[doc = "SHA512 schedule update 0"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512su0q_u64)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(sha512su0))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub fn vsha512su0q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // Lowers to the SHA512SU0 instruction (two operands, unlike the others).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sha512su0"
        )]
        fn _vsha512su0q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t;
    }
    unsafe { _vsha512su0q_u64(a, b) }
}
#[doc = "SHA512 schedule update 1"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512su1q_u64)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(sha512su1))]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub fn vsha512su1q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t {
    // Lowers to the SHA512SU1 instruction.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.sha512su1"
        )]
        fn _vsha512su1q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t;
    }
    unsafe { _vsha512su1q_u64(a, b, c) }
}
#[doc = "Signed Shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshld_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sshl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshld_s64(a: i64, b: i64) -> i64 {
    // Reuses the 1-lane vector intrinsic for the scalar form: transmute each
    // i64 into an int64x1_t, shift, and transmute the single lane back.
    unsafe { transmute(vshl_s64(transmute(a), transmute(b))) }
}
#[doc = "Unsigned Shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshld_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(ushl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshld_u64(a: u64, b: i64) -> u64 {
    // Unsigned counterpart; note the shift amount `b` stays signed (i64),
    // matching the USHL instruction's signed shift operand.
    unsafe { transmute(vshl_u64(transmute(a), transmute(b))) }
}
#[doc = "Signed shift left long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sshll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshll_high_n_s8<const N: i32>(a: int8x16_t) -> int16x8_t {
    // Immediate shift may be 0..=8 (up to the source element width).
    static_assert!(N >= 0 && N <= 8);
    unsafe {
        // Extract the high half (lanes 8-15), then widen-shift it left by N.
        let b: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        vshll_n_s8::<N>(b)
    }
}
#[doc = "Signed shift left long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sshll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshll_high_n_s16<const N: i32>(a: int16x8_t) -> int32x4_t {
    // Same pattern at 16 -> 32 bit widening; shift range 0..=16.
    static_assert!(N >= 0 && N <= 16);
    unsafe {
        let b: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        vshll_n_s16::<N>(b)
    }
}
#[doc = "Signed shift left long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sshll2, N = 2))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshll_high_n_s32<const N: i32>(a: int32x4_t) -> int64x2_t {
    // Same pattern at 32 -> 64 bit widening; shift range 0..=32.
    static_assert!(N >= 0 && N <= 32);
    unsafe {
        let b: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        vshll_n_s32::<N>(b)
    }
}
24688#[doc = "Signed shift left long"]
24689#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u8)"]
24690#[inline(always)]
24691#[target_feature(enable = "neon")]
24692#[cfg_attr(test, assert_instr(ushll2, N = 2))]
24693#[rustc_legacy_const_generics(1)]
24694#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24695pub fn vshll_high_n_u8<const N: i32>(a: uint8x16_t) -> uint16x8_t {
24696    static_assert!(N >= 0 && N <= 8);
24697    unsafe {
24698        let b: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
24699        vshll_n_u8::<N>(b)
24700    }
24701}
24702#[doc = "Signed shift left long"]
24703#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u16)"]
24704#[inline(always)]
24705#[target_feature(enable = "neon")]
24706#[cfg_attr(test, assert_instr(ushll2, N = 2))]
24707#[rustc_legacy_const_generics(1)]
24708#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24709pub fn vshll_high_n_u16<const N: i32>(a: uint16x8_t) -> uint32x4_t {
24710    static_assert!(N >= 0 && N <= 16);
24711    unsafe {
24712        let b: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
24713        vshll_n_u16::<N>(b)
24714    }
24715}
24716#[doc = "Signed shift left long"]
24717#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u32)"]
24718#[inline(always)]
24719#[target_feature(enable = "neon")]
24720#[cfg_attr(test, assert_instr(ushll2, N = 2))]
24721#[rustc_legacy_const_generics(1)]
24722#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24723pub fn vshll_high_n_u32<const N: i32>(a: uint32x4_t) -> uint64x2_t {
24724    static_assert!(N >= 0 && N <= 32);
24725    unsafe {
24726        let b: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
24727        vshll_n_u32::<N>(b)
24728    }
24729}
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_s16<const N: i32>(a: int8x8_t, b: int16x8_t) -> int8x16_t {
    // Immediate shift must be 1..=8 (up to the width of the narrowed element).
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        // Narrow `b` by shifting right N, then concatenate: lanes 0-7 come
        // from `a`, lanes 8-15 from the narrowed `b`.
        simd_shuffle!(
            a,
            vshrn_n_s16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_s32<const N: i32>(a: int16x4_t, b: int32x4_t) -> int16x8_t {
    // Same pattern at 32 -> 16 bit narrowing; shift range 1..=16.
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_shuffle!(a, vshrn_n_s32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_s64<const N: i32>(a: int32x2_t, b: int64x2_t) -> int32x4_t {
    // Same pattern at 64 -> 32 bit narrowing; shift range 1..=32.
    static_assert!(N >= 1 && N <= 32);
    unsafe { simd_shuffle!(a, vshrn_n_s64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_u16<const N: i32>(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t {
    // Unsigned counterpart of the s16 variant; shift range 1..=8.
    static_assert!(N >= 1 && N <= 8);
    unsafe {
        simd_shuffle!(
            a,
            vshrn_n_u16::<N>(b),
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        )
    }
}
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_u32<const N: i32>(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t {
    // Unsigned counterpart of the s32 variant; shift range 1..=16.
    static_assert!(N >= 1 && N <= 16);
    unsafe { simd_shuffle!(a, vshrn_n_u32::<N>(b), [0, 1, 2, 3, 4, 5, 6, 7]) }
}
#[doc = "Shift right narrow"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(shrn2, N = 2))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vshrn_high_n_u64<const N: i32>(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t {
    // Unsigned counterpart of the s64 variant; shift range 1..=32.
    static_assert!(N >= 1 && N <= 32);
    unsafe { simd_shuffle!(a, vshrn_n_u64::<N>(b), [0, 1, 2, 3]) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    // N must fit in 3 unsigned bits (0..=7) for 8-bit elements.
    static_assert_uimm_bits!(N, 3);
    // The LLVM intrinsic takes the shift as a runtime i32 operand, so the
    // compile-time-validated N is forwarded as a plain argument below.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v8i8"
        )]
        fn _vsli_n_s8(a: int8x8_t, b: int8x8_t, n: i32) -> int8x8_t;
    }
    unsafe { _vsli_n_s8(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsliq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    // 128-bit (16 x i8) form; same 0..=7 shift range.
    static_assert_uimm_bits!(N, 3);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v16i8"
        )]
        fn _vsliq_n_s8(a: int8x16_t, b: int8x16_t, n: i32) -> int8x16_t;
    }
    unsafe { _vsliq_n_s8(a, b, N) }
}
#[doc = "Shift Left and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sli, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsli_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    // 16-bit elements: N must fit in 4 unsigned bits (0..=15).
    static_assert_uimm_bits!(N, 4);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsli.v4i16"
        )]
        fn _vsli_n_s16(a: int16x4_t, b: int16x4_t, n: i32) -> int16x4_t;
    }
    unsafe { _vsli_n_s16(a, b, N) }
}
24862#[doc = "Shift Left and Insert (immediate)"]
24863#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s16)"]
24864#[inline(always)]
24865#[target_feature(enable = "neon")]
24866#[cfg_attr(test, assert_instr(sli, N = 1))]
24867#[rustc_legacy_const_generics(2)]
24868#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24869pub fn vsliq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t {
24870    static_assert_uimm_bits!(N, 4);
24871    unsafe extern "unadjusted" {
24872        #[cfg_attr(
24873            any(target_arch = "aarch64", target_arch = "arm64ec"),
24874            link_name = "llvm.aarch64.neon.vsli.v8i16"
24875        )]
24876        fn _vsliq_n_s16(a: int16x8_t, b: int16x8_t, n: i32) -> int16x8_t;
24877    }
24878    unsafe { _vsliq_n_s16(a, b, N) }
24879}
24880#[doc = "Shift Left and Insert (immediate)"]
24881#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s32)"]
24882#[inline(always)]
24883#[target_feature(enable = "neon")]
24884#[cfg_attr(test, assert_instr(sli, N = 1))]
24885#[rustc_legacy_const_generics(2)]
24886#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24887pub fn vsli_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
24888    static_assert!(N >= 0 && N <= 31);
24889    unsafe extern "unadjusted" {
24890        #[cfg_attr(
24891            any(target_arch = "aarch64", target_arch = "arm64ec"),
24892            link_name = "llvm.aarch64.neon.vsli.v2i32"
24893        )]
24894        fn _vsli_n_s32(a: int32x2_t, b: int32x2_t, n: i32) -> int32x2_t;
24895    }
24896    unsafe { _vsli_n_s32(a, b, N) }
24897}
24898#[doc = "Shift Left and Insert (immediate)"]
24899#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s32)"]
24900#[inline(always)]
24901#[target_feature(enable = "neon")]
24902#[cfg_attr(test, assert_instr(sli, N = 1))]
24903#[rustc_legacy_const_generics(2)]
24904#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24905pub fn vsliq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {
24906    static_assert!(N >= 0 && N <= 31);
24907    unsafe extern "unadjusted" {
24908        #[cfg_attr(
24909            any(target_arch = "aarch64", target_arch = "arm64ec"),
24910            link_name = "llvm.aarch64.neon.vsli.v4i32"
24911        )]
24912        fn _vsliq_n_s32(a: int32x4_t, b: int32x4_t, n: i32) -> int32x4_t;
24913    }
24914    unsafe { _vsliq_n_s32(a, b, N) }
24915}
24916#[doc = "Shift Left and Insert (immediate)"]
24917#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s64)"]
24918#[inline(always)]
24919#[target_feature(enable = "neon")]
24920#[cfg_attr(test, assert_instr(sli, N = 1))]
24921#[rustc_legacy_const_generics(2)]
24922#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24923pub fn vsli_n_s64<const N: i32>(a: int64x1_t, b: int64x1_t) -> int64x1_t {
24924    static_assert!(N >= 0 && N <= 63);
24925    unsafe extern "unadjusted" {
24926        #[cfg_attr(
24927            any(target_arch = "aarch64", target_arch = "arm64ec"),
24928            link_name = "llvm.aarch64.neon.vsli.v1i64"
24929        )]
24930        fn _vsli_n_s64(a: int64x1_t, b: int64x1_t, n: i32) -> int64x1_t;
24931    }
24932    unsafe { _vsli_n_s64(a, b, N) }
24933}
24934#[doc = "Shift Left and Insert (immediate)"]
24935#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s64)"]
24936#[inline(always)]
24937#[target_feature(enable = "neon")]
24938#[cfg_attr(test, assert_instr(sli, N = 1))]
24939#[rustc_legacy_const_generics(2)]
24940#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24941pub fn vsliq_n_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t {
24942    static_assert!(N >= 0 && N <= 63);
24943    unsafe extern "unadjusted" {
24944        #[cfg_attr(
24945            any(target_arch = "aarch64", target_arch = "arm64ec"),
24946            link_name = "llvm.aarch64.neon.vsli.v2i64"
24947        )]
24948        fn _vsliq_n_s64(a: int64x2_t, b: int64x2_t, n: i32) -> int64x2_t;
24949    }
24950    unsafe { _vsliq_n_s64(a, b, N) }
24951}
24952#[doc = "Shift Left and Insert (immediate)"]
24953#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u8)"]
24954#[inline(always)]
24955#[target_feature(enable = "neon")]
24956#[cfg_attr(test, assert_instr(sli, N = 1))]
24957#[rustc_legacy_const_generics(2)]
24958#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24959pub fn vsli_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
24960    static_assert_uimm_bits!(N, 3);
24961    unsafe { transmute(vsli_n_s8::<N>(transmute(a), transmute(b))) }
24962}
24963#[doc = "Shift Left and Insert (immediate)"]
24964#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u8)"]
24965#[inline(always)]
24966#[target_feature(enable = "neon")]
24967#[cfg_attr(test, assert_instr(sli, N = 1))]
24968#[rustc_legacy_const_generics(2)]
24969#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24970pub fn vsliq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
24971    static_assert_uimm_bits!(N, 3);
24972    unsafe { transmute(vsliq_n_s8::<N>(transmute(a), transmute(b))) }
24973}
24974#[doc = "Shift Left and Insert (immediate)"]
24975#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u16)"]
24976#[inline(always)]
24977#[target_feature(enable = "neon")]
24978#[cfg_attr(test, assert_instr(sli, N = 1))]
24979#[rustc_legacy_const_generics(2)]
24980#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24981pub fn vsli_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
24982    static_assert_uimm_bits!(N, 4);
24983    unsafe { transmute(vsli_n_s16::<N>(transmute(a), transmute(b))) }
24984}
24985#[doc = "Shift Left and Insert (immediate)"]
24986#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u16)"]
24987#[inline(always)]
24988#[target_feature(enable = "neon")]
24989#[cfg_attr(test, assert_instr(sli, N = 1))]
24990#[rustc_legacy_const_generics(2)]
24991#[stable(feature = "neon_intrinsics", since = "1.59.0")]
24992pub fn vsliq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
24993    static_assert_uimm_bits!(N, 4);
24994    unsafe { transmute(vsliq_n_s16::<N>(transmute(a), transmute(b))) }
24995}
24996#[doc = "Shift Left and Insert (immediate)"]
24997#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u32)"]
24998#[inline(always)]
24999#[target_feature(enable = "neon")]
25000#[cfg_attr(test, assert_instr(sli, N = 1))]
25001#[rustc_legacy_const_generics(2)]
25002#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25003pub fn vsli_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
25004    static_assert!(N >= 0 && N <= 31);
25005    unsafe { transmute(vsli_n_s32::<N>(transmute(a), transmute(b))) }
25006}
25007#[doc = "Shift Left and Insert (immediate)"]
25008#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u32)"]
25009#[inline(always)]
25010#[target_feature(enable = "neon")]
25011#[cfg_attr(test, assert_instr(sli, N = 1))]
25012#[rustc_legacy_const_generics(2)]
25013#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25014pub fn vsliq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
25015    static_assert!(N >= 0 && N <= 31);
25016    unsafe { transmute(vsliq_n_s32::<N>(transmute(a), transmute(b))) }
25017}
25018#[doc = "Shift Left and Insert (immediate)"]
25019#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u64)"]
25020#[inline(always)]
25021#[target_feature(enable = "neon")]
25022#[cfg_attr(test, assert_instr(sli, N = 1))]
25023#[rustc_legacy_const_generics(2)]
25024#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25025pub fn vsli_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
25026    static_assert!(N >= 0 && N <= 63);
25027    unsafe { transmute(vsli_n_s64::<N>(transmute(a), transmute(b))) }
25028}
25029#[doc = "Shift Left and Insert (immediate)"]
25030#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u64)"]
25031#[inline(always)]
25032#[target_feature(enable = "neon")]
25033#[cfg_attr(test, assert_instr(sli, N = 1))]
25034#[rustc_legacy_const_generics(2)]
25035#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25036pub fn vsliq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
25037    static_assert!(N >= 0 && N <= 63);
25038    unsafe { transmute(vsliq_n_s64::<N>(transmute(a), transmute(b))) }
25039}
25040#[doc = "Shift Left and Insert (immediate)"]
25041#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p8)"]
25042#[inline(always)]
25043#[target_feature(enable = "neon")]
25044#[cfg_attr(test, assert_instr(sli, N = 1))]
25045#[rustc_legacy_const_generics(2)]
25046#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25047pub fn vsli_n_p8<const N: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
25048    static_assert_uimm_bits!(N, 3);
25049    unsafe { transmute(vsli_n_s8::<N>(transmute(a), transmute(b))) }
25050}
25051#[doc = "Shift Left and Insert (immediate)"]
25052#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p8)"]
25053#[inline(always)]
25054#[target_feature(enable = "neon")]
25055#[cfg_attr(test, assert_instr(sli, N = 1))]
25056#[rustc_legacy_const_generics(2)]
25057#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25058pub fn vsliq_n_p8<const N: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
25059    static_assert_uimm_bits!(N, 3);
25060    unsafe { transmute(vsliq_n_s8::<N>(transmute(a), transmute(b))) }
25061}
25062#[doc = "Shift Left and Insert (immediate)"]
25063#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p16)"]
25064#[inline(always)]
25065#[target_feature(enable = "neon")]
25066#[cfg_attr(test, assert_instr(sli, N = 1))]
25067#[rustc_legacy_const_generics(2)]
25068#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25069pub fn vsli_n_p16<const N: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
25070    static_assert_uimm_bits!(N, 4);
25071    unsafe { transmute(vsli_n_s16::<N>(transmute(a), transmute(b))) }
25072}
25073#[doc = "Shift Left and Insert (immediate)"]
25074#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p16)"]
25075#[inline(always)]
25076#[target_feature(enable = "neon")]
25077#[cfg_attr(test, assert_instr(sli, N = 1))]
25078#[rustc_legacy_const_generics(2)]
25079#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25080pub fn vsliq_n_p16<const N: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
25081    static_assert_uimm_bits!(N, 4);
25082    unsafe { transmute(vsliq_n_s16::<N>(transmute(a), transmute(b))) }
25083}
25084#[doc = "Shift Left and Insert (immediate)"]
25085#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p64)"]
25086#[inline(always)]
25087#[target_feature(enable = "neon,aes")]
25088#[cfg_attr(test, assert_instr(sli, N = 1))]
25089#[rustc_legacy_const_generics(2)]
25090#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25091pub fn vsli_n_p64<const N: i32>(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t {
25092    static_assert!(N >= 0 && N <= 63);
25093    unsafe { transmute(vsli_n_s64::<N>(transmute(a), transmute(b))) }
25094}
25095#[doc = "Shift Left and Insert (immediate)"]
25096#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p64)"]
25097#[inline(always)]
25098#[target_feature(enable = "neon,aes")]
25099#[cfg_attr(test, assert_instr(sli, N = 1))]
25100#[rustc_legacy_const_generics(2)]
25101#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25102pub fn vsliq_n_p64<const N: i32>(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
25103    static_assert!(N >= 0 && N <= 63);
25104    unsafe { transmute(vsliq_n_s64::<N>(transmute(a), transmute(b))) }
25105}
25106#[doc = "Shift left and insert"]
25107#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vslid_n_s64)"]
25108#[inline(always)]
25109#[target_feature(enable = "neon")]
25110#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25111#[rustc_legacy_const_generics(2)]
25112#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sli, N = 2))]
25113pub fn vslid_n_s64<const N: i32>(a: i64, b: i64) -> i64 {
25114    static_assert!(N >= 0 && N <= 63);
25115    unsafe { transmute(vsli_n_s64::<N>(transmute(a), transmute(b))) }
25116}
25117#[doc = "Shift left and insert"]
25118#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vslid_n_u64)"]
25119#[inline(always)]
25120#[target_feature(enable = "neon")]
25121#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25122#[rustc_legacy_const_generics(2)]
25123#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sli, N = 2))]
25124pub fn vslid_n_u64<const N: i32>(a: u64, b: u64) -> u64 {
25125    static_assert!(N >= 0 && N <= 63);
25126    unsafe { transmute(vsli_n_u64::<N>(transmute(a), transmute(b))) }
25127}
25128#[doc = "SM3PARTW1"]
25129#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3partw1q_u32)"]
25130#[inline(always)]
25131#[target_feature(enable = "neon,sm4")]
25132#[cfg_attr(test, assert_instr(sm3partw1))]
25133#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
25134pub fn vsm3partw1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
25135    unsafe extern "unadjusted" {
25136        #[cfg_attr(
25137            any(target_arch = "aarch64", target_arch = "arm64ec"),
25138            link_name = "llvm.aarch64.crypto.sm3partw1"
25139        )]
25140        fn _vsm3partw1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
25141    }
25142    unsafe { _vsm3partw1q_u32(a, b, c) }
25143}
25144#[doc = "SM3PARTW2"]
25145#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3partw2q_u32)"]
25146#[inline(always)]
25147#[target_feature(enable = "neon,sm4")]
25148#[cfg_attr(test, assert_instr(sm3partw2))]
25149#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
25150pub fn vsm3partw2q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
25151    unsafe extern "unadjusted" {
25152        #[cfg_attr(
25153            any(target_arch = "aarch64", target_arch = "arm64ec"),
25154            link_name = "llvm.aarch64.crypto.sm3partw2"
25155        )]
25156        fn _vsm3partw2q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
25157    }
25158    unsafe { _vsm3partw2q_u32(a, b, c) }
25159}
25160#[doc = "SM3SS1"]
25161#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3ss1q_u32)"]
25162#[inline(always)]
25163#[target_feature(enable = "neon,sm4")]
25164#[cfg_attr(test, assert_instr(sm3ss1))]
25165#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
25166pub fn vsm3ss1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
25167    unsafe extern "unadjusted" {
25168        #[cfg_attr(
25169            any(target_arch = "aarch64", target_arch = "arm64ec"),
25170            link_name = "llvm.aarch64.crypto.sm3ss1"
25171        )]
25172        fn _vsm3ss1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t;
25173    }
25174    unsafe { _vsm3ss1q_u32(a, b, c) }
25175}
25176#[doc = "SM3TT1A"]
25177#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt1aq_u32)"]
25178#[inline(always)]
25179#[target_feature(enable = "neon,sm4")]
25180#[cfg_attr(test, assert_instr(sm3tt1a, IMM2 = 0))]
25181#[rustc_legacy_const_generics(3)]
25182#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
25183pub fn vsm3tt1aq_u32<const IMM2: i32>(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
25184    static_assert_uimm_bits!(IMM2, 2);
25185    unsafe extern "unadjusted" {
25186        #[cfg_attr(
25187            any(target_arch = "aarch64", target_arch = "arm64ec"),
25188            link_name = "llvm.aarch64.crypto.sm3tt1a"
25189        )]
25190        fn _vsm3tt1aq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, n: i64) -> uint32x4_t;
25191    }
25192    unsafe { _vsm3tt1aq_u32(a, b, c, IMM2 as i64) }
25193}
25194#[doc = "SM3TT1B"]
25195#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt1bq_u32)"]
25196#[inline(always)]
25197#[target_feature(enable = "neon,sm4")]
25198#[cfg_attr(test, assert_instr(sm3tt1b, IMM2 = 0))]
25199#[rustc_legacy_const_generics(3)]
25200#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
25201pub fn vsm3tt1bq_u32<const IMM2: i32>(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
25202    static_assert_uimm_bits!(IMM2, 2);
25203    unsafe extern "unadjusted" {
25204        #[cfg_attr(
25205            any(target_arch = "aarch64", target_arch = "arm64ec"),
25206            link_name = "llvm.aarch64.crypto.sm3tt1b"
25207        )]
25208        fn _vsm3tt1bq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, n: i64) -> uint32x4_t;
25209    }
25210    unsafe { _vsm3tt1bq_u32(a, b, c, IMM2 as i64) }
25211}
25212#[doc = "SM3TT2A"]
25213#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt2aq_u32)"]
25214#[inline(always)]
25215#[target_feature(enable = "neon,sm4")]
25216#[cfg_attr(test, assert_instr(sm3tt2a, IMM2 = 0))]
25217#[rustc_legacy_const_generics(3)]
25218#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
25219pub fn vsm3tt2aq_u32<const IMM2: i32>(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
25220    static_assert_uimm_bits!(IMM2, 2);
25221    unsafe extern "unadjusted" {
25222        #[cfg_attr(
25223            any(target_arch = "aarch64", target_arch = "arm64ec"),
25224            link_name = "llvm.aarch64.crypto.sm3tt2a"
25225        )]
25226        fn _vsm3tt2aq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, n: i64) -> uint32x4_t;
25227    }
25228    unsafe { _vsm3tt2aq_u32(a, b, c, IMM2 as i64) }
25229}
25230#[doc = "SM3TT2B"]
25231#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt2bq_u32)"]
25232#[inline(always)]
25233#[target_feature(enable = "neon,sm4")]
25234#[cfg_attr(test, assert_instr(sm3tt2b, IMM2 = 0))]
25235#[rustc_legacy_const_generics(3)]
25236#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
25237pub fn vsm3tt2bq_u32<const IMM2: i32>(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
25238    static_assert_uimm_bits!(IMM2, 2);
25239    unsafe extern "unadjusted" {
25240        #[cfg_attr(
25241            any(target_arch = "aarch64", target_arch = "arm64ec"),
25242            link_name = "llvm.aarch64.crypto.sm3tt2b"
25243        )]
25244        fn _vsm3tt2bq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t, n: i64) -> uint32x4_t;
25245    }
25246    unsafe { _vsm3tt2bq_u32(a, b, c, IMM2 as i64) }
25247}
25248#[doc = "SM4 key"]
25249#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm4ekeyq_u32)"]
25250#[inline(always)]
25251#[target_feature(enable = "neon,sm4")]
25252#[cfg_attr(test, assert_instr(sm4ekey))]
25253#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
25254pub fn vsm4ekeyq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
25255    unsafe extern "unadjusted" {
25256        #[cfg_attr(
25257            any(target_arch = "aarch64", target_arch = "arm64ec"),
25258            link_name = "llvm.aarch64.crypto.sm4ekey"
25259        )]
25260        fn _vsm4ekeyq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t;
25261    }
25262    unsafe { _vsm4ekeyq_u32(a, b) }
25263}
25264#[doc = "SM4 encode"]
25265#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm4eq_u32)"]
25266#[inline(always)]
25267#[target_feature(enable = "neon,sm4")]
25268#[cfg_attr(test, assert_instr(sm4e))]
25269#[unstable(feature = "stdarch_neon_sm4", issue = "117226")]
25270pub fn vsm4eq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
25271    unsafe extern "unadjusted" {
25272        #[cfg_attr(
25273            any(target_arch = "aarch64", target_arch = "arm64ec"),
25274            link_name = "llvm.aarch64.crypto.sm4e"
25275        )]
25276        fn _vsm4eq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t;
25277    }
25278    unsafe { _vsm4eq_u32(a, b) }
25279}
25280#[doc = "Unsigned saturating Accumulate of Signed value."]
25281#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u8)"]
25282#[inline(always)]
25283#[target_feature(enable = "neon")]
25284#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25285#[cfg_attr(test, assert_instr(usqadd))]
25286pub fn vsqadd_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t {
25287    unsafe extern "unadjusted" {
25288        #[cfg_attr(
25289            any(target_arch = "aarch64", target_arch = "arm64ec"),
25290            link_name = "llvm.aarch64.neon.usqadd.v8i8"
25291        )]
25292        fn _vsqadd_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t;
25293    }
25294    unsafe { _vsqadd_u8(a, b) }
25295}
25296#[doc = "Unsigned saturating Accumulate of Signed value."]
25297#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u8)"]
25298#[inline(always)]
25299#[target_feature(enable = "neon")]
25300#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25301#[cfg_attr(test, assert_instr(usqadd))]
25302pub fn vsqaddq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t {
25303    unsafe extern "unadjusted" {
25304        #[cfg_attr(
25305            any(target_arch = "aarch64", target_arch = "arm64ec"),
25306            link_name = "llvm.aarch64.neon.usqadd.v16i8"
25307        )]
25308        fn _vsqaddq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t;
25309    }
25310    unsafe { _vsqaddq_u8(a, b) }
25311}
25312#[doc = "Unsigned saturating Accumulate of Signed value."]
25313#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u16)"]
25314#[inline(always)]
25315#[target_feature(enable = "neon")]
25316#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25317#[cfg_attr(test, assert_instr(usqadd))]
25318pub fn vsqadd_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t {
25319    unsafe extern "unadjusted" {
25320        #[cfg_attr(
25321            any(target_arch = "aarch64", target_arch = "arm64ec"),
25322            link_name = "llvm.aarch64.neon.usqadd.v4i16"
25323        )]
25324        fn _vsqadd_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t;
25325    }
25326    unsafe { _vsqadd_u16(a, b) }
25327}
25328#[doc = "Unsigned saturating Accumulate of Signed value."]
25329#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u16)"]
25330#[inline(always)]
25331#[target_feature(enable = "neon")]
25332#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25333#[cfg_attr(test, assert_instr(usqadd))]
25334pub fn vsqaddq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t {
25335    unsafe extern "unadjusted" {
25336        #[cfg_attr(
25337            any(target_arch = "aarch64", target_arch = "arm64ec"),
25338            link_name = "llvm.aarch64.neon.usqadd.v8i16"
25339        )]
25340        fn _vsqaddq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t;
25341    }
25342    unsafe { _vsqaddq_u16(a, b) }
25343}
25344#[doc = "Unsigned saturating Accumulate of Signed value."]
25345#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u32)"]
25346#[inline(always)]
25347#[target_feature(enable = "neon")]
25348#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25349#[cfg_attr(test, assert_instr(usqadd))]
25350pub fn vsqadd_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t {
25351    unsafe extern "unadjusted" {
25352        #[cfg_attr(
25353            any(target_arch = "aarch64", target_arch = "arm64ec"),
25354            link_name = "llvm.aarch64.neon.usqadd.v2i32"
25355        )]
25356        fn _vsqadd_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t;
25357    }
25358    unsafe { _vsqadd_u32(a, b) }
25359}
25360#[doc = "Unsigned saturating Accumulate of Signed value."]
25361#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u32)"]
25362#[inline(always)]
25363#[target_feature(enable = "neon")]
25364#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25365#[cfg_attr(test, assert_instr(usqadd))]
25366pub fn vsqaddq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t {
25367    unsafe extern "unadjusted" {
25368        #[cfg_attr(
25369            any(target_arch = "aarch64", target_arch = "arm64ec"),
25370            link_name = "llvm.aarch64.neon.usqadd.v4i32"
25371        )]
25372        fn _vsqaddq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t;
25373    }
25374    unsafe { _vsqaddq_u32(a, b) }
25375}
25376#[doc = "Unsigned saturating Accumulate of Signed value."]
25377#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u64)"]
25378#[inline(always)]
25379#[target_feature(enable = "neon")]
25380#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25381#[cfg_attr(test, assert_instr(usqadd))]
25382pub fn vsqadd_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t {
25383    unsafe extern "unadjusted" {
25384        #[cfg_attr(
25385            any(target_arch = "aarch64", target_arch = "arm64ec"),
25386            link_name = "llvm.aarch64.neon.usqadd.v1i64"
25387        )]
25388        fn _vsqadd_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t;
25389    }
25390    unsafe { _vsqadd_u64(a, b) }
25391}
25392#[doc = "Unsigned saturating Accumulate of Signed value."]
25393#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u64)"]
25394#[inline(always)]
25395#[target_feature(enable = "neon")]
25396#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25397#[cfg_attr(test, assert_instr(usqadd))]
25398pub fn vsqaddq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t {
25399    unsafe extern "unadjusted" {
25400        #[cfg_attr(
25401            any(target_arch = "aarch64", target_arch = "arm64ec"),
25402            link_name = "llvm.aarch64.neon.usqadd.v2i64"
25403        )]
25404        fn _vsqaddq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t;
25405    }
25406    unsafe { _vsqaddq_u64(a, b) }
25407}
25408#[doc = "Unsigned saturating accumulate of signed value"]
25409#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddb_u8)"]
25410#[inline(always)]
25411#[target_feature(enable = "neon")]
25412#[cfg_attr(test, assert_instr(usqadd))]
25413#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25414pub fn vsqaddb_u8(a: u8, b: i8) -> u8 {
25415    unsafe { simd_extract!(vsqadd_u8(vdup_n_u8(a), vdup_n_s8(b)), 0) }
25416}
25417#[doc = "Unsigned saturating accumulate of signed value"]
25418#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddh_u16)"]
25419#[inline(always)]
25420#[target_feature(enable = "neon")]
25421#[cfg_attr(test, assert_instr(usqadd))]
25422#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25423pub fn vsqaddh_u16(a: u16, b: i16) -> u16 {
25424    unsafe { simd_extract!(vsqadd_u16(vdup_n_u16(a), vdup_n_s16(b)), 0) }
25425}
25426#[doc = "Unsigned saturating accumulate of signed value"]
25427#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddd_u64)"]
25428#[inline(always)]
25429#[target_feature(enable = "neon")]
25430#[cfg_attr(test, assert_instr(usqadd))]
25431#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25432pub fn vsqaddd_u64(a: u64, b: i64) -> u64 {
25433    unsafe extern "unadjusted" {
25434        #[cfg_attr(
25435            any(target_arch = "aarch64", target_arch = "arm64ec"),
25436            link_name = "llvm.aarch64.neon.usqadd.i64"
25437        )]
25438        fn _vsqaddd_u64(a: u64, b: i64) -> u64;
25439    }
25440    unsafe { _vsqaddd_u64(a, b) }
25441}
25442#[doc = "Unsigned saturating accumulate of signed value"]
25443#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadds_u32)"]
25444#[inline(always)]
25445#[target_feature(enable = "neon")]
25446#[cfg_attr(test, assert_instr(usqadd))]
25447#[stable(feature = "neon_intrinsics", since = "1.59.0")]
25448pub fn vsqadds_u32(a: u32, b: i32) -> u32 {
25449    unsafe extern "unadjusted" {
25450        #[cfg_attr(
25451            any(target_arch = "aarch64", target_arch = "arm64ec"),
25452            link_name = "llvm.aarch64.neon.usqadd.i32"
25453        )]
25454        fn _vsqadds_u32(a: u32, b: i32) -> u32;
25455    }
25456    unsafe { _vsqadds_u32(a, b) }
25457}
// Lane-wise square roots: all six variants below lower to a single FSQRT via
// the portable `simd_fsqrt` intrinsic; they differ only in element type/count.
#[doc = "Calculates the square root of each lane."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrt_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fsqrt))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vsqrt_f16(a: float16x4_t) -> float16x4_t {
    // SAFETY: `simd_fsqrt` is a pure, lane-wise operation on a value type.
    unsafe { simd_fsqrt(a) }
}
#[doc = "Calculates the square root of each lane."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrtq_f16)"]
#[inline(always)]
#[cfg_attr(test, assert_instr(fsqrt))]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
pub fn vsqrtq_f16(a: float16x8_t) -> float16x8_t {
    // SAFETY: pure lane-wise computation, no memory access.
    unsafe { simd_fsqrt(a) }
}
#[doc = "Calculates the square root of each lane."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrt_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fsqrt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsqrt_f32(a: float32x2_t) -> float32x2_t {
    // SAFETY: pure lane-wise computation, no memory access.
    unsafe { simd_fsqrt(a) }
}
#[doc = "Calculates the square root of each lane."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrtq_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fsqrt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsqrtq_f32(a: float32x4_t) -> float32x4_t {
    // SAFETY: pure lane-wise computation, no memory access.
    unsafe { simd_fsqrt(a) }
}
#[doc = "Calculates the square root of each lane."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrt_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fsqrt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsqrt_f64(a: float64x1_t) -> float64x1_t {
    // SAFETY: pure lane-wise computation, no memory access.
    unsafe { simd_fsqrt(a) }
}
#[doc = "Calculates the square root of each lane."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrtq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(fsqrt))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsqrtq_f64(a: float64x2_t) -> float64x2_t {
    // SAFETY: pure lane-wise computation, no memory access.
    unsafe { simd_fsqrt(a) }
}
// NOTE(review): the generated summary previously read "Floating-point round to
// integral, using current rounding mode" — a copy/paste from the vrndh spec.
// This intrinsic computes a square root (`sqrtf16`, asserted as FSQRT below);
// summary corrected accordingly. Fix belongs in `crates/stdarch-gen-arm/spec/`.
#[doc = "Floating-point square root"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrth_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fsqrt))]
pub fn vsqrth_f16(a: f16) -> f16 {
    // Scalar half-precision square root; delegates to the `sqrtf16` primitive.
    sqrtf16(a)
}
// SRI (Shift Right and Insert) family, signed element types. Each variant
// shifts every lane of `b` right by the immediate `N` and inserts the shifted
// bits into `a`, leaving `a`'s top `N` bits per lane intact. `N` must lie in
// 1..=lane-width, enforced at compile time by `static_assert!`.
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    // Immediate must be 1..=8 (the 8-bit lane width).
    static_assert!(N >= 1 && N <= 8);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsri.v8i8"
        )]
        fn _vsri_n_s8(a: int8x8_t, b: int8x8_t, n: i32) -> int8x8_t;
    }
    // SAFETY: pure register computation; `N` was validated above.
    unsafe { _vsri_n_s8(a, b, N) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    // Immediate must be 1..=8 (the 8-bit lane width).
    static_assert!(N >= 1 && N <= 8);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsri.v16i8"
        )]
        fn _vsriq_n_s8(a: int8x16_t, b: int8x16_t, n: i32) -> int8x16_t;
    }
    // SAFETY: pure register computation; `N` was validated above.
    unsafe { _vsriq_n_s8(a, b, N) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    // Immediate must be 1..=16 (the 16-bit lane width).
    static_assert!(N >= 1 && N <= 16);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsri.v4i16"
        )]
        fn _vsri_n_s16(a: int16x4_t, b: int16x4_t, n: i32) -> int16x4_t;
    }
    // SAFETY: pure register computation; `N` was validated above.
    unsafe { _vsri_n_s16(a, b, N) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    // Immediate must be 1..=16 (the 16-bit lane width).
    static_assert!(N >= 1 && N <= 16);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsri.v8i16"
        )]
        fn _vsriq_n_s16(a: int16x8_t, b: int16x8_t, n: i32) -> int16x8_t;
    }
    // SAFETY: pure register computation; `N` was validated above.
    unsafe { _vsriq_n_s16(a, b, N) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    // Immediate must be 1..=32 (the 32-bit lane width).
    static_assert!(N >= 1 && N <= 32);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsri.v2i32"
        )]
        fn _vsri_n_s32(a: int32x2_t, b: int32x2_t, n: i32) -> int32x2_t;
    }
    // SAFETY: pure register computation; `N` was validated above.
    unsafe { _vsri_n_s32(a, b, N) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    // Immediate must be 1..=32 (the 32-bit lane width).
    static_assert!(N >= 1 && N <= 32);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsri.v4i32"
        )]
        fn _vsriq_n_s32(a: int32x4_t, b: int32x4_t, n: i32) -> int32x4_t;
    }
    // SAFETY: pure register computation; `N` was validated above.
    unsafe { _vsriq_n_s32(a, b, N) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_s64<const N: i32>(a: int64x1_t, b: int64x1_t) -> int64x1_t {
    // Immediate must be 1..=64 (the 64-bit lane width).
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsri.v1i64"
        )]
        fn _vsri_n_s64(a: int64x1_t, b: int64x1_t, n: i32) -> int64x1_t;
    }
    // SAFETY: pure register computation; `N` was validated above.
    unsafe { _vsri_n_s64(a, b, N) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    // Immediate must be 1..=64 (the 64-bit lane width).
    static_assert!(N >= 1 && N <= 64);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.vsri.v2i64"
        )]
        fn _vsriq_n_s64(a: int64x2_t, b: int64x2_t, n: i32) -> int64x2_t;
    }
    // SAFETY: pure register computation; `N` was validated above.
    unsafe { _vsriq_n_s64(a, b, N) }
}
// SRI family, unsigned and polynomial element types. SRI is a pure
// bit-manipulation, so these forward to the signed implementation of the same
// lane shape via `transmute`; the `static_assert!` is repeated here so the
// compile error points at the intrinsic the user actually called.
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    static_assert!(N >= 1 && N <= 8);
    // SAFETY: uint8x8_t and int8x8_t have identical size and layout.
    unsafe { transmute(vsri_n_s8::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    static_assert!(N >= 1 && N <= 8);
    // SAFETY: uint8x16_t and int8x16_t have identical size and layout.
    unsafe { transmute(vsriq_n_s8::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    static_assert!(N >= 1 && N <= 16);
    // SAFETY: uint16x4_t and int16x4_t have identical size and layout.
    unsafe { transmute(vsri_n_s16::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    static_assert!(N >= 1 && N <= 16);
    // SAFETY: uint16x8_t and int16x8_t have identical size and layout.
    unsafe { transmute(vsriq_n_s16::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    static_assert!(N >= 1 && N <= 32);
    // SAFETY: uint32x2_t and int32x2_t have identical size and layout.
    unsafe { transmute(vsri_n_s32::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    static_assert!(N >= 1 && N <= 32);
    // SAFETY: uint32x4_t and int32x4_t have identical size and layout.
    unsafe { transmute(vsriq_n_s32::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    static_assert!(N >= 1 && N <= 64);
    // SAFETY: uint64x1_t and int64x1_t have identical size and layout.
    unsafe { transmute(vsri_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    static_assert!(N >= 1 && N <= 64);
    // SAFETY: uint64x2_t and int64x2_t have identical size and layout.
    unsafe { transmute(vsriq_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_p8<const N: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    static_assert!(N >= 1 && N <= 8);
    // SAFETY: poly8x8_t and int8x8_t have identical size and layout.
    unsafe { transmute(vsri_n_s8::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_p8<const N: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    static_assert!(N >= 1 && N <= 8);
    // SAFETY: poly8x16_t and int8x16_t have identical size and layout.
    unsafe { transmute(vsriq_n_s8::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_p16<const N: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    static_assert!(N >= 1 && N <= 16);
    // SAFETY: poly16x4_t and int16x4_t have identical size and layout.
    unsafe { transmute(vsri_n_s16::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_p16<const N: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    static_assert!(N >= 1 && N <= 16);
    // SAFETY: poly16x8_t and int16x8_t have identical size and layout.
    unsafe { transmute(vsriq_n_s16::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p64)"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsri_n_p64<const N: i32>(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t {
    static_assert!(N >= 1 && N <= 64);
    // SAFETY: poly64x1_t and int64x1_t have identical size and layout.
    unsafe { transmute(vsri_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift Right and Insert (immediate)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p64)"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(sri, N = 1))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vsriq_n_p64<const N: i32>(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    static_assert!(N >= 1 && N <= 64);
    // SAFETY: poly64x2_t and int64x2_t have identical size and layout.
    unsafe { transmute(vsriq_n_s64::<N>(transmute(a), transmute(b))) }
}
// Scalar SRI: the 64-bit scalar is round-tripped through the 1-lane vector
// form so the same SRI instruction is used.
#[doc = "Shift right and insert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsrid_n_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sri, N = 2))]
pub fn vsrid_n_s64<const N: i32>(a: i64, b: i64) -> i64 {
    static_assert!(N >= 1 && N <= 64);
    // SAFETY: i64 and int64x1_t are both 8 bytes; transmute is a bit-cast.
    unsafe { transmute(vsri_n_s64::<N>(transmute(a), transmute(b))) }
}
#[doc = "Shift right and insert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsrid_n_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[rustc_legacy_const_generics(2)]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sri, N = 2))]
pub fn vsrid_n_u64<const N: i32>(a: u64, b: u64) -> u64 {
    static_assert!(N >= 1 && N <= 64);
    // SAFETY: u64 and uint64x1_t are both 8 bytes; transmute is a bit-cast.
    unsafe { transmute(vsri_n_u64::<N>(transmute(a), transmute(b))) }
}
// VST1 family: store one whole vector register to memory. All variants are a
// plain unaligned write (`write_unaligned`), which the test harness asserts
// lowers to a single STR. Callers must guarantee `ptr` is valid for writing
// the full vector; no alignment is required.
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub unsafe fn vst1_f16(ptr: *mut f16, a: float16x4_t) {
    // Unaligned store of the whole vector; caller guarantees `ptr` validity.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
pub unsafe fn vst1q_f16(ptr: *mut f16, a: float16x8_t) {
    // Unaligned store of the whole vector; caller guarantees `ptr` validity.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_f32(ptr: *mut f32, a: float32x2_t) {
    // Unaligned store of the whole vector; caller guarantees `ptr` validity.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_f32(ptr: *mut f32, a: float32x4_t) {
    // Unaligned store of the whole vector; caller guarantees `ptr` validity.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_f64(ptr: *mut f64, a: float64x1_t) {
    // Unaligned store of the whole vector; caller guarantees `ptr` validity.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_f64(ptr: *mut f64, a: float64x2_t) {
    // Unaligned store of the whole vector; caller guarantees `ptr` validity.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_s8(ptr: *mut i8, a: int8x8_t) {
    // Unaligned store of the whole vector; caller guarantees `ptr` validity.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_s8(ptr: *mut i8, a: int8x16_t) {
    // Unaligned store of the whole vector; caller guarantees `ptr` validity.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_s16(ptr: *mut i16, a: int16x4_t) {
    // Unaligned store of the whole vector; caller guarantees `ptr` validity.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_s16(ptr: *mut i16, a: int16x8_t) {
    // Unaligned store of the whole vector; caller guarantees `ptr` validity.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_s32(ptr: *mut i32, a: int32x2_t) {
    // Unaligned store of the whole vector; caller guarantees `ptr` validity.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_s32(ptr: *mut i32, a: int32x4_t) {
    // Unaligned store of the whole vector; caller guarantees `ptr` validity.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_s64(ptr: *mut i64, a: int64x1_t) {
    // Unaligned store of the whole vector; caller guarantees `ptr` validity.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_s64(ptr: *mut i64, a: int64x2_t) {
    // Unaligned store of the whole vector; caller guarantees `ptr` validity.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_u8(ptr: *mut u8, a: uint8x8_t) {
    // Unaligned store of the whole vector; caller guarantees `ptr` validity.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_u8(ptr: *mut u8, a: uint8x16_t) {
    // Unaligned store of the whole vector; caller guarantees `ptr` validity.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_u16(ptr: *mut u16, a: uint16x4_t) {
    // Unaligned store of the whole vector; caller guarantees `ptr` validity.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u16)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_u16(ptr: *mut u16, a: uint16x8_t) {
    // Unaligned store of the whole vector; caller guarantees `ptr` validity.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_u32(ptr: *mut u32, a: uint32x2_t) {
    // Unaligned store of the whole vector; caller guarantees `ptr` validity.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u32)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_u32(ptr: *mut u32, a: uint32x4_t) {
    // Unaligned store of the whole vector; caller guarantees `ptr` validity.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_u64(ptr: *mut u64, a: uint64x1_t) {
    // Unaligned store of the whole vector; caller guarantees `ptr` validity.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_u64(ptr: *mut u64, a: uint64x2_t) {
    // Unaligned store of the whole vector; caller guarantees `ptr` validity.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_p8(ptr: *mut p8, a: poly8x8_t) {
    // Unaligned store of the whole vector; caller guarantees `ptr` validity.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(str))]
#[allow(clippy::cast_ptr_alignment)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_p8(ptr: *mut p8, a: poly8x16_t) {
    // Unaligned store of the whole vector; caller guarantees `ptr` validity.
    crate::ptr::write_unaligned(ptr.cast(), a)
}
26134#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
26135#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p16)"]
26136#[doc = "## Safety"]
26137#[doc = "  * Neon intrinsic unsafe"]
26138#[inline(always)]
26139#[target_feature(enable = "neon")]
26140#[cfg_attr(test, assert_instr(str))]
26141#[allow(clippy::cast_ptr_alignment)]
26142#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26143pub unsafe fn vst1_p16(ptr: *mut p16, a: poly16x4_t) {
26144    crate::ptr::write_unaligned(ptr.cast(), a)
26145}
26146#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
26147#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p16)"]
26148#[doc = "## Safety"]
26149#[doc = "  * Neon intrinsic unsafe"]
26150#[inline(always)]
26151#[target_feature(enable = "neon")]
26152#[cfg_attr(test, assert_instr(str))]
26153#[allow(clippy::cast_ptr_alignment)]
26154#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26155pub unsafe fn vst1q_p16(ptr: *mut p16, a: poly16x8_t) {
26156    crate::ptr::write_unaligned(ptr.cast(), a)
26157}
26158#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
26159#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p64)"]
26160#[doc = "## Safety"]
26161#[doc = "  * Neon intrinsic unsafe"]
26162#[inline(always)]
26163#[target_feature(enable = "neon,aes")]
26164#[cfg_attr(test, assert_instr(str))]
26165#[allow(clippy::cast_ptr_alignment)]
26166#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26167pub unsafe fn vst1_p64(ptr: *mut p64, a: poly64x1_t) {
26168    crate::ptr::write_unaligned(ptr.cast(), a)
26169}
26170#[doc = "Store multiple single-element structures from one, two, three, or four registers."]
26171#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64)"]
26172#[doc = "## Safety"]
26173#[doc = "  * Neon intrinsic unsafe"]
26174#[inline(always)]
26175#[target_feature(enable = "neon,aes")]
26176#[cfg_attr(test, assert_instr(str))]
26177#[allow(clippy::cast_ptr_alignment)]
26178#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26179pub unsafe fn vst1q_p64(ptr: *mut p64, a: poly64x2_t) {
26180    crate::ptr::write_unaligned(ptr.cast(), a)
26181}
#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_f64_x2(a: *mut f64, b: float64x1x2_t) {
    // Multi-register st1 stores bind directly to the LLVM intrinsic; the
    // `unadjusted` ABI passes vector arguments exactly as LLVM expects.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st1x2.v1f64.p0"
        )]
        fn _vst1_f64_x2(a: float64x1_t, b: float64x1_t, ptr: *mut f64);
    }
    // The LLVM intrinsic takes the registers first and the pointer last,
    // the reverse of the public Arm intrinsic signature.
    _vst1_f64_x2(b.0, b.1, a)
}
#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x2)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_f64_x2(a: *mut f64, b: float64x2x2_t) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st1x2.v2f64.p0"
        )]
        fn _vst1q_f64_x2(a: float64x2_t, b: float64x2_t, ptr: *mut f64);
    }
    _vst1q_f64_x2(b.0, b.1, a)
}
#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64_x3)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_f64_x3(a: *mut f64, b: float64x1x3_t) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st1x3.v1f64.p0"
        )]
        fn _vst1_f64_x3(a: float64x1_t, b: float64x1_t, c: float64x1_t, ptr: *mut f64);
    }
    _vst1_f64_x3(b.0, b.1, b.2, a)
}
#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x3)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_f64_x3(a: *mut f64, b: float64x2x3_t) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st1x3.v2f64.p0"
        )]
        fn _vst1q_f64_x3(a: float64x2_t, b: float64x2_t, c: float64x2_t, ptr: *mut f64);
    }
    _vst1q_f64_x3(b.0, b.1, b.2, a)
}
#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64_x4)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_f64_x4(a: *mut f64, b: float64x1x4_t) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st1x4.v1f64.p0"
        )]
        fn _vst1_f64_x4(
            a: float64x1_t,
            b: float64x1_t,
            c: float64x1_t,
            d: float64x1_t,
            ptr: *mut f64,
        );
    }
    _vst1_f64_x4(b.0, b.1, b.2, b.3, a)
}
#[doc = "Store multiple single-element structures to one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x4)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st1))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_f64_x4(a: *mut f64, b: float64x2x4_t) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st1x4.v2f64.p0"
        )]
        fn _vst1q_f64_x4(
            a: float64x2_t,
            b: float64x2_t,
            c: float64x2_t,
            d: float64x2_t,
            ptr: *mut f64,
        );
    }
    _vst1q_f64_x4(b.0, b.1, b.2, b.3, a)
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1_t) {
    // A one-element vector has exactly one lane, so LANE must be 0; the
    // lane store then degenerates to a plain scalar store (hence `nop`
    // in the codegen test above — no extra instruction is needed).
    static_assert!(LANE == 0);
    *a = simd_extract!(b, LANE as u32);
}
#[doc = "Store multiple single-element structures from one, two, three, or four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst1q_lane_f64<const LANE: i32>(a: *mut f64, b: float64x2_t) {
    // Two lanes available, so LANE needs at most 1 bit (0 or 1).
    static_assert_uimm_bits!(LANE, 1);
    *a = simd_extract!(b, LANE as u32);
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st1))]
pub unsafe fn vst2_f64(a: *mut f64, b: float64x1x2_t) {
    // st2/st3 intrinsics bind to the LLVM intrinsic; registers first,
    // destination pointer last (`a as _` erases the element type to *mut i8).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2.v1f64.p0"
        )]
        fn _vst2_f64(a: float64x1_t, b: float64x1_t, ptr: *mut i8);
    }
    _vst2_f64(b.0, b.1, a as _)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1x2_t) {
    // One-element vectors: the only valid lane index is 0.
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2lane.v1f64.p0"
        )]
        fn _vst2_lane_f64(a: float64x1_t, b: float64x1_t, n: i64, ptr: *mut i8);
    }
    _vst2_lane_f64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2_lane_s64<const LANE: i32>(a: *mut i64, b: int64x1x2_t) {
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2lane.v1i64.p0"
        )]
        fn _vst2_lane_s64(a: int64x1_t, b: int64x1_t, n: i64, ptr: *mut i8);
    }
    _vst2_lane_s64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x1x2_t) {
    static_assert!(LANE == 0);
    // p64/u64 variants reuse the s64 implementation; the 64-bit vector
    // layouts are bit-identical, so a transmute of pointer and tuple is a
    // pure reinterpretation.
    vst2_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x1x2_t) {
    static_assert!(LANE == 0);
    vst2_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st2))]
pub unsafe fn vst2q_f64(a: *mut f64, b: float64x2x2_t) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2.v2f64.p0"
        )]
        fn _vst2q_f64(a: float64x2_t, b: float64x2_t, ptr: *mut i8);
    }
    _vst2q_f64(b.0, b.1, a as _)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st2))]
pub unsafe fn vst2q_s64(a: *mut i64, b: int64x2x2_t) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2.v2i64.p0"
        )]
        fn _vst2q_s64(a: int64x2_t, b: int64x2_t, ptr: *mut i8);
    }
    _vst2q_s64(b.0, b.1, a as _)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_f64<const LANE: i32>(a: *mut f64, b: float64x2x2_t) {
    // Two lanes: LANE must fit in 1 bit.
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2lane.v2f64.p0"
        )]
        fn _vst2q_lane_f64(a: float64x2_t, b: float64x2_t, n: i64, ptr: *mut i8);
    }
    _vst2q_lane_f64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_s8<const LANE: i32>(a: *mut i8, b: int8x16x2_t) {
    // Sixteen lanes: LANE must fit in 4 bits.
    static_assert_uimm_bits!(LANE, 4);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2lane.v16i8.p0"
        )]
        fn _vst2q_lane_s8(a: int8x16_t, b: int8x16_t, n: i64, ptr: *mut i8);
    }
    _vst2q_lane_s8(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_s64<const LANE: i32>(a: *mut i64, b: int64x2x2_t) {
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st2lane.v2i64.p0"
        )]
        fn _vst2q_lane_s64(a: int64x2_t, b: int64x2_t, n: i64, ptr: *mut i8);
    }
    _vst2q_lane_s64(b.0, b.1, LANE as i64, a as _)
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x2x2_t) {
    static_assert_uimm_bits!(LANE, 1);
    vst2q_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x16x2_t) {
    static_assert_uimm_bits!(LANE, 4);
    vst2q_lane_s8::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x2x2_t) {
    static_assert_uimm_bits!(LANE, 1);
    vst2q_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st2, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x16x2_t) {
    static_assert_uimm_bits!(LANE, 4);
    vst2q_lane_s8::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st2))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst2q_p64(a: *mut p64, b: poly64x2x2_t) {
    vst2q_s64(transmute(a), transmute(b))
}
#[doc = "Store multiple 2-element structures from two registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st2))]
pub unsafe fn vst2q_u64(a: *mut u64, b: uint64x2x2_t) {
    vst2q_s64(transmute(a), transmute(b))
}
26582#[doc = "Store multiple 3-element structures from three registers"]
26583#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_f64)"]
26584#[doc = "## Safety"]
26585#[doc = "  * Neon intrinsic unsafe"]
26586#[inline(always)]
26587#[target_feature(enable = "neon")]
26588#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26589#[cfg_attr(test, assert_instr(nop))]
26590pub unsafe fn vst3_f64(a: *mut f64, b: float64x1x3_t) {
26591    unsafe extern "unadjusted" {
26592        #[cfg_attr(
26593            any(target_arch = "aarch64", target_arch = "arm64ec"),
26594            link_name = "llvm.aarch64.neon.st3.v1f64.p0"
26595        )]
26596        fn _vst3_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t, ptr: *mut i8);
26597    }
26598    _vst3_f64(b.0, b.1, b.2, a as _)
26599}
26600#[doc = "Store multiple 3-element structures from three registers"]
26601#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_f64)"]
26602#[doc = "## Safety"]
26603#[doc = "  * Neon intrinsic unsafe"]
26604#[inline(always)]
26605#[target_feature(enable = "neon")]
26606#[cfg_attr(test, assert_instr(st3, LANE = 0))]
26607#[rustc_legacy_const_generics(2)]
26608#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26609pub unsafe fn vst3_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1x3_t) {
26610    static_assert!(LANE == 0);
26611    unsafe extern "unadjusted" {
26612        #[cfg_attr(
26613            any(target_arch = "aarch64", target_arch = "arm64ec"),
26614            link_name = "llvm.aarch64.neon.st3lane.v1f64.p0"
26615        )]
26616        fn _vst3_lane_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t, n: i64, ptr: *mut i8);
26617    }
26618    _vst3_lane_f64(b.0, b.1, b.2, LANE as i64, a as _)
26619}
26620#[doc = "Store multiple 3-element structures from three registers"]
26621#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s64)"]
26622#[doc = "## Safety"]
26623#[doc = "  * Neon intrinsic unsafe"]
26624#[inline(always)]
26625#[target_feature(enable = "neon")]
26626#[cfg_attr(test, assert_instr(st3, LANE = 0))]
26627#[rustc_legacy_const_generics(2)]
26628#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26629pub unsafe fn vst3_lane_s64<const LANE: i32>(a: *mut i64, b: int64x1x3_t) {
26630    static_assert!(LANE == 0);
26631    unsafe extern "unadjusted" {
26632        #[cfg_attr(
26633            any(target_arch = "aarch64", target_arch = "arm64ec"),
26634            link_name = "llvm.aarch64.neon.st3lane.v1i64.p0"
26635        )]
26636        fn _vst3_lane_s64(a: int64x1_t, b: int64x1_t, c: int64x1_t, n: i64, ptr: *mut i8);
26637    }
26638    _vst3_lane_s64(b.0, b.1, b.2, LANE as i64, a as _)
26639}
26640#[doc = "Store multiple 3-element structures from three registers"]
26641#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_p64)"]
26642#[doc = "## Safety"]
26643#[doc = "  * Neon intrinsic unsafe"]
26644#[inline(always)]
26645#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26646#[target_feature(enable = "neon,aes")]
26647#[cfg_attr(test, assert_instr(st3, LANE = 0))]
26648#[rustc_legacy_const_generics(2)]
26649pub unsafe fn vst3_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x1x3_t) {
26650    static_assert!(LANE == 0);
26651    vst3_lane_s64::<LANE>(transmute(a), transmute(b))
26652}
26653#[doc = "Store multiple 3-element structures from three registers"]
26654#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_u64)"]
26655#[doc = "## Safety"]
26656#[doc = "  * Neon intrinsic unsafe"]
26657#[inline(always)]
26658#[target_feature(enable = "neon")]
26659#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26660#[cfg_attr(test, assert_instr(st3, LANE = 0))]
26661#[rustc_legacy_const_generics(2)]
26662pub unsafe fn vst3_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x1x3_t) {
26663    static_assert!(LANE == 0);
26664    vst3_lane_s64::<LANE>(transmute(a), transmute(b))
26665}
26666#[doc = "Store multiple 3-element structures from three registers"]
26667#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_f64)"]
26668#[doc = "## Safety"]
26669#[doc = "  * Neon intrinsic unsafe"]
26670#[inline(always)]
26671#[target_feature(enable = "neon")]
26672#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26673#[cfg_attr(test, assert_instr(st3))]
26674pub unsafe fn vst3q_f64(a: *mut f64, b: float64x2x3_t) {
26675    unsafe extern "unadjusted" {
26676        #[cfg_attr(
26677            any(target_arch = "aarch64", target_arch = "arm64ec"),
26678            link_name = "llvm.aarch64.neon.st3.v2f64.p0"
26679        )]
26680        fn _vst3q_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t, ptr: *mut i8);
26681    }
26682    _vst3q_f64(b.0, b.1, b.2, a as _)
26683}
26684#[doc = "Store multiple 3-element structures from three registers"]
26685#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s64)"]
26686#[doc = "## Safety"]
26687#[doc = "  * Neon intrinsic unsafe"]
26688#[inline(always)]
26689#[target_feature(enable = "neon")]
26690#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26691#[cfg_attr(test, assert_instr(st3))]
26692pub unsafe fn vst3q_s64(a: *mut i64, b: int64x2x3_t) {
26693    unsafe extern "unadjusted" {
26694        #[cfg_attr(
26695            any(target_arch = "aarch64", target_arch = "arm64ec"),
26696            link_name = "llvm.aarch64.neon.st3.v2i64.p0"
26697        )]
26698        fn _vst3q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t, ptr: *mut i8);
26699    }
26700    _vst3q_s64(b.0, b.1, b.2, a as _)
26701}
26702#[doc = "Store multiple 3-element structures from three registers"]
26703#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_f64)"]
26704#[doc = "## Safety"]
26705#[doc = "  * Neon intrinsic unsafe"]
26706#[inline(always)]
26707#[target_feature(enable = "neon")]
26708#[cfg_attr(test, assert_instr(st3, LANE = 0))]
26709#[rustc_legacy_const_generics(2)]
26710#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26711pub unsafe fn vst3q_lane_f64<const LANE: i32>(a: *mut f64, b: float64x2x3_t) {
26712    static_assert_uimm_bits!(LANE, 1);
26713    unsafe extern "unadjusted" {
26714        #[cfg_attr(
26715            any(target_arch = "aarch64", target_arch = "arm64ec"),
26716            link_name = "llvm.aarch64.neon.st3lane.v2f64.p0"
26717        )]
26718        fn _vst3q_lane_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t, n: i64, ptr: *mut i8);
26719    }
26720    _vst3q_lane_f64(b.0, b.1, b.2, LANE as i64, a as _)
26721}
26722#[doc = "Store multiple 3-element structures from three registers"]
26723#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s8)"]
26724#[doc = "## Safety"]
26725#[doc = "  * Neon intrinsic unsafe"]
26726#[inline(always)]
26727#[target_feature(enable = "neon")]
26728#[cfg_attr(test, assert_instr(st3, LANE = 0))]
26729#[rustc_legacy_const_generics(2)]
26730#[stable(feature = "neon_intrinsics", since = "1.59.0")]
26731pub unsafe fn vst3q_lane_s8<const LANE: i32>(a: *mut i8, b: int8x16x3_t) {
26732    static_assert_uimm_bits!(LANE, 4);
26733    unsafe extern "unadjusted" {
26734        #[cfg_attr(
26735            any(target_arch = "aarch64", target_arch = "arm64ec"),
26736            link_name = "llvm.aarch64.neon.st3lane.v16i8.p0"
26737        )]
26738        fn _vst3q_lane_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t, n: i64, ptr: *mut i8);
26739    }
26740    _vst3q_lane_s8(b.0, b.1, b.2, LANE as i64, a as _)
26741}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst3q_lane_s64<const LANE: i32>(a: *mut i64, b: int64x2x3_t) {
    // Two 64-bit lanes per register: LANE must fit in 1 unsigned bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st3lane.v2i64.p0"
        )]
        fn _vst3q_lane_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t, n: i64, ptr: *mut i8);
    }
    // SAFETY: caller must guarantee `a` is valid for writing the stored elements.
    _vst3q_lane_s64(b.0, b.1, b.2, LANE as i64, a as _)
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst3q_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x2x3_t) {
    static_assert_uimm_bits!(LANE, 1);
    // poly64 shares the bit layout of i64: reinterpret and delegate to the signed variant.
    vst3q_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst3q_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x16x3_t) {
    static_assert_uimm_bits!(LANE, 4);
    // u8 shares the bit layout of i8: reinterpret and delegate to the signed variant.
    vst3q_lane_s8::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst3q_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x2x3_t) {
    static_assert_uimm_bits!(LANE, 1);
    // u64 shares the bit layout of i64: reinterpret and delegate to the signed variant.
    vst3q_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst3q_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x16x3_t) {
    static_assert_uimm_bits!(LANE, 4);
    // poly8 shares the bit layout of i8: reinterpret and delegate to the signed variant.
    vst3q_lane_s8::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st3))]
pub unsafe fn vst3q_p64(a: *mut p64, b: poly64x2x3_t) {
    // poly64 shares the bit layout of i64: reinterpret and delegate to the signed variant.
    vst3q_s64(transmute(a), transmute(b))
}
#[doc = "Store multiple 3-element structures from three registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st3))]
pub unsafe fn vst3q_u64(a: *mut u64, b: uint64x2x3_t) {
    // u64 shares the bit layout of i64: reinterpret and delegate to the signed variant.
    vst3q_s64(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(nop))]
pub unsafe fn vst4_f64(a: *mut f64, b: float64x1x4_t) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4.v1f64.p0"
        )]
        fn _vst4_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t, d: float64x1_t, ptr: *mut i8);
    }
    // SAFETY: caller must guarantee `a` is valid for writing the four interleaved values.
    _vst4_f64(b.0, b.1, b.2, b.3, a as _)
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4_lane_f64<const LANE: i32>(a: *mut f64, b: float64x1x4_t) {
    // Single-lane vectors: only lane 0 exists.
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4lane.v1f64.p0"
        )]
        fn _vst4_lane_f64(
            a: float64x1_t,
            b: float64x1_t,
            c: float64x1_t,
            d: float64x1_t,
            n: i64,
            ptr: *mut i8,
        );
    }
    // SAFETY: caller must guarantee `a` is valid for writing the stored elements.
    _vst4_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4_lane_s64<const LANE: i32>(a: *mut i64, b: int64x1x4_t) {
    // Single-lane vectors: only lane 0 exists.
    static_assert!(LANE == 0);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4lane.v1i64.p0"
        )]
        fn _vst4_lane_s64(
            a: int64x1_t,
            b: int64x1_t,
            c: int64x1_t,
            d: int64x1_t,
            n: i64,
            ptr: *mut i8,
        );
    }
    // SAFETY: caller must guarantee `a` is valid for writing the stored elements.
    _vst4_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst4_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x1x4_t) {
    static_assert!(LANE == 0);
    // poly64 shares the bit layout of i64: reinterpret and delegate to the signed variant.
    vst4_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst4_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x1x4_t) {
    static_assert!(LANE == 0);
    // u64 shares the bit layout of i64: reinterpret and delegate to the signed variant.
    vst4_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st4))]
pub unsafe fn vst4q_f64(a: *mut f64, b: float64x2x4_t) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4.v2f64.p0"
        )]
        fn _vst4q_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t, d: float64x2_t, ptr: *mut i8);
    }
    // SAFETY: caller must guarantee `a` is valid for writing the eight interleaved values.
    _vst4q_f64(b.0, b.1, b.2, b.3, a as _)
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st4))]
pub unsafe fn vst4q_s64(a: *mut i64, b: int64x2x4_t) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4.v2i64.p0"
        )]
        fn _vst4q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t, d: int64x2_t, ptr: *mut i8);
    }
    // SAFETY: caller must guarantee `a` is valid for writing the eight interleaved values.
    _vst4q_s64(b.0, b.1, b.2, b.3, a as _)
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_f64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4q_lane_f64<const LANE: i32>(a: *mut f64, b: float64x2x4_t) {
    // Two 64-bit lanes per register: LANE must fit in 1 unsigned bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4lane.v2f64.p0"
        )]
        fn _vst4q_lane_f64(
            a: float64x2_t,
            b: float64x2_t,
            c: float64x2_t,
            d: float64x2_t,
            n: i64,
            ptr: *mut i8,
        );
    }
    // SAFETY: caller must guarantee `a` is valid for writing the stored elements.
    _vst4q_lane_f64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4q_lane_s8<const LANE: i32>(a: *mut i8, b: int8x16x4_t) {
    // 16 byte lanes per register: LANE must fit in 4 unsigned bits (0..=15).
    static_assert_uimm_bits!(LANE, 4);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4lane.v16i8.p0"
        )]
        fn _vst4q_lane_s8(
            a: int8x16_t,
            b: int8x16_t,
            c: int8x16_t,
            d: int8x16_t,
            n: i64,
            ptr: *mut i8,
        );
    }
    // SAFETY: caller must guarantee `a` is valid for writing the stored elements.
    _vst4q_lane_s8(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub unsafe fn vst4q_lane_s64<const LANE: i32>(a: *mut i64, b: int64x2x4_t) {
    // Two 64-bit lanes per register: LANE must fit in 1 unsigned bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.st4lane.v2i64.p0"
        )]
        fn _vst4q_lane_s64(
            a: int64x2_t,
            b: int64x2_t,
            c: int64x2_t,
            d: int64x2_t,
            n: i64,
            ptr: *mut i8,
        );
    }
    // SAFETY: caller must guarantee `a` is valid for writing the stored elements.
    _vst4q_lane_s64(b.0, b.1, b.2, b.3, LANE as i64, a as _)
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst4q_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x2x4_t) {
    static_assert_uimm_bits!(LANE, 1);
    // poly64 shares the bit layout of i64: reinterpret and delegate to the signed variant.
    vst4q_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst4q_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x16x4_t) {
    static_assert_uimm_bits!(LANE, 4);
    // u8 shares the bit layout of i8: reinterpret and delegate to the signed variant.
    vst4q_lane_s8::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst4q_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x2x4_t) {
    static_assert_uimm_bits!(LANE, 1);
    // u64 shares the bit layout of i64: reinterpret and delegate to the signed variant.
    vst4q_lane_s64::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_p8)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
pub unsafe fn vst4q_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x16x4_t) {
    static_assert_uimm_bits!(LANE, 4);
    // poly8 shares the bit layout of i8: reinterpret and delegate to the signed variant.
    vst4q_lane_s8::<LANE>(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_p64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[target_feature(enable = "neon,aes")]
#[cfg_attr(test, assert_instr(st4))]
pub unsafe fn vst4q_p64(a: *mut p64, b: poly64x2x4_t) {
    // poly64 shares the bit layout of i64: reinterpret and delegate to the signed variant.
    vst4q_s64(transmute(a), transmute(b))
}
#[doc = "Store multiple 4-element structures from four registers"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_u64)"]
#[doc = "## Safety"]
#[doc = "  * Neon intrinsic unsafe"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(st4))]
pub unsafe fn vst4q_u64(a: *mut u64, b: uint64x2x4_t) {
    // u64 shares the bit layout of i64: reinterpret and delegate to the signed variant.
    vst4q_s64(transmute(a), transmute(b))
}
#[doc = "Store-Release a single-element structure from one lane of one register."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vstl1_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(stl1, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
#[cfg(target_has_atomic = "64")]
pub fn vstl1_lane_f64<const LANE: i32>(ptr: *mut f64, val: float64x1_t) {
    // Single-lane vector: only lane 0 exists.
    static_assert!(LANE == 0);
    // SAFETY: f64 and i64 lanes share a bit layout; the release-store semantics
    // are provided by the s64 implementation.
    unsafe { vstl1_lane_s64::<LANE>(ptr as *mut i64, transmute(val)) }
}
#[doc = "Store-Release a single-element structure from one lane of one register."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vstl1q_lane_f64)"]
#[inline(always)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(stl1, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
#[cfg(target_has_atomic = "64")]
pub fn vstl1q_lane_f64<const LANE: i32>(ptr: *mut f64, val: float64x2_t) {
    // Two 64-bit lanes: LANE must fit in 1 unsigned bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // SAFETY: f64 and i64 lanes share a bit layout; delegate to the s64 release store.
    unsafe { vstl1q_lane_s64::<LANE>(ptr as *mut i64, transmute(val)) }
}
#[doc = "Store-Release a single-element structure from one lane of one register."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vstl1_lane_u64)"]
#[inline(always)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(stl1, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
#[cfg(target_has_atomic = "64")]
pub fn vstl1_lane_u64<const LANE: i32>(ptr: *mut u64, val: uint64x1_t) {
    // Single-lane vector: only lane 0 exists.
    static_assert!(LANE == 0);
    // SAFETY: u64 and i64 lanes share a bit layout; delegate to the s64 release store.
    unsafe { vstl1_lane_s64::<LANE>(ptr as *mut i64, transmute(val)) }
}
#[doc = "Store-Release a single-element structure from one lane of one register."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vstl1q_lane_u64)"]
#[inline(always)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(stl1, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
#[cfg(target_has_atomic = "64")]
pub fn vstl1q_lane_u64<const LANE: i32>(ptr: *mut u64, val: uint64x2_t) {
    // Two 64-bit lanes: LANE must fit in 1 unsigned bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // SAFETY: u64 and i64 lanes share a bit layout; delegate to the s64 release store.
    unsafe { vstl1q_lane_s64::<LANE>(ptr as *mut i64, transmute(val)) }
}
#[doc = "Store-Release a single-element structure from one lane of one register."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vstl1_lane_p64)"]
#[inline(always)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(stl1, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
#[cfg(target_has_atomic = "64")]
pub fn vstl1_lane_p64<const LANE: i32>(ptr: *mut p64, val: poly64x1_t) {
    // Single-lane vector: only lane 0 exists.
    static_assert!(LANE == 0);
    // SAFETY: poly64 and i64 lanes share a bit layout; delegate to the s64 release store.
    unsafe { vstl1_lane_s64::<LANE>(ptr as *mut i64, transmute(val)) }
}
#[doc = "Store-Release a single-element structure from one lane of one register."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vstl1q_lane_p64)"]
#[inline(always)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(stl1, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
#[cfg(target_has_atomic = "64")]
pub fn vstl1q_lane_p64<const LANE: i32>(ptr: *mut p64, val: poly64x2_t) {
    // Two 64-bit lanes: LANE must fit in 1 unsigned bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // SAFETY: poly64 and i64 lanes share a bit layout; delegate to the s64 release store.
    unsafe { vstl1q_lane_s64::<LANE>(ptr as *mut i64, transmute(val)) }
}
#[doc = "Store-Release a single-element structure from one lane of one register."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vstl1_lane_s64)"]
#[inline(always)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(stl1, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
#[cfg(target_has_atomic = "64")]
pub fn vstl1_lane_s64<const LANE: i32>(ptr: *mut i64, val: int64x1_t) {
    // Single-lane vector: only lane 0 exists.
    static_assert!(LANE == 0);
    // Implemented via an atomic store with Release ordering so the compiler
    // emits the STL1 store-release (checked by assert_instr above).
    let atomic_dst = ptr as *mut crate::sync::atomic::AtomicI64;
    // SAFETY: caller must ensure `ptr` is valid and suitably aligned for an
    // atomic 64-bit store; the extracted lane index is statically 0.
    unsafe {
        let lane: i64 = simd_extract!(val, LANE as u32);
        (*atomic_dst).store(transmute(lane), crate::sync::atomic::Ordering::Release)
    }
}
#[doc = "Store-Release a single-element structure from one lane of one register."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vstl1q_lane_s64)"]
#[inline(always)]
#[target_feature(enable = "neon,rcpc3")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(stl1, LANE = 0))]
#[rustc_legacy_const_generics(2)]
#[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")]
#[cfg(target_has_atomic = "64")]
pub fn vstl1q_lane_s64<const LANE: i32>(ptr: *mut i64, val: int64x2_t) {
    // Two 64-bit lanes: LANE must fit in 1 unsigned bit (0..=1).
    static_assert_uimm_bits!(LANE, 1);
    // Implemented via an atomic store with Release ordering so the compiler
    // emits the STL1 store-release (checked by assert_instr above).
    let atomic_dst = ptr as *mut crate::sync::atomic::AtomicI64;
    // SAFETY: caller must ensure `ptr` is valid and suitably aligned for an
    // atomic 64-bit store; LANE is statically constrained to a valid lane.
    unsafe {
        let lane: i64 = simd_extract!(val, LANE as u32);
        (*atomic_dst).store(transmute(lane), crate::sync::atomic::Ordering::Release)
    }
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fsub))]
pub fn vsub_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t {
    // Lane-wise a - b; lowered to a single FSUB.
    unsafe { simd_sub(a, b) }
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(fsub))]
pub fn vsubq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Lane-wise a - b; lowered to a single FSUB.
    unsafe { simd_sub(a, b) }
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sub))]
pub fn vsubd_s64(a: i64, b: i64) -> i64 {
    // Wrapping (two's-complement) subtraction matches the SUB instruction's
    // modular arithmetic — no overflow panic even in debug builds.
    a.wrapping_sub(b)
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubd_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(sub))]
pub fn vsubd_u64(a: u64, b: u64) -> u64 {
    // Wrapping subtraction matches the SUB instruction's modular arithmetic —
    // no overflow panic even in debug builds.
    a.wrapping_sub(b)
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubh_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[unstable(feature = "stdarch_neon_f16", issue = "136306")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(test, assert_instr(fsub))]
pub fn vsubh_f16(a: f16, b: f16) -> f16 {
    // Plain scalar half-precision subtraction; lowered to FSUB.
    a - b
}
#[doc = "Signed Subtract Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ssubl2))]
pub fn vsubl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t {
    unsafe {
        // Take the high 8 lanes of each input, sign-extend to 16 bits, subtract.
        let c: int8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        let d: int16x8_t = simd_cast(c);
        let e: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let f: int16x8_t = simd_cast(e);
        simd_sub(d, f)
    }
}
#[doc = "Signed Subtract Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ssubl2))]
pub fn vsubl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t {
    unsafe {
        // Take the high 4 lanes of each input, sign-extend to 32 bits, subtract.
        let c: int16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let d: int32x4_t = simd_cast(c);
        let e: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let f: int32x4_t = simd_cast(e);
        simd_sub(d, f)
    }
}
#[doc = "Signed Subtract Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ssubl2))]
pub fn vsubl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t {
    unsafe {
        // Take the high 2 lanes of each input, sign-extend to 64 bits, subtract.
        let c: int32x2_t = simd_shuffle!(a, a, [2, 3]);
        let d: int64x2_t = simd_cast(c);
        let e: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        let f: int64x2_t = simd_cast(e);
        simd_sub(d, f)
    }
}
#[doc = "Unsigned Subtract Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usubl2))]
pub fn vsubl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t {
    unsafe {
        // Take the high 8 lanes of each input, zero-extend to 16 bits, subtract.
        let c: uint8x8_t = simd_shuffle!(a, a, [8, 9, 10, 11, 12, 13, 14, 15]);
        let d: uint16x8_t = simd_cast(c);
        let e: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        let f: uint16x8_t = simd_cast(e);
        simd_sub(d, f)
    }
}
#[doc = "Unsigned Subtract Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usubl2))]
pub fn vsubl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t {
    unsafe {
        // Take the high 4 lanes of each input, zero-extend to 32 bits, subtract.
        let c: uint16x4_t = simd_shuffle!(a, a, [4, 5, 6, 7]);
        let d: uint32x4_t = simd_cast(c);
        let e: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        let f: uint32x4_t = simd_cast(e);
        simd_sub(d, f)
    }
}
#[doc = "Unsigned Subtract Long"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usubl2))]
pub fn vsubl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
    unsafe {
        // Take the high 2 lanes of each input, zero-extend to 64 bits, subtract.
        let c: uint32x2_t = simd_shuffle!(a, a, [2, 3]);
        let d: uint64x2_t = simd_cast(c);
        let e: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
        let f: uint64x2_t = simd_cast(e);
        simd_sub(d, f)
    }
}
#[doc = "Signed Subtract Wide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ssubw2))]
pub fn vsubw_high_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t {
    unsafe {
        // Sign-extend the high 8 lanes of `b` and subtract from the wide `a`.
        let c: int8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        simd_sub(a, simd_cast(c))
    }
}
#[doc = "Signed Subtract Wide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ssubw2))]
pub fn vsubw_high_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t {
    unsafe {
        // Sign-extend the high 4 lanes of `b` and subtract from the wide `a`.
        let c: int16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        simd_sub(a, simd_cast(c))
    }
}
#[doc = "Signed Subtract Wide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(ssubw2))]
pub fn vsubw_high_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t {
    unsafe {
        // Sign-extend the high 2 lanes of `b` and subtract from the wide `a`.
        let c: int32x2_t = simd_shuffle!(b, b, [2, 3]);
        simd_sub(a, simd_cast(c))
    }
}
#[doc = "Unsigned Subtract Wide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usubw2))]
pub fn vsubw_high_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t {
    unsafe {
        // Zero-extend the high 8 lanes of `b` and subtract from the wide `a`.
        let c: uint8x8_t = simd_shuffle!(b, b, [8, 9, 10, 11, 12, 13, 14, 15]);
        simd_sub(a, simd_cast(c))
    }
}
#[doc = "Unsigned Subtract Wide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(usubw2))]
pub fn vsubw_high_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t {
    unsafe {
        // Zero-extend the high 4 lanes of `b` and subtract from the wide `a`.
        let c: uint16x4_t = simd_shuffle!(b, b, [4, 5, 6, 7]);
        simd_sub(a, simd_cast(c))
    }
}
27425#[doc = "Unsigned Subtract Wide"]
27426#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u32)"]
27427#[inline(always)]
27428#[target_feature(enable = "neon")]
27429#[stable(feature = "neon_intrinsics", since = "1.59.0")]
27430#[cfg_attr(test, assert_instr(usubw2))]
27431pub fn vsubw_high_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t {
27432    unsafe {
27433        let c: uint32x2_t = simd_shuffle!(b, b, [2, 3]);
27434        simd_sub(a, simd_cast(c))
27435    }
27436}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    // Build a 128-bit table by zero-extending the 64-bit table `a`; indices
    // in `b` that are >= 8 then select the zeroed half, so they yield 0,
    // which matches vtbl1's out-of-range behavior.
    vqtbl1_s8(vcombine_s8(a, unsafe { crate::mem::zeroed() }), unsafe {
        {
            // `b` holds byte indices; reinterpret the signed vector as the
            // unsigned index vector `vqtbl1_s8` expects.
            transmute(b)
        }
    })
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    // Same scheme as vtbl1_s8: zero-extend the table to 128 bits so
    // out-of-range indices (>= 8) read zeros.
    vqtbl1_u8(vcombine_u8(a, unsafe { crate::mem::zeroed() }), b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl1_p8(a: poly8x8_t, b: uint8x8_t) -> poly8x8_t {
    // Polynomial variant of vtbl1_u8; only the element type differs.
    vqtbl1_p8(vcombine_p8(a, unsafe { crate::mem::zeroed() }), b)
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl2_s8(a: int8x8x2_t, b: int8x8_t) -> int8x8_t {
    // The two 64-bit tables are concatenated into one 128-bit table, so a
    // single TBL (via the internal `vqtbl1` helper) covers indices 0..=15.
    unsafe { vqtbl1(transmute(vcombine_s8(a.0, a.1)), transmute(b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_u8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl2_u8(a: uint8x8x2_t, b: uint8x8_t) -> uint8x8_t {
    unsafe { transmute(vqtbl1(transmute(vcombine_u8(a.0, a.1)), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_u8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl2_u8(a: uint8x8x2_t, b: uint8x8_t) -> uint8x8_t {
    // Big-endian variant: reverse the lane order of every input, perform the
    // same look-up as the little-endian path, then reverse the result back.
    // NOTE(review): presumably this compensates for big-endian lane numbering
    // across the `transmute`s — confirm against stdarch-gen-arm.
    let mut a: uint8x8x2_t = a;
    a.0 = unsafe { simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.1 = unsafe { simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x8_t = transmute(vqtbl1(transmute(vcombine_u8(a.0, a.1)), b));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_p8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl2_p8(a: poly8x8x2_t, b: uint8x8_t) -> poly8x8_t {
    // Polynomial variant of vtbl2_u8 (little-endian path).
    unsafe { transmute(vqtbl1(transmute(vcombine_p8(a.0, a.1)), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_p8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl2_p8(a: poly8x8x2_t, b: uint8x8_t) -> poly8x8_t {
    // Big-endian variant: lane-reverse inputs and result around the LE body.
    let mut a: poly8x8x2_t = a;
    a.0 = unsafe { simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.1 = unsafe { simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x8_t = transmute(vqtbl1(transmute(vcombine_p8(a.0, a.1)), b));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl3_s8(a: int8x8x3_t, b: int8x8_t) -> int8x8_t {
    // Pack the three 64-bit tables into two 128-bit registers, zero-padding
    // the last half; indices 24..=31 hit the zero pad and so return 0,
    // matching vtbl3's out-of-range behavior for indices >= 24.
    let x = int8x16x2_t(
        vcombine_s8(a.0, a.1),
        vcombine_s8(a.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), transmute(b))) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_u8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl3_u8(a: uint8x8x3_t, b: uint8x8_t) -> uint8x8_t {
    // Same packing scheme as vtbl3_s8 (unsigned, little-endian path).
    let x = uint8x16x2_t(
        vcombine_u8(a.0, a.1),
        vcombine_u8(a.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_u8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl3_u8(a: uint8x8x3_t, b: uint8x8_t) -> uint8x8_t {
    // Big-endian variant: lane-reverse all inputs, run the LE body, then
    // lane-reverse the result back.
    let mut a: uint8x8x3_t = a;
    a.0 = unsafe { simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.1 = unsafe { simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.2 = unsafe { simd_shuffle!(a.2, a.2, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let x = uint8x16x2_t(
        vcombine_u8(a.0, a.1),
        vcombine_u8(a.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe {
        let ret_val: uint8x8_t = transmute(vqtbl2(transmute(x.0), transmute(x.1), b));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_p8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl3_p8(a: poly8x8x3_t, b: uint8x8_t) -> poly8x8_t {
    // Polynomial variant of vtbl3_u8 (little-endian path).
    let x = poly8x16x2_t(
        vcombine_p8(a.0, a.1),
        vcombine_p8(a.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_p8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl3_p8(a: poly8x8x3_t, b: uint8x8_t) -> poly8x8_t {
    // Big-endian variant: lane-reverse inputs and result around the LE body.
    let mut a: poly8x8x3_t = a;
    a.0 = unsafe { simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.1 = unsafe { simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.2 = unsafe { simd_shuffle!(a.2, a.2, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let x = poly8x16x2_t(
        vcombine_p8(a.0, a.1),
        vcombine_p8(a.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe {
        let ret_val: poly8x8_t = transmute(vqtbl2(transmute(x.0), transmute(x.1), b));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl4_s8(a: int8x8x4_t, b: int8x8_t) -> int8x8_t {
    // Four 64-bit tables pack exactly into two 128-bit registers, so no
    // zero padding is needed (contrast vtbl3_s8).
    let x = int8x16x2_t(vcombine_s8(a.0, a.1), vcombine_s8(a.2, a.3));
    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), transmute(b))) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_u8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl4_u8(a: uint8x8x4_t, b: uint8x8_t) -> uint8x8_t {
    // Unsigned variant of vtbl4_s8 (little-endian path).
    let x = uint8x16x2_t(vcombine_u8(a.0, a.1), vcombine_u8(a.2, a.3));
    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_u8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl4_u8(a: uint8x8x4_t, b: uint8x8_t) -> uint8x8_t {
    // Big-endian variant: lane-reverse all inputs, run the LE body, then
    // lane-reverse the result back.
    let mut a: uint8x8x4_t = a;
    a.0 = unsafe { simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.1 = unsafe { simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.2 = unsafe { simd_shuffle!(a.2, a.2, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.3 = unsafe { simd_shuffle!(a.3, a.3, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let x = uint8x16x2_t(vcombine_u8(a.0, a.1), vcombine_u8(a.2, a.3));
    unsafe {
        let ret_val: uint8x8_t = transmute(vqtbl2(transmute(x.0), transmute(x.1), b));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_p8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl4_p8(a: poly8x8x4_t, b: uint8x8_t) -> poly8x8_t {
    // Polynomial variant of vtbl4_u8 (little-endian path).
    let x = poly8x16x2_t(vcombine_p8(a.0, a.1), vcombine_p8(a.2, a.3));
    unsafe { transmute(vqtbl2(transmute(x.0), transmute(x.1), b)) }
}
#[doc = "Table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_p8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbl))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbl4_p8(a: poly8x8x4_t, b: uint8x8_t) -> poly8x8_t {
    // Big-endian variant: lane-reverse inputs and result around the LE body.
    let mut a: poly8x8x4_t = a;
    a.0 = unsafe { simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.1 = unsafe { simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.2 = unsafe { simd_shuffle!(a.2, a.2, [7, 6, 5, 4, 3, 2, 1, 0]) };
    a.3 = unsafe { simd_shuffle!(a.3, a.3, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let x = poly8x16x2_t(vcombine_p8(a.0, a.1), vcombine_p8(a.2, a.3));
    unsafe {
        let ret_val: poly8x8_t = transmute(vqtbl2(transmute(x.0), transmute(x.1), b));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx1_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t {
    unsafe {
        // The 128-bit TBX table is `b` zero-extended, so indices 8..=15 would
        // select the zeroed half instead of being out-of-range. The explicit
        // `simd_lt`/`simd_select` restores vtbx1 semantics: lanes with an
        // index >= 8 keep the corresponding lane of `a`.
        simd_select(
            simd_lt::<int8x8_t, int8x8_t>(c, transmute(i8x8::splat(8))),
            transmute(vqtbx1(
                transmute(a),
                transmute(vcombine_s8(b, crate::mem::zeroed())),
                transmute(c),
            )),
            a,
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx1_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t {
    unsafe {
        // Unsigned variant of vtbx1_s8; same select-on-(index < 8) masking.
        simd_select(
            simd_lt::<uint8x8_t, int8x8_t>(c, transmute(u8x8::splat(8))),
            transmute(vqtbx1(
                transmute(a),
                transmute(vcombine_u8(b, crate::mem::zeroed())),
                c,
            )),
            a,
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx1_p8(a: poly8x8_t, b: poly8x8_t, c: uint8x8_t) -> poly8x8_t {
    unsafe {
        // Polynomial variant of vtbx1_u8; same select-on-(index < 8) masking.
        simd_select(
            simd_lt::<uint8x8_t, int8x8_t>(c, transmute(u8x8::splat(8))),
            transmute(vqtbx1(
                transmute(a),
                transmute(vcombine_p8(b, crate::mem::zeroed())),
                c,
            )),
            a,
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx2_s8(a: int8x8_t, b: int8x8x2_t, c: int8x8_t) -> int8x8_t {
    // The two 64-bit tables fill the 128-bit TBX table exactly (valid
    // indices 0..=15), so no extra select is needed (contrast vtbx1_s8).
    unsafe { vqtbx1(transmute(a), transmute(vcombine_s8(b.0, b.1)), transmute(c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_u8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx2_u8(a: uint8x8_t, b: uint8x8x2_t, c: uint8x8_t) -> uint8x8_t {
    // Unsigned variant of vtbx2_s8 (little-endian path).
    unsafe { transmute(vqtbx1(transmute(a), transmute(vcombine_u8(b.0, b.1)), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_u8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx2_u8(a: uint8x8_t, b: uint8x8x2_t, c: uint8x8_t) -> uint8x8_t {
    // Big-endian variant: lane-reverse all inputs, run the LE body, then
    // lane-reverse the result back.
    let mut b: uint8x8x2_t = b;
    let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe { simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.1 = unsafe { simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x8_t =
            transmute(vqtbx1(transmute(a), transmute(vcombine_u8(b.0, b.1)), c));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_p8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx2_p8(a: poly8x8_t, b: poly8x8x2_t, c: uint8x8_t) -> poly8x8_t {
    // Polynomial variant of vtbx2_u8 (little-endian path).
    unsafe { transmute(vqtbx1(transmute(a), transmute(vcombine_p8(b.0, b.1)), c)) }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_p8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx2_p8(a: poly8x8_t, b: poly8x8x2_t, c: uint8x8_t) -> poly8x8_t {
    // Big-endian variant: lane-reverse inputs and result around the LE body.
    let mut b: poly8x8x2_t = b;
    let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe { simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.1 = unsafe { simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x8_t =
            transmute(vqtbx1(transmute(a), transmute(vcombine_p8(b.0, b.1)), c));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx3_s8(a: int8x8_t, b: int8x8x3_t, c: int8x8_t) -> int8x8_t {
    // Pack the three 64-bit tables into a zero-padded 32-byte TBX table.
    // Indices 24..=31 would hit the zero pad rather than being out-of-range,
    // so lanes with index >= 24 are forced back to `a` via `simd_select`,
    // restoring vtbx3 semantics.
    let x = int8x16x2_t(
        vcombine_s8(b.0, b.1),
        vcombine_s8(b.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe {
        transmute(simd_select(
            simd_lt::<int8x8_t, int8x8_t>(transmute(c), transmute(i8x8::splat(24))),
            transmute(vqtbx2(
                transmute(a),
                transmute(x.0),
                transmute(x.1),
                transmute(c),
            )),
            a,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_u8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx3_u8(a: uint8x8_t, b: uint8x8x3_t, c: uint8x8_t) -> uint8x8_t {
    // Unsigned variant of vtbx3_s8 (little-endian path); same
    // select-on-(index < 24) masking.
    let x = uint8x16x2_t(
        vcombine_u8(b.0, b.1),
        vcombine_u8(b.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe {
        transmute(simd_select(
            simd_lt::<uint8x8_t, int8x8_t>(transmute(c), transmute(u8x8::splat(24))),
            transmute(vqtbx2(transmute(a), transmute(x.0), transmute(x.1), c)),
            a,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_u8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx3_u8(a: uint8x8_t, b: uint8x8x3_t, c: uint8x8_t) -> uint8x8_t {
    // Big-endian variant: lane-reverse all inputs, run the LE body, then
    // lane-reverse the result back.
    let mut b: uint8x8x3_t = b;
    let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe { simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.1 = unsafe { simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.2 = unsafe { simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let x = uint8x16x2_t(
        vcombine_u8(b.0, b.1),
        vcombine_u8(b.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe {
        let ret_val: uint8x8_t = transmute(simd_select(
            simd_lt::<uint8x8_t, int8x8_t>(transmute(c), transmute(u8x8::splat(24))),
            transmute(vqtbx2(transmute(a), transmute(x.0), transmute(x.1), c)),
            a,
        ));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_p8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx3_p8(a: poly8x8_t, b: poly8x8x3_t, c: uint8x8_t) -> poly8x8_t {
    // Polynomial variant of vtbx3_u8 (little-endian path).
    let x = poly8x16x2_t(
        vcombine_p8(b.0, b.1),
        vcombine_p8(b.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe {
        transmute(simd_select(
            simd_lt::<poly8x8_t, int8x8_t>(transmute(c), transmute(u8x8::splat(24))),
            transmute(vqtbx2(transmute(a), transmute(x.0), transmute(x.1), c)),
            a,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_p8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx3_p8(a: poly8x8_t, b: poly8x8x3_t, c: uint8x8_t) -> poly8x8_t {
    // Big-endian variant: lane-reverse inputs and result around the LE body.
    let mut b: poly8x8x3_t = b;
    let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe { simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.1 = unsafe { simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.2 = unsafe { simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let x = poly8x16x2_t(
        vcombine_p8(b.0, b.1),
        vcombine_p8(b.2, unsafe { crate::mem::zeroed() }),
    );
    unsafe {
        let ret_val: poly8x8_t = transmute(simd_select(
            simd_lt::<poly8x8_t, int8x8_t>(transmute(c), transmute(u8x8::splat(24))),
            transmute(vqtbx2(transmute(a), transmute(x.0), transmute(x.1), c)),
            a,
        ));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx4_s8(a: int8x8_t, b: int8x8x4_t, c: int8x8_t) -> int8x8_t {
    // The four 64-bit tables fill the 32-byte TBX table exactly (valid
    // indices 0..=31), so no index masking/select is needed (contrast
    // vtbx3_s8).
    unsafe {
        vqtbx2(
            transmute(a),
            transmute(vcombine_s8(b.0, b.1)),
            transmute(vcombine_s8(b.2, b.3)),
            transmute(c),
        )
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_u8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx4_u8(a: uint8x8_t, b: uint8x8x4_t, c: uint8x8_t) -> uint8x8_t {
    // Unsigned variant of vtbx4_s8 (little-endian path).
    unsafe {
        transmute(vqtbx2(
            transmute(a),
            transmute(vcombine_u8(b.0, b.1)),
            transmute(vcombine_u8(b.2, b.3)),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_u8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx4_u8(a: uint8x8_t, b: uint8x8x4_t, c: uint8x8_t) -> uint8x8_t {
    // Big-endian variant: lane-reverse all inputs, run the LE body, then
    // lane-reverse the result back.
    let mut b: uint8x8x4_t = b;
    let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe { simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.1 = unsafe { simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.2 = unsafe { simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.3 = unsafe { simd_shuffle!(b.3, b.3, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: uint8x8_t = transmute(vqtbx2(
            transmute(a),
            transmute(vcombine_u8(b.0, b.1)),
            transmute(vcombine_u8(b.2, b.3)),
            c,
        ));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_p8)"]
#[inline(always)]
#[cfg(target_endian = "little")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx4_p8(a: poly8x8_t, b: poly8x8x4_t, c: uint8x8_t) -> poly8x8_t {
    // Polynomial variant of vtbx4_u8 (little-endian path).
    unsafe {
        transmute(vqtbx2(
            transmute(a),
            transmute(vcombine_p8(b.0, b.1)),
            transmute(vcombine_p8(b.2, b.3)),
            c,
        ))
    }
}
#[doc = "Extended table look-up"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_p8)"]
#[inline(always)]
#[cfg(target_endian = "big")]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tbx))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtbx4_p8(a: poly8x8_t, b: poly8x8x4_t, c: uint8x8_t) -> poly8x8_t {
    // Big-endian variant: lane-reverse inputs and result around the LE body.
    let mut b: poly8x8x4_t = b;
    let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.0 = unsafe { simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.1 = unsafe { simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.2 = unsafe { simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]) };
    b.3 = unsafe { simd_shuffle!(b.3, b.3, [7, 6, 5, 4, 3, 2, 1, 0]) };
    let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) };
    unsafe {
        let ret_val: poly8x8_t = transmute(vqtbx2(
            transmute(a),
            transmute(vcombine_p8(b.0, b.1)),
            transmute(vcombine_p8(b.2, b.3)),
            c,
        ));
        simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0])
    }
}
28030#[doc = "Transpose vectors"]
28031#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_f16)"]
28032#[inline(always)]
28033#[target_feature(enable = "neon,fp16")]
28034#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
28035#[cfg(not(target_arch = "arm64ec"))]
28036#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
28037pub fn vtrn1_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
28038    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
28039}
28040#[doc = "Transpose vectors"]
28041#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_f16)"]
28042#[inline(always)]
28043#[target_feature(enable = "neon,fp16")]
28044#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
28045#[cfg(not(target_arch = "arm64ec"))]
28046#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
28047pub fn vtrn1q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
28048    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
28049}
28050#[doc = "Transpose vectors"]
28051#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_f32)"]
28052#[inline(always)]
28053#[target_feature(enable = "neon")]
28054#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28055#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
28056pub fn vtrn1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
28057    unsafe { simd_shuffle!(a, b, [0, 2]) }
28058}
28059#[doc = "Transpose vectors"]
28060#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_f64)"]
28061#[inline(always)]
28062#[target_feature(enable = "neon")]
28063#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28064#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
28065pub fn vtrn1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
28066    unsafe { simd_shuffle!(a, b, [0, 2]) }
28067}
28068#[doc = "Transpose vectors"]
28069#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s32)"]
28070#[inline(always)]
28071#[target_feature(enable = "neon")]
28072#[stable(feature = "neon_intrinsics", since = "1.59.0")]
28073#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
28074pub fn vtrn1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
28075    unsafe { simd_shuffle!(a, b, [0, 2]) }
28076}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vtrn1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    // With only two lanes TRN1 degenerates to [a0, b0], the same permutation
    // ZIP1 performs — hence the `assert_instr(zip1)` above.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vtrn1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    // With only two lanes TRN1 degenerates to [a0, b0], the same permutation
    // ZIP1 performs — hence the `assert_instr(zip1)` above.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vtrn1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // With only two lanes TRN1 degenerates to [a0, b0], the same permutation
    // ZIP1 performs — hence the `assert_instr(zip1)` above.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vtrn1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    // With only two lanes TRN1 degenerates to [a0, b0], the same permutation
    // ZIP1 performs — hence the `assert_instr(zip1)` above.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // TRN1: interleave the even-indexed lanes of `a` and `b`
    // -> [a0, b0, a2, b2].
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    // TRN1: interleave the even-indexed lanes of `a` and `b`
    // -> [a0, b0, a2, b2, a4, b4, a6, b6].
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    unsafe {
        // TRN1: interleave the even-indexed lanes of `a` and `b`
        // (indices >= 16 select from `b`).
        simd_shuffle!(
            a,
            b,
            [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]
        )
    }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    // TRN1: interleave the even-indexed lanes of `a` and `b`
    // -> [a0, b0, a2, b2].
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    // TRN1: interleave the even-indexed lanes of `a` and `b`
    // -> [a0, b0, a2, b2, a4, b4, a6, b6].
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    // TRN1: interleave the even-indexed lanes of `a` and `b`
    // -> [a0, b0, a2, b2].
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    // TRN1: interleave the even-indexed lanes of `a` and `b`
    // -> [a0, b0, a2, b2, a4, b4, a6, b6].
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    unsafe {
        // TRN1: interleave the even-indexed lanes of `a` and `b`
        // (indices >= 16 select from `b`).
        simd_shuffle!(
            a,
            b,
            [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]
        )
    }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    // TRN1: interleave the even-indexed lanes of `a` and `b`
    // -> [a0, b0, a2, b2].
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    // TRN1: interleave the even-indexed lanes of `a` and `b`
    // -> [a0, b0, a2, b2, a4, b4, a6, b6].
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    // TRN1: interleave the even-indexed lanes of `a` and `b`
    // -> [a0, b0, a2, b2].
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    // TRN1: interleave the even-indexed lanes of `a` and `b`
    // -> [a0, b0, a2, b2, a4, b4, a6, b6].
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    unsafe {
        // TRN1: interleave the even-indexed lanes of `a` and `b`
        // (indices >= 16 select from `b`).
        simd_shuffle!(
            a,
            b,
            [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]
        )
    }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    // TRN1: interleave the even-indexed lanes of `a` and `b`
    // -> [a0, b0, a2, b2].
    unsafe { simd_shuffle!(a, b, [0, 4, 2, 6]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))]
pub fn vtrn1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    // TRN1: interleave the even-indexed lanes of `a` and `b`
    // -> [a0, b0, a2, b2, a4, b4, a6, b6].
    unsafe { simd_shuffle!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // TRN2: interleave the odd-indexed lanes of `a` and `b`
    // -> [a1, b1, a3, b3].
    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // TRN2: interleave the odd-indexed lanes of `a` and `b`
    // -> [a1, b1, a3, b3, a5, b5, a7, b7].
    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vtrn2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // With only two lanes TRN2 degenerates to [a1, b1], the same permutation
    // ZIP2 performs — hence the `assert_instr(zip2)` above.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vtrn2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // With only two lanes TRN2 degenerates to [a1, b1], the same permutation
    // ZIP2 performs — hence the `assert_instr(zip2)` above.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vtrn2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    // With only two lanes TRN2 degenerates to [a1, b1], the same permutation
    // ZIP2 performs — hence the `assert_instr(zip2)` above.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vtrn2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    // With only two lanes TRN2 degenerates to [a1, b1], the same permutation
    // ZIP2 performs — hence the `assert_instr(zip2)` above.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vtrn2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    // With only two lanes TRN2 degenerates to [a1, b1], the same permutation
    // ZIP2 performs — hence the `assert_instr(zip2)` above.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vtrn2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // With only two lanes TRN2 degenerates to [a1, b1], the same permutation
    // ZIP2 performs — hence the `assert_instr(zip2)` above.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vtrn2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    // With only two lanes TRN2 degenerates to [a1, b1], the same permutation
    // ZIP2 performs — hence the `assert_instr(zip2)` above.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // TRN2: interleave the odd-indexed lanes of `a` and `b`
    // -> [a1, b1, a3, b3].
    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    // TRN2: interleave the odd-indexed lanes of `a` and `b`
    // -> [a1, b1, a3, b3, a5, b5, a7, b7].
    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    unsafe {
        // TRN2: interleave the odd-indexed lanes of `a` and `b`
        // (indices >= 16 select from `b`).
        simd_shuffle!(
            a,
            b,
            [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31]
        )
    }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    // TRN2: interleave the odd-indexed lanes of `a` and `b`
    // -> [a1, b1, a3, b3].
    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    // TRN2: interleave the odd-indexed lanes of `a` and `b`
    // -> [a1, b1, a3, b3, a5, b5, a7, b7].
    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    // TRN2: interleave the odd-indexed lanes of `a` and `b`
    // -> [a1, b1, a3, b3].
    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    // TRN2: interleave the odd-indexed lanes of `a` and `b`
    // -> [a1, b1, a3, b3, a5, b5, a7, b7].
    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    unsafe {
        // TRN2: interleave the odd-indexed lanes of `a` and `b`
        // (indices >= 16 select from `b`).
        simd_shuffle!(
            a,
            b,
            [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31]
        )
    }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    // TRN2: interleave the odd-indexed lanes of `a` and `b`
    // -> [a1, b1, a3, b3].
    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    // TRN2: interleave the odd-indexed lanes of `a` and `b`
    // -> [a1, b1, a3, b3, a5, b5, a7, b7].
    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    // TRN2: interleave the odd-indexed lanes of `a` and `b`
    // -> [a1, b1, a3, b3].
    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    // TRN2: interleave the odd-indexed lanes of `a` and `b`
    // -> [a1, b1, a3, b3, a5, b5, a7, b7].
    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    unsafe {
        // TRN2: interleave the odd-indexed lanes of `a` and `b`
        // (indices >= 16 select from `b`).
        simd_shuffle!(
            a,
            b,
            [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31]
        )
    }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    // TRN2: interleave the odd-indexed lanes of `a` and `b`
    // -> [a1, b1, a3, b3].
    unsafe { simd_shuffle!(a, b, [1, 5, 3, 7]) }
}
#[doc = "Transpose vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))]
pub fn vtrn2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    // TRN2: interleave the odd-indexed lanes of `a` and `b`
    // -> [a1, b1, a3, b3, a5, b5, a7, b7].
    unsafe { simd_shuffle!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]) }
}
#[doc = "Signed compare bitwise Test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmtst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtst_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t {
    // Per lane: (a & b) != 0 — yields an all-ones mask lane when the inputs
    // share any set bit, an all-zeros lane otherwise.
    unsafe {
        let c: int64x1_t = simd_and(a, b);
        let d: i64x1 = i64x1::new(0);
        simd_ne(c, transmute(d))
    }
}
#[doc = "Signed compare bitwise Test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmtst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtstq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t {
    // Per lane: (a & b) != 0 — yields an all-ones mask lane when the inputs
    // share any set bit, an all-zeros lane otherwise.
    unsafe {
        let c: int64x2_t = simd_and(a, b);
        let d: i64x2 = i64x2::new(0, 0);
        simd_ne(c, transmute(d))
    }
}
#[doc = "Signed compare bitwise Test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmtst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtst_p64(a: poly64x1_t, b: poly64x1_t) -> uint64x1_t {
    // Per lane: (a & b) != 0 — yields an all-ones mask lane when the inputs
    // share any set bit, an all-zeros lane otherwise.
    unsafe {
        let c: poly64x1_t = simd_and(a, b);
        let d: i64x1 = i64x1::new(0);
        simd_ne(c, transmute(d))
    }
}
#[doc = "Signed compare bitwise Test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmtst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtstq_p64(a: poly64x2_t, b: poly64x2_t) -> uint64x2_t {
    // Per lane: (a & b) != 0 — yields an all-ones mask lane when the inputs
    // share any set bit, an all-zeros lane otherwise.
    unsafe {
        let c: poly64x2_t = simd_and(a, b);
        let d: i64x2 = i64x2::new(0, 0);
        simd_ne(c, transmute(d))
    }
}
#[doc = "Unsigned compare bitwise Test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmtst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtst_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    // Per lane: (a & b) != 0 — yields an all-ones mask lane when the inputs
    // share any set bit, an all-zeros lane otherwise.
    unsafe {
        let c: uint64x1_t = simd_and(a, b);
        let d: u64x1 = u64x1::new(0);
        simd_ne(c, transmute(d))
    }
}
#[doc = "Unsigned compare bitwise Test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(cmtst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtstq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // Per lane: (a & b) != 0 — yields an all-ones mask lane when the inputs
    // share any set bit, an all-zeros lane otherwise.
    unsafe {
        let c: uint64x2_t = simd_and(a, b);
        let d: u64x2 = u64x2::new(0, 0);
        simd_ne(c, transmute(d))
    }
}
#[doc = "Compare bitwise test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtstd_s64(a: i64, b: i64) -> u64 {
    // Scalar form: view each i64 as a 1-lane vector, reuse vtst_s64, and
    // transmute the single-lane mask back to a plain u64.
    unsafe { transmute(vtst_s64(transmute(a), transmute(b))) }
}
#[doc = "Compare bitwise test bits nonzero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstd_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(tst))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vtstd_u64(a: u64, b: u64) -> u64 {
    // Scalar form: view each u64 as a 1-lane vector, reuse vtst_u64, and
    // transmute the single-lane mask back to a plain u64.
    unsafe { transmute(vtst_u64(transmute(a), transmute(b))) }
}
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqadd_s8(a: int8x8_t, b: uint8x8_t) -> int8x8_t {
    // Thin binding to the LLVM SUQADD intrinsic for this vector shape (v8i8).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v8i8"
        )]
        fn _vuqadd_s8(a: int8x8_t, b: uint8x8_t) -> int8x8_t;
    }
    unsafe { _vuqadd_s8(a, b) }
}
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqaddq_s8(a: int8x16_t, b: uint8x16_t) -> int8x16_t {
    // Thin binding to the LLVM SUQADD intrinsic for this vector shape (v16i8).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v16i8"
        )]
        fn _vuqaddq_s8(a: int8x16_t, b: uint8x16_t) -> int8x16_t;
    }
    unsafe { _vuqaddq_s8(a, b) }
}
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqadd_s16(a: int16x4_t, b: uint16x4_t) -> int16x4_t {
    // Thin binding to the LLVM SUQADD intrinsic for this vector shape (v4i16).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v4i16"
        )]
        fn _vuqadd_s16(a: int16x4_t, b: uint16x4_t) -> int16x4_t;
    }
    unsafe { _vuqadd_s16(a, b) }
}
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqaddq_s16(a: int16x8_t, b: uint16x8_t) -> int16x8_t {
    // Thin binding to the LLVM SUQADD intrinsic for this vector shape (v8i16).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v8i16"
        )]
        fn _vuqaddq_s16(a: int16x8_t, b: uint16x8_t) -> int16x8_t;
    }
    unsafe { _vuqaddq_s16(a, b) }
}
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqadd_s32(a: int32x2_t, b: uint32x2_t) -> int32x2_t {
    // Thin binding to the LLVM SUQADD intrinsic for this vector shape (v2i32).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v2i32"
        )]
        fn _vuqadd_s32(a: int32x2_t, b: uint32x2_t) -> int32x2_t;
    }
    unsafe { _vuqadd_s32(a, b) }
}
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqaddq_s32(a: int32x4_t, b: uint32x4_t) -> int32x4_t {
    // Thin binding to the LLVM SUQADD intrinsic for this vector shape (v4i32).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v4i32"
        )]
        fn _vuqaddq_s32(a: int32x4_t, b: uint32x4_t) -> int32x4_t;
    }
    unsafe { _vuqaddq_s32(a, b) }
}
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqadd_s64(a: int64x1_t, b: uint64x1_t) -> int64x1_t {
    // Thin binding to the LLVM SUQADD intrinsic for this vector shape (v1i64).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v1i64"
        )]
        fn _vuqadd_s64(a: int64x1_t, b: uint64x1_t) -> int64x1_t;
    }
    unsafe { _vuqadd_s64(a, b) }
}
#[doc = "Signed saturating Accumulate of Unsigned value."]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(suqadd))]
pub fn vuqaddq_s64(a: int64x2_t, b: uint64x2_t) -> int64x2_t {
    // Thin binding to the LLVM SUQADD intrinsic for this vector shape (v2i64).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.v2i64"
        )]
        fn _vuqaddq_s64(a: int64x2_t, b: uint64x2_t) -> int64x2_t;
    }
    unsafe { _vuqaddq_s64(a, b) }
}
#[doc = "Signed saturating accumulate of unsigned value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddb_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(suqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vuqaddb_s8(a: i8, b: u8) -> i8 {
    // Scalar form: splat both operands into 8-lane vectors, run the vector
    // SUQADD, and extract lane 0.
    unsafe { simd_extract!(vuqadd_s8(vdup_n_s8(a), vdup_n_u8(b)), 0) }
}
#[doc = "Signed saturating accumulate of unsigned value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddh_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(suqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vuqaddh_s16(a: i16, b: u16) -> i16 {
    // Scalar form built from the 4-lane vector op: splat both scalars,
    // run the vector saturating accumulate, then extract lane 0.
    // SAFETY: lane index 0 is in bounds; `neon` enabled by the gate above.
    unsafe { simd_extract!(vuqadd_s16(vdup_n_s16(a), vdup_n_u16(b)), 0) }
}
#[doc = "Signed saturating accumulate of unsigned value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddd_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(suqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vuqaddd_s64(a: i64, b: u64) -> i64 {
    // Scalar (i64) LLVM intrinsic exists directly, so no splat/extract needed.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.i64"
        )]
        fn _vuqaddd_s64(a: i64, b: u64) -> i64;
    }
    // SAFETY: `neon` is guaranteed enabled by the #[target_feature] gate above.
    unsafe { _vuqaddd_s64(a, b) }
}
#[doc = "Signed saturating accumulate of unsigned value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadds_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(suqadd))]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
pub fn vuqadds_s32(a: i32, b: u32) -> i32 {
    // Scalar (i32) LLVM intrinsic exists directly, so no splat/extract needed.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.neon.suqadd.i32"
        )]
        fn _vuqadds_s32(a: i32, b: u32) -> i32;
    }
    // SAFETY: `neon` is guaranteed enabled by the #[target_feature] gate above.
    unsafe { _vuqadds_s32(a, b) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // UZP1: even-indexed lanes of the concatenation a:b.
    // SAFETY: constant in-bounds lane indices; features enabled by the gate above.
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // UZP1: even-indexed lanes of the concatenation a:b.
    // SAFETY: constant in-bounds lane indices; features enabled by the gate above.
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vuzp1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // Even lanes of a:b. With only 2 lanes, UZP1 coincides with ZIP1,
    // hence the zip1 codegen assertion above.
    // SAFETY: constant in-bounds lane indices; `neon` enabled by the gate above.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vuzp1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Even lanes of a:b. With only 2 lanes, UZP1 coincides with ZIP1,
    // hence the zip1 codegen assertion above.
    // SAFETY: constant in-bounds lane indices; `neon` enabled by the gate above.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vuzp1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    // Even lanes of a:b. With only 2 lanes, UZP1 coincides with ZIP1,
    // hence the zip1 codegen assertion above.
    // SAFETY: constant in-bounds lane indices; `neon` enabled by the gate above.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vuzp1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    // Even lanes of a:b. With only 2 lanes, UZP1 coincides with ZIP1,
    // hence the zip1 codegen assertion above.
    // SAFETY: constant in-bounds lane indices; `neon` enabled by the gate above.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vuzp1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    // Even lanes of a:b. With only 2 lanes, UZP1 coincides with ZIP1,
    // hence the zip1 codegen assertion above.
    // SAFETY: constant in-bounds lane indices; `neon` enabled by the gate above.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vuzp1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // Even lanes of a:b. With only 2 lanes, UZP1 coincides with ZIP1,
    // hence the zip1 codegen assertion above.
    // SAFETY: constant in-bounds lane indices; `neon` enabled by the gate above.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vuzp1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    // Even lanes of a:b. With only 2 lanes, UZP1 coincides with ZIP1,
    // hence the zip1 codegen assertion above.
    // SAFETY: constant in-bounds lane indices; `neon` enabled by the gate above.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // UZP1: even-indexed lanes of the concatenation a:b.
    // SAFETY: constant in-bounds lane indices; `neon` enabled by the gate above.
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    // UZP1: even-indexed lanes of the concatenation a:b.
    // SAFETY: constant in-bounds lane indices; `neon` enabled by the gate above.
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    // UZP1: even-indexed lanes of the concatenation a:b.
    // SAFETY: constant in-bounds lane indices; `neon` enabled by the gate above.
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]
        )
    }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    // UZP1: even-indexed lanes of the concatenation a:b.
    // SAFETY: constant in-bounds lane indices; `neon` enabled by the gate above.
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    // UZP1: even-indexed lanes of the concatenation a:b.
    // SAFETY: constant in-bounds lane indices; `neon` enabled by the gate above.
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    // UZP1: even-indexed lanes of the concatenation a:b.
    // SAFETY: constant in-bounds lane indices; `neon` enabled by the gate above.
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    // UZP1: even-indexed lanes of the concatenation a:b.
    // SAFETY: constant in-bounds lane indices; `neon` enabled by the gate above.
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // UZP1: even-indexed lanes of the concatenation a:b.
    // SAFETY: constant in-bounds lane indices; `neon` enabled by the gate above.
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]
        )
    }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    // UZP1: even-indexed lanes of the concatenation a:b.
    // SAFETY: constant in-bounds lane indices; `neon` enabled by the gate above.
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    // UZP1: even-indexed lanes of the concatenation a:b.
    // SAFETY: constant in-bounds lane indices; `neon` enabled by the gate above.
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    // UZP1: even-indexed lanes of the concatenation a:b.
    // SAFETY: constant in-bounds lane indices; `neon` enabled by the gate above.
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    // UZP1: even-indexed lanes of the concatenation a:b.
    // SAFETY: constant in-bounds lane indices; `neon` enabled by the gate above.
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    // UZP1: even-indexed lanes of the concatenation a:b.
    // SAFETY: constant in-bounds lane indices; `neon` enabled by the gate above.
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]
        )
    }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    // UZP1: even-indexed lanes of the concatenation a:b.
    // SAFETY: constant in-bounds lane indices; `neon` enabled by the gate above.
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))]
pub fn vuzp1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    // UZP1: even-indexed lanes of the concatenation a:b.
    // SAFETY: constant in-bounds lane indices; `neon` enabled by the gate above.
    unsafe { simd_shuffle!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // UZP2: odd-indexed lanes of the concatenation a:b.
    // SAFETY: constant in-bounds lane indices; features enabled by the gate above.
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // UZP2: odd-indexed lanes of the concatenation a:b.
    // SAFETY: constant in-bounds lane indices; features enabled by the gate above.
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vuzp2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // Odd lanes of a:b. With only 2 lanes, UZP2 coincides with ZIP2,
    // hence the zip2 codegen assertion above.
    // SAFETY: constant in-bounds lane indices; `neon` enabled by the gate above.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vuzp2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // Odd lanes of a:b. With only 2 lanes, UZP2 coincides with ZIP2,
    // hence the zip2 codegen assertion above.
    // SAFETY: constant in-bounds lane indices; `neon` enabled by the gate above.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vuzp2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    // Odd lanes of a:b. With only 2 lanes, UZP2 coincides with ZIP2,
    // hence the zip2 codegen assertion above.
    // SAFETY: constant in-bounds lane indices; `neon` enabled by the gate above.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vuzp2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    // Odd lanes of a:b. With only 2 lanes, UZP2 coincides with ZIP2,
    // hence the zip2 codegen assertion above.
    // SAFETY: constant in-bounds lane indices; `neon` enabled by the gate above.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vuzp2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    // Odd lanes of a:b. With only 2 lanes, UZP2 coincides with ZIP2,
    // hence the zip2 codegen assertion above.
    // SAFETY: constant in-bounds lane indices; `neon` enabled by the gate above.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vuzp2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // Odd lanes of a:b. With only 2 lanes, UZP2 coincides with ZIP2,
    // hence the zip2 codegen assertion above.
    // SAFETY: constant in-bounds lane indices; `neon` enabled by the gate above.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vuzp2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    // Odd lanes of a:b. With only 2 lanes, UZP2 coincides with ZIP2,
    // hence the zip2 codegen assertion above.
    // SAFETY: constant in-bounds lane indices; `neon` enabled by the gate above.
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // UZP2: odd-indexed lanes of the concatenation a:b.
    // SAFETY: constant in-bounds lane indices; `neon` enabled by the gate above.
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    // UZP2: odd-indexed lanes of the concatenation a:b.
    // SAFETY: constant in-bounds lane indices; `neon` enabled by the gate above.
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    // UZP2: odd-indexed lanes of the concatenation a:b.
    // SAFETY: constant in-bounds lane indices; `neon` enabled by the gate above.
    unsafe {
        simd_shuffle!(
            a,
            b,
            [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31]
        )
    }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    // UZP2: odd-indexed lanes of the concatenation a:b.
    // SAFETY: constant in-bounds lane indices; `neon` enabled by the gate above.
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    // UZP2: odd-indexed lanes of the concatenation a:b.
    // SAFETY: constant in-bounds lane indices; `neon` enabled by the gate above.
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    // UZP2: odd-indexed lanes of the concatenation a:b.
    // SAFETY: constant in-bounds lane indices; `neon` enabled by the gate above.
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    // UZP2: odd-indexed lanes of the concatenation a:b.
    // SAFETY: constant in-bounds lane indices; `neon` enabled by the gate above.
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // UZP2: odd-indexed lanes of the concatenation a:b.
    // SAFETY: constant in-bounds lane indices; `neon` enabled by the gate above.
    unsafe {
        simd_shuffle!(
            a,
            b,
            [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31]
        )
    }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    // UZP2: odd-indexed lanes of the concatenation a:b.
    // SAFETY: constant in-bounds lane indices; `neon` enabled by the gate above.
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    // UZP2: odd-indexed lanes of the concatenation a:b.
    // SAFETY: constant in-bounds lane indices; `neon` enabled by the gate above.
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    // UZP2: odd-indexed lanes of the concatenation a:b.
    // SAFETY: constant in-bounds lane indices; `neon` enabled by the gate above.
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    // UZP2: odd-indexed lanes of the concatenation a:b.
    // SAFETY: constant in-bounds lane indices; `neon` enabled by the gate above.
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    // UZP2: odd-indexed lanes of the concatenation a:b.
    // SAFETY: constant in-bounds lane indices; `neon` enabled by the gate above.
    unsafe {
        simd_shuffle!(
            a,
            b,
            [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31]
        )
    }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    // UZP2: odd-indexed lanes of the concatenation a:b.
    // SAFETY: constant in-bounds lane indices; `neon` enabled by the gate above.
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7]) }
}
#[doc = "Unzip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))]
pub fn vuzp2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    // UZP2: odd-indexed lanes of the concatenation a:b.
    // SAFETY: constant in-bounds lane indices; `neon` enabled by the gate above.
    unsafe { simd_shuffle!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]) }
}
#[doc = "Exclusive OR and rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vxarq_u64)"]
#[inline(always)]
#[target_feature(enable = "neon,sha3")]
#[cfg_attr(test, assert_instr(xar, IMM6 = 0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "stdarch_neon_sha3", since = "1.79.0")]
pub fn vxarq_u64<const IMM6: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // The rotate amount is a 6-bit immediate (0..=63); reject anything
    // larger at compile time before it reaches the intrinsic.
    static_assert_uimm_bits!(IMM6, 6);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            any(target_arch = "aarch64", target_arch = "arm64ec"),
            link_name = "llvm.aarch64.crypto.xar"
        )]
        fn _vxarq_u64(a: uint64x2_t, b: uint64x2_t, n: i64) -> uint64x2_t;
    }
    // SAFETY: `neon` and `sha3` are guaranteed by the #[target_feature] gate;
    // IMM6 was range-checked above.
    unsafe { _vxarq_u64(a, b, IMM6 as i64) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // ZIP1: interleave the low halves of a and b.
    // SAFETY: constant in-bounds lane indices; features enabled by the gate above.
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // ZIP1: interleave the low halves of a and b.
    // SAFETY: constant in-bounds lane indices; features enabled by the gate above.
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // ZIP1: interleave the low halves of a and b (lane 0 of each).
    // SAFETY: constant in-bounds lane indices; `neon` enabled by the gate above.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // ZIP1: interleave the low halves of a and b.
    // SAFETY: constant in-bounds lane indices; `neon` enabled by the gate above.
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // ZIP1: interleave the low halves of a and b (lane 0 of each).
    // SAFETY: constant in-bounds lane indices; `neon` enabled by the gate above.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    // ZIP1: interleave the low halves of a and b.
    // SAFETY: constant in-bounds lane indices; `neon` enabled by the gate above.
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    // ZIP1: interleave the low halves of a and b.
    // SAFETY: constant in-bounds lane indices; `neon` enabled by the gate above.
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23]
        )
    }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    // ZIP1: interleave the low halves of a and b.
    // SAFETY: constant in-bounds lane indices; `neon` enabled by the gate above.
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    // ZIP1: interleave the low halves of a and b.
    // SAFETY: constant in-bounds lane indices; `neon` enabled by the gate above.
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    // ZIP1: interleave the low halves of a and b (lane 0 of each).
    // SAFETY: constant in-bounds lane indices; `neon` enabled by the gate above.
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    // SAFETY: all shuffle indices are constants < 8 (2 * 4 lanes).
    // Interleaves the low halves: [a0, b0, a1, b1] (ZIP1).
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    // SAFETY: all shuffle indices are constants < 4 (2 * 2 lanes).
    // Takes lane 0 of each input: [a0, b0] (ZIP1).
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    // SAFETY: all shuffle indices are constants < 16 (2 * 8 lanes).
    // Interleaves the low halves: [a0, b0, a1, b1, a2, b2, a3, b3] (ZIP1).
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // SAFETY: all shuffle indices are constants < 32 (2 * 16 lanes).
    // Interleaves the low halves: [a0, b0, a1, b1, ..., a7, b7] (ZIP1).
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23]
        )
    }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    // SAFETY: all shuffle indices are constants < 8 (2 * 4 lanes).
    // Interleaves the low halves: [a0, b0, a1, b1] (ZIP1).
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    // SAFETY: all shuffle indices are constants < 16 (2 * 8 lanes).
    // Interleaves the low halves: [a0, b0, a1, b1, a2, b2, a3, b3] (ZIP1).
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    // SAFETY: all shuffle indices are constants < 4 (2 * 2 lanes).
    // Takes lane 0 of each input: [a0, b0] (ZIP1).
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    // SAFETY: all shuffle indices are constants < 8 (2 * 4 lanes).
    // Interleaves the low halves: [a0, b0, a1, b1] (ZIP1).
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // SAFETY: all shuffle indices are constants < 4 (2 * 2 lanes).
    // Takes lane 0 of each input: [a0, b0] (ZIP1).
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    // SAFETY: all shuffle indices are constants < 16 (2 * 8 lanes).
    // Interleaves the low halves: [a0, b0, a1, b1, a2, b2, a3, b3] (ZIP1).
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    // SAFETY: all shuffle indices are constants < 32 (2 * 16 lanes).
    // Interleaves the low halves: [a0, b0, a1, b1, ..., a7, b7] (ZIP1).
    unsafe {
        simd_shuffle!(
            a,
            b,
            [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23]
        )
    }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    // SAFETY: all shuffle indices are constants < 8 (2 * 4 lanes).
    // Interleaves the low halves: [a0, b0, a1, b1] (ZIP1).
    unsafe { simd_shuffle!(a, b, [0, 4, 1, 5]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    // SAFETY: all shuffle indices are constants < 16 (2 * 8 lanes).
    // Interleaves the low halves: [a0, b0, a1, b1, a2, b2, a3, b3] (ZIP1).
    unsafe { simd_shuffle!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))]
pub fn vzip1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    // SAFETY: all shuffle indices are constants < 4 (2 * 2 lanes).
    // Takes lane 0 of each input: [a0, b0] (ZIP1).
    unsafe { simd_shuffle!(a, b, [0, 2]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t {
    // SAFETY: all shuffle indices are constants < 8 (2 * 4 lanes).
    // Interleaves the high halves: [a2, b2, a3, b3] (ZIP2).
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_f16)"]
#[inline(always)]
#[target_feature(enable = "neon,fp16")]
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")]
#[cfg(not(target_arch = "arm64ec"))]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t {
    // SAFETY: all shuffle indices are constants < 16 (2 * 8 lanes).
    // Interleaves the high halves: [a4, b4, a5, b5, a6, b6, a7, b7] (ZIP2).
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    // SAFETY: all shuffle indices are constants < 4 (2 * 2 lanes).
    // Takes lane 1 of each input: [a1, b1] (ZIP2).
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_f32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    // SAFETY: all shuffle indices are constants < 8 (2 * 4 lanes).
    // Interleaves the high halves: [a2, b2, a3, b3] (ZIP2).
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_f64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
    // SAFETY: all shuffle indices are constants < 4 (2 * 2 lanes).
    // Takes lane 1 of each input: [a1, b1] (ZIP2).
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    // SAFETY: all shuffle indices are constants < 16 (2 * 8 lanes).
    // Interleaves the high halves: [a4, b4, a5, b5, a6, b6, a7, b7] (ZIP2).
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    // SAFETY: all shuffle indices are constants < 32 (2 * 16 lanes).
    // Interleaves the high halves: [a8, b8, a9, b9, ..., a15, b15] (ZIP2).
    unsafe {
        simd_shuffle!(
            a,
            b,
            [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31]
        )
    }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    // SAFETY: all shuffle indices are constants < 8 (2 * 4 lanes).
    // Interleaves the high halves: [a2, b2, a3, b3] (ZIP2).
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    // SAFETY: all shuffle indices are constants < 16 (2 * 8 lanes).
    // Interleaves the high halves: [a4, b4, a5, b5, a6, b6, a7, b7] (ZIP2).
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    // SAFETY: all shuffle indices are constants < 4 (2 * 2 lanes).
    // Takes lane 1 of each input: [a1, b1] (ZIP2).
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    // SAFETY: all shuffle indices are constants < 8 (2 * 4 lanes).
    // Interleaves the high halves: [a2, b2, a3, b3] (ZIP2).
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    // SAFETY: all shuffle indices are constants < 4 (2 * 2 lanes).
    // Takes lane 1 of each input: [a1, b1] (ZIP2).
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    // SAFETY: all shuffle indices are constants < 16 (2 * 8 lanes).
    // Interleaves the high halves: [a4, b4, a5, b5, a6, b6, a7, b7] (ZIP2).
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    // SAFETY: all shuffle indices are constants < 32 (2 * 16 lanes).
    // Interleaves the high halves: [a8, b8, a9, b9, ..., a15, b15] (ZIP2).
    unsafe {
        simd_shuffle!(
            a,
            b,
            [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31]
        )
    }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    // SAFETY: all shuffle indices are constants < 8 (2 * 4 lanes).
    // Interleaves the high halves: [a2, b2, a3, b3] (ZIP2).
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    // SAFETY: all shuffle indices are constants < 16 (2 * 8 lanes).
    // Interleaves the high halves: [a4, b4, a5, b5, a6, b6, a7, b7] (ZIP2).
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    // SAFETY: all shuffle indices are constants < 4 (2 * 2 lanes).
    // Takes lane 1 of each input: [a1, b1] (ZIP2).
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u32)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    // SAFETY: all shuffle indices are constants < 8 (2 * 4 lanes).
    // Interleaves the high halves: [a2, b2, a3, b3] (ZIP2).
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    // SAFETY: all shuffle indices are constants < 4 (2 * 2 lanes).
    // Takes lane 1 of each input: [a1, b1] (ZIP2).
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    // SAFETY: all shuffle indices are constants < 16 (2 * 8 lanes).
    // Interleaves the high halves: [a4, b4, a5, b5, a6, b6, a7, b7] (ZIP2).
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p8)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    // SAFETY: all shuffle indices are constants < 32 (2 * 16 lanes).
    // Interleaves the high halves: [a8, b8, a9, b9, ..., a15, b15] (ZIP2).
    unsafe {
        simd_shuffle!(
            a,
            b,
            [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31]
        )
    }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    // SAFETY: all shuffle indices are constants < 8 (2 * 4 lanes).
    // Interleaves the high halves: [a2, b2, a3, b3] (ZIP2).
    unsafe { simd_shuffle!(a, b, [2, 6, 3, 7]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p16)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    // SAFETY: all shuffle indices are constants < 16 (2 * 8 lanes).
    // Interleaves the high halves: [a4, b4, a5, b5, a6, b6, a7, b7] (ZIP2).
    unsafe { simd_shuffle!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]) }
}
#[doc = "Zip vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p64)"]
#[inline(always)]
#[target_feature(enable = "neon")]
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))]
pub fn vzip2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t {
    // SAFETY: all shuffle indices are constants < 4 (2 * 2 lanes).
    // Takes lane 1 of each input: [a1, b1] (ZIP2).
    unsafe { simd_shuffle!(a, b, [1, 3]) }
}