Diffstat (limited to 'gcc-4.9/gcc/config/aarch64')
-rw-r--r-- gcc-4.9/gcc/config/aarch64/aarch64-builtins.c | 154
-rw-r--r-- gcc-4.9/gcc/config/aarch64/aarch64-elf-raw.h | 8
-rw-r--r-- gcc-4.9/gcc/config/aarch64/aarch64-linux.h | 11
-rw-r--r-- gcc-4.9/gcc/config/aarch64/aarch64-protos.h | 1
-rw-r--r-- gcc-4.9/gcc/config/aarch64/aarch64-simd.md | 39
-rw-r--r-- gcc-4.9/gcc/config/aarch64/aarch64.c | 12
-rw-r--r-- gcc-4.9/gcc/config/aarch64/aarch64.md | 55
-rw-r--r-- gcc-4.9/gcc/config/aarch64/arm_neon.h | 494
8 files changed, 508 insertions, 266 deletions
diff --git a/gcc-4.9/gcc/config/aarch64/aarch64-builtins.c b/gcc-4.9/gcc/config/aarch64/aarch64-builtins.c
index 55cfe0ab2..a5af874bf 100644
--- a/gcc-4.9/gcc/config/aarch64/aarch64-builtins.c
+++ b/gcc-4.9/gcc/config/aarch64/aarch64-builtins.c
@@ -371,6 +371,12 @@ static aarch64_simd_builtin_datum aarch64_simd_builtin_data[] = {
enum aarch64_builtins
{
AARCH64_BUILTIN_MIN,
+
+ AARCH64_BUILTIN_GET_FPCR,
+ AARCH64_BUILTIN_SET_FPCR,
+ AARCH64_BUILTIN_GET_FPSR,
+ AARCH64_BUILTIN_SET_FPSR,
+
AARCH64_SIMD_BUILTIN_BASE,
#include "aarch64-simd-builtins.def"
AARCH64_SIMD_BUILTIN_MAX = AARCH64_SIMD_BUILTIN_BASE
@@ -752,6 +758,24 @@ aarch64_init_simd_builtins (void)
void
aarch64_init_builtins (void)
{
+ tree ftype_set_fpr
+ = build_function_type_list (void_type_node, unsigned_type_node, NULL);
+ tree ftype_get_fpr
+ = build_function_type_list (unsigned_type_node, NULL);
+
+ aarch64_builtin_decls[AARCH64_BUILTIN_GET_FPCR]
+ = add_builtin_function ("__builtin_aarch64_get_fpcr", ftype_get_fpr,
+ AARCH64_BUILTIN_GET_FPCR, BUILT_IN_MD, NULL, NULL_TREE);
+ aarch64_builtin_decls[AARCH64_BUILTIN_SET_FPCR]
+ = add_builtin_function ("__builtin_aarch64_set_fpcr", ftype_set_fpr,
+ AARCH64_BUILTIN_SET_FPCR, BUILT_IN_MD, NULL, NULL_TREE);
+ aarch64_builtin_decls[AARCH64_BUILTIN_GET_FPSR]
+ = add_builtin_function ("__builtin_aarch64_get_fpsr", ftype_get_fpr,
+ AARCH64_BUILTIN_GET_FPSR, BUILT_IN_MD, NULL, NULL_TREE);
+ aarch64_builtin_decls[AARCH64_BUILTIN_SET_FPSR]
+ = add_builtin_function ("__builtin_aarch64_set_fpsr", ftype_set_fpr,
+ AARCH64_BUILTIN_SET_FPSR, BUILT_IN_MD, NULL, NULL_TREE);
+
if (TARGET_SIMD)
aarch64_init_simd_builtins ();
}
@@ -964,6 +988,36 @@ aarch64_expand_builtin (tree exp,
{
tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
int fcode = DECL_FUNCTION_CODE (fndecl);
+ int icode;
+ rtx pat, op0;
+ tree arg0;
+
+ switch (fcode)
+ {
+ case AARCH64_BUILTIN_GET_FPCR:
+ case AARCH64_BUILTIN_SET_FPCR:
+ case AARCH64_BUILTIN_GET_FPSR:
+ case AARCH64_BUILTIN_SET_FPSR:
+ if ((fcode == AARCH64_BUILTIN_GET_FPCR)
+ || (fcode == AARCH64_BUILTIN_GET_FPSR))
+ {
+ icode = (fcode == AARCH64_BUILTIN_GET_FPSR) ?
+ CODE_FOR_get_fpsr : CODE_FOR_get_fpcr;
+ target = gen_reg_rtx (SImode);
+ pat = GEN_FCN (icode) (target);
+ }
+ else
+ {
+ target = NULL_RTX;
+ icode = (fcode == AARCH64_BUILTIN_SET_FPSR) ?
+ CODE_FOR_set_fpsr : CODE_FOR_set_fpcr;
+ arg0 = CALL_EXPR_ARG (exp, 0);
+ op0 = expand_normal (arg0);
+ pat = GEN_FCN (icode) (op0);
+ }
+ emit_insn (pat);
+ return target;
+ }
if (fcode >= AARCH64_SIMD_BUILTIN_BASE)
return aarch64_simd_expand_builtin (fcode, exp, target);
@@ -1196,6 +1250,106 @@ aarch64_gimple_fold_builtin (gimple_stmt_iterator *gsi)
return changed;
}
+void
+aarch64_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
+{
+ const unsigned AARCH64_FE_INVALID = 1;
+ const unsigned AARCH64_FE_DIVBYZERO = 2;
+ const unsigned AARCH64_FE_OVERFLOW = 4;
+ const unsigned AARCH64_FE_UNDERFLOW = 8;
+ const unsigned AARCH64_FE_INEXACT = 16;
+ const unsigned HOST_WIDE_INT AARCH64_FE_ALL_EXCEPT = (AARCH64_FE_INVALID
+ | AARCH64_FE_DIVBYZERO
+ | AARCH64_FE_OVERFLOW
+ | AARCH64_FE_UNDERFLOW
+ | AARCH64_FE_INEXACT);
+ const unsigned HOST_WIDE_INT AARCH64_FE_EXCEPT_SHIFT = 8;
+ tree fenv_cr, fenv_sr, get_fpcr, set_fpcr, mask_cr, mask_sr;
+ tree ld_fenv_cr, ld_fenv_sr, masked_fenv_cr, masked_fenv_sr, hold_fnclex_cr;
+ tree hold_fnclex_sr, new_fenv_var, reload_fenv, restore_fnenv, get_fpsr, set_fpsr;
+ tree update_call, atomic_feraiseexcept, hold_fnclex, masked_fenv, ld_fenv;
+
+ /* Generate the equivalent of:
+ unsigned int fenv_cr;
+ fenv_cr = __builtin_aarch64_get_fpcr ();
+
+ unsigned int fenv_sr;
+ fenv_sr = __builtin_aarch64_get_fpsr ();
+
+ Now set all exceptions to non-stop
+ unsigned int mask_cr
+ = ~(AARCH64_FE_ALL_EXCEPT << AARCH64_FE_EXCEPT_SHIFT);
+ unsigned int masked_cr;
+ masked_cr = fenv_cr & mask_cr;
+
+ And clear all exception flags
+ unsigned int mask_sr = ~AARCH64_FE_ALL_EXCEPT;
+ unsigned int masked_sr;
+ masked_sr = fenv_sr & mask_sr;
+
+ __builtin_aarch64_set_fpcr (masked_cr);
+ __builtin_aarch64_set_fpsr (masked_sr); */
+
+ fenv_cr = create_tmp_var (unsigned_type_node, NULL);
+ fenv_sr = create_tmp_var (unsigned_type_node, NULL);
+
+ get_fpcr = aarch64_builtin_decls[AARCH64_BUILTIN_GET_FPCR];
+ set_fpcr = aarch64_builtin_decls[AARCH64_BUILTIN_SET_FPCR];
+ get_fpsr = aarch64_builtin_decls[AARCH64_BUILTIN_GET_FPSR];
+ set_fpsr = aarch64_builtin_decls[AARCH64_BUILTIN_SET_FPSR];
+
+ mask_cr = build_int_cst (unsigned_type_node,
+ ~(AARCH64_FE_ALL_EXCEPT << AARCH64_FE_EXCEPT_SHIFT));
+ mask_sr = build_int_cst (unsigned_type_node,
+ ~(AARCH64_FE_ALL_EXCEPT));
+
+ ld_fenv_cr = build2 (MODIFY_EXPR, unsigned_type_node,
+ fenv_cr, build_call_expr (get_fpcr, 0));
+ ld_fenv_sr = build2 (MODIFY_EXPR, unsigned_type_node,
+ fenv_sr, build_call_expr (get_fpsr, 0));
+
+ masked_fenv_cr = build2 (BIT_AND_EXPR, unsigned_type_node, fenv_cr, mask_cr);
+ masked_fenv_sr = build2 (BIT_AND_EXPR, unsigned_type_node, fenv_sr, mask_sr);
+
+ hold_fnclex_cr = build_call_expr (set_fpcr, 1, masked_fenv_cr);
+ hold_fnclex_sr = build_call_expr (set_fpsr, 1, masked_fenv_sr);
+
+ hold_fnclex = build2 (COMPOUND_EXPR, void_type_node, hold_fnclex_cr,
+ hold_fnclex_sr);
+ masked_fenv = build2 (COMPOUND_EXPR, void_type_node, masked_fenv_cr,
+ masked_fenv_sr);
+ ld_fenv = build2 (COMPOUND_EXPR, void_type_node, ld_fenv_cr, ld_fenv_sr);
+
+ *hold = build2 (COMPOUND_EXPR, void_type_node,
+ build2 (COMPOUND_EXPR, void_type_node, masked_fenv, ld_fenv),
+ hold_fnclex);
+
+ /* Store the value of masked_fenv to clear the exceptions:
+ __builtin_aarch64_set_fpsr (masked_fenv_sr); */
+
+ *clear = build_call_expr (set_fpsr, 1, masked_fenv_sr);
+
+ /* Generate the equivalent of:
+ unsigned int new_fenv_var;
+ new_fenv_var = __builtin_aarch64_get_fpsr ();
+
+ __builtin_aarch64_set_fpsr (fenv_sr);
+
+ __atomic_feraiseexcept (new_fenv_var); */
+
+ new_fenv_var = create_tmp_var (unsigned_type_node, NULL);
+ reload_fenv = build2 (MODIFY_EXPR, unsigned_type_node,
+ new_fenv_var, build_call_expr (get_fpsr, 0));
+ restore_fnenv = build_call_expr (set_fpsr, 1, fenv_sr);
+ atomic_feraiseexcept = builtin_decl_implicit (BUILT_IN_ATOMIC_FERAISEEXCEPT);
+ update_call = build_call_expr (atomic_feraiseexcept, 1,
+ fold_convert (integer_type_node, new_fenv_var));
+ *update = build2 (COMPOUND_EXPR, void_type_node,
+ build2 (COMPOUND_EXPR, void_type_node,
+ reload_fenv, restore_fnenv), update_call);
+}
+
+
#undef AARCH64_CHECK_BUILTIN_MODE
#undef AARCH64_FIND_FRINT_VARIANT
#undef BUILTIN_DX
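
As a quick illustration of the new interface, here is a minimal usage sketch (the helper function is hypothetical; the builtin names and signatures come from the hunk above, and the masks mirror its AARCH64_FE_* constants):

  unsigned int
  disable_fp_traps_and_clear_flags (void)
  {
    unsigned int fpcr = __builtin_aarch64_get_fpcr ();
    unsigned int fpsr = __builtin_aarch64_get_fpsr ();

    /* FPCR trap-enable bits sit at bits 8..12 (AARCH64_FE_EXCEPT_SHIFT);
       the FPSR sticky exception flags sit at bits 0..4.  */
    __builtin_aarch64_set_fpcr (fpcr & ~(0x1fu << 8));
    __builtin_aarch64_set_fpsr (fpsr & ~0x1fu);
    return fpsr;  /* the caller can inspect or restore the old flags */
  }
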
diff --git a/gcc-4.9/gcc/config/aarch64/aarch64-elf-raw.h b/gcc-4.9/gcc/config/aarch64/aarch64-elf-raw.h
index eafdd551d..bb5c88d53 100644
--- a/gcc-4.9/gcc/config/aarch64/aarch64-elf-raw.h
+++ b/gcc-4.9/gcc/config/aarch64/aarch64-elf-raw.h
@@ -33,6 +33,14 @@
" %{mfix-cortex-a53-835769:--fix-cortex-a53-835769}"
#endif
+#ifdef TARGET_FIX_ERR_A53_835769_DEFAULT
+#define CA53_ERR_835769_SPEC \
+ " %{!mno-fix-cortex-a53-835769:--fix-cortex-a53-835769}"
+#else
+#define CA53_ERR_835769_SPEC \
+ " %{mfix-cortex-a53-835769:--fix-cortex-a53-835769}"
+#endif
+
#ifndef LINK_SPEC
#define LINK_SPEC "%{mbig-endian:-EB} %{mlittle-endian:-EL} -X \
-maarch64elf%{mabi=ilp32*:32}%{mbig-endian:b}" \
diff --git a/gcc-4.9/gcc/config/aarch64/aarch64-linux.h b/gcc-4.9/gcc/config/aarch64/aarch64-linux.h
index b77becd23..651abe3ce 100644
--- a/gcc-4.9/gcc/config/aarch64/aarch64-linux.h
+++ b/gcc-4.9/gcc/config/aarch64/aarch64-linux.h
@@ -50,7 +50,16 @@
#define LINUX_TARGET_LINK_SPEC LINUX_TARGET_LINK_SPEC0 CA53_ERR_835769_SPEC
-#define LINK_SPEC LINUX_TARGET_LINK_SPEC
+#ifdef TARGET_FIX_ERR_A53_835769_DEFAULT
+#define CA53_ERR_835769_SPEC \
+ " %{!mno-fix-cortex-a53-835769:--fix-cortex-a53-835769}"
+#else
+#define CA53_ERR_835769_SPEC \
+ " %{mfix-cortex-a53-835769:--fix-cortex-a53-835769}"
+#endif
+
+#define LINK_SPEC LINUX_TARGET_LINK_SPEC \
+ CA53_ERR_835769_SPEC
#define TARGET_OS_CPP_BUILTINS() \
do \
diff --git a/gcc-4.9/gcc/config/aarch64/aarch64-protos.h b/gcc-4.9/gcc/config/aarch64/aarch64-protos.h
index bef58bf71..8b0a70538 100644
--- a/gcc-4.9/gcc/config/aarch64/aarch64-protos.h
+++ b/gcc-4.9/gcc/config/aarch64/aarch64-protos.h
@@ -291,4 +291,5 @@ extern bool aarch64_madd_needs_nop (rtx);
extern void aarch64_final_prescan_insn (rtx);
extern bool
aarch64_expand_vec_perm_const (rtx target, rtx op0, rtx op1, rtx sel);
+void aarch64_atomic_assign_expand_fenv (tree *, tree *, tree *);
#endif /* GCC_AARCH64_PROTOS_H */
diff --git a/gcc-4.9/gcc/config/aarch64/aarch64-simd.md b/gcc-4.9/gcc/config/aarch64/aarch64-simd.md
index 851e77a02..7626ed31f 100644
--- a/gcc-4.9/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc-4.9/gcc/config/aarch64/aarch64-simd.md
@@ -934,6 +934,41 @@
[(set_attr "type" "neon_minmax<q>")]
)
+(define_expand "<su><maxmin>v2di3"
+ [(parallel [
+ (set (match_operand:V2DI 0 "register_operand" "")
+ (MAXMIN:V2DI (match_operand:V2DI 1 "register_operand" "")
+ (match_operand:V2DI 2 "register_operand" "")))
+ (clobber (reg:CC CC_REGNUM))])]
+ "TARGET_SIMD"
+{
+ enum rtx_code cmp_operator;
+ rtx cmp_fmt;
+
+ switch (<CODE>)
+ {
+ case UMIN:
+ cmp_operator = LTU;
+ break;
+ case SMIN:
+ cmp_operator = LT;
+ break;
+ case UMAX:
+ cmp_operator = GTU;
+ break;
+ case SMAX:
+ cmp_operator = GT;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+
+ cmp_fmt = gen_rtx_fmt_ee (cmp_operator, V2DImode, operands[1], operands[2]);
+ emit_insn (gen_aarch64_vcond_internalv2div2di (operands[0], operands[1],
+ operands[2], cmp_fmt, operands[1], operands[2]));
+ DONE;
+})
+
;; vec_concat gives a new vector with the low elements from operand 1, and
;; the high elements from operand 2. That is to say, given op1 = { a, b }
;; op2 = { c, d }, vec_concat (op1, op2) = { a, b, c, d }.
@@ -4565,8 +4600,8 @@
})
(define_insn "*aarch64_simd_ld1r<mode>"
- [(set (match_operand:VALLDI 0 "register_operand" "=w")
- (vec_duplicate:VALLDI
+ [(set (match_operand:VALL 0 "register_operand" "=w")
+ (vec_duplicate:VALL
(match_operand:<VEL> 1 "aarch64_simd_struct_operand" "Utv")))]
"TARGET_SIMD"
"ld1r\\t{%0.<Vtype>}, %1"
diff --git a/gcc-4.9/gcc/config/aarch64/aarch64.c b/gcc-4.9/gcc/config/aarch64/aarch64.c
index 2ff6c7cb8..029c54ca3 100644
--- a/gcc-4.9/gcc/config/aarch64/aarch64.c
+++ b/gcc-4.9/gcc/config/aarch64/aarch64.c
@@ -3874,7 +3874,7 @@ aarch64_print_operand_address (FILE *f, rtx x)
switch (GET_CODE (x))
{
case PRE_INC:
- asm_fprintf (f, "[%s,%d]!", reg_names [REGNO (addr.base)],
+ asm_fprintf (f, "[%s,%d]!", reg_names [REGNO (addr.base)],
GET_MODE_SIZE (aarch64_memory_reference_mode));
return;
case POST_INC:
@@ -5152,7 +5152,6 @@ aarch64_parse_cpu (void)
if (strlen (cpu->name) == len && strncmp (cpu->name, str, len) == 0)
{
selected_cpu = cpu;
- selected_tune = cpu;
aarch64_isa_flags = selected_cpu->flags;
if (ext != NULL)
@@ -5248,9 +5247,8 @@ aarch64_override_options (void)
gcc_assert (selected_cpu);
- /* The selected cpu may be an architecture, so lookup tuning by core ID. */
if (!selected_tune)
- selected_tune = &all_cores[selected_cpu->core];
+ selected_tune = selected_cpu;
aarch64_tune_flags = selected_tune->flags;
aarch64_tune = selected_tune->core;
@@ -7194,7 +7192,7 @@ aarch64_expand_vector_init (rtx target, rtx vals)
x = XVECEXP (vals, 0, 0);
if (!CONST_INT_P (x) && !CONST_DOUBLE_P (x))
n_var = 1, one_var = 0;
-
+
for (i = 1; i < n_elts; ++i)
{
x = XVECEXP (vals, 0, i);
@@ -8642,6 +8640,10 @@ aarch64_cannot_change_mode_class (enum machine_mode from,
#define TARGET_VECTORIZE_AUTOVECTORIZE_VECTOR_SIZES \
aarch64_autovectorize_vector_sizes
+#undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
+#define TARGET_ATOMIC_ASSIGN_EXPAND_FENV \
+ aarch64_atomic_assign_expand_fenv
+
/* Section anchor support. */
#undef TARGET_MIN_ANCHOR_OFFSET
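
For context, TARGET_ATOMIC_ASSIGN_EXPAND_FENV is the hook the C front end uses when expanding C11 atomic compound assignment on floating types: the operation becomes a compare-and-swap loop, and the hold/clear/update sequences built by aarch64_atomic_assign_expand_fenv keep exception flags raised in discarded iterations from leaking. A minimal sketch of code that exercises it:

  #include <stdatomic.h>

  _Atomic float acc;

  void
  add_sample (float x)
  {
    acc += x;  /* CAS loop; FP env held, flags replayed once on success */
  }
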
diff --git a/gcc-4.9/gcc/config/aarch64/aarch64.md b/gcc-4.9/gcc/config/aarch64/aarch64.md
index 319f80591..05f5e1b35 100644
--- a/gcc-4.9/gcc/config/aarch64/aarch64.md
+++ b/gcc-4.9/gcc/config/aarch64/aarch64.md
@@ -107,6 +107,10 @@
(define_c_enum "unspecv" [
UNSPECV_EH_RETURN ; Represent EH_RETURN
+ UNSPECV_GET_FPCR ; Represent fetch of FPCR content.
+ UNSPECV_SET_FPCR ; Represent assign of FPCR content.
+ UNSPECV_GET_FPSR ; Represent fetch of FPSR content.
+ UNSPECV_SET_FPSR ; Represent assign of FPSR content.
]
)
@@ -1102,7 +1106,7 @@
add\\t%x0, %x1, %x2
sub\\t%x0, %x1, #%n2
add\\t%d0, %d1, %d2"
- [(set_attr "type" "alu_imm,alu_reg,alu_imm,alu_reg")
+ [(set_attr "type" "alu_imm,alu_reg,alu_imm,neon_add")
(set_attr "simd" "*,*,*,yes")]
)
@@ -2782,7 +2786,7 @@
;; Logical right shift using SISD or Integer instruction
(define_insn "*aarch64_lshr_sisd_or_int_<mode>3"
- [(set (match_operand:GPI 0 "register_operand" "=w,w,r")
+ [(set (match_operand:GPI 0 "register_operand" "=w,&w,r")
(lshiftrt:GPI
(match_operand:GPI 1 "register_operand" "w,w,r")
(match_operand:QI 2 "aarch64_reg_or_shift_imm_<mode>" "Us<cmode>,w,rUs<cmode>")))]
@@ -2801,11 +2805,13 @@
(match_operand:DI 1 "aarch64_simd_register")
(match_operand:QI 2 "aarch64_simd_register")))]
"TARGET_SIMD && reload_completed"
- [(set (match_dup 2)
+ [(set (match_dup 3)
(unspec:QI [(match_dup 2)] UNSPEC_SISD_NEG))
(set (match_dup 0)
- (unspec:DI [(match_dup 1) (match_dup 2)] UNSPEC_SISD_USHL))]
- ""
+ (unspec:DI [(match_dup 1) (match_dup 3)] UNSPEC_SISD_USHL))]
+ {
+ operands[3] = gen_lowpart (QImode, operands[0]);
+ }
)
(define_split
@@ -2814,11 +2820,13 @@
(match_operand:SI 1 "aarch64_simd_register")
(match_operand:QI 2 "aarch64_simd_register")))]
"TARGET_SIMD && reload_completed"
- [(set (match_dup 2)
+ [(set (match_dup 3)
(unspec:QI [(match_dup 2)] UNSPEC_SISD_NEG))
(set (match_dup 0)
- (unspec:SI [(match_dup 1) (match_dup 2)] UNSPEC_USHL_2S))]
- ""
+ (unspec:SI [(match_dup 1) (match_dup 3)] UNSPEC_USHL_2S))]
+ {
+ operands[3] = gen_lowpart (QImode, operands[0]);
+ }
)
;; Arithmetic right shift using SISD or Integer instruction
@@ -3642,6 +3650,37 @@
DONE;
})
+;; Write Floating-point Control Register.
+(define_insn "set_fpcr"
+ [(unspec_volatile [(match_operand:SI 0 "register_operand" "r")] UNSPECV_SET_FPCR)]
+ ""
+ "msr\\tfpcr, %0\;isb"
+ [(set_attr "type" "mrs")])
+
+;; Read Floating-point Control Register.
+(define_insn "get_fpcr"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec_volatile:SI [(const_int 0)] UNSPECV_GET_FPCR))]
+ ""
+ "mrs\\t%0, fpcr"
+ [(set_attr "type" "mrs")])
+
+;; Write Floating-point Status Register.
+(define_insn "set_fpsr"
+ [(unspec_volatile [(match_operand:SI 0 "register_operand" "r")] UNSPECV_SET_FPSR)]
+ ""
+ "msr\\tfpsr, %0"
+ [(set_attr "type" "mrs")])
+
+;; Read Floating-point Status Register.
+(define_insn "get_fpsr"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec_volatile:SI [(const_int 0)] UNSPECV_GET_FPSR))]
+ ""
+ "mrs\\t%0, fpsr"
+ [(set_attr "type" "mrs")])
+
+
;; AdvSIMD Stuff
(include "aarch64-simd.md")
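
These four patterns emit plain MRS/MSR system-register moves; note that set_fpcr appends an isb so subsequent FP instructions observe the new control bits. Purely as an illustration (the builtins added earlier are the supported interface), a rough inline-asm equivalent:

  static inline unsigned int
  read_fpsr (void)
  {
    unsigned long v;
    __asm__ __volatile__ ("mrs %0, fpsr" : "=r" (v));
    return (unsigned int) v;
  }

  static inline void
  write_fpcr (unsigned int v)
  {
    /* isb matches the set_fpcr pattern's "msr\tfpcr, %0\;isb".  */
    __asm__ __volatile__ ("msr fpcr, %0\n\tisb" : : "r" ((unsigned long) v));
  }
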
diff --git a/gcc-4.9/gcc/config/aarch64/arm_neon.h b/gcc-4.9/gcc/config/aarch64/arm_neon.h
index c01669b2c..ae0ae9c1b 100644
--- a/gcc-4.9/gcc/config/aarch64/arm_neon.h
+++ b/gcc-4.9/gcc/config/aarch64/arm_neon.h
@@ -39,9 +39,6 @@ typedef __builtin_aarch64_simd_hi int16x4_t
typedef __builtin_aarch64_simd_si int32x2_t
__attribute__ ((__vector_size__ (8)));
typedef int64_t int64x1_t;
-typedef int32_t int32x1_t;
-typedef int16_t int16x1_t;
-typedef int8_t int8x1_t;
typedef double float64x1_t;
typedef __builtin_aarch64_simd_sf float32x2_t
__attribute__ ((__vector_size__ (8)));
@@ -56,9 +53,6 @@ typedef __builtin_aarch64_simd_uhi uint16x4_t
typedef __builtin_aarch64_simd_usi uint32x2_t
__attribute__ ((__vector_size__ (8)));
typedef uint64_t uint64x1_t;
-typedef uint32_t uint32x1_t;
-typedef uint16_t uint16x1_t;
-typedef uint8_t uint8x1_t;
typedef __builtin_aarch64_simd_qi int8x16_t
__attribute__ ((__vector_size__ (16)));
typedef __builtin_aarch64_simd_hi int16x8_t
@@ -8400,7 +8394,7 @@ vmul_n_u32 (uint32x2_t a, uint32_t b)
#define vmull_high_lane_s16(a, b, c) \
__extension__ \
({ \
- int16x8_t b_ = (b); \
+ int16x4_t b_ = (b); \
int16x8_t a_ = (a); \
int32x4_t result; \
__asm__ ("smull2 %0.4s, %1.8h, %2.h[%3]" \
@@ -8413,7 +8407,7 @@ vmul_n_u32 (uint32x2_t a, uint32_t b)
#define vmull_high_lane_s32(a, b, c) \
__extension__ \
({ \
- int32x4_t b_ = (b); \
+ int32x2_t b_ = (b); \
int32x4_t a_ = (a); \
int64x2_t result; \
__asm__ ("smull2 %0.2d, %1.4s, %2.s[%3]" \
@@ -8426,7 +8420,7 @@ vmul_n_u32 (uint32x2_t a, uint32_t b)
#define vmull_high_lane_u16(a, b, c) \
__extension__ \
({ \
- uint16x8_t b_ = (b); \
+ uint16x4_t b_ = (b); \
uint16x8_t a_ = (a); \
uint32x4_t result; \
__asm__ ("umull2 %0.4s, %1.8h, %2.h[%3]" \
@@ -8439,7 +8433,7 @@ vmul_n_u32 (uint32x2_t a, uint32_t b)
#define vmull_high_lane_u32(a, b, c) \
__extension__ \
({ \
- uint32x4_t b_ = (b); \
+ uint32x2_t b_ = (b); \
uint32x4_t a_ = (a); \
uint64x2_t result; \
__asm__ ("umull2 %0.2d, %1.4s, %2.s[%3]" \
@@ -20925,42 +20919,42 @@ vqabsq_s64 (int64x2_t __a)
return (int64x2_t) __builtin_aarch64_sqabsv2di (__a);
}
-__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
-vqabsb_s8 (int8x1_t __a)
+__extension__ static __inline int8_t __attribute__ ((__always_inline__))
+vqabsb_s8 (int8_t __a)
{
- return (int8x1_t) __builtin_aarch64_sqabsqi (__a);
+ return (int8_t) __builtin_aarch64_sqabsqi (__a);
}
-__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
-vqabsh_s16 (int16x1_t __a)
+__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+vqabsh_s16 (int16_t __a)
{
- return (int16x1_t) __builtin_aarch64_sqabshi (__a);
+ return (int16_t) __builtin_aarch64_sqabshi (__a);
}
-__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
-vqabss_s32 (int32x1_t __a)
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vqabss_s32 (int32_t __a)
{
- return (int32x1_t) __builtin_aarch64_sqabssi (__a);
+ return (int32_t) __builtin_aarch64_sqabssi (__a);
}
/* vqadd */
-__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
-vqaddb_s8 (int8x1_t __a, int8x1_t __b)
+__extension__ static __inline int8_t __attribute__ ((__always_inline__))
+vqaddb_s8 (int8_t __a, int8_t __b)
{
- return (int8x1_t) __builtin_aarch64_sqaddqi (__a, __b);
+ return (int8_t) __builtin_aarch64_sqaddqi (__a, __b);
}
-__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
-vqaddh_s16 (int16x1_t __a, int16x1_t __b)
+__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+vqaddh_s16 (int16_t __a, int16_t __b)
{
- return (int16x1_t) __builtin_aarch64_sqaddhi (__a, __b);
+ return (int16_t) __builtin_aarch64_sqaddhi (__a, __b);
}
-__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
-vqadds_s32 (int32x1_t __a, int32x1_t __b)
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vqadds_s32 (int32_t __a, int32_t __b)
{
- return (int32x1_t) __builtin_aarch64_sqaddsi (__a, __b);
+ return (int32_t) __builtin_aarch64_sqaddsi (__a, __b);
}
__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
@@ -20969,22 +20963,22 @@ vqaddd_s64 (int64x1_t __a, int64x1_t __b)
return (int64x1_t) __builtin_aarch64_sqadddi (__a, __b);
}
-__extension__ static __inline uint8x1_t __attribute__ ((__always_inline__))
-vqaddb_u8 (uint8x1_t __a, uint8x1_t __b)
+__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
+vqaddb_u8 (uint8_t __a, uint8_t __b)
{
- return (uint8x1_t) __builtin_aarch64_uqaddqi (__a, __b);
+ return (uint8_t) __builtin_aarch64_uqaddqi (__a, __b);
}
-__extension__ static __inline uint16x1_t __attribute__ ((__always_inline__))
-vqaddh_u16 (uint16x1_t __a, uint16x1_t __b)
+__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+vqaddh_u16 (uint16_t __a, uint16_t __b)
{
- return (uint16x1_t) __builtin_aarch64_uqaddhi (__a, __b);
+ return (uint16_t) __builtin_aarch64_uqaddhi (__a, __b);
}
-__extension__ static __inline uint32x1_t __attribute__ ((__always_inline__))
-vqadds_u32 (uint32x1_t __a, uint32x1_t __b)
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vqadds_u32 (uint32_t __a, uint32_t __b)
{
- return (uint32x1_t) __builtin_aarch64_uqaddsi (__a, __b);
+ return (uint32_t) __builtin_aarch64_uqaddsi (__a, __b);
}
__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
@@ -21095,26 +21089,26 @@ vqdmlal_n_s32 (int64x2_t __a, int32x2_t __b, int32_t __c)
return __builtin_aarch64_sqdmlal_nv2si (__a, __b, __c);
}
-__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
-vqdmlalh_s16 (int32x1_t __a, int16x1_t __b, int16x1_t __c)
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vqdmlalh_s16 (int32_t __a, int16_t __b, int16_t __c)
{
return __builtin_aarch64_sqdmlalhi (__a, __b, __c);
}
-__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
-vqdmlalh_lane_s16 (int32x1_t __a, int16x1_t __b, int16x4_t __c, const int __d)
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vqdmlalh_lane_s16 (int32_t __a, int16_t __b, int16x4_t __c, const int __d)
{
return __builtin_aarch64_sqdmlal_lanehi (__a, __b, __c, __d);
}
__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vqdmlals_s32 (int64x1_t __a, int32x1_t __b, int32x1_t __c)
+vqdmlals_s32 (int64x1_t __a, int32_t __b, int32_t __c)
{
return __builtin_aarch64_sqdmlalsi (__a, __b, __c);
}
__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vqdmlals_lane_s32 (int64x1_t __a, int32x1_t __b, int32x2_t __c, const int __d)
+vqdmlals_lane_s32 (int64x1_t __a, int32_t __b, int32x2_t __c, const int __d)
{
return __builtin_aarch64_sqdmlal_lanesi (__a, __b, __c, __d);
}
@@ -21221,26 +21215,26 @@ vqdmlsl_n_s32 (int64x2_t __a, int32x2_t __b, int32_t __c)
return __builtin_aarch64_sqdmlsl_nv2si (__a, __b, __c);
}
-__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
-vqdmlslh_s16 (int32x1_t __a, int16x1_t __b, int16x1_t __c)
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vqdmlslh_s16 (int32_t __a, int16_t __b, int16_t __c)
{
return __builtin_aarch64_sqdmlslhi (__a, __b, __c);
}
-__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
-vqdmlslh_lane_s16 (int32x1_t __a, int16x1_t __b, int16x4_t __c, const int __d)
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vqdmlslh_lane_s16 (int32_t __a, int16_t __b, int16x4_t __c, const int __d)
{
return __builtin_aarch64_sqdmlsl_lanehi (__a, __b, __c, __d);
}
__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vqdmlsls_s32 (int64x1_t __a, int32x1_t __b, int32x1_t __c)
+vqdmlsls_s32 (int64x1_t __a, int32_t __b, int32_t __c)
{
return __builtin_aarch64_sqdmlslsi (__a, __b, __c);
}
__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vqdmlsls_lane_s32 (int64x1_t __a, int32x1_t __b, int32x2_t __c, const int __d)
+vqdmlsls_lane_s32 (int64x1_t __a, int32_t __b, int32x2_t __c, const int __d)
{
return __builtin_aarch64_sqdmlsl_lanesi (__a, __b, __c, __d);
}
@@ -21271,26 +21265,26 @@ vqdmulhq_lane_s32 (int32x4_t __a, int32x2_t __b, const int __c)
return __builtin_aarch64_sqdmulh_lanev4si (__a, __b, __c);
}
-__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
-vqdmulhh_s16 (int16x1_t __a, int16x1_t __b)
+__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+vqdmulhh_s16 (int16_t __a, int16_t __b)
{
- return (int16x1_t) __builtin_aarch64_sqdmulhhi (__a, __b);
+ return (int16_t) __builtin_aarch64_sqdmulhhi (__a, __b);
}
-__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
-vqdmulhh_lane_s16 (int16x1_t __a, int16x4_t __b, const int __c)
+__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+vqdmulhh_lane_s16 (int16_t __a, int16x4_t __b, const int __c)
{
return __builtin_aarch64_sqdmulh_lanehi (__a, __b, __c);
}
-__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
-vqdmulhs_s32 (int32x1_t __a, int32x1_t __b)
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vqdmulhs_s32 (int32_t __a, int32_t __b)
{
- return (int32x1_t) __builtin_aarch64_sqdmulhsi (__a, __b);
+ return (int32_t) __builtin_aarch64_sqdmulhsi (__a, __b);
}
-__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
-vqdmulhs_lane_s32 (int32x1_t __a, int32x2_t __b, const int __c)
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vqdmulhs_lane_s32 (int32_t __a, int32x2_t __b, const int __c)
{
return __builtin_aarch64_sqdmulh_lanesi (__a, __b, __c);
}
@@ -21393,26 +21387,26 @@ vqdmull_n_s32 (int32x2_t __a, int32_t __b)
return __builtin_aarch64_sqdmull_nv2si (__a, __b);
}
-__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
-vqdmullh_s16 (int16x1_t __a, int16x1_t __b)
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vqdmullh_s16 (int16_t __a, int16_t __b)
{
- return (int32x1_t) __builtin_aarch64_sqdmullhi (__a, __b);
+ return (int32_t) __builtin_aarch64_sqdmullhi (__a, __b);
}
-__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
-vqdmullh_lane_s16 (int16x1_t __a, int16x4_t __b, const int __c)
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vqdmullh_lane_s16 (int16_t __a, int16x4_t __b, const int __c)
{
return __builtin_aarch64_sqdmull_lanehi (__a, __b, __c);
}
__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vqdmulls_s32 (int32x1_t __a, int32x1_t __b)
+vqdmulls_s32 (int32_t __a, int32_t __b)
{
return (int64x1_t) __builtin_aarch64_sqdmullsi (__a, __b);
}
__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vqdmulls_lane_s32 (int32x1_t __a, int32x2_t __b, const int __c)
+vqdmulls_lane_s32 (int32_t __a, int32x2_t __b, const int __c)
{
return __builtin_aarch64_sqdmull_lanesi (__a, __b, __c);
}
@@ -21455,40 +21449,40 @@ vqmovn_u64 (uint64x2_t __a)
return (uint32x2_t) __builtin_aarch64_uqmovnv2di ((int64x2_t) __a);
}
-__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
-vqmovnh_s16 (int16x1_t __a)
+__extension__ static __inline int8_t __attribute__ ((__always_inline__))
+vqmovnh_s16 (int16_t __a)
{
- return (int8x1_t) __builtin_aarch64_sqmovnhi (__a);
+ return (int8_t) __builtin_aarch64_sqmovnhi (__a);
}
-__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
-vqmovns_s32 (int32x1_t __a)
+__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+vqmovns_s32 (int32_t __a)
{
- return (int16x1_t) __builtin_aarch64_sqmovnsi (__a);
+ return (int16_t) __builtin_aarch64_sqmovnsi (__a);
}
-__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
vqmovnd_s64 (int64x1_t __a)
{
- return (int32x1_t) __builtin_aarch64_sqmovndi (__a);
+ return (int32_t) __builtin_aarch64_sqmovndi (__a);
}
-__extension__ static __inline uint8x1_t __attribute__ ((__always_inline__))
-vqmovnh_u16 (uint16x1_t __a)
+__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
+vqmovnh_u16 (uint16_t __a)
{
- return (uint8x1_t) __builtin_aarch64_uqmovnhi (__a);
+ return (uint8_t) __builtin_aarch64_uqmovnhi (__a);
}
-__extension__ static __inline uint16x1_t __attribute__ ((__always_inline__))
-vqmovns_u32 (uint32x1_t __a)
+__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+vqmovns_u32 (uint32_t __a)
{
- return (uint16x1_t) __builtin_aarch64_uqmovnsi (__a);
+ return (uint16_t) __builtin_aarch64_uqmovnsi (__a);
}
-__extension__ static __inline uint32x1_t __attribute__ ((__always_inline__))
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
vqmovnd_u64 (uint64x1_t __a)
{
- return (uint32x1_t) __builtin_aarch64_uqmovndi (__a);
+ return (uint32_t) __builtin_aarch64_uqmovndi (__a);
}
/* vqmovun */
@@ -21511,22 +21505,22 @@ vqmovun_s64 (int64x2_t __a)
return (uint32x2_t) __builtin_aarch64_sqmovunv2di (__a);
}
-__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
-vqmovunh_s16 (int16x1_t __a)
+__extension__ static __inline int8_t __attribute__ ((__always_inline__))
+vqmovunh_s16 (int16_t __a)
{
- return (int8x1_t) __builtin_aarch64_sqmovunhi (__a);
+ return (int8_t) __builtin_aarch64_sqmovunhi (__a);
}
-__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
-vqmovuns_s32 (int32x1_t __a)
+__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+vqmovuns_s32 (int32_t __a)
{
- return (int16x1_t) __builtin_aarch64_sqmovunsi (__a);
+ return (int16_t) __builtin_aarch64_sqmovunsi (__a);
}
-__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
vqmovund_s64 (int64x1_t __a)
{
- return (int32x1_t) __builtin_aarch64_sqmovundi (__a);
+ return (int32_t) __builtin_aarch64_sqmovundi (__a);
}
/* vqneg */
@@ -21537,22 +21531,22 @@ vqnegq_s64 (int64x2_t __a)
return (int64x2_t) __builtin_aarch64_sqnegv2di (__a);
}
-__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
-vqnegb_s8 (int8x1_t __a)
+__extension__ static __inline int8_t __attribute__ ((__always_inline__))
+vqnegb_s8 (int8_t __a)
{
- return (int8x1_t) __builtin_aarch64_sqnegqi (__a);
+ return (int8_t) __builtin_aarch64_sqnegqi (__a);
}
-__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
-vqnegh_s16 (int16x1_t __a)
+__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+vqnegh_s16 (int16_t __a)
{
- return (int16x1_t) __builtin_aarch64_sqneghi (__a);
+ return (int16_t) __builtin_aarch64_sqneghi (__a);
}
-__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
-vqnegs_s32 (int32x1_t __a)
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vqnegs_s32 (int32_t __a)
{
- return (int32x1_t) __builtin_aarch64_sqnegsi (__a);
+ return (int32_t) __builtin_aarch64_sqnegsi (__a);
}
/* vqrdmulh */
@@ -21581,26 +21575,26 @@ vqrdmulhq_lane_s32 (int32x4_t __a, int32x2_t __b, const int __c)
return __builtin_aarch64_sqrdmulh_lanev4si (__a, __b, __c);
}
-__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
-vqrdmulhh_s16 (int16x1_t __a, int16x1_t __b)
+__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+vqrdmulhh_s16 (int16_t __a, int16_t __b)
{
- return (int16x1_t) __builtin_aarch64_sqrdmulhhi (__a, __b);
+ return (int16_t) __builtin_aarch64_sqrdmulhhi (__a, __b);
}
-__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
-vqrdmulhh_lane_s16 (int16x1_t __a, int16x4_t __b, const int __c)
+__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+vqrdmulhh_lane_s16 (int16_t __a, int16x4_t __b, const int __c)
{
return __builtin_aarch64_sqrdmulh_lanehi (__a, __b, __c);
}
-__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
-vqrdmulhs_s32 (int32x1_t __a, int32x1_t __b)
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vqrdmulhs_s32 (int32_t __a, int32_t __b)
{
- return (int32x1_t) __builtin_aarch64_sqrdmulhsi (__a, __b);
+ return (int32_t) __builtin_aarch64_sqrdmulhsi (__a, __b);
}
-__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
-vqrdmulhs_lane_s32 (int32x1_t __a, int32x2_t __b, const int __c)
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vqrdmulhs_lane_s32 (int32_t __a, int32x2_t __b, const int __c)
{
return __builtin_aarch64_sqrdmulh_lanesi (__a, __b, __c);
}
@@ -21703,20 +21697,20 @@ vqrshlq_u64 (uint64x2_t __a, int64x2_t __b)
return (uint64x2_t) __builtin_aarch64_uqrshlv2di ((int64x2_t) __a, __b);
}
-__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
-vqrshlb_s8 (int8x1_t __a, int8x1_t __b)
+__extension__ static __inline int8_t __attribute__ ((__always_inline__))
+vqrshlb_s8 (int8_t __a, int8_t __b)
{
return __builtin_aarch64_sqrshlqi (__a, __b);
}
-__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
-vqrshlh_s16 (int16x1_t __a, int16x1_t __b)
+__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+vqrshlh_s16 (int16_t __a, int16_t __b)
{
return __builtin_aarch64_sqrshlhi (__a, __b);
}
-__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
-vqrshls_s32 (int32x1_t __a, int32x1_t __b)
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vqrshls_s32 (int32_t __a, int32_t __b)
{
return __builtin_aarch64_sqrshlsi (__a, __b);
}
@@ -21727,22 +21721,22 @@ vqrshld_s64 (int64x1_t __a, int64x1_t __b)
return __builtin_aarch64_sqrshldi (__a, __b);
}
-__extension__ static __inline uint8x1_t __attribute__ ((__always_inline__))
-vqrshlb_u8 (uint8x1_t __a, uint8x1_t __b)
+__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
+vqrshlb_u8 (uint8_t __a, uint8_t __b)
{
- return (uint8x1_t) __builtin_aarch64_uqrshlqi (__a, __b);
+ return (uint8_t) __builtin_aarch64_uqrshlqi (__a, __b);
}
-__extension__ static __inline uint16x1_t __attribute__ ((__always_inline__))
-vqrshlh_u16 (uint16x1_t __a, uint16x1_t __b)
+__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+vqrshlh_u16 (uint16_t __a, uint16_t __b)
{
- return (uint16x1_t) __builtin_aarch64_uqrshlhi (__a, __b);
+ return (uint16_t) __builtin_aarch64_uqrshlhi (__a, __b);
}
-__extension__ static __inline uint32x1_t __attribute__ ((__always_inline__))
-vqrshls_u32 (uint32x1_t __a, uint32x1_t __b)
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vqrshls_u32 (uint32_t __a, uint32_t __b)
{
- return (uint32x1_t) __builtin_aarch64_uqrshlsi (__a, __b);
+ return (uint32_t) __builtin_aarch64_uqrshlsi (__a, __b);
}
__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
@@ -21789,40 +21783,40 @@ vqrshrn_n_u64 (uint64x2_t __a, const int __b)
return (uint32x2_t) __builtin_aarch64_uqrshrn_nv2di ((int64x2_t) __a, __b);
}
-__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
-vqrshrnh_n_s16 (int16x1_t __a, const int __b)
+__extension__ static __inline int8_t __attribute__ ((__always_inline__))
+vqrshrnh_n_s16 (int16_t __a, const int __b)
{
- return (int8x1_t) __builtin_aarch64_sqrshrn_nhi (__a, __b);
+ return (int8_t) __builtin_aarch64_sqrshrn_nhi (__a, __b);
}
-__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
-vqrshrns_n_s32 (int32x1_t __a, const int __b)
+__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+vqrshrns_n_s32 (int32_t __a, const int __b)
{
- return (int16x1_t) __builtin_aarch64_sqrshrn_nsi (__a, __b);
+ return (int16_t) __builtin_aarch64_sqrshrn_nsi (__a, __b);
}
-__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
vqrshrnd_n_s64 (int64x1_t __a, const int __b)
{
- return (int32x1_t) __builtin_aarch64_sqrshrn_ndi (__a, __b);
+ return (int32_t) __builtin_aarch64_sqrshrn_ndi (__a, __b);
}
-__extension__ static __inline uint8x1_t __attribute__ ((__always_inline__))
-vqrshrnh_n_u16 (uint16x1_t __a, const int __b)
+__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
+vqrshrnh_n_u16 (uint16_t __a, const int __b)
{
- return (uint8x1_t) __builtin_aarch64_uqrshrn_nhi (__a, __b);
+ return (uint8_t) __builtin_aarch64_uqrshrn_nhi (__a, __b);
}
-__extension__ static __inline uint16x1_t __attribute__ ((__always_inline__))
-vqrshrns_n_u32 (uint32x1_t __a, const int __b)
+__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+vqrshrns_n_u32 (uint32_t __a, const int __b)
{
- return (uint16x1_t) __builtin_aarch64_uqrshrn_nsi (__a, __b);
+ return (uint16_t) __builtin_aarch64_uqrshrn_nsi (__a, __b);
}
-__extension__ static __inline uint32x1_t __attribute__ ((__always_inline__))
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
vqrshrnd_n_u64 (uint64x1_t __a, const int __b)
{
- return (uint32x1_t) __builtin_aarch64_uqrshrn_ndi (__a, __b);
+ return (uint32_t) __builtin_aarch64_uqrshrn_ndi (__a, __b);
}
/* vqrshrun */
@@ -21845,22 +21839,22 @@ vqrshrun_n_s64 (int64x2_t __a, const int __b)
return (uint32x2_t) __builtin_aarch64_sqrshrun_nv2di (__a, __b);
}
-__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
-vqrshrunh_n_s16 (int16x1_t __a, const int __b)
+__extension__ static __inline int8_t __attribute__ ((__always_inline__))
+vqrshrunh_n_s16 (int16_t __a, const int __b)
{
- return (int8x1_t) __builtin_aarch64_sqrshrun_nhi (__a, __b);
+ return (int8_t) __builtin_aarch64_sqrshrun_nhi (__a, __b);
}
-__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
-vqrshruns_n_s32 (int32x1_t __a, const int __b)
+__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+vqrshruns_n_s32 (int32_t __a, const int __b)
{
- return (int16x1_t) __builtin_aarch64_sqrshrun_nsi (__a, __b);
+ return (int16_t) __builtin_aarch64_sqrshrun_nsi (__a, __b);
}
-__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
vqrshrund_n_s64 (int64x1_t __a, const int __b)
{
- return (int32x1_t) __builtin_aarch64_sqrshrun_ndi (__a, __b);
+ return (int32_t) __builtin_aarch64_sqrshrun_ndi (__a, __b);
}
/* vqshl */
@@ -21961,20 +21955,20 @@ vqshlq_u64 (uint64x2_t __a, int64x2_t __b)
return (uint64x2_t) __builtin_aarch64_uqshlv2di ((int64x2_t) __a, __b);
}
-__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
-vqshlb_s8 (int8x1_t __a, int8x1_t __b)
+__extension__ static __inline int8_t __attribute__ ((__always_inline__))
+vqshlb_s8 (int8_t __a, int8_t __b)
{
return __builtin_aarch64_sqshlqi (__a, __b);
}
-__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
-vqshlh_s16 (int16x1_t __a, int16x1_t __b)
+__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+vqshlh_s16 (int16_t __a, int16_t __b)
{
return __builtin_aarch64_sqshlhi (__a, __b);
}
-__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
-vqshls_s32 (int32x1_t __a, int32x1_t __b)
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vqshls_s32 (int32_t __a, int32_t __b)
{
return __builtin_aarch64_sqshlsi (__a, __b);
}
@@ -21985,22 +21979,22 @@ vqshld_s64 (int64x1_t __a, int64x1_t __b)
return __builtin_aarch64_sqshldi (__a, __b);
}
-__extension__ static __inline uint8x1_t __attribute__ ((__always_inline__))
-vqshlb_u8 (uint8x1_t __a, uint8x1_t __b)
+__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
+vqshlb_u8 (uint8_t __a, uint8_t __b)
{
- return (uint8x1_t) __builtin_aarch64_uqshlqi (__a, __b);
+ return (uint8_t) __builtin_aarch64_uqshlqi (__a, __b);
}
-__extension__ static __inline uint16x1_t __attribute__ ((__always_inline__))
-vqshlh_u16 (uint16x1_t __a, uint16x1_t __b)
+__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+vqshlh_u16 (uint16_t __a, uint16_t __b)
{
- return (uint16x1_t) __builtin_aarch64_uqshlhi (__a, __b);
+ return (uint16_t) __builtin_aarch64_uqshlhi (__a, __b);
}
-__extension__ static __inline uint32x1_t __attribute__ ((__always_inline__))
-vqshls_u32 (uint32x1_t __a, uint32x1_t __b)
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vqshls_u32 (uint32_t __a, uint32_t __b)
{
- return (uint32x1_t) __builtin_aarch64_uqshlsi (__a, __b);
+ return (uint32_t) __builtin_aarch64_uqshlsi (__a, __b);
}
__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
@@ -22105,22 +22099,22 @@ vqshlq_n_u64 (uint64x2_t __a, const int __b)
return (uint64x2_t) __builtin_aarch64_uqshl_nv2di ((int64x2_t) __a, __b);
}
-__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
-vqshlb_n_s8 (int8x1_t __a, const int __b)
+__extension__ static __inline int8_t __attribute__ ((__always_inline__))
+vqshlb_n_s8 (int8_t __a, const int __b)
{
- return (int8x1_t) __builtin_aarch64_sqshl_nqi (__a, __b);
+ return (int8_t) __builtin_aarch64_sqshl_nqi (__a, __b);
}
-__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
-vqshlh_n_s16 (int16x1_t __a, const int __b)
+__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+vqshlh_n_s16 (int16_t __a, const int __b)
{
- return (int16x1_t) __builtin_aarch64_sqshl_nhi (__a, __b);
+ return (int16_t) __builtin_aarch64_sqshl_nhi (__a, __b);
}
-__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
-vqshls_n_s32 (int32x1_t __a, const int __b)
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vqshls_n_s32 (int32_t __a, const int __b)
{
- return (int32x1_t) __builtin_aarch64_sqshl_nsi (__a, __b);
+ return (int32_t) __builtin_aarch64_sqshl_nsi (__a, __b);
}
__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
@@ -22129,22 +22123,22 @@ vqshld_n_s64 (int64x1_t __a, const int __b)
return (int64x1_t) __builtin_aarch64_sqshl_ndi (__a, __b);
}
-__extension__ static __inline uint8x1_t __attribute__ ((__always_inline__))
-vqshlb_n_u8 (uint8x1_t __a, const int __b)
+__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
+vqshlb_n_u8 (uint8_t __a, const int __b)
{
- return (uint8x1_t) __builtin_aarch64_uqshl_nqi (__a, __b);
+ return (uint8_t) __builtin_aarch64_uqshl_nqi (__a, __b);
}
-__extension__ static __inline uint16x1_t __attribute__ ((__always_inline__))
-vqshlh_n_u16 (uint16x1_t __a, const int __b)
+__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+vqshlh_n_u16 (uint16_t __a, const int __b)
{
- return (uint16x1_t) __builtin_aarch64_uqshl_nhi (__a, __b);
+ return (uint16_t) __builtin_aarch64_uqshl_nhi (__a, __b);
}
-__extension__ static __inline uint32x1_t __attribute__ ((__always_inline__))
-vqshls_n_u32 (uint32x1_t __a, const int __b)
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vqshls_n_u32 (uint32_t __a, const int __b)
{
- return (uint32x1_t) __builtin_aarch64_uqshl_nsi (__a, __b);
+ return (uint32_t) __builtin_aarch64_uqshl_nsi (__a, __b);
}
__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
@@ -22203,22 +22197,22 @@ vqshluq_n_s64 (int64x2_t __a, const int __b)
return (uint64x2_t) __builtin_aarch64_sqshlu_nv2di (__a, __b);
}
-__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
-vqshlub_n_s8 (int8x1_t __a, const int __b)
+__extension__ static __inline int8_t __attribute__ ((__always_inline__))
+vqshlub_n_s8 (int8_t __a, const int __b)
{
- return (int8x1_t) __builtin_aarch64_sqshlu_nqi (__a, __b);
+ return (int8_t) __builtin_aarch64_sqshlu_nqi (__a, __b);
}
-__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
-vqshluh_n_s16 (int16x1_t __a, const int __b)
+__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+vqshluh_n_s16 (int16_t __a, const int __b)
{
- return (int16x1_t) __builtin_aarch64_sqshlu_nhi (__a, __b);
+ return (int16_t) __builtin_aarch64_sqshlu_nhi (__a, __b);
}
-__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
-vqshlus_n_s32 (int32x1_t __a, const int __b)
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vqshlus_n_s32 (int32_t __a, const int __b)
{
- return (int32x1_t) __builtin_aarch64_sqshlu_nsi (__a, __b);
+ return (int32_t) __builtin_aarch64_sqshlu_nsi (__a, __b);
}
__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
@@ -22265,40 +22259,40 @@ vqshrn_n_u64 (uint64x2_t __a, const int __b)
return (uint32x2_t) __builtin_aarch64_uqshrn_nv2di ((int64x2_t) __a, __b);
}
-__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
-vqshrnh_n_s16 (int16x1_t __a, const int __b)
+__extension__ static __inline int8_t __attribute__ ((__always_inline__))
+vqshrnh_n_s16 (int16_t __a, const int __b)
{
- return (int8x1_t) __builtin_aarch64_sqshrn_nhi (__a, __b);
+ return (int8_t) __builtin_aarch64_sqshrn_nhi (__a, __b);
}
-__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
-vqshrns_n_s32 (int32x1_t __a, const int __b)
+__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+vqshrns_n_s32 (int32_t __a, const int __b)
{
- return (int16x1_t) __builtin_aarch64_sqshrn_nsi (__a, __b);
+ return (int16_t) __builtin_aarch64_sqshrn_nsi (__a, __b);
}
-__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
vqshrnd_n_s64 (int64x1_t __a, const int __b)
{
- return (int32x1_t) __builtin_aarch64_sqshrn_ndi (__a, __b);
+ return (int32_t) __builtin_aarch64_sqshrn_ndi (__a, __b);
}
-__extension__ static __inline uint8x1_t __attribute__ ((__always_inline__))
-vqshrnh_n_u16 (uint16x1_t __a, const int __b)
+__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
+vqshrnh_n_u16 (uint16_t __a, const int __b)
{
- return (uint8x1_t) __builtin_aarch64_uqshrn_nhi (__a, __b);
+ return (uint8_t) __builtin_aarch64_uqshrn_nhi (__a, __b);
}
-__extension__ static __inline uint16x1_t __attribute__ ((__always_inline__))
-vqshrns_n_u32 (uint32x1_t __a, const int __b)
+__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+vqshrns_n_u32 (uint32_t __a, const int __b)
{
- return (uint16x1_t) __builtin_aarch64_uqshrn_nsi (__a, __b);
+ return (uint16_t) __builtin_aarch64_uqshrn_nsi (__a, __b);
}
-__extension__ static __inline uint32x1_t __attribute__ ((__always_inline__))
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
vqshrnd_n_u64 (uint64x1_t __a, const int __b)
{
- return (uint32x1_t) __builtin_aarch64_uqshrn_ndi (__a, __b);
+ return (uint32_t) __builtin_aarch64_uqshrn_ndi (__a, __b);
}
/* vqshrun */
@@ -22321,42 +22315,42 @@ vqshrun_n_s64 (int64x2_t __a, const int __b)
return (uint32x2_t) __builtin_aarch64_sqshrun_nv2di (__a, __b);
}
-__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
-vqshrunh_n_s16 (int16x1_t __a, const int __b)
+__extension__ static __inline int8_t __attribute__ ((__always_inline__))
+vqshrunh_n_s16 (int16_t __a, const int __b)
{
- return (int8x1_t) __builtin_aarch64_sqshrun_nhi (__a, __b);
+ return (int8_t) __builtin_aarch64_sqshrun_nhi (__a, __b);
}
-__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
-vqshruns_n_s32 (int32x1_t __a, const int __b)
+__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+vqshruns_n_s32 (int32_t __a, const int __b)
{
- return (int16x1_t) __builtin_aarch64_sqshrun_nsi (__a, __b);
+ return (int16_t) __builtin_aarch64_sqshrun_nsi (__a, __b);
}
-__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
vqshrund_n_s64 (int64x1_t __a, const int __b)
{
- return (int32x1_t) __builtin_aarch64_sqshrun_ndi (__a, __b);
+ return (int32_t) __builtin_aarch64_sqshrun_ndi (__a, __b);
}
/* vqsub */
-__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
-vqsubb_s8 (int8x1_t __a, int8x1_t __b)
+__extension__ static __inline int8_t __attribute__ ((__always_inline__))
+vqsubb_s8 (int8_t __a, int8_t __b)
{
- return (int8x1_t) __builtin_aarch64_sqsubqi (__a, __b);
+ return (int8_t) __builtin_aarch64_sqsubqi (__a, __b);
}
-__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
-vqsubh_s16 (int16x1_t __a, int16x1_t __b)
+__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+vqsubh_s16 (int16_t __a, int16_t __b)
{
- return (int16x1_t) __builtin_aarch64_sqsubhi (__a, __b);
+ return (int16_t) __builtin_aarch64_sqsubhi (__a, __b);
}
-__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
-vqsubs_s32 (int32x1_t __a, int32x1_t __b)
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vqsubs_s32 (int32_t __a, int32_t __b)
{
- return (int32x1_t) __builtin_aarch64_sqsubsi (__a, __b);
+ return (int32_t) __builtin_aarch64_sqsubsi (__a, __b);
}
__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
@@ -22365,22 +22359,22 @@ vqsubd_s64 (int64x1_t __a, int64x1_t __b)
return (int64x1_t) __builtin_aarch64_sqsubdi (__a, __b);
}
-__extension__ static __inline uint8x1_t __attribute__ ((__always_inline__))
-vqsubb_u8 (uint8x1_t __a, uint8x1_t __b)
+__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
+vqsubb_u8 (uint8_t __a, uint8_t __b)
{
- return (uint8x1_t) __builtin_aarch64_uqsubqi (__a, __b);
+ return (uint8_t) __builtin_aarch64_uqsubqi (__a, __b);
}
-__extension__ static __inline uint16x1_t __attribute__ ((__always_inline__))
-vqsubh_u16 (uint16x1_t __a, uint16x1_t __b)
+__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+vqsubh_u16 (uint16_t __a, uint16_t __b)
{
- return (uint16x1_t) __builtin_aarch64_uqsubhi (__a, __b);
+ return (uint16_t) __builtin_aarch64_uqsubhi (__a, __b);
}
-__extension__ static __inline uint32x1_t __attribute__ ((__always_inline__))
-vqsubs_u32 (uint32x1_t __a, uint32x1_t __b)
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vqsubs_u32 (uint32_t __a, uint32_t __b)
{
- return (uint32x1_t) __builtin_aarch64_uqsubsi (__a, __b);
+ return (uint32_t) __builtin_aarch64_uqsubsi (__a, __b);
}
__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
@@ -23596,22 +23590,22 @@ vsqaddq_u64 (uint64x2_t __a, int64x2_t __b)
(int64x2_t) __b);
}
-__extension__ static __inline uint8x1_t __attribute__ ((__always_inline__))
-vsqaddb_u8 (uint8x1_t __a, int8x1_t __b)
+__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
+vsqaddb_u8 (uint8_t __a, int8_t __b)
{
- return (uint8x1_t) __builtin_aarch64_usqaddqi ((int8x1_t) __a, __b);
+ return (uint8_t) __builtin_aarch64_usqaddqi ((int8_t) __a, __b);
}
-__extension__ static __inline uint16x1_t __attribute__ ((__always_inline__))
-vsqaddh_u16 (uint16x1_t __a, int16x1_t __b)
+__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+vsqaddh_u16 (uint16_t __a, int16_t __b)
{
- return (uint16x1_t) __builtin_aarch64_usqaddhi ((int16x1_t) __a, __b);
+ return (uint16_t) __builtin_aarch64_usqaddhi ((int16_t) __a, __b);
}
-__extension__ static __inline uint32x1_t __attribute__ ((__always_inline__))
-vsqadds_u32 (uint32x1_t __a, int32x1_t __b)
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vsqadds_u32 (uint32_t __a, int32_t __b)
{
- return (uint32x1_t) __builtin_aarch64_usqaddsi ((int32x1_t) __a, __b);
+ return (uint32_t) __builtin_aarch64_usqaddsi ((int32_t) __a, __b);
}
__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
@@ -25251,22 +25245,22 @@ vuqaddq_s64 (int64x2_t __a, uint64x2_t __b)
return (int64x2_t) __builtin_aarch64_suqaddv2di (__a, (int64x2_t) __b);
}
-__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
-vuqaddb_s8 (int8x1_t __a, uint8x1_t __b)
+__extension__ static __inline int8_t __attribute__ ((__always_inline__))
+vuqaddb_s8 (int8_t __a, uint8_t __b)
{
- return (int8x1_t) __builtin_aarch64_suqaddqi (__a, (int8x1_t) __b);
+ return (int8_t) __builtin_aarch64_suqaddqi (__a, (int8_t) __b);
}
-__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
-vuqaddh_s16 (int16x1_t __a, uint16x1_t __b)
+__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+vuqaddh_s16 (int16_t __a, uint16_t __b)
{
- return (int16x1_t) __builtin_aarch64_suqaddhi (__a, (int16x1_t) __b);
+ return (int16_t) __builtin_aarch64_suqaddhi (__a, (int16_t) __b);
}
-__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
-vuqadds_s32 (int32x1_t __a, uint32x1_t __b)
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vuqadds_s32 (int32_t __a, uint32_t __b)
{
- return (int32x1_t) __builtin_aarch64_suqaddsi (__a, (int32x1_t) __b);
+ return (int32_t) __builtin_aarch64_suqaddsi (__a, (int32_t) __b);
}
__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
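
The arm_neon.h changes retire the nonstandard int8x1_t/int16x1_t/int32x1_t (and unsigned) typedefs: the scalar saturating intrinsics now take and return plain <stdint.h> types, as the ACLE specifies. A minimal sketch of caller code under the new signatures (the wrapper function name is hypothetical):

  #include <arm_neon.h>

  int16_t
  sat_add16 (int16_t a, int16_t b)
  {
    return vqaddh_s16 (a, b);  /* previously typed int16x1_t */
  }
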