Diffstat (limited to 'gcc-4.9/gcc/config/aarch64')
-rw-r--r--  gcc-4.9/gcc/config/aarch64/aarch64-arches.def | 29
-rw-r--r--  gcc-4.9/gcc/config/aarch64/aarch64-builtins.c | 1253
-rw-r--r--  gcc-4.9/gcc/config/aarch64/aarch64-cores.def | 42
-rw-r--r--  gcc-4.9/gcc/config/aarch64/aarch64-elf-raw.h | 33
-rw-r--r--  gcc-4.9/gcc/config/aarch64/aarch64-elf.h | 161
-rw-r--r--  gcc-4.9/gcc/config/aarch64/aarch64-linux.h | 47
-rw-r--r--  gcc-4.9/gcc/config/aarch64/aarch64-modes.def | 55
-rw-r--r--  gcc-4.9/gcc/config/aarch64/aarch64-option-extensions.def | 38
-rw-r--r--  gcc-4.9/gcc/config/aarch64/aarch64-opts.h | 64
-rw-r--r--  gcc-4.9/gcc/config/aarch64/aarch64-protos.h | 292
-rw-r--r--  gcc-4.9/gcc/config/aarch64/aarch64-simd-builtins.def | 395
-rw-r--r--  gcc-4.9/gcc/config/aarch64/aarch64-simd.md | 4363
-rw-r--r--  gcc-4.9/gcc/config/aarch64/aarch64-tune.md | 5
-rw-r--r--  gcc-4.9/gcc/config/aarch64/aarch64.c | 8544
-rw-r--r--  gcc-4.9/gcc/config/aarch64/aarch64.h | 873
-rw-r--r--  gcc-4.9/gcc/config/aarch64/aarch64.md | 3642
-rw-r--r--  gcc-4.9/gcc/config/aarch64/aarch64.opt | 118
-rw-r--r--  gcc-4.9/gcc/config/aarch64/arm_neon.h | 25403
-rw-r--r--  gcc-4.9/gcc/config/aarch64/atomics.md | 382
-rw-r--r--  gcc-4.9/gcc/config/aarch64/biarchilp32.h | 29
-rw-r--r--  gcc-4.9/gcc/config/aarch64/biarchlp64.h | 29
-rw-r--r--  gcc-4.9/gcc/config/aarch64/constraints.md | 188
-rw-r--r--  gcc-4.9/gcc/config/aarch64/gentune.sh | 32
-rw-r--r--  gcc-4.9/gcc/config/aarch64/iterators.md | 997
-rw-r--r--  gcc-4.9/gcc/config/aarch64/predicates.md | 302
-rw-r--r--  gcc-4.9/gcc/config/aarch64/t-aarch64 | 45
-rw-r--r--  gcc-4.9/gcc/config/aarch64/t-aarch64-linux | 31
27 files changed, 47392 insertions, 0 deletions
diff --git a/gcc-4.9/gcc/config/aarch64/aarch64-arches.def b/gcc-4.9/gcc/config/aarch64/aarch64-arches.def
new file mode 100644
index 000000000..4b796d8c9
--- /dev/null
+++ b/gcc-4.9/gcc/config/aarch64/aarch64-arches.def
@@ -0,0 +1,29 @@
+/* Copyright (C) 2011-2014 Free Software Foundation, Inc.
+ Contributed by ARM Ltd.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+/* Before using #include to read this file, define a macro:
+
+ AARCH64_ARCH(NAME, CORE, ARCH, FLAGS)
+
+ The NAME is the name of the architecture, represented as a string
+ constant. The CORE is the identifier for a core representative of
+ this architecture. ARCH is the architecture revision. FLAGS are
+ the flags implied by the architecture. */
+
+AARCH64_ARCH("armv8-a", generic, 8, AARCH64_FL_FOR_ARCH8)
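For context, aarch64-arches.def is an X-macro table: each consumer defines AARCH64_ARCH for its own purpose and then includes the file. A minimal sketch of such a consumer follows (the struct, table name, and placeholder flags value are illustrative assumptions, not the compiler's actual code, which keeps its tables in aarch64.c):

/* Hypothetical consumer of aarch64-arches.def: build a name -> info table.  */
#include <stddef.h>

#define AARCH64_FL_FOR_ARCH8 0x7UL   /* placeholder; the real masks live in aarch64.h */

struct arch_entry
{
  const char *name;       /* architecture name, e.g. "armv8-a" */
  int arch_rev;           /* architecture revision */
  unsigned long flags;    /* feature flags implied by the architecture */
};

#define AARCH64_ARCH(NAME, CORE, ARCH, FLAGS) \
  { NAME, ARCH, FLAGS },

static const struct arch_entry all_architectures[] =
{
#include "aarch64-arches.def"
  { NULL, 0, 0 }          /* sentinel */
};
#undef AARCH64_ARCH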
diff --git a/gcc-4.9/gcc/config/aarch64/aarch64-builtins.c b/gcc-4.9/gcc/config/aarch64/aarch64-builtins.c
new file mode 100644
index 000000000..55cfe0ab2
--- /dev/null
+++ b/gcc-4.9/gcc/config/aarch64/aarch64-builtins.c
@@ -0,0 +1,1253 @@
+/* Builtins' description for AArch64 SIMD architecture.
+ Copyright (C) 2011-2014 Free Software Foundation, Inc.
+ Contributed by ARM Ltd.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "rtl.h"
+#include "tree.h"
+#include "stor-layout.h"
+#include "stringpool.h"
+#include "calls.h"
+#include "expr.h"
+#include "tm_p.h"
+#include "recog.h"
+#include "langhooks.h"
+#include "diagnostic-core.h"
+#include "optabs.h"
+#include "pointer-set.h"
+#include "hash-table.h"
+#include "vec.h"
+#include "ggc.h"
+#include "basic-block.h"
+#include "tree-ssa-alias.h"
+#include "internal-fn.h"
+#include "gimple-fold.h"
+#include "tree-eh.h"
+#include "gimple-expr.h"
+#include "is-a.h"
+#include "gimple.h"
+#include "gimple-iterator.h"
+
+enum aarch64_simd_builtin_type_mode
+{
+ T_V8QI,
+ T_V4HI,
+ T_V2SI,
+ T_V2SF,
+ T_DI,
+ T_DF,
+ T_V16QI,
+ T_V8HI,
+ T_V4SI,
+ T_V4SF,
+ T_V2DI,
+ T_V2DF,
+ T_TI,
+ T_EI,
+ T_OI,
+ T_XI,
+ T_SI,
+ T_SF,
+ T_HI,
+ T_QI,
+ T_MAX
+};
+
+#define v8qi_UP T_V8QI
+#define v4hi_UP T_V4HI
+#define v2si_UP T_V2SI
+#define v2sf_UP T_V2SF
+#define di_UP T_DI
+#define df_UP T_DF
+#define v16qi_UP T_V16QI
+#define v8hi_UP T_V8HI
+#define v4si_UP T_V4SI
+#define v4sf_UP T_V4SF
+#define v2di_UP T_V2DI
+#define v2df_UP T_V2DF
+#define ti_UP T_TI
+#define ei_UP T_EI
+#define oi_UP T_OI
+#define xi_UP T_XI
+#define si_UP T_SI
+#define sf_UP T_SF
+#define hi_UP T_HI
+#define qi_UP T_QI
+
+#define UP(X) X##_UP
+
+#define SIMD_MAX_BUILTIN_ARGS 5
+
+enum aarch64_type_qualifiers
+{
+ /* T foo. */
+ qualifier_none = 0x0,
+ /* unsigned T foo. */
+ qualifier_unsigned = 0x1, /* 1 << 0 */
+ /* const T foo. */
+ qualifier_const = 0x2, /* 1 << 1 */
+ /* T *foo. */
+ qualifier_pointer = 0x4, /* 1 << 2 */
+ /* const T *foo. */
+ qualifier_const_pointer = 0x6, /* qualifier_const | qualifier_pointer */
+ /* Used when expanding arguments if an operand could
+ be an immediate. */
+ qualifier_immediate = 0x8, /* 1 << 3 */
+ qualifier_maybe_immediate = 0x10, /* 1 << 4 */
+ /* void foo (...). */
+ qualifier_void = 0x20, /* 1 << 5 */
+ /* Some patterns may have internal operands, this qualifier is an
+ instruction to the initialisation code to skip this operand. */
+ qualifier_internal = 0x40, /* 1 << 6 */
+ /* Some builtins should use the T_*mode* encoded in a simd_builtin_datum
+ rather than using the type of the operand. */
+ qualifier_map_mode = 0x80, /* 1 << 7 */
+ /* qualifier_pointer | qualifier_map_mode */
+ qualifier_pointer_map_mode = 0x84,
+ /* qualifier_const_pointer | qualifier_map_mode */
+ qualifier_const_pointer_map_mode = 0x86,
+ /* Polynomial types. */
+ qualifier_poly = 0x100
+};
+
+typedef struct
+{
+ const char *name;
+ enum aarch64_simd_builtin_type_mode mode;
+ const enum insn_code code;
+ unsigned int fcode;
+ enum aarch64_type_qualifiers *qualifiers;
+} aarch64_simd_builtin_datum;
+
+static enum aarch64_type_qualifiers
+aarch64_types_unop_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+ = { qualifier_none, qualifier_none };
+#define TYPES_UNOP (aarch64_types_unop_qualifiers)
+static enum aarch64_type_qualifiers
+aarch64_types_unopu_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+ = { qualifier_unsigned, qualifier_unsigned };
+#define TYPES_UNOPU (aarch64_types_unopu_qualifiers)
+#define TYPES_CREATE (aarch64_types_unop_qualifiers)
+#define TYPES_REINTERP (aarch64_types_unop_qualifiers)
+static enum aarch64_type_qualifiers
+aarch64_types_binop_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+ = { qualifier_none, qualifier_none, qualifier_maybe_immediate };
+#define TYPES_BINOP (aarch64_types_binop_qualifiers)
+static enum aarch64_type_qualifiers
+aarch64_types_binopu_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+ = { qualifier_unsigned, qualifier_unsigned, qualifier_unsigned };
+#define TYPES_BINOPU (aarch64_types_binopu_qualifiers)
+static enum aarch64_type_qualifiers
+aarch64_types_binopp_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+ = { qualifier_poly, qualifier_poly, qualifier_poly };
+#define TYPES_BINOPP (aarch64_types_binopp_qualifiers)
+
+static enum aarch64_type_qualifiers
+aarch64_types_ternop_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+ = { qualifier_none, qualifier_none, qualifier_none, qualifier_none };
+#define TYPES_TERNOP (aarch64_types_ternop_qualifiers)
+static enum aarch64_type_qualifiers
+aarch64_types_ternopu_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+ = { qualifier_unsigned, qualifier_unsigned,
+ qualifier_unsigned, qualifier_unsigned };
+#define TYPES_TERNOPU (aarch64_types_ternopu_qualifiers)
+
+static enum aarch64_type_qualifiers
+aarch64_types_quadop_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+ = { qualifier_none, qualifier_none, qualifier_none,
+ qualifier_none, qualifier_none };
+#define TYPES_QUADOP (aarch64_types_quadop_qualifiers)
+
+static enum aarch64_type_qualifiers
+aarch64_types_getlane_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+ = { qualifier_none, qualifier_none, qualifier_immediate };
+#define TYPES_GETLANE (aarch64_types_getlane_qualifiers)
+#define TYPES_SHIFTIMM (aarch64_types_getlane_qualifiers)
+static enum aarch64_type_qualifiers
+aarch64_types_unsigned_shift_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+ = { qualifier_unsigned, qualifier_unsigned, qualifier_immediate };
+#define TYPES_USHIFTIMM (aarch64_types_unsigned_shift_qualifiers)
+static enum aarch64_type_qualifiers
+aarch64_types_setlane_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+ = { qualifier_none, qualifier_none, qualifier_none, qualifier_immediate };
+#define TYPES_SETLANE (aarch64_types_setlane_qualifiers)
+#define TYPES_SHIFTINSERT (aarch64_types_setlane_qualifiers)
+#define TYPES_SHIFTACC (aarch64_types_setlane_qualifiers)
+
+static enum aarch64_type_qualifiers
+aarch64_types_combine_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+ = { qualifier_none, qualifier_none, qualifier_none };
+#define TYPES_COMBINE (aarch64_types_combine_qualifiers)
+
+static enum aarch64_type_qualifiers
+aarch64_types_load1_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+ = { qualifier_none, qualifier_const_pointer_map_mode };
+#define TYPES_LOAD1 (aarch64_types_load1_qualifiers)
+#define TYPES_LOADSTRUCT (aarch64_types_load1_qualifiers)
+
+static enum aarch64_type_qualifiers
+aarch64_types_bsl_p_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+ = { qualifier_poly, qualifier_unsigned,
+ qualifier_poly, qualifier_poly };
+#define TYPES_BSL_P (aarch64_types_bsl_p_qualifiers)
+static enum aarch64_type_qualifiers
+aarch64_types_bsl_s_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+ = { qualifier_none, qualifier_unsigned,
+ qualifier_none, qualifier_none };
+#define TYPES_BSL_S (aarch64_types_bsl_s_qualifiers)
+static enum aarch64_type_qualifiers
+aarch64_types_bsl_u_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+ = { qualifier_unsigned, qualifier_unsigned,
+ qualifier_unsigned, qualifier_unsigned };
+#define TYPES_BSL_U (aarch64_types_bsl_u_qualifiers)
+
+/* The first argument (return type) of a store should be void type,
+ which we represent with qualifier_void. Their first operand will be
+ a DImode pointer to the location to store to, so we must use
+ qualifier_map_mode | qualifier_pointer to build a pointer to the
+ element type of the vector. */
+static enum aarch64_type_qualifiers
+aarch64_types_store1_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+ = { qualifier_void, qualifier_pointer_map_mode, qualifier_none };
+#define TYPES_STORE1 (aarch64_types_store1_qualifiers)
+#define TYPES_STORESTRUCT (aarch64_types_store1_qualifiers)
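/* Editorial sketch (not part of the original file): read positionally, the
   qualifier lists above describe builtin prototypes, with element 0 naming
   the return type.  Assuming the V2SF variants, TYPES_STORE1 and TYPES_LOAD1
   would correspond to shapes roughly like the declarations below; the real
   __builtin_aarch64_* names and types are generated later by
   aarch64_init_simd_builtins, so these identifiers are purely illustrative.  */
typedef float sketch_float32x2_t __attribute__ ((vector_size (8)));
/* { qualifier_void, qualifier_pointer_map_mode, qualifier_none }  */
extern void sketch_st1_v2sf (float *addr, sketch_float32x2_t val);
/* { qualifier_none, qualifier_const_pointer_map_mode }  */
extern sketch_float32x2_t sketch_ld1_v2sf (const float *addr);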
+
+#define CF0(N, X) CODE_FOR_aarch64_##N##X
+#define CF1(N, X) CODE_FOR_##N##X##1
+#define CF2(N, X) CODE_FOR_##N##X##2
+#define CF3(N, X) CODE_FOR_##N##X##3
+#define CF4(N, X) CODE_FOR_##N##X##4
+#define CF10(N, X) CODE_FOR_##N##X
+
+#define VAR1(T, N, MAP, A) \
+ {#N, UP (A), CF##MAP (N, A), 0, TYPES_##T},
+#define VAR2(T, N, MAP, A, B) \
+ VAR1 (T, N, MAP, A) \
+ VAR1 (T, N, MAP, B)
+#define VAR3(T, N, MAP, A, B, C) \
+ VAR2 (T, N, MAP, A, B) \
+ VAR1 (T, N, MAP, C)
+#define VAR4(T, N, MAP, A, B, C, D) \
+ VAR3 (T, N, MAP, A, B, C) \
+ VAR1 (T, N, MAP, D)
+#define VAR5(T, N, MAP, A, B, C, D, E) \
+ VAR4 (T, N, MAP, A, B, C, D) \
+ VAR1 (T, N, MAP, E)
+#define VAR6(T, N, MAP, A, B, C, D, E, F) \
+ VAR5 (T, N, MAP, A, B, C, D, E) \
+ VAR1 (T, N, MAP, F)
+#define VAR7(T, N, MAP, A, B, C, D, E, F, G) \
+ VAR6 (T, N, MAP, A, B, C, D, E, F) \
+ VAR1 (T, N, MAP, G)
+#define VAR8(T, N, MAP, A, B, C, D, E, F, G, H) \
+ VAR7 (T, N, MAP, A, B, C, D, E, F, G) \
+ VAR1 (T, N, MAP, H)
+#define VAR9(T, N, MAP, A, B, C, D, E, F, G, H, I) \
+ VAR8 (T, N, MAP, A, B, C, D, E, F, G, H) \
+ VAR1 (T, N, MAP, I)
+#define VAR10(T, N, MAP, A, B, C, D, E, F, G, H, I, J) \
+ VAR9 (T, N, MAP, A, B, C, D, E, F, G, H, I) \
+ VAR1 (T, N, MAP, J)
+#define VAR11(T, N, MAP, A, B, C, D, E, F, G, H, I, J, K) \
+ VAR10 (T, N, MAP, A, B, C, D, E, F, G, H, I, J) \
+ VAR1 (T, N, MAP, K)
+#define VAR12(T, N, MAP, A, B, C, D, E, F, G, H, I, J, K, L) \
+ VAR11 (T, N, MAP, A, B, C, D, E, F, G, H, I, J, K) \
+ VAR1 (T, N, MAP, L)
+
+/* BUILTIN_<ITERATOR> macros should expand to cover the same range of
+ modes as is given for each define_mode_iterator in
+ config/aarch64/iterators.md. */
+
+#define BUILTIN_DX(T, N, MAP) \
+ VAR2 (T, N, MAP, di, df)
+#define BUILTIN_GPF(T, N, MAP) \
+ VAR2 (T, N, MAP, sf, df)
+#define BUILTIN_SDQ_I(T, N, MAP) \
+ VAR4 (T, N, MAP, qi, hi, si, di)
+#define BUILTIN_SD_HSI(T, N, MAP) \
+ VAR2 (T, N, MAP, hi, si)
+#define BUILTIN_V2F(T, N, MAP) \
+ VAR2 (T, N, MAP, v2sf, v2df)
+#define BUILTIN_VALL(T, N, MAP) \
+ VAR10 (T, N, MAP, v8qi, v16qi, v4hi, v8hi, v2si, \
+ v4si, v2di, v2sf, v4sf, v2df)
+#define BUILTIN_VALLDI(T, N, MAP) \
+ VAR11 (T, N, MAP, v8qi, v16qi, v4hi, v8hi, v2si, \
+ v4si, v2di, v2sf, v4sf, v2df, di)
+#define BUILTIN_VALLDIF(T, N, MAP) \
+ VAR12 (T, N, MAP, v8qi, v16qi, v4hi, v8hi, v2si, \
+ v4si, v2di, v2sf, v4sf, v2df, di, df)
+#define BUILTIN_VB(T, N, MAP) \
+ VAR2 (T, N, MAP, v8qi, v16qi)
+#define BUILTIN_VD(T, N, MAP) \
+ VAR4 (T, N, MAP, v8qi, v4hi, v2si, v2sf)
+#define BUILTIN_VDC(T, N, MAP) \
+ VAR6 (T, N, MAP, v8qi, v4hi, v2si, v2sf, di, df)
+#define BUILTIN_VDIC(T, N, MAP) \
+ VAR3 (T, N, MAP, v8qi, v4hi, v2si)
+#define BUILTIN_VDN(T, N, MAP) \
+ VAR3 (T, N, MAP, v4hi, v2si, di)
+#define BUILTIN_VDQ(T, N, MAP) \
+ VAR7 (T, N, MAP, v8qi, v16qi, v4hi, v8hi, v2si, v4si, v2di)
+#define BUILTIN_VDQF(T, N, MAP) \
+ VAR3 (T, N, MAP, v2sf, v4sf, v2df)
+#define BUILTIN_VDQH(T, N, MAP) \
+ VAR2 (T, N, MAP, v4hi, v8hi)
+#define BUILTIN_VDQHS(T, N, MAP) \
+ VAR4 (T, N, MAP, v4hi, v8hi, v2si, v4si)
+#define BUILTIN_VDQIF(T, N, MAP) \
+ VAR9 (T, N, MAP, v8qi, v16qi, v4hi, v8hi, v2si, v4si, v2sf, v4sf, v2df)
+#define BUILTIN_VDQM(T, N, MAP) \
+ VAR6 (T, N, MAP, v8qi, v16qi, v4hi, v8hi, v2si, v4si)
+#define BUILTIN_VDQV(T, N, MAP) \
+ VAR5 (T, N, MAP, v8qi, v16qi, v4hi, v8hi, v4si)
+#define BUILTIN_VDQQH(T, N, MAP) \
+ VAR4 (T, N, MAP, v8qi, v16qi, v4hi, v8hi)
+#define BUILTIN_VDQ_BHSI(T, N, MAP) \
+ VAR6 (T, N, MAP, v8qi, v16qi, v4hi, v8hi, v2si, v4si)
+#define BUILTIN_VDQ_I(T, N, MAP) \
+ VAR7 (T, N, MAP, v8qi, v16qi, v4hi, v8hi, v2si, v4si, v2di)
+#define BUILTIN_VDW(T, N, MAP) \
+ VAR3 (T, N, MAP, v8qi, v4hi, v2si)
+#define BUILTIN_VD_BHSI(T, N, MAP) \
+ VAR3 (T, N, MAP, v8qi, v4hi, v2si)
+#define BUILTIN_VD_HSI(T, N, MAP) \
+ VAR2 (T, N, MAP, v4hi, v2si)
+#define BUILTIN_VD_RE(T, N, MAP) \
+ VAR6 (T, N, MAP, v8qi, v4hi, v2si, v2sf, di, df)
+#define BUILTIN_VQ(T, N, MAP) \
+ VAR6 (T, N, MAP, v16qi, v8hi, v4si, v2di, v4sf, v2df)
+#define BUILTIN_VQN(T, N, MAP) \
+ VAR3 (T, N, MAP, v8hi, v4si, v2di)
+#define BUILTIN_VQW(T, N, MAP) \
+ VAR3 (T, N, MAP, v16qi, v8hi, v4si)
+#define BUILTIN_VQ_HSI(T, N, MAP) \
+ VAR2 (T, N, MAP, v8hi, v4si)
+#define BUILTIN_VQ_S(T, N, MAP) \
+ VAR6 (T, N, MAP, v8qi, v16qi, v4hi, v8hi, v2si, v4si)
+#define BUILTIN_VSDQ_HSI(T, N, MAP) \
+ VAR6 (T, N, MAP, v4hi, v8hi, v2si, v4si, hi, si)
+#define BUILTIN_VSDQ_I(T, N, MAP) \
+ VAR11 (T, N, MAP, v8qi, v16qi, v4hi, v8hi, v2si, v4si, v2di, qi, hi, si, di)
+#define BUILTIN_VSDQ_I_BHSI(T, N, MAP) \
+ VAR10 (T, N, MAP, v8qi, v16qi, v4hi, v8hi, v2si, v4si, v2di, qi, hi, si)
+#define BUILTIN_VSDQ_I_DI(T, N, MAP) \
+ VAR8 (T, N, MAP, v8qi, v16qi, v4hi, v8hi, v2si, v4si, v2di, di)
+#define BUILTIN_VSD_HSI(T, N, MAP) \
+ VAR4 (T, N, MAP, v4hi, v2si, hi, si)
+#define BUILTIN_VSQN_HSDI(T, N, MAP) \
+ VAR6 (T, N, MAP, v8hi, v4si, v2di, hi, si, di)
+#define BUILTIN_VSTRUCT(T, N, MAP) \
+ VAR3 (T, N, MAP, oi, ci, xi)
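/* Illustrative expansion (not in the original source), using a hypothetical
   builtin name "foo": a single line in aarch64-simd-builtins.def such as

     BUILTIN_VDQF (UNOP, foo, 2)

   goes through VAR3 and VAR1 above and yields three table entries:

     {"foo", T_V2SF, CODE_FOR_foov2sf2, 0, TYPES_UNOP},
     {"foo", T_V4SF, CODE_FOR_foov4sf2, 0, TYPES_UNOP},
     {"foo", T_V2DF, CODE_FOR_foov2df2, 0, TYPES_UNOP},

   one aarch64_simd_builtin_datum per mode in the iterator, with the MAP
   digit selecting which CF* macro builds the CODE_FOR_* insn name.  */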
+
+static aarch64_simd_builtin_datum aarch64_simd_builtin_data[] = {
+#include "aarch64-simd-builtins.def"
+};
+
+#undef VAR1
+#define VAR1(T, N, MAP, A) \
+ AARCH64_SIMD_BUILTIN_##T##_##N##A,
+
+enum aarch64_builtins
+{
+ AARCH64_BUILTIN_MIN,
+ AARCH64_SIMD_BUILTIN_BASE,
+#include "aarch64-simd-builtins.def"
+ AARCH64_SIMD_BUILTIN_MAX = AARCH64_SIMD_BUILTIN_BASE
+ + ARRAY_SIZE (aarch64_simd_builtin_data),
+ AARCH64_BUILTIN_MAX
+};
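/* Illustrative note (not in the original source): with VAR1 redefined as
   above, re-including aarch64-simd-builtins.def turns each table row into an
   enumerator such as AARCH64_SIMD_BUILTIN_UNOP_absv4sf; these are the
   AARCH64_SIMD_BUILTIN_<T>_<name><mode> names referenced further down, for
   example in aarch64_builtin_vectorized_function.  */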
+
+static GTY(()) tree aarch64_builtin_decls[AARCH64_BUILTIN_MAX];
+
+#define NUM_DREG_TYPES 6
+#define NUM_QREG_TYPES 6
+
+/* Return a tree for a signed or unsigned argument of either
+ the mode specified by MODE, or the inner mode of MODE. */
+tree
+aarch64_build_scalar_type (enum machine_mode mode,
+ bool unsigned_p,
+ bool poly_p)
+{
+#undef INT_TYPES
+#define INT_TYPES \
+ AARCH64_TYPE_BUILDER (QI) \
+ AARCH64_TYPE_BUILDER (HI) \
+ AARCH64_TYPE_BUILDER (SI) \
+ AARCH64_TYPE_BUILDER (DI) \
+ AARCH64_TYPE_BUILDER (EI) \
+ AARCH64_TYPE_BUILDER (OI) \
+ AARCH64_TYPE_BUILDER (CI) \
+ AARCH64_TYPE_BUILDER (XI) \
+ AARCH64_TYPE_BUILDER (TI) \
+
+/* Statically declare all the possible types we might need. */
+#undef AARCH64_TYPE_BUILDER
+#define AARCH64_TYPE_BUILDER(X) \
+ static tree X##_aarch64_type_node_p = NULL; \
+ static tree X##_aarch64_type_node_s = NULL; \
+ static tree X##_aarch64_type_node_u = NULL;
+
+ INT_TYPES
+
+ static tree float_aarch64_type_node = NULL;
+ static tree double_aarch64_type_node = NULL;
+
+ gcc_assert (!VECTOR_MODE_P (mode));
+
+/* If we've already initialised this type, don't initialise it again,
+ otherwise ask for a new type of the correct size. */
+#undef AARCH64_TYPE_BUILDER
+#define AARCH64_TYPE_BUILDER(X) \
+ case X##mode: \
+ if (unsigned_p) \
+ return (X##_aarch64_type_node_u \
+ ? X##_aarch64_type_node_u \
+ : X##_aarch64_type_node_u \
+ = make_unsigned_type (GET_MODE_PRECISION (mode))); \
+ else if (poly_p) \
+ return (X##_aarch64_type_node_p \
+ ? X##_aarch64_type_node_p \
+ : X##_aarch64_type_node_p \
+ = make_unsigned_type (GET_MODE_PRECISION (mode))); \
+ else \
+ return (X##_aarch64_type_node_s \
+ ? X##_aarch64_type_node_s \
+ : X##_aarch64_type_node_s \
+ = make_signed_type (GET_MODE_PRECISION (mode))); \
+ break;
+
+ switch (mode)
+ {
+ INT_TYPES
+ case SFmode:
+ if (!float_aarch64_type_node)
+ {
+ float_aarch64_type_node = make_node (REAL_TYPE);
+ TYPE_PRECISION (float_aarch64_type_node) = FLOAT_TYPE_SIZE;
+ layout_type (float_aarch64_type_node);
+ }
+ return float_aarch64_type_node;
+ break;
+ case DFmode:
+ if (!double_aarch64_type_node)
+ {
+ double_aarch64_type_node = make_node (REAL_TYPE);
+ TYPE_PRECISION (double_aarch64_type_node) = DOUBLE_TYPE_SIZE;
+ layout_type (double_aarch64_type_node);
+ }
+ return double_aarch64_type_node;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+}
+
+tree
+aarch64_build_vector_type (enum machine_mode mode,
+ bool unsigned_p,
+ bool poly_p)
+{
+ tree eltype;
+
+#define VECTOR_TYPES \
+ AARCH64_TYPE_BUILDER (V16QI) \
+ AARCH64_TYPE_BUILDER (V8HI) \
+ AARCH64_TYPE_BUILDER (V4SI) \
+ AARCH64_TYPE_BUILDER (V2DI) \
+ AARCH64_TYPE_BUILDER (V8QI) \
+ AARCH64_TYPE_BUILDER (V4HI) \
+ AARCH64_TYPE_BUILDER (V2SI) \
+ \
+ AARCH64_TYPE_BUILDER (V4SF) \
+ AARCH64_TYPE_BUILDER (V2DF) \
+ AARCH64_TYPE_BUILDER (V2SF) \
+/* Declare our "cache" of values. */
+#undef AARCH64_TYPE_BUILDER
+#define AARCH64_TYPE_BUILDER(X) \
+ static tree X##_aarch64_type_node_s = NULL; \
+ static tree X##_aarch64_type_node_u = NULL; \
+ static tree X##_aarch64_type_node_p = NULL;
+
+ VECTOR_TYPES
+
+ gcc_assert (VECTOR_MODE_P (mode));
+
+#undef AARCH64_TYPE_BUILDER
+#define AARCH64_TYPE_BUILDER(X) \
+ case X##mode: \
+ if (unsigned_p) \
+ return X##_aarch64_type_node_u \
+ ? X##_aarch64_type_node_u \
+ : X##_aarch64_type_node_u \
+ = build_vector_type_for_mode (aarch64_build_scalar_type \
+ (GET_MODE_INNER (mode), \
+ unsigned_p, poly_p), mode); \
+ else if (poly_p) \
+ return X##_aarch64_type_node_p \
+ ? X##_aarch64_type_node_p \
+ : X##_aarch64_type_node_p \
+ = build_vector_type_for_mode (aarch64_build_scalar_type \
+ (GET_MODE_INNER (mode), \
+ unsigned_p, poly_p), mode); \
+ else \
+ return X##_aarch64_type_node_s \
+ ? X##_aarch64_type_node_s \
+ : X##_aarch64_type_node_s \
+ = build_vector_type_for_mode (aarch64_build_scalar_type \
+ (GET_MODE_INNER (mode), \
+ unsigned_p, poly_p), mode); \
+ break;
+
+ switch (mode)
+ {
+ default:
+ eltype = aarch64_build_scalar_type (GET_MODE_INNER (mode),
+ unsigned_p, poly_p);
+ return build_vector_type_for_mode (eltype, mode);
+ break;
+ VECTOR_TYPES
+ }
+}
+
+tree
+aarch64_build_type (enum machine_mode mode, bool unsigned_p, bool poly_p)
+{
+ if (VECTOR_MODE_P (mode))
+ return aarch64_build_vector_type (mode, unsigned_p, poly_p);
+ else
+ return aarch64_build_scalar_type (mode, unsigned_p, poly_p);
+}
+
+tree
+aarch64_build_signed_type (enum machine_mode mode)
+{
+ return aarch64_build_type (mode, false, false);
+}
+
+tree
+aarch64_build_unsigned_type (enum machine_mode mode)
+{
+ return aarch64_build_type (mode, true, false);
+}
+
+tree
+aarch64_build_poly_type (enum machine_mode mode)
+{
+ return aarch64_build_type (mode, false, true);
+}
+
+static void
+aarch64_init_simd_builtins (void)
+{
+ unsigned int i, fcode = AARCH64_SIMD_BUILTIN_BASE + 1;
+
+ /* Signed scalar type nodes. */
+ tree aarch64_simd_intQI_type_node = aarch64_build_signed_type (QImode);
+ tree aarch64_simd_intHI_type_node = aarch64_build_signed_type (HImode);
+ tree aarch64_simd_intSI_type_node = aarch64_build_signed_type (SImode);
+ tree aarch64_simd_intDI_type_node = aarch64_build_signed_type (DImode);
+ tree aarch64_simd_intTI_type_node = aarch64_build_signed_type (TImode);
+ tree aarch64_simd_intEI_type_node = aarch64_build_signed_type (EImode);
+ tree aarch64_simd_intOI_type_node = aarch64_build_signed_type (OImode);
+ tree aarch64_simd_intCI_type_node = aarch64_build_signed_type (CImode);
+ tree aarch64_simd_intXI_type_node = aarch64_build_signed_type (XImode);
+
+ /* Unsigned scalar type nodes. */
+ tree aarch64_simd_intUQI_type_node = aarch64_build_unsigned_type (QImode);
+ tree aarch64_simd_intUHI_type_node = aarch64_build_unsigned_type (HImode);
+ tree aarch64_simd_intUSI_type_node = aarch64_build_unsigned_type (SImode);
+ tree aarch64_simd_intUDI_type_node = aarch64_build_unsigned_type (DImode);
+
+ /* Poly scalar type nodes. */
+ tree aarch64_simd_polyQI_type_node = aarch64_build_poly_type (QImode);
+ tree aarch64_simd_polyHI_type_node = aarch64_build_poly_type (HImode);
+ tree aarch64_simd_polyDI_type_node = aarch64_build_poly_type (DImode);
+ tree aarch64_simd_polyTI_type_node = aarch64_build_poly_type (TImode);
+
+ /* Float type nodes. */
+ tree aarch64_simd_float_type_node = aarch64_build_signed_type (SFmode);
+ tree aarch64_simd_double_type_node = aarch64_build_signed_type (DFmode);
+
+ /* Define typedefs which exactly correspond to the modes we are basing vector
+ types on. If you change these names you'll need to change
+ the table used by aarch64_mangle_type too. */
+ (*lang_hooks.types.register_builtin_type) (aarch64_simd_intQI_type_node,
+ "__builtin_aarch64_simd_qi");
+ (*lang_hooks.types.register_builtin_type) (aarch64_simd_intHI_type_node,
+ "__builtin_aarch64_simd_hi");
+ (*lang_hooks.types.register_builtin_type) (aarch64_simd_intSI_type_node,
+ "__builtin_aarch64_simd_si");
+ (*lang_hooks.types.register_builtin_type) (aarch64_simd_float_type_node,
+ "__builtin_aarch64_simd_sf");
+ (*lang_hooks.types.register_builtin_type) (aarch64_simd_intDI_type_node,
+ "__builtin_aarch64_simd_di");
+ (*lang_hooks.types.register_builtin_type) (aarch64_simd_double_type_node,
+ "__builtin_aarch64_simd_df");
+ (*lang_hooks.types.register_builtin_type) (aarch64_simd_polyQI_type_node,
+ "__builtin_aarch64_simd_poly8");
+ (*lang_hooks.types.register_builtin_type) (aarch64_simd_polyHI_type_node,
+ "__builtin_aarch64_simd_poly16");
+ (*lang_hooks.types.register_builtin_type) (aarch64_simd_polyDI_type_node,
+ "__builtin_aarch64_simd_poly64");
+ (*lang_hooks.types.register_builtin_type) (aarch64_simd_polyTI_type_node,
+ "__builtin_aarch64_simd_poly128");
+ (*lang_hooks.types.register_builtin_type) (aarch64_simd_intTI_type_node,
+ "__builtin_aarch64_simd_ti");
+ (*lang_hooks.types.register_builtin_type) (aarch64_simd_intEI_type_node,
+ "__builtin_aarch64_simd_ei");
+ (*lang_hooks.types.register_builtin_type) (aarch64_simd_intOI_type_node,
+ "__builtin_aarch64_simd_oi");
+ (*lang_hooks.types.register_builtin_type) (aarch64_simd_intCI_type_node,
+ "__builtin_aarch64_simd_ci");
+ (*lang_hooks.types.register_builtin_type) (aarch64_simd_intXI_type_node,
+ "__builtin_aarch64_simd_xi");
+
+ /* Unsigned integer types for various mode sizes. */
+ (*lang_hooks.types.register_builtin_type) (aarch64_simd_intUQI_type_node,
+ "__builtin_aarch64_simd_uqi");
+ (*lang_hooks.types.register_builtin_type) (aarch64_simd_intUHI_type_node,
+ "__builtin_aarch64_simd_uhi");
+ (*lang_hooks.types.register_builtin_type) (aarch64_simd_intUSI_type_node,
+ "__builtin_aarch64_simd_usi");
+ (*lang_hooks.types.register_builtin_type) (aarch64_simd_intUDI_type_node,
+ "__builtin_aarch64_simd_udi");
+
+ for (i = 0; i < ARRAY_SIZE (aarch64_simd_builtin_data); i++, fcode++)
+ {
+ bool print_type_signature_p = false;
+ char type_signature[SIMD_MAX_BUILTIN_ARGS] = { 0 };
+ aarch64_simd_builtin_datum *d = &aarch64_simd_builtin_data[i];
+ const char *const modenames[] =
+ {
+ "v8qi", "v4hi", "v2si", "v2sf", "di", "df",
+ "v16qi", "v8hi", "v4si", "v4sf", "v2di", "v2df",
+ "ti", "ei", "oi", "xi", "si", "sf", "hi", "qi"
+ };
+ const enum machine_mode modes[] =
+ {
+ V8QImode, V4HImode, V2SImode, V2SFmode, DImode, DFmode,
+ V16QImode, V8HImode, V4SImode, V4SFmode, V2DImode,
+ V2DFmode, TImode, EImode, OImode, XImode, SImode,
+ SFmode, HImode, QImode
+ };
+ char namebuf[60];
+ tree ftype = NULL;
+ tree fndecl = NULL;
+
+ gcc_assert (ARRAY_SIZE (modenames) == T_MAX);
+
+ d->fcode = fcode;
+
+ /* We must track two variables here. op_num is
+ the operand number as in the RTL pattern. This is
+ required to access the mode (e.g. V4SF mode) of the
+ argument, from which the base type can be derived.
+ arg_num is an index into the qualifiers data, which
+ gives qualifiers to the type (e.g. const unsigned).
+ The reason these two variables may differ by one is the
+ void return type. While all return types take the 0th entry
+ in the qualifiers array, there is no operand for them in the
+ RTL pattern. */
+ int op_num = insn_data[d->code].n_operands - 1;
+ int arg_num = d->qualifiers[0] & qualifier_void
+ ? op_num + 1
+ : op_num;
+ tree return_type = void_type_node, args = void_list_node;
+ tree eltype;
+
+ /* Build a function type directly from the insn_data for this
+ builtin. The build_function_type () function takes care of
+ removing duplicates for us. */
+ for (; op_num >= 0; arg_num--, op_num--)
+ {
+ enum machine_mode op_mode = insn_data[d->code].operand[op_num].mode;
+ enum aarch64_type_qualifiers qualifiers = d->qualifiers[arg_num];
+
+ if (qualifiers & qualifier_unsigned)
+ {
+ type_signature[arg_num] = 'u';
+ print_type_signature_p = true;
+ }
+ else if (qualifiers & qualifier_poly)
+ {
+ type_signature[arg_num] = 'p';
+ print_type_signature_p = true;
+ }
+ else
+ type_signature[arg_num] = 's';
+
+ /* Skip an internal operand for vget_{low, high}. */
+ if (qualifiers & qualifier_internal)
+ continue;
+
+ /* Some builtins have different user-facing types
+ for certain arguments, encoded in d->mode. */
+ if (qualifiers & qualifier_map_mode)
+ op_mode = modes[d->mode];
+
+ /* For pointers, we want a pointer to the basic type
+ of the vector. */
+ if (qualifiers & qualifier_pointer && VECTOR_MODE_P (op_mode))
+ op_mode = GET_MODE_INNER (op_mode);
+
+ eltype = aarch64_build_type (op_mode,
+ qualifiers & qualifier_unsigned,
+ qualifiers & qualifier_poly);
+
+ /* Add qualifiers. */
+ if (qualifiers & qualifier_const)
+ eltype = build_qualified_type (eltype, TYPE_QUAL_CONST);
+
+ if (qualifiers & qualifier_pointer)
+ eltype = build_pointer_type (eltype);
+
+ /* If we have reached arg_num == 0, we are at a non-void
+ return type. Otherwise, we are still processing
+ arguments. */
+ if (arg_num == 0)
+ return_type = eltype;
+ else
+ args = tree_cons (NULL_TREE, eltype, args);
+ }
+
+ ftype = build_function_type (return_type, args);
+
+ gcc_assert (ftype != NULL);
+
+ if (print_type_signature_p)
+ snprintf (namebuf, sizeof (namebuf), "__builtin_aarch64_%s%s_%s",
+ d->name, modenames[d->mode], type_signature);
+ else
+ snprintf (namebuf, sizeof (namebuf), "__builtin_aarch64_%s%s",
+ d->name, modenames[d->mode]);
+
+ fndecl = add_builtin_function (namebuf, ftype, fcode, BUILT_IN_MD,
+ NULL, NULL_TREE);
+ aarch64_builtin_decls[fcode] = fndecl;
+ }
+}
+
+void
+aarch64_init_builtins (void)
+{
+ if (TARGET_SIMD)
+ aarch64_init_simd_builtins ();
+}
+
+tree
+aarch64_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
+{
+ if (code >= AARCH64_BUILTIN_MAX)
+ return error_mark_node;
+
+ return aarch64_builtin_decls[code];
+}
+
+typedef enum
+{
+ SIMD_ARG_COPY_TO_REG,
+ SIMD_ARG_CONSTANT,
+ SIMD_ARG_STOP
+} builtin_simd_arg;
+
+static rtx
+aarch64_simd_expand_args (rtx target, int icode, int have_retval,
+ tree exp, ...)
+{
+ va_list ap;
+ rtx pat;
+ tree arg[SIMD_MAX_BUILTIN_ARGS];
+ rtx op[SIMD_MAX_BUILTIN_ARGS];
+ enum machine_mode tmode = insn_data[icode].operand[0].mode;
+ enum machine_mode mode[SIMD_MAX_BUILTIN_ARGS];
+ int argc = 0;
+
+ if (have_retval
+ && (!target
+ || GET_MODE (target) != tmode
+ || !(*insn_data[icode].operand[0].predicate) (target, tmode)))
+ target = gen_reg_rtx (tmode);
+
+ va_start (ap, exp);
+
+ for (;;)
+ {
+ builtin_simd_arg thisarg = (builtin_simd_arg) va_arg (ap, int);
+
+ if (thisarg == SIMD_ARG_STOP)
+ break;
+ else
+ {
+ arg[argc] = CALL_EXPR_ARG (exp, argc);
+ op[argc] = expand_normal (arg[argc]);
+ mode[argc] = insn_data[icode].operand[argc + have_retval].mode;
+
+ switch (thisarg)
+ {
+ case SIMD_ARG_COPY_TO_REG:
+ if (POINTER_TYPE_P (TREE_TYPE (arg[argc])))
+ op[argc] = convert_memory_address (Pmode, op[argc]);
+ /*gcc_assert (GET_MODE (op[argc]) == mode[argc]); */
+ if (!(*insn_data[icode].operand[argc + have_retval].predicate)
+ (op[argc], mode[argc]))
+ op[argc] = copy_to_mode_reg (mode[argc], op[argc]);
+ break;
+
+ case SIMD_ARG_CONSTANT:
+ if (!(*insn_data[icode].operand[argc + have_retval].predicate)
+ (op[argc], mode[argc]))
+ error_at (EXPR_LOCATION (exp), "incompatible type for argument %d, "
+ "expected %<const int%>", argc + 1);
+ break;
+
+ case SIMD_ARG_STOP:
+ gcc_unreachable ();
+ }
+
+ argc++;
+ }
+ }
+
+ va_end (ap);
+
+ if (have_retval)
+ switch (argc)
+ {
+ case 1:
+ pat = GEN_FCN (icode) (target, op[0]);
+ break;
+
+ case 2:
+ pat = GEN_FCN (icode) (target, op[0], op[1]);
+ break;
+
+ case 3:
+ pat = GEN_FCN (icode) (target, op[0], op[1], op[2]);
+ break;
+
+ case 4:
+ pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3]);
+ break;
+
+ case 5:
+ pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3], op[4]);
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ else
+ switch (argc)
+ {
+ case 1:
+ pat = GEN_FCN (icode) (op[0]);
+ break;
+
+ case 2:
+ pat = GEN_FCN (icode) (op[0], op[1]);
+ break;
+
+ case 3:
+ pat = GEN_FCN (icode) (op[0], op[1], op[2]);
+ break;
+
+ case 4:
+ pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
+ break;
+
+ case 5:
+ pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4]);
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ if (!pat)
+ return 0;
+
+ emit_insn (pat);
+
+ return target;
+}
+
+/* Expand an AArch64 AdvSIMD builtin (intrinsic). */
+rtx
+aarch64_simd_expand_builtin (int fcode, tree exp, rtx target)
+{
+ aarch64_simd_builtin_datum *d =
+ &aarch64_simd_builtin_data[fcode - (AARCH64_SIMD_BUILTIN_BASE + 1)];
+ enum insn_code icode = d->code;
+ builtin_simd_arg args[SIMD_MAX_BUILTIN_ARGS];
+ int num_args = insn_data[d->code].n_operands;
+ int is_void = 0;
+ int k;
+
+ is_void = !!(d->qualifiers[0] & qualifier_void);
+
+ num_args += is_void;
+
+ for (k = 1; k < num_args; k++)
+ {
+ /* We have four arrays of data, each indexed in a different fashion.
+ qualifiers - element 0 always describes the function return type.
+ operands - element 0 is either the operand for return value (if
+ the function has a non-void return type) or the operand for the
+ first argument.
+ expr_args - element 0 always holds the first argument.
+ args - element 0 is always used for the return type. */
+ int qualifiers_k = k;
+ int operands_k = k - is_void;
+ int expr_args_k = k - 1;
+
+ if (d->qualifiers[qualifiers_k] & qualifier_immediate)
+ args[k] = SIMD_ARG_CONSTANT;
+ else if (d->qualifiers[qualifiers_k] & qualifier_maybe_immediate)
+ {
+ rtx arg
+ = expand_normal (CALL_EXPR_ARG (exp,
+ (expr_args_k)));
+ /* Handle constants only if the predicate allows it. */
+ bool op_const_int_p =
+ (CONST_INT_P (arg)
+ && (*insn_data[icode].operand[operands_k].predicate)
+ (arg, insn_data[icode].operand[operands_k].mode));
+ args[k] = op_const_int_p ? SIMD_ARG_CONSTANT : SIMD_ARG_COPY_TO_REG;
+ }
+ else
+ args[k] = SIMD_ARG_COPY_TO_REG;
+
+ }
+ args[k] = SIMD_ARG_STOP;
+
+ /* The interface to aarch64_simd_expand_args expects a 0 if
+ the function is void, and a 1 if it is not. */
+ return aarch64_simd_expand_args
+ (target, icode, !is_void, exp,
+ args[1],
+ args[2],
+ args[3],
+ args[4],
+ SIMD_ARG_STOP);
+}
+
+/* Expand an expression EXP that calls a built-in function,
+ with result going to TARGET if that's convenient. */
+rtx
+aarch64_expand_builtin (tree exp,
+ rtx target,
+ rtx subtarget ATTRIBUTE_UNUSED,
+ enum machine_mode mode ATTRIBUTE_UNUSED,
+ int ignore ATTRIBUTE_UNUSED)
+{
+ tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
+ int fcode = DECL_FUNCTION_CODE (fndecl);
+
+ if (fcode >= AARCH64_SIMD_BUILTIN_BASE)
+ return aarch64_simd_expand_builtin (fcode, exp, target);
+
+ return NULL_RTX;
+}
+
+tree
+aarch64_builtin_vectorized_function (tree fndecl, tree type_out, tree type_in)
+{
+ enum machine_mode in_mode, out_mode;
+ int in_n, out_n;
+
+ if (TREE_CODE (type_out) != VECTOR_TYPE
+ || TREE_CODE (type_in) != VECTOR_TYPE)
+ return NULL_TREE;
+
+ out_mode = TYPE_MODE (TREE_TYPE (type_out));
+ out_n = TYPE_VECTOR_SUBPARTS (type_out);
+ in_mode = TYPE_MODE (TREE_TYPE (type_in));
+ in_n = TYPE_VECTOR_SUBPARTS (type_in);
+
+#undef AARCH64_CHECK_BUILTIN_MODE
+#define AARCH64_CHECK_BUILTIN_MODE(C, N) 1
+#define AARCH64_FIND_FRINT_VARIANT(N) \
+ (AARCH64_CHECK_BUILTIN_MODE (2, D) \
+ ? aarch64_builtin_decls[AARCH64_SIMD_BUILTIN_UNOP_##N##v2df] \
+ : (AARCH64_CHECK_BUILTIN_MODE (4, S) \
+ ? aarch64_builtin_decls[AARCH64_SIMD_BUILTIN_UNOP_##N##v4sf] \
+ : (AARCH64_CHECK_BUILTIN_MODE (2, S) \
+ ? aarch64_builtin_decls[AARCH64_SIMD_BUILTIN_UNOP_##N##v2sf] \
+ : NULL_TREE)))
+ if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
+ {
+ enum built_in_function fn = DECL_FUNCTION_CODE (fndecl);
+ switch (fn)
+ {
+#undef AARCH64_CHECK_BUILTIN_MODE
+#define AARCH64_CHECK_BUILTIN_MODE(C, N) \
+ (out_mode == N##Fmode && out_n == C \
+ && in_mode == N##Fmode && in_n == C)
+ case BUILT_IN_FLOOR:
+ case BUILT_IN_FLOORF:
+ return AARCH64_FIND_FRINT_VARIANT (floor);
+ case BUILT_IN_CEIL:
+ case BUILT_IN_CEILF:
+ return AARCH64_FIND_FRINT_VARIANT (ceil);
+ case BUILT_IN_TRUNC:
+ case BUILT_IN_TRUNCF:
+ return AARCH64_FIND_FRINT_VARIANT (btrunc);
+ case BUILT_IN_ROUND:
+ case BUILT_IN_ROUNDF:
+ return AARCH64_FIND_FRINT_VARIANT (round);
+ case BUILT_IN_NEARBYINT:
+ case BUILT_IN_NEARBYINTF:
+ return AARCH64_FIND_FRINT_VARIANT (nearbyint);
+ case BUILT_IN_SQRT:
+ case BUILT_IN_SQRTF:
+ return AARCH64_FIND_FRINT_VARIANT (sqrt);
+#undef AARCH64_CHECK_BUILTIN_MODE
+#define AARCH64_CHECK_BUILTIN_MODE(C, N) \
+ (out_mode == SImode && out_n == C \
+ && in_mode == N##Imode && in_n == C)
+ case BUILT_IN_CLZ:
+ {
+ if (AARCH64_CHECK_BUILTIN_MODE (4, S))
+ return aarch64_builtin_decls[AARCH64_SIMD_BUILTIN_UNOP_clzv4si];
+ return NULL_TREE;
+ }
+#undef AARCH64_CHECK_BUILTIN_MODE
+#define AARCH64_CHECK_BUILTIN_MODE(C, N) \
+ (out_mode == N##Imode && out_n == C \
+ && in_mode == N##Fmode && in_n == C)
+ case BUILT_IN_LFLOOR:
+ case BUILT_IN_LFLOORF:
+ case BUILT_IN_LLFLOOR:
+ case BUILT_IN_IFLOORF:
+ {
+ enum aarch64_builtins builtin;
+ if (AARCH64_CHECK_BUILTIN_MODE (2, D))
+ builtin = AARCH64_SIMD_BUILTIN_UNOP_lfloorv2dfv2di;
+ else if (AARCH64_CHECK_BUILTIN_MODE (4, S))
+ builtin = AARCH64_SIMD_BUILTIN_UNOP_lfloorv4sfv4si;
+ else if (AARCH64_CHECK_BUILTIN_MODE (2, S))
+ builtin = AARCH64_SIMD_BUILTIN_UNOP_lfloorv2sfv2si;
+ else
+ return NULL_TREE;
+
+ return aarch64_builtin_decls[builtin];
+ }
+ case BUILT_IN_LCEIL:
+ case BUILT_IN_LCEILF:
+ case BUILT_IN_LLCEIL:
+ case BUILT_IN_ICEILF:
+ {
+ enum aarch64_builtins builtin;
+ if (AARCH64_CHECK_BUILTIN_MODE (2, D))
+ builtin = AARCH64_SIMD_BUILTIN_UNOP_lceilv2dfv2di;
+ else if (AARCH64_CHECK_BUILTIN_MODE (4, S))
+ builtin = AARCH64_SIMD_BUILTIN_UNOP_lceilv4sfv4si;
+ else if (AARCH64_CHECK_BUILTIN_MODE (2, S))
+ builtin = AARCH64_SIMD_BUILTIN_UNOP_lceilv2sfv2si;
+ else
+ return NULL_TREE;
+
+ return aarch64_builtin_decls[builtin];
+ }
+ case BUILT_IN_LROUND:
+ case BUILT_IN_IROUNDF:
+ {
+ enum aarch64_builtins builtin;
+ if (AARCH64_CHECK_BUILTIN_MODE (2, D))
+ builtin = AARCH64_SIMD_BUILTIN_UNOP_lroundv2dfv2di;
+ else if (AARCH64_CHECK_BUILTIN_MODE (4, S))
+ builtin = AARCH64_SIMD_BUILTIN_UNOP_lroundv4sfv4si;
+ else if (AARCH64_CHECK_BUILTIN_MODE (2, S))
+ builtin = AARCH64_SIMD_BUILTIN_UNOP_lroundv2sfv2si;
+ else
+ return NULL_TREE;
+
+ return aarch64_builtin_decls[builtin];
+ }
+
+ default:
+ return NULL_TREE;
+ }
+ }
+
+ return NULL_TREE;
+}
+
+#undef VAR1
+#define VAR1(T, N, MAP, A) \
+ case AARCH64_SIMD_BUILTIN_##T##_##N##A:
+
+tree
+aarch64_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED, tree *args,
+ bool ignore ATTRIBUTE_UNUSED)
+{
+ int fcode = DECL_FUNCTION_CODE (fndecl);
+ tree type = TREE_TYPE (TREE_TYPE (fndecl));
+
+ switch (fcode)
+ {
+ BUILTIN_VALLDI (UNOP, abs, 2)
+ return fold_build1 (ABS_EXPR, type, args[0]);
+ break;
+ BUILTIN_VALLDI (BINOP, cmge, 0)
+ return fold_build2 (GE_EXPR, type, args[0], args[1]);
+ break;
+ BUILTIN_VALLDI (BINOP, cmgt, 0)
+ return fold_build2 (GT_EXPR, type, args[0], args[1]);
+ break;
+ BUILTIN_VALLDI (BINOP, cmeq, 0)
+ return fold_build2 (EQ_EXPR, type, args[0], args[1]);
+ break;
+ BUILTIN_VSDQ_I_DI (BINOP, cmtst, 0)
+ {
+ tree and_node = fold_build2 (BIT_AND_EXPR, type, args[0], args[1]);
+ tree vec_zero_node = build_zero_cst (type);
+ return fold_build2 (NE_EXPR, type, and_node, vec_zero_node);
+ break;
+ }
+ VAR1 (UNOP, floatv2si, 2, v2sf)
+ VAR1 (UNOP, floatv4si, 2, v4sf)
+ VAR1 (UNOP, floatv2di, 2, v2df)
+ return fold_build1 (FLOAT_EXPR, type, args[0]);
+ default:
+ break;
+ }
+
+ return NULL_TREE;
+}
+
+bool
+aarch64_gimple_fold_builtin (gimple_stmt_iterator *gsi)
+{
+ bool changed = false;
+ gimple stmt = gsi_stmt (*gsi);
+ tree call = gimple_call_fn (stmt);
+ tree fndecl;
+ gimple new_stmt = NULL;
+ if (call)
+ {
+ fndecl = gimple_call_fndecl (stmt);
+ if (fndecl)
+ {
+ int fcode = DECL_FUNCTION_CODE (fndecl);
+ int nargs = gimple_call_num_args (stmt);
+ tree *args = (nargs > 0
+ ? gimple_call_arg_ptr (stmt, 0)
+ : &error_mark_node);
+
+ switch (fcode)
+ {
+ BUILTIN_VALL (UNOP, reduc_splus_, 10)
+ new_stmt = gimple_build_assign_with_ops (
+ REDUC_PLUS_EXPR,
+ gimple_call_lhs (stmt),
+ args[0],
+ NULL_TREE);
+ break;
+ BUILTIN_VDQIF (UNOP, reduc_smax_, 10)
+ new_stmt = gimple_build_assign_with_ops (
+ REDUC_MAX_EXPR,
+ gimple_call_lhs (stmt),
+ args[0],
+ NULL_TREE);
+ break;
+ BUILTIN_VDQIF (UNOP, reduc_smin_, 10)
+ new_stmt = gimple_build_assign_with_ops (
+ REDUC_MIN_EXPR,
+ gimple_call_lhs (stmt),
+ args[0],
+ NULL_TREE);
+ break;
+
+ default:
+ break;
+ }
+ }
+ }
+
+ if (new_stmt)
+ {
+ gsi_replace (gsi, new_stmt, true);
+ changed = true;
+ }
+
+ return changed;
+}
+
+#undef AARCH64_CHECK_BUILTIN_MODE
+#undef AARCH64_FIND_FRINT_VARIANT
+#undef BUILTIN_DX
+#undef BUILTIN_SDQ_I
+#undef BUILTIN_SD_HSI
+#undef BUILTIN_V2F
+#undef BUILTIN_VALL
+#undef BUILTIN_VB
+#undef BUILTIN_VD
+#undef BUILTIN_VDC
+#undef BUILTIN_VDIC
+#undef BUILTIN_VDN
+#undef BUILTIN_VDQ
+#undef BUILTIN_VDQF
+#undef BUILTIN_VDQH
+#undef BUILTIN_VDQHS
+#undef BUILTIN_VDQIF
+#undef BUILTIN_VDQM
+#undef BUILTIN_VDQV
+#undef BUILTIN_VDQ_BHSI
+#undef BUILTIN_VDQ_I
+#undef BUILTIN_VDW
+#undef BUILTIN_VD_BHSI
+#undef BUILTIN_VD_HSI
+#undef BUILTIN_VD_RE
+#undef BUILTIN_VQ
+#undef BUILTIN_VQN
+#undef BUILTIN_VQW
+#undef BUILTIN_VQ_HSI
+#undef BUILTIN_VQ_S
+#undef BUILTIN_VSDQ_HSI
+#undef BUILTIN_VSDQ_I
+#undef BUILTIN_VSDQ_I_BHSI
+#undef BUILTIN_VSDQ_I_DI
+#undef BUILTIN_VSD_HSI
+#undef BUILTIN_VSQN_HSDI
+#undef BUILTIN_VSTRUCT
+#undef CF0
+#undef CF1
+#undef CF2
+#undef CF3
+#undef CF4
+#undef CF10
+#undef VAR1
+#undef VAR2
+#undef VAR3
+#undef VAR4
+#undef VAR5
+#undef VAR6
+#undef VAR7
+#undef VAR8
+#undef VAR9
+#undef VAR10
+#undef VAR11
+
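Given the naming scheme in aarch64_init_simd_builtins, the generated functions surface to C as __builtin_aarch64_<name><mode>, with a _<signature> suffix when unsigned or poly operands are involved. A minimal usage sketch, assuming a gcc-4.9 aarch64 target and the abs builtin registered over V4SF by the .def file (in practice these builtins are reached through arm_neon.h intrinsics such as vabsq_f32 rather than called directly):

/* Sketch only: call one generated AdvSIMD builtin directly.  */
typedef float sketch_f32x4 __attribute__ ((vector_size (16)));

sketch_f32x4
sketch_abs_v4sf (sketch_f32x4 x)
{
  /* Folded to ABS_EXPR by aarch64_fold_builtin (the BUILTIN_VALLDI
     (UNOP, abs, 2) case above), otherwise expanded through
     aarch64_simd_expand_builtin.  */
  return __builtin_aarch64_absv4sf (x);
}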
diff --git a/gcc-4.9/gcc/config/aarch64/aarch64-cores.def b/gcc-4.9/gcc/config/aarch64/aarch64-cores.def
new file mode 100644
index 000000000..9319249e6
--- /dev/null
+++ b/gcc-4.9/gcc/config/aarch64/aarch64-cores.def
@@ -0,0 +1,42 @@
+/* Copyright (C) 2011-2014 Free Software Foundation, Inc.
+ Contributed by ARM Ltd.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+/* This is a list of cores that implement AArch64.
+
+ Before using #include to read this file, define a macro:
+
+ AARCH64_CORE(CORE_NAME, CORE_IDENT, SCHEDULER_IDENT, ARCH, FLAGS, COSTS)
+
+ The CORE_NAME is the name of the core, represented as a string constant.
+ The CORE_IDENT is the name of the core, represented as an identifier.
+ The SCHEDULER_IDENT is the name of the core for which scheduling decisions
+ will be made, represented as an identifier.
+ ARCH is the architecture revision implemented by the chip.
+ FLAGS are the bitwise-or of the traits that apply to that core.
+ This need not include flags implied by the architecture.
+ COSTS is the name of the rtx_costs routine to use. */
+
+/* V8 Architecture Processors. */
+
+AARCH64_CORE("cortex-a53", cortexa53, cortexa53, 8, AARCH64_FL_FPSIMD | AARCH64_FL_CRC | AARCH64_FL_CRYPTO, cortexa53)
+AARCH64_CORE("cortex-a57", cortexa15, cortexa15, 8, AARCH64_FL_FPSIMD | AARCH64_FL_CRC | AARCH64_FL_CRYPTO, cortexa57)
+
+/* V8 big.LITTLE implementations. */
+
+AARCH64_CORE("cortex-a57.cortex-a53", cortexa57cortexa53, cortexa53, 8, AARCH64_FL_FPSIMD | AARCH64_FL_CRC | AARCH64_FL_CRYPTO, cortexa57)
diff --git a/gcc-4.9/gcc/config/aarch64/aarch64-elf-raw.h b/gcc-4.9/gcc/config/aarch64/aarch64-elf-raw.h
new file mode 100644
index 000000000..adec7e7ba
--- /dev/null
+++ b/gcc-4.9/gcc/config/aarch64/aarch64-elf-raw.h
@@ -0,0 +1,33 @@
+/* Machine description for AArch64 architecture.
+ Copyright (C) 2009-2014 Free Software Foundation, Inc.
+ Contributed by ARM Ltd.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+/* Support for bare-metal builds. */
+#ifndef GCC_AARCH64_ELF_RAW_H
+#define GCC_AARCH64_ELF_RAW_H
+
+#define STARTFILE_SPEC " crti%O%s crtbegin%O%s crt0%O%s"
+#define ENDFILE_SPEC " crtend%O%s crtn%O%s"
+
+#ifndef LINK_SPEC
+#define LINK_SPEC "%{mbig-endian:-EB} %{mlittle-endian:-EL} -X \
+ -maarch64elf%{mabi=ilp32*:32}%{mbig-endian:b}"
+#endif
+
+#endif /* GCC_AARCH64_ELF_RAW_H */
diff --git a/gcc-4.9/gcc/config/aarch64/aarch64-elf.h b/gcc-4.9/gcc/config/aarch64/aarch64-elf.h
new file mode 100644
index 000000000..15ab630de
--- /dev/null
+++ b/gcc-4.9/gcc/config/aarch64/aarch64-elf.h
@@ -0,0 +1,161 @@
+/* Machine description for AArch64 architecture.
+ Copyright (C) 2009-2014 Free Software Foundation, Inc.
+ Contributed by ARM Ltd.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_AARCH64_ELF_H
+#define GCC_AARCH64_ELF_H
+
+
+#define ASM_OUTPUT_LABELREF(FILE, NAME) \
+ aarch64_asm_output_labelref (FILE, NAME)
+
+#define ASM_OUTPUT_DEF(FILE, NAME1, NAME2) \
+ do \
+ { \
+ assemble_name (FILE, NAME1); \
+ fputs (" = ", FILE); \
+ assemble_name (FILE, NAME2); \
+ fputc ('\n', FILE); \
+ } while (0)
+
+#define TEXT_SECTION_ASM_OP "\t.text"
+#define DATA_SECTION_ASM_OP "\t.data"
+#define BSS_SECTION_ASM_OP "\t.bss"
+
+#define CTORS_SECTION_ASM_OP "\t.section\t.init_array,\"aw\",%init_array"
+#define DTORS_SECTION_ASM_OP "\t.section\t.fini_array,\"aw\",%fini_array"
+
+#undef INIT_SECTION_ASM_OP
+#undef FINI_SECTION_ASM_OP
+#define INIT_ARRAY_SECTION_ASM_OP CTORS_SECTION_ASM_OP
+#define FINI_ARRAY_SECTION_ASM_OP DTORS_SECTION_ASM_OP
+
+/* Since we use .init_array/.fini_array we don't need the markers at
+ the start and end of the ctors/dtors arrays. */
+#define CTOR_LIST_BEGIN asm (CTORS_SECTION_ASM_OP)
+#define CTOR_LIST_END /* empty */
+#define DTOR_LIST_BEGIN asm (DTORS_SECTION_ASM_OP)
+#define DTOR_LIST_END /* empty */
+
+#undef TARGET_ASM_CONSTRUCTOR
+#define TARGET_ASM_CONSTRUCTOR aarch64_elf_asm_constructor
+
+#undef TARGET_ASM_DESTRUCTOR
+#define TARGET_ASM_DESTRUCTOR aarch64_elf_asm_destructor
+
+#ifdef HAVE_GAS_MAX_SKIP_P2ALIGN
+/* Support for -falign-* switches. Use .p2align to ensure that code
+ sections are padded with NOP instructions, rather than zeros. */
+#define ASM_OUTPUT_MAX_SKIP_ALIGN(FILE, LOG, MAX_SKIP) \
+ do \
+ { \
+ if ((LOG) != 0) \
+ { \
+ if ((MAX_SKIP) == 0) \
+ fprintf ((FILE), "\t.p2align %d\n", (int) (LOG)); \
+ else \
+ fprintf ((FILE), "\t.p2align %d,,%d\n", \
+ (int) (LOG), (int) (MAX_SKIP)); \
+ } \
+ } while (0)
+
+#endif /* HAVE_GAS_MAX_SKIP_P2ALIGN */
+
+#define JUMP_TABLES_IN_TEXT_SECTION 0
+
+#define ASM_OUTPUT_ADDR_DIFF_ELT(STREAM, BODY, VALUE, REL) \
+ do { \
+ switch (GET_MODE (BODY)) \
+ { \
+ case QImode: \
+ asm_fprintf (STREAM, "\t.byte\t(%LL%d - %LLrtx%d) / 4\n", \
+ VALUE, REL); \
+ break; \
+ case HImode: \
+ asm_fprintf (STREAM, "\t.2byte\t(%LL%d - %LLrtx%d) / 4\n", \
+ VALUE, REL); \
+ break; \
+ case SImode: \
+ case DImode: /* See comment in aarch64_output_casesi. */ \
+ asm_fprintf (STREAM, "\t.word\t(%LL%d - %LLrtx%d) / 4\n", \
+ VALUE, REL); \
+ break; \
+ default: \
+ gcc_unreachable (); \
+ } \
+ } while (0)
+
+#define ASM_OUTPUT_ALIGN(STREAM, POWER) \
+ fprintf(STREAM, "\t.align\t%d\n", (int)POWER)
+
+#define ASM_COMMENT_START "//"
+
+#define LOCAL_LABEL_PREFIX "."
+#define USER_LABEL_PREFIX ""
+
+#define GLOBAL_ASM_OP "\t.global\t"
+
+#ifdef TARGET_BIG_ENDIAN_DEFAULT
+#define ENDIAN_SPEC "-mbig-endian"
+#else
+#define ENDIAN_SPEC "-mlittle-endian"
+#endif
+
+#if TARGET_DATA_MODEL == 1
+#define ABI_SPEC "-mabi=lp64"
+#define MULTILIB_DEFAULTS { "mabi=lp64" }
+#elif TARGET_DATA_MODEL == 2
+#define ABI_SPEC "-mabi=ilp32"
+#define MULTILIB_DEFAULTS { "mabi=ilp32" }
+#else
+#error "Unknown or undefined TARGET_DATA_MODEL!"
+#endif
+
+/* Force the default endianness and ABI flags onto the command line
+ in order to make the other specs easier to write. */
+#undef DRIVER_SELF_SPECS
+#define DRIVER_SELF_SPECS \
+ " %{!mbig-endian:%{!mlittle-endian:" ENDIAN_SPEC "}}" \
+ " %{!mabi=*:" ABI_SPEC "}"
+
+#ifdef HAVE_AS_MABI_OPTION
+#define ASM_MABI_SPEC "%{mabi=*:-mabi=%*}"
+#else
+#define ASM_MABI_SPEC "%{mabi=lp64:}"
+#endif
+
+#ifndef ASM_SPEC
+#define ASM_SPEC "\
+%{mbig-endian:-EB} \
+%{mlittle-endian:-EL} \
+%{march=*:-march=%*} \
+%(asm_cpu_spec)" \
+ASM_MABI_SPEC
+#endif
+
+#undef TYPE_OPERAND_FMT
+#define TYPE_OPERAND_FMT "%%%s"
+
+#undef TARGET_ASM_NAMED_SECTION
+#define TARGET_ASM_NAMED_SECTION aarch64_elf_asm_named_section
+
+/* Stabs debug not required. */
+#undef DBX_DEBUGGING_INFO
+
+#endif /* GCC_AARCH64_ELF_H */
diff --git a/gcc-4.9/gcc/config/aarch64/aarch64-linux.h b/gcc-4.9/gcc/config/aarch64/aarch64-linux.h
new file mode 100644
index 000000000..a8f077156
--- /dev/null
+++ b/gcc-4.9/gcc/config/aarch64/aarch64-linux.h
@@ -0,0 +1,47 @@
+/* Machine description for AArch64 architecture.
+ Copyright (C) 2009-2014 Free Software Foundation, Inc.
+ Contributed by ARM Ltd.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_AARCH64_LINUX_H
+#define GCC_AARCH64_LINUX_H
+
+#define GLIBC_DYNAMIC_LINKER "/lib/ld-linux-aarch64%{mbig-endian:_be}.so.1"
+
+#define CPP_SPEC "%{pthread:-D_REENTRANT}"
+
+#define LINUX_TARGET_LINK_SPEC "%{h*} \
+ %{static:-Bstatic} \
+ %{shared:-shared} \
+ %{symbolic:-Bsymbolic} \
+ %{rdynamic:-export-dynamic} \
+ -dynamic-linker " GNU_USER_DYNAMIC_LINKER " \
+ -X \
+ %{mbig-endian:-EB} %{mlittle-endian:-EL} \
+ -maarch64linux%{mbig-endian:b}"
+
+#define LINK_SPEC LINUX_TARGET_LINK_SPEC
+
+#define TARGET_OS_CPP_BUILTINS() \
+ do \
+ { \
+ GNU_USER_TARGET_OS_CPP_BUILTINS(); \
+ } \
+ while (0)
+
+#endif /* GCC_AARCH64_LINUX_H */
diff --git a/gcc-4.9/gcc/config/aarch64/aarch64-modes.def b/gcc-4.9/gcc/config/aarch64/aarch64-modes.def
new file mode 100644
index 000000000..1d2cc7679
--- /dev/null
+++ b/gcc-4.9/gcc/config/aarch64/aarch64-modes.def
@@ -0,0 +1,55 @@
+/* Machine description for AArch64 architecture.
+ Copyright (C) 2009-2014 Free Software Foundation, Inc.
+ Contributed by ARM Ltd.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+CC_MODE (CCFP);
+CC_MODE (CCFPE);
+CC_MODE (CC_SWP);
+CC_MODE (CC_ZESWP); /* zero-extend LHS (but swap to make it RHS). */
+CC_MODE (CC_SESWP); /* sign-extend LHS (but swap to make it RHS). */
+CC_MODE (CC_NZ); /* Only N and Z bits of condition flags are valid. */
+CC_MODE (CC_Z); /* Only Z bit of condition flags is valid. */
+
+/* Vector modes. */
+VECTOR_MODES (INT, 8); /* V8QI V4HI V2SI. */
+VECTOR_MODES (INT, 16); /* V16QI V8HI V4SI V2DI. */
+VECTOR_MODES (FLOAT, 8); /* V2SF. */
+VECTOR_MODES (FLOAT, 16); /* V4SF V2DF. */
+
+/* Oct Int: 256-bit integer mode needed for 32-byte vector arguments. */
+INT_MODE (OI, 32);
+
+/* Opaque integer modes for 3, 6 or 8 Neon double registers (2 is
+ TImode). */
+INT_MODE (EI, 24);
+INT_MODE (CI, 48);
+INT_MODE (XI, 64);
+
+/* Vector modes for register lists. */
+VECTOR_MODES (INT, 32); /* V32QI V16HI V8SI V4DI. */
+VECTOR_MODES (FLOAT, 32); /* V8SF V4DF. */
+
+VECTOR_MODES (INT, 48); /* V48QI V24HI V12SI V6DI. */
+VECTOR_MODES (FLOAT, 48); /* V12SF V6DF. */
+
+VECTOR_MODES (INT, 64); /* V64QI V32HI V16SI V8DI. */
+VECTOR_MODES (FLOAT, 64); /* V16SF V8DF. */
+
+/* Quad float: 128-bit floating mode for long doubles. */
+FLOAT_MODE (TF, 16, ieee_quad_format);
diff --git a/gcc-4.9/gcc/config/aarch64/aarch64-option-extensions.def b/gcc-4.9/gcc/config/aarch64/aarch64-option-extensions.def
new file mode 100644
index 000000000..1aa65d32a
--- /dev/null
+++ b/gcc-4.9/gcc/config/aarch64/aarch64-option-extensions.def
@@ -0,0 +1,38 @@
+/* Copyright (C) 2012-2014 Free Software Foundation, Inc.
+ Contributed by ARM Ltd.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+/* This is a list of ISA extensions in AArch64.
+
+ Before using #include to read this file, define a macro:
+
+ AARCH64_OPT_EXTENSION(EXT_NAME, FLAGS_ON, FLAGS_OFF)
+
+ EXT_NAME is the name of the extension, represented as a string constant.
+ FLAGS_ON are the bitwise-or of the features that the extension adds.
+ FLAGS_OFF are the bitwise-or of the features that the extension removes. */
+
+/* V8 Architecture Extensions.
+ This list currently contains example extensions for CPUs that implement
+ AArch64, and therefore serves as a template for adding more CPUs in the
+ future. */
+
+AARCH64_OPT_EXTENSION("fp", AARCH64_FL_FP, AARCH64_FL_FPSIMD | AARCH64_FL_CRYPTO)
+AARCH64_OPT_EXTENSION("simd", AARCH64_FL_FPSIMD, AARCH64_FL_SIMD | AARCH64_FL_CRYPTO)
+AARCH64_OPT_EXTENSION("crypto", AARCH64_FL_CRYPTO | AARCH64_FL_FPSIMD, AARCH64_FL_CRYPTO)
+AARCH64_OPT_EXTENSION("crc", AARCH64_FL_CRC, AARCH64_FL_CRC)
diff --git a/gcc-4.9/gcc/config/aarch64/aarch64-opts.h b/gcc-4.9/gcc/config/aarch64/aarch64-opts.h
new file mode 100644
index 000000000..370931536
--- /dev/null
+++ b/gcc-4.9/gcc/config/aarch64/aarch64-opts.h
@@ -0,0 +1,64 @@
+/* Copyright (C) 2011-2014 Free Software Foundation, Inc.
+ Contributed by ARM Ltd.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+/* Definitions for option handling for AArch64. */
+
+#ifndef GCC_AARCH64_OPTS_H
+#define GCC_AARCH64_OPTS_H
+
+/* The various cores that implement AArch64. */
+enum aarch64_processor
+{
+#define AARCH64_CORE(NAME, INTERNAL_IDENT, IDENT, ARCH, FLAGS, COSTS) \
+ INTERNAL_IDENT,
+#include "aarch64-cores.def"
+#undef AARCH64_CORE
+ /* Used to indicate that no processor has been specified. */
+ generic,
+ /* Used to mark the end of the processor table. */
+ aarch64_none
+};
+
+/* TLS types. */
+enum aarch64_tls_type {
+ TLS_TRADITIONAL,
+ TLS_DESCRIPTORS
+};
+
+/* The code model defines the address generation strategy.
+ Most have a PIC and non-PIC variant. */
+enum aarch64_code_model {
+ /* Static code and data fit within a 1MB region.
+ Not fully implemented, mostly treated as SMALL. */
+ AARCH64_CMODEL_TINY,
+ /* Static code, data and GOT/PLT fit within a 1MB region.
+ Not fully implemented, mostly treated as SMALL_PIC. */
+ AARCH64_CMODEL_TINY_PIC,
+ /* Static code and data fit within a 4GB region.
+ The default non-PIC code model. */
+ AARCH64_CMODEL_SMALL,
+ /* Static code, data and GOT/PLT fit within a 4GB region.
+ The default PIC code model. */
+ AARCH64_CMODEL_SMALL_PIC,
+ /* No assumptions about addresses of code and data.
+ The PIC variant is not yet implemented. */
+ AARCH64_CMODEL_LARGE
+};
+
+#endif
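As a rough illustration of how the code-model enum above gets used, the sketch below maps a -mcmodel= style string plus a PIC flag onto the enum. The real selection is done by GCC's option machinery (aarch64.opt and aarch64.c); the helper name and its fallback behaviour for unrecognised strings are assumptions made only for this example.

/* Hedged sketch: mapping a code-model name and PIC flag to the enum.
   Not the GCC option-handling code.  */
#include <string.h>
#include <stdio.h>

enum aarch64_code_model { AARCH64_CMODEL_TINY, AARCH64_CMODEL_TINY_PIC,
                          AARCH64_CMODEL_SMALL, AARCH64_CMODEL_SMALL_PIC,
                          AARCH64_CMODEL_LARGE };

static enum aarch64_code_model
pick_cmodel (const char *name, int pic)
{
  if (strcmp (name, "tiny") == 0)
    return pic ? AARCH64_CMODEL_TINY_PIC : AARCH64_CMODEL_TINY;
  if (strcmp (name, "large") == 0)
    return AARCH64_CMODEL_LARGE;   /* no PIC variant yet, per the comment above */
  return pic ? AARCH64_CMODEL_SMALL_PIC : AARCH64_CMODEL_SMALL;
}

int main (void)
{
  printf ("%d\n", pick_cmodel ("small", 1));   /* AARCH64_CMODEL_SMALL_PIC */
  return 0;
}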
diff --git a/gcc-4.9/gcc/config/aarch64/aarch64-protos.h b/gcc-4.9/gcc/config/aarch64/aarch64-protos.h
new file mode 100644
index 000000000..5542f023b
--- /dev/null
+++ b/gcc-4.9/gcc/config/aarch64/aarch64-protos.h
@@ -0,0 +1,292 @@
+/* Machine description for AArch64 architecture.
+ Copyright (C) 2009-2014 Free Software Foundation, Inc.
+ Contributed by ARM Ltd.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+
+#ifndef GCC_AARCH64_PROTOS_H
+#define GCC_AARCH64_PROTOS_H
+
+/*
+ SYMBOL_CONTEXT_ADR
+ The symbol is used in a load-address operation.
+ SYMBOL_CONTEXT_MEM
+ The symbol is used as the address in a MEM.
+ */
+enum aarch64_symbol_context
+{
+ SYMBOL_CONTEXT_MEM,
+ SYMBOL_CONTEXT_ADR
+};
+
+/* SYMBOL_SMALL_ABSOLUTE: Generate symbol accesses through
+   high (page) and lo12 relocs, where the base address is calculated
+   using a PC-relative reloc.
+ So to get the address of foo, we generate
+ adrp x0, foo
+ add x0, x0, :lo12:foo
+
+ To load or store something to foo, we could use the corresponding
+ load store variants that generate an
+ ldr x0, [x0,:lo12:foo]
+ or
+ str x1, [x0, :lo12:foo]
+
+ This corresponds to the small code model of the compiler.
+
+   SYMBOL_SMALL_GOT: Similar to the above, but this
+   gives us the GOT entry of the symbol being referred to.
+   Calculating the GOT entry for foo is done using the
+ following sequence of instructions. The ADRP instruction
+ gets us to the page containing the GOT entry of the symbol
+ and the got_lo12 gets us the actual offset in it.
+
+ adrp x0, :got:foo
+ ldr x0, [x0, :gotoff_lo12:foo]
+
+ This corresponds to the small PIC model of the compiler.
+
+ SYMBOL_SMALL_TLSGD
+ SYMBOL_SMALL_TLSDESC
+ SYMBOL_SMALL_GOTTPREL
+ SYMBOL_SMALL_TPREL
+   Each of these represents a thread-local symbol, and corresponds to the
+ thread local storage relocation operator for the symbol being referred to.
+
+ SYMBOL_TINY_ABSOLUTE
+
+ Generate symbol accesses as a PC relative address using a single
+ instruction. To compute the address of symbol foo, we generate:
+
+ ADR x0, foo
+
+ SYMBOL_TINY_GOT
+
+ Generate symbol accesses via the GOT using a single PC relative
+ instruction. To compute the address of symbol foo, we generate:
+
+ ldr t0, :got:foo
+
+   The value of foo can subsequently be read using:
+
+ ldrb t0, [t0]
+
+   SYMBOL_FORCE_TO_MEM: Global variables are addressed via the
+   constant pool.  All variable addresses are spilled into constant
+   pools, and the constant pools themselves are addressed using PC
+   relative accesses.  This only works for the large code model.
+ */
+enum aarch64_symbol_type
+{
+ SYMBOL_SMALL_ABSOLUTE,
+ SYMBOL_SMALL_GOT,
+ SYMBOL_SMALL_TLSGD,
+ SYMBOL_SMALL_TLSDESC,
+ SYMBOL_SMALL_GOTTPREL,
+ SYMBOL_SMALL_TPREL,
+ SYMBOL_TINY_ABSOLUTE,
+ SYMBOL_TINY_GOT,
+ SYMBOL_FORCE_TO_MEM
+};
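To connect the classification above with source code, the small stand-alone C file below shows the kind of accesses that, compiled non-PIC under the default small code model, typically come out as the adrp/add and adrp + :lo12: sequences quoted in the comment. The assembly in the comments simply mirrors those examples; the exact register numbers are illustrative only.

/* C-level view of the small-code-model sequences documented above.  */
int foo = 42;

int *
address_of_foo (void)
{
  /* SYMBOL_SMALL_ABSOLUTE:
       adrp x0, foo
       add  x0, x0, :lo12:foo  */
  return &foo;
}

int
load_foo (void)
{
  /* The corresponding load variant:
       adrp x0, foo
       ldr  w0, [x0, :lo12:foo]  */
  return foo;
}

int
main (void)
{
  return (*address_of_foo () == load_foo ()) ? 0 : 1;
}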
+
+/* A set of tuning parameters contains references to size and time
+ cost models and vectors for address cost calculations, register
+ move costs and memory move costs. */
+
+/* Additional cost for addresses. */
+struct cpu_addrcost_table
+{
+ const int pre_modify;
+ const int post_modify;
+ const int register_offset;
+ const int register_extend;
+ const int imm_offset;
+};
+
+/* Additional costs for register copies. Cost is for one register. */
+struct cpu_regmove_cost
+{
+ const int GP2GP;
+ const int GP2FP;
+ const int FP2GP;
+ const int FP2FP;
+};
+
+/* Cost for vector insn classes. */
+struct cpu_vector_cost
+{
+ const int scalar_stmt_cost; /* Cost of any scalar operation,
+ excluding load and store. */
+ const int scalar_load_cost; /* Cost of scalar load. */
+ const int scalar_store_cost; /* Cost of scalar store. */
+ const int vec_stmt_cost; /* Cost of any vector operation,
+ excluding load, store,
+ vector-to-scalar and
+ scalar-to-vector operation. */
+ const int vec_to_scalar_cost; /* Cost of vec-to-scalar operation. */
+ const int scalar_to_vec_cost; /* Cost of scalar-to-vector
+ operation. */
+ const int vec_align_load_cost; /* Cost of aligned vector load. */
+ const int vec_unalign_load_cost; /* Cost of unaligned vector load. */
+ const int vec_unalign_store_cost; /* Cost of unaligned vector store. */
+ const int vec_store_cost; /* Cost of vector store. */
+ const int cond_taken_branch_cost; /* Cost of taken branch. */
+ const int cond_not_taken_branch_cost; /* Cost of not taken branch. */
+};
+
+struct tune_params
+{
+ const struct cpu_cost_table *const insn_extra_cost;
+ const struct cpu_addrcost_table *const addr_cost;
+ const struct cpu_regmove_cost *const regmove_cost;
+ const struct cpu_vector_cost *const vec_costs;
+ const int memmov_cost;
+ const int issue_rate;
+};
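The tuning record above is just a bundle of pointers to the cost tables plus two scalar parameters. The sketch below shows how such a record is composed, using trimmed re-declarations of the structures and invented numbers; the real instances live in aarch64.c and their values are not reproduced here.

/* Stand-alone sketch of composing a tuning record from cost tables.
   Every number is made up for illustration.  */
#include <stdio.h>

struct addrcost_sketch { int pre_modify, post_modify, register_offset,
                         register_extend, imm_offset; };
struct regmove_sketch { int GP2GP, GP2FP, FP2GP, FP2FP; };

struct tune_sketch
{
  const struct addrcost_sketch *addr_cost;
  const struct regmove_sketch *regmove_cost;
  int memmov_cost;
  int issue_rate;
};

static const struct addrcost_sketch example_addrcost = { 0, 0, 0, 0, 0 };
static const struct regmove_sketch example_regmove = { 1, 2, 2, 1 };

static const struct tune_sketch example_tunings =
{
  &example_addrcost,
  &example_regmove,
  4,   /* memmov_cost: invented */
  2,   /* issue_rate: invented */
};

int main (void)
{
  printf ("GP->FP move cost %d, issue rate %d\n",
          example_tunings.regmove_cost->GP2FP,
          example_tunings.issue_rate);
  return 0;
}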
+
+HOST_WIDE_INT aarch64_initial_elimination_offset (unsigned, unsigned);
+bool aarch64_bitmask_imm (HOST_WIDE_INT val, enum machine_mode);
+bool aarch64_cannot_change_mode_class (enum machine_mode,
+ enum machine_mode,
+ enum reg_class);
+enum aarch64_symbol_type
+aarch64_classify_symbolic_expression (rtx, enum aarch64_symbol_context);
+bool aarch64_constant_address_p (rtx);
+bool aarch64_float_const_zero_rtx_p (rtx);
+bool aarch64_function_arg_regno_p (unsigned);
+bool aarch64_gen_movmemqi (rtx *);
+bool aarch64_gimple_fold_builtin (gimple_stmt_iterator *);
+bool aarch64_is_extend_from_extract (enum machine_mode, rtx, rtx);
+bool aarch64_is_long_call_p (rtx);
+bool aarch64_label_mentioned_p (rtx);
+bool aarch64_legitimate_pic_operand_p (rtx);
+bool aarch64_move_imm (HOST_WIDE_INT, enum machine_mode);
+bool aarch64_mov_operand_p (rtx, enum aarch64_symbol_context,
+ enum machine_mode);
+char *aarch64_output_scalar_simd_mov_immediate (rtx, enum machine_mode);
+char *aarch64_output_simd_mov_immediate (rtx, enum machine_mode, unsigned);
+bool aarch64_pad_arg_upward (enum machine_mode, const_tree);
+bool aarch64_pad_reg_upward (enum machine_mode, const_tree, bool);
+bool aarch64_regno_ok_for_base_p (int, bool);
+bool aarch64_regno_ok_for_index_p (int, bool);
+bool aarch64_simd_imm_scalar_p (rtx x, enum machine_mode mode);
+bool aarch64_simd_imm_zero_p (rtx, enum machine_mode);
+bool aarch64_simd_scalar_immediate_valid_for_move (rtx, enum machine_mode);
+bool aarch64_simd_shift_imm_p (rtx, enum machine_mode, bool);
+bool aarch64_simd_valid_immediate (rtx, enum machine_mode, bool,
+ struct simd_immediate_info *);
+bool aarch64_symbolic_address_p (rtx);
+bool aarch64_uimm12_shift (HOST_WIDE_INT);
+const char *aarch64_output_casesi (rtx *);
+const char *aarch64_rewrite_selected_cpu (const char *name);
+
+enum aarch64_symbol_type aarch64_classify_symbol (rtx,
+ enum aarch64_symbol_context);
+enum aarch64_symbol_type aarch64_classify_tls_symbol (rtx);
+enum reg_class aarch64_regno_regclass (unsigned);
+int aarch64_asm_preferred_eh_data_format (int, int);
+int aarch64_hard_regno_mode_ok (unsigned, enum machine_mode);
+int aarch64_hard_regno_nregs (unsigned, enum machine_mode);
+int aarch64_simd_attr_length_move (rtx);
+int aarch64_uxt_size (int, HOST_WIDE_INT);
+rtx aarch64_final_eh_return_addr (void);
+rtx aarch64_legitimize_reload_address (rtx *, enum machine_mode, int, int, int);
+const char *aarch64_output_move_struct (rtx *operands);
+rtx aarch64_return_addr (int, rtx);
+rtx aarch64_simd_gen_const_vector_dup (enum machine_mode, int);
+bool aarch64_simd_mem_operand_p (rtx);
+rtx aarch64_simd_vect_par_cnst_half (enum machine_mode, bool);
+rtx aarch64_tls_get_addr (void);
+tree aarch64_fold_builtin (tree, int, tree *, bool);
+unsigned aarch64_dbx_register_number (unsigned);
+unsigned aarch64_trampoline_size (void);
+void aarch64_asm_output_labelref (FILE *, const char *);
+void aarch64_elf_asm_named_section (const char *, unsigned, tree);
+void aarch64_expand_epilogue (bool);
+void aarch64_expand_mov_immediate (rtx, rtx);
+void aarch64_expand_prologue (void);
+void aarch64_expand_vector_init (rtx, rtx);
+void aarch64_function_profiler (FILE *, int);
+void aarch64_init_cumulative_args (CUMULATIVE_ARGS *, const_tree, rtx,
+ const_tree, unsigned);
+void aarch64_init_expanders (void);
+void aarch64_print_operand (FILE *, rtx, char);
+void aarch64_print_operand_address (FILE *, rtx);
+
+/* Initialize builtins for SIMD intrinsics. */
+void init_aarch64_simd_builtins (void);
+
+void aarch64_simd_const_bounds (rtx, HOST_WIDE_INT, HOST_WIDE_INT);
+void aarch64_simd_disambiguate_copy (rtx *, rtx *, rtx *, unsigned int);
+
+/* Emit code to place an AdvSIMD pair result in memory locations (with equal
+ registers). */
+void aarch64_simd_emit_pair_result_insn (enum machine_mode,
+ rtx (*intfn) (rtx, rtx, rtx), rtx,
+ rtx);
+
+/* Expand builtins for SIMD intrinsics. */
+rtx aarch64_simd_expand_builtin (int, tree, rtx);
+
+void aarch64_simd_lane_bounds (rtx, HOST_WIDE_INT, HOST_WIDE_INT);
+
+/* Emit code for reinterprets. */
+void aarch64_simd_reinterpret (rtx, rtx);
+
+void aarch64_split_128bit_move (rtx, rtx);
+
+bool aarch64_split_128bit_move_p (rtx, rtx);
+
+void aarch64_split_simd_combine (rtx, rtx, rtx);
+
+void aarch64_split_simd_move (rtx, rtx);
+
+/* Check for a legitimate floating point constant for FMOV. */
+bool aarch64_float_const_representable_p (rtx);
+
+#if defined (RTX_CODE)
+
+bool aarch64_legitimate_address_p (enum machine_mode, rtx, RTX_CODE, bool);
+enum machine_mode aarch64_select_cc_mode (RTX_CODE, rtx, rtx);
+rtx aarch64_gen_compare_reg (RTX_CODE, rtx, rtx);
+rtx aarch64_load_tp (rtx);
+
+void aarch64_expand_compare_and_swap (rtx op[]);
+void aarch64_split_compare_and_swap (rtx op[]);
+void aarch64_split_atomic_op (enum rtx_code, rtx, rtx, rtx, rtx, rtx, rtx);
+
+#endif /* RTX_CODE */
+
+void aarch64_init_builtins (void);
+rtx aarch64_expand_builtin (tree exp,
+ rtx target,
+ rtx subtarget ATTRIBUTE_UNUSED,
+ enum machine_mode mode ATTRIBUTE_UNUSED,
+ int ignore ATTRIBUTE_UNUSED);
+tree aarch64_builtin_decl (unsigned, bool ATTRIBUTE_UNUSED);
+
+tree
+aarch64_builtin_vectorized_function (tree fndecl,
+ tree type_out,
+ tree type_in);
+
+extern void aarch64_split_combinev16qi (rtx operands[3]);
+extern void aarch64_expand_vec_perm (rtx target, rtx op0, rtx op1, rtx sel);
+extern bool
+aarch64_expand_vec_perm_const (rtx target, rtx op0, rtx op1, rtx sel);
+#endif /* GCC_AARCH64_PROTOS_H */
diff --git a/gcc-4.9/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc-4.9/gcc/config/aarch64/aarch64-simd-builtins.def
new file mode 100644
index 000000000..c9b7570e5
--- /dev/null
+++ b/gcc-4.9/gcc/config/aarch64/aarch64-simd-builtins.def
@@ -0,0 +1,395 @@
+/* Machine description for AArch64 architecture.
+ Copyright (C) 2012-2014 Free Software Foundation, Inc.
+ Contributed by ARM Ltd.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+/* In the list below, the BUILTIN_<ITERATOR> macros expand to create
+ builtins for each of the modes described by <ITERATOR>. When adding
+ new builtins to this list, a helpful idiom to follow is to add
+ a line for each pattern in the md file. Thus, ADDP, which has one
+ pattern defined for the VD_BHSI iterator, and one for DImode, has two
+ entries below.
+
+ Parameter 1 is the 'type' of the intrinsic. This is used to
+   describe the type modifiers (for example, unsigned) applied to
+ each of the parameters to the intrinsic function.
+
+   Parameter 2 is the name of the intrinsic.  The intrinsic is
+   exported to the front-ends as `__builtin_aarch64_<name><mode>`,
+   where <mode> is supplied by the BUILTIN_<ITERATOR> wrapper.
+
+ Parameter 3 describes how to map from the name to the CODE_FOR_
+ macro holding the RTL pattern for the intrinsic. This mapping is:
+ 0 - CODE_FOR_aarch64_<name><mode>
+ 1-9 - CODE_FOR_<name><mode><1-9>
+ 10 - CODE_FOR_<name><mode>. */
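For example, `BUILTIN_VDQF (UNOP, sqrt, 2)` produces, for V2SF, the builtin `__builtin_aarch64_sqrtv2sf` backed by `CODE_FOR_sqrtv2sf2`, while code 0 entries such as `addp` map to `CODE_FOR_aarch64_addpv8qi` and code 10 entries keep the bare name. The snippet below is only a string-level illustration of that naming rule, not the actual builtin machinery in aarch64-builtins.c.

/* Illustrative sketch of the "parameter 3" name mapping described above.  */
#include <stdio.h>

static void
pattern_name (char *buf, size_t len, const char *name, const char *mode, int code)
{
  if (code == 0)
    snprintf (buf, len, "CODE_FOR_aarch64_%s%s", name, mode);
  else if (code >= 1 && code <= 9)
    snprintf (buf, len, "CODE_FOR_%s%s%d", name, mode, code);
  else  /* code == 10 */
    snprintf (buf, len, "CODE_FOR_%s%s", name, mode);
}

int main (void)
{
  char buf[64];
  pattern_name (buf, sizeof buf, "sqrt", "v2sf", 2);
  puts (buf);   /* CODE_FOR_sqrtv2sf2 */
  pattern_name (buf, sizeof buf, "addp", "v8qi", 0);
  puts (buf);   /* CODE_FOR_aarch64_addpv8qi */
  pattern_name (buf, sizeof buf, "reduc_splus_", "v4si", 10);
  puts (buf);   /* CODE_FOR_reduc_splus_v4si */
  return 0;
}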
+
+ BUILTIN_VD_RE (CREATE, create, 0)
+ BUILTIN_VDC (COMBINE, combine, 0)
+ BUILTIN_VB (BINOP, pmul, 0)
+ BUILTIN_VDQF (UNOP, sqrt, 2)
+ BUILTIN_VD_BHSI (BINOP, addp, 0)
+ VAR1 (UNOP, addp, 0, di)
+ BUILTIN_VDQ_BHSI (UNOP, clz, 2)
+
+ BUILTIN_VALL (GETLANE, get_lane, 0)
+ VAR1 (GETLANE, get_lane, 0, di)
+ BUILTIN_VALL (GETLANE, be_checked_get_lane, 0)
+
+ BUILTIN_VD_RE (REINTERP, reinterpretdi, 0)
+ BUILTIN_VDC (REINTERP, reinterpretv8qi, 0)
+ BUILTIN_VDC (REINTERP, reinterpretv4hi, 0)
+ BUILTIN_VDC (REINTERP, reinterpretv2si, 0)
+ BUILTIN_VDC (REINTERP, reinterpretv2sf, 0)
+ BUILTIN_VQ (REINTERP, reinterpretv16qi, 0)
+ BUILTIN_VQ (REINTERP, reinterpretv8hi, 0)
+ BUILTIN_VQ (REINTERP, reinterpretv4si, 0)
+ BUILTIN_VQ (REINTERP, reinterpretv4sf, 0)
+ BUILTIN_VQ (REINTERP, reinterpretv2di, 0)
+ BUILTIN_VQ (REINTERP, reinterpretv2df, 0)
+
+ BUILTIN_VDQ_I (BINOP, dup_lane, 0)
+ /* Implemented by aarch64_<sur>q<r>shl<mode>. */
+ BUILTIN_VSDQ_I (BINOP, sqshl, 0)
+ BUILTIN_VSDQ_I (BINOP, uqshl, 0)
+ BUILTIN_VSDQ_I (BINOP, sqrshl, 0)
+ BUILTIN_VSDQ_I (BINOP, uqrshl, 0)
+ /* Implemented by aarch64_<su_optab><optab><mode>. */
+ BUILTIN_VSDQ_I (BINOP, sqadd, 0)
+ BUILTIN_VSDQ_I (BINOP, uqadd, 0)
+ BUILTIN_VSDQ_I (BINOP, sqsub, 0)
+ BUILTIN_VSDQ_I (BINOP, uqsub, 0)
+ /* Implemented by aarch64_<sur>qadd<mode>. */
+ BUILTIN_VSDQ_I (BINOP, suqadd, 0)
+ BUILTIN_VSDQ_I (BINOP, usqadd, 0)
+
+ /* Implemented by aarch64_get_dreg<VSTRUCT:mode><VDC:mode>. */
+ BUILTIN_VDC (GETLANE, get_dregoi, 0)
+ BUILTIN_VDC (GETLANE, get_dregci, 0)
+ BUILTIN_VDC (GETLANE, get_dregxi, 0)
+ /* Implemented by aarch64_get_qreg<VSTRUCT:mode><VQ:mode>. */
+ BUILTIN_VQ (GETLANE, get_qregoi, 0)
+ BUILTIN_VQ (GETLANE, get_qregci, 0)
+ BUILTIN_VQ (GETLANE, get_qregxi, 0)
+ /* Implemented by aarch64_set_qreg<VSTRUCT:mode><VQ:mode>. */
+ BUILTIN_VQ (SETLANE, set_qregoi, 0)
+ BUILTIN_VQ (SETLANE, set_qregci, 0)
+ BUILTIN_VQ (SETLANE, set_qregxi, 0)
+ /* Implemented by aarch64_ld<VSTRUCT:nregs><VDC:mode>. */
+ BUILTIN_VDC (LOADSTRUCT, ld2, 0)
+ BUILTIN_VDC (LOADSTRUCT, ld3, 0)
+ BUILTIN_VDC (LOADSTRUCT, ld4, 0)
+ /* Implemented by aarch64_ld<VSTRUCT:nregs><VQ:mode>. */
+ BUILTIN_VQ (LOADSTRUCT, ld2, 0)
+ BUILTIN_VQ (LOADSTRUCT, ld3, 0)
+ BUILTIN_VQ (LOADSTRUCT, ld4, 0)
+ /* Implemented by aarch64_st<VSTRUCT:nregs><VDC:mode>. */
+ BUILTIN_VDC (STORESTRUCT, st2, 0)
+ BUILTIN_VDC (STORESTRUCT, st3, 0)
+ BUILTIN_VDC (STORESTRUCT, st4, 0)
+ /* Implemented by aarch64_st<VSTRUCT:nregs><VQ:mode>. */
+ BUILTIN_VQ (STORESTRUCT, st2, 0)
+ BUILTIN_VQ (STORESTRUCT, st3, 0)
+ BUILTIN_VQ (STORESTRUCT, st4, 0)
+
+ BUILTIN_VQW (BINOP, saddl2, 0)
+ BUILTIN_VQW (BINOP, uaddl2, 0)
+ BUILTIN_VQW (BINOP, ssubl2, 0)
+ BUILTIN_VQW (BINOP, usubl2, 0)
+ BUILTIN_VQW (BINOP, saddw2, 0)
+ BUILTIN_VQW (BINOP, uaddw2, 0)
+ BUILTIN_VQW (BINOP, ssubw2, 0)
+ BUILTIN_VQW (BINOP, usubw2, 0)
+ /* Implemented by aarch64_<ANY_EXTEND:su><ADDSUB:optab>l<mode>. */
+ BUILTIN_VDW (BINOP, saddl, 0)
+ BUILTIN_VDW (BINOP, uaddl, 0)
+ BUILTIN_VDW (BINOP, ssubl, 0)
+ BUILTIN_VDW (BINOP, usubl, 0)
+ /* Implemented by aarch64_<ANY_EXTEND:su><ADDSUB:optab>w<mode>. */
+ BUILTIN_VDW (BINOP, saddw, 0)
+ BUILTIN_VDW (BINOP, uaddw, 0)
+ BUILTIN_VDW (BINOP, ssubw, 0)
+ BUILTIN_VDW (BINOP, usubw, 0)
+ /* Implemented by aarch64_<sur>h<addsub><mode>. */
+ BUILTIN_VQ_S (BINOP, shadd, 0)
+ BUILTIN_VQ_S (BINOP, uhadd, 0)
+ BUILTIN_VQ_S (BINOP, srhadd, 0)
+ BUILTIN_VQ_S (BINOP, urhadd, 0)
+ /* Implemented by aarch64_<sur><addsub>hn<mode>. */
+ BUILTIN_VQN (BINOP, addhn, 0)
+ BUILTIN_VQN (BINOP, raddhn, 0)
+ /* Implemented by aarch64_<sur><addsub>hn2<mode>. */
+ BUILTIN_VQN (TERNOP, addhn2, 0)
+ BUILTIN_VQN (TERNOP, raddhn2, 0)
+
+ BUILTIN_VSQN_HSDI (UNOP, sqmovun, 0)
+ /* Implemented by aarch64_<sur>qmovn<mode>. */
+ BUILTIN_VSQN_HSDI (UNOP, sqmovn, 0)
+ BUILTIN_VSQN_HSDI (UNOP, uqmovn, 0)
+ /* Implemented by aarch64_s<optab><mode>. */
+ BUILTIN_VSDQ_I_BHSI (UNOP, sqabs, 0)
+ BUILTIN_VSDQ_I_BHSI (UNOP, sqneg, 0)
+
+ BUILTIN_VSD_HSI (QUADOP, sqdmlal_lane, 0)
+ BUILTIN_VSD_HSI (QUADOP, sqdmlsl_lane, 0)
+ BUILTIN_VSD_HSI (QUADOP, sqdmlal_laneq, 0)
+ BUILTIN_VSD_HSI (QUADOP, sqdmlsl_laneq, 0)
+ BUILTIN_VQ_HSI (TERNOP, sqdmlal2, 0)
+ BUILTIN_VQ_HSI (TERNOP, sqdmlsl2, 0)
+ BUILTIN_VQ_HSI (QUADOP, sqdmlal2_lane, 0)
+ BUILTIN_VQ_HSI (QUADOP, sqdmlsl2_lane, 0)
+ BUILTIN_VQ_HSI (QUADOP, sqdmlal2_laneq, 0)
+ BUILTIN_VQ_HSI (QUADOP, sqdmlsl2_laneq, 0)
+ BUILTIN_VQ_HSI (TERNOP, sqdmlal2_n, 0)
+ BUILTIN_VQ_HSI (TERNOP, sqdmlsl2_n, 0)
+ /* Implemented by aarch64_sqdml<SBINQOPS:as>l<mode>. */
+ BUILTIN_VSD_HSI (TERNOP, sqdmlal, 0)
+ BUILTIN_VSD_HSI (TERNOP, sqdmlsl, 0)
+ /* Implemented by aarch64_sqdml<SBINQOPS:as>l_n<mode>. */
+ BUILTIN_VD_HSI (TERNOP, sqdmlal_n, 0)
+ BUILTIN_VD_HSI (TERNOP, sqdmlsl_n, 0)
+
+ BUILTIN_VSD_HSI (BINOP, sqdmull, 0)
+ BUILTIN_VSD_HSI (TERNOP, sqdmull_lane, 0)
+ BUILTIN_VD_HSI (TERNOP, sqdmull_laneq, 0)
+ BUILTIN_VD_HSI (BINOP, sqdmull_n, 0)
+ BUILTIN_VQ_HSI (BINOP, sqdmull2, 0)
+ BUILTIN_VQ_HSI (TERNOP, sqdmull2_lane, 0)
+ BUILTIN_VQ_HSI (TERNOP, sqdmull2_laneq, 0)
+ BUILTIN_VQ_HSI (BINOP, sqdmull2_n, 0)
+ /* Implemented by aarch64_sq<r>dmulh<mode>. */
+ BUILTIN_VSDQ_HSI (BINOP, sqdmulh, 0)
+ BUILTIN_VSDQ_HSI (BINOP, sqrdmulh, 0)
+ /* Implemented by aarch64_sq<r>dmulh_lane<q><mode>. */
+ BUILTIN_VDQHS (TERNOP, sqdmulh_lane, 0)
+ BUILTIN_VDQHS (TERNOP, sqdmulh_laneq, 0)
+ BUILTIN_VDQHS (TERNOP, sqrdmulh_lane, 0)
+ BUILTIN_VDQHS (TERNOP, sqrdmulh_laneq, 0)
+ BUILTIN_SD_HSI (TERNOP, sqdmulh_lane, 0)
+ BUILTIN_SD_HSI (TERNOP, sqrdmulh_lane, 0)
+
+ BUILTIN_VSDQ_I_DI (BINOP, ashl, 3)
+ /* Implemented by aarch64_<sur>shl<mode>. */
+ BUILTIN_VSDQ_I_DI (BINOP, sshl, 0)
+ BUILTIN_VSDQ_I_DI (BINOP, ushl, 0)
+ BUILTIN_VSDQ_I_DI (BINOP, srshl, 0)
+ BUILTIN_VSDQ_I_DI (BINOP, urshl, 0)
+
+ BUILTIN_VDQ_I (SHIFTIMM, ashr, 3)
+ VAR1 (SHIFTIMM, ashr_simd, 0, di)
+ BUILTIN_VDQ_I (SHIFTIMM, lshr, 3)
+ VAR1 (USHIFTIMM, lshr_simd, 0, di)
+ /* Implemented by aarch64_<sur>shr_n<mode>. */
+ BUILTIN_VSDQ_I_DI (SHIFTIMM, srshr_n, 0)
+ BUILTIN_VSDQ_I_DI (SHIFTIMM, urshr_n, 0)
+ /* Implemented by aarch64_<sur>sra_n<mode>. */
+ BUILTIN_VSDQ_I_DI (SHIFTACC, ssra_n, 0)
+ BUILTIN_VSDQ_I_DI (SHIFTACC, usra_n, 0)
+ BUILTIN_VSDQ_I_DI (SHIFTACC, srsra_n, 0)
+ BUILTIN_VSDQ_I_DI (SHIFTACC, ursra_n, 0)
+ /* Implemented by aarch64_<sur>shll_n<mode>. */
+ BUILTIN_VDW (SHIFTIMM, sshll_n, 0)
+ BUILTIN_VDW (SHIFTIMM, ushll_n, 0)
+ /* Implemented by aarch64_<sur>shll2_n<mode>. */
+ BUILTIN_VQW (SHIFTIMM, sshll2_n, 0)
+ BUILTIN_VQW (SHIFTIMM, ushll2_n, 0)
+ /* Implemented by aarch64_<sur>q<r>shr<u>n_n<mode>. */
+ BUILTIN_VSQN_HSDI (SHIFTIMM, sqshrun_n, 0)
+ BUILTIN_VSQN_HSDI (SHIFTIMM, sqrshrun_n, 0)
+ BUILTIN_VSQN_HSDI (SHIFTIMM, sqshrn_n, 0)
+ BUILTIN_VSQN_HSDI (SHIFTIMM, uqshrn_n, 0)
+ BUILTIN_VSQN_HSDI (SHIFTIMM, sqrshrn_n, 0)
+ BUILTIN_VSQN_HSDI (SHIFTIMM, uqrshrn_n, 0)
+ /* Implemented by aarch64_<sur>s<lr>i_n<mode>. */
+ BUILTIN_VSDQ_I_DI (SHIFTINSERT, ssri_n, 0)
+ BUILTIN_VSDQ_I_DI (SHIFTINSERT, usri_n, 0)
+ BUILTIN_VSDQ_I_DI (SHIFTINSERT, ssli_n, 0)
+ BUILTIN_VSDQ_I_DI (SHIFTINSERT, usli_n, 0)
+ /* Implemented by aarch64_<sur>qshl<u>_n<mode>. */
+ BUILTIN_VSDQ_I (SHIFTIMM, sqshlu_n, 0)
+ BUILTIN_VSDQ_I (SHIFTIMM, sqshl_n, 0)
+ BUILTIN_VSDQ_I (SHIFTIMM, uqshl_n, 0)
+
+ /* Implemented by aarch64_cm<cmp><mode>. */
+ BUILTIN_VALLDI (BINOP, cmeq, 0)
+ BUILTIN_VALLDI (BINOP, cmge, 0)
+ BUILTIN_VALLDI (BINOP, cmgt, 0)
+ BUILTIN_VALLDI (BINOP, cmle, 0)
+ BUILTIN_VALLDI (BINOP, cmlt, 0)
+ /* Implemented by aarch64_cm<cmp><mode>. */
+ BUILTIN_VSDQ_I_DI (BINOP, cmgeu, 0)
+ BUILTIN_VSDQ_I_DI (BINOP, cmgtu, 0)
+ BUILTIN_VSDQ_I_DI (BINOP, cmtst, 0)
+
+ /* Implemented by reduc_<sur>plus_<mode>. */
+ BUILTIN_VALL (UNOP, reduc_splus_, 10)
+ BUILTIN_VDQ (UNOP, reduc_uplus_, 10)
+
+ /* Implemented by reduc_<maxmin_uns>_<mode>. */
+ BUILTIN_VDQIF (UNOP, reduc_smax_, 10)
+ BUILTIN_VDQIF (UNOP, reduc_smin_, 10)
+ BUILTIN_VDQ_BHSI (UNOP, reduc_umax_, 10)
+ BUILTIN_VDQ_BHSI (UNOP, reduc_umin_, 10)
+ BUILTIN_VDQF (UNOP, reduc_smax_nan_, 10)
+ BUILTIN_VDQF (UNOP, reduc_smin_nan_, 10)
+
+ /* Implemented by <maxmin><mode>3.
+ smax variants map to fmaxnm,
+ smax_nan variants map to fmax. */
+ BUILTIN_VDQIF (BINOP, smax, 3)
+ BUILTIN_VDQIF (BINOP, smin, 3)
+ BUILTIN_VDQ_BHSI (BINOP, umax, 3)
+ BUILTIN_VDQ_BHSI (BINOP, umin, 3)
+ BUILTIN_VDQF (BINOP, smax_nan, 3)
+ BUILTIN_VDQF (BINOP, smin_nan, 3)
+
+ /* Implemented by <frint_pattern><mode>2. */
+ BUILTIN_VDQF (UNOP, btrunc, 2)
+ BUILTIN_VDQF (UNOP, ceil, 2)
+ BUILTIN_VDQF (UNOP, floor, 2)
+ BUILTIN_VDQF (UNOP, nearbyint, 2)
+ BUILTIN_VDQF (UNOP, rint, 2)
+ BUILTIN_VDQF (UNOP, round, 2)
+ BUILTIN_VDQF (UNOP, frintn, 2)
+
+ /* Implemented by l<fcvt_pattern><su_optab><VQDF:mode><vcvt_target>2. */
+ VAR1 (UNOP, lbtruncv2sf, 2, v2si)
+ VAR1 (UNOP, lbtruncv4sf, 2, v4si)
+ VAR1 (UNOP, lbtruncv2df, 2, v2di)
+
+ VAR1 (UNOP, lbtruncuv2sf, 2, v2si)
+ VAR1 (UNOP, lbtruncuv4sf, 2, v4si)
+ VAR1 (UNOP, lbtruncuv2df, 2, v2di)
+
+ VAR1 (UNOP, lroundv2sf, 2, v2si)
+ VAR1 (UNOP, lroundv4sf, 2, v4si)
+ VAR1 (UNOP, lroundv2df, 2, v2di)
+ /* Implemented by l<fcvt_pattern><su_optab><GPF:mode><GPI:mode>2. */
+ VAR1 (UNOP, lroundsf, 2, si)
+ VAR1 (UNOP, lrounddf, 2, di)
+
+ VAR1 (UNOP, lrounduv2sf, 2, v2si)
+ VAR1 (UNOP, lrounduv4sf, 2, v4si)
+ VAR1 (UNOP, lrounduv2df, 2, v2di)
+ VAR1 (UNOP, lroundusf, 2, si)
+ VAR1 (UNOP, lroundudf, 2, di)
+
+ VAR1 (UNOP, lceilv2sf, 2, v2si)
+ VAR1 (UNOP, lceilv4sf, 2, v4si)
+ VAR1 (UNOP, lceilv2df, 2, v2di)
+
+ VAR1 (UNOP, lceiluv2sf, 2, v2si)
+ VAR1 (UNOP, lceiluv4sf, 2, v4si)
+ VAR1 (UNOP, lceiluv2df, 2, v2di)
+ VAR1 (UNOP, lceilusf, 2, si)
+ VAR1 (UNOP, lceiludf, 2, di)
+
+ VAR1 (UNOP, lfloorv2sf, 2, v2si)
+ VAR1 (UNOP, lfloorv4sf, 2, v4si)
+ VAR1 (UNOP, lfloorv2df, 2, v2di)
+
+ VAR1 (UNOP, lflooruv2sf, 2, v2si)
+ VAR1 (UNOP, lflooruv4sf, 2, v4si)
+ VAR1 (UNOP, lflooruv2df, 2, v2di)
+ VAR1 (UNOP, lfloorusf, 2, si)
+ VAR1 (UNOP, lfloorudf, 2, di)
+
+ VAR1 (UNOP, lfrintnv2sf, 2, v2si)
+ VAR1 (UNOP, lfrintnv4sf, 2, v4si)
+ VAR1 (UNOP, lfrintnv2df, 2, v2di)
+ VAR1 (UNOP, lfrintnsf, 2, si)
+ VAR1 (UNOP, lfrintndf, 2, di)
+
+ VAR1 (UNOP, lfrintnuv2sf, 2, v2si)
+ VAR1 (UNOP, lfrintnuv4sf, 2, v4si)
+ VAR1 (UNOP, lfrintnuv2df, 2, v2di)
+ VAR1 (UNOP, lfrintnusf, 2, si)
+ VAR1 (UNOP, lfrintnudf, 2, di)
+
+ /* Implemented by <optab><fcvt_target><VDQF:mode>2. */
+ VAR1 (UNOP, floatv2si, 2, v2sf)
+ VAR1 (UNOP, floatv4si, 2, v4sf)
+ VAR1 (UNOP, floatv2di, 2, v2df)
+
+ VAR1 (UNOP, floatunsv2si, 2, v2sf)
+ VAR1 (UNOP, floatunsv4si, 2, v4sf)
+ VAR1 (UNOP, floatunsv2di, 2, v2df)
+
+ /* Implemented by
+ aarch64_<PERMUTE:perm_insn><PERMUTE:perm_hilo><mode>. */
+ BUILTIN_VALL (BINOP, zip1, 0)
+ BUILTIN_VALL (BINOP, zip2, 0)
+ BUILTIN_VALL (BINOP, uzp1, 0)
+ BUILTIN_VALL (BINOP, uzp2, 0)
+ BUILTIN_VALL (BINOP, trn1, 0)
+ BUILTIN_VALL (BINOP, trn2, 0)
+
+ /* Implemented by
+ aarch64_frecp<FRECP:frecp_suffix><mode>. */
+ BUILTIN_GPF (UNOP, frecpe, 0)
+ BUILTIN_GPF (BINOP, frecps, 0)
+ BUILTIN_GPF (UNOP, frecpx, 0)
+
+ BUILTIN_VDQF (UNOP, frecpe, 0)
+ BUILTIN_VDQF (BINOP, frecps, 0)
+
+ BUILTIN_VALLDI (UNOP, abs, 2)
+
+ VAR1 (UNOP, vec_unpacks_hi_, 10, v4sf)
+ VAR1 (BINOP, float_truncate_hi_, 0, v4sf)
+
+ VAR1 (UNOP, float_extend_lo_, 0, v2df)
+ VAR1 (UNOP, float_truncate_lo_, 0, v2sf)
+
+ /* Implemented by aarch64_ld1<VALL:mode>. */
+ BUILTIN_VALL (LOAD1, ld1, 0)
+
+ /* Implemented by aarch64_st1<VALL:mode>. */
+ BUILTIN_VALL (STORE1, st1, 0)
+
+ /* Implemented by fma<mode>4. */
+ BUILTIN_VDQF (TERNOP, fma, 4)
+
+ /* Implemented by aarch64_simd_bsl<mode>. */
+ BUILTIN_VDQQH (BSL_P, simd_bsl, 0)
+ BUILTIN_VSDQ_I_DI (BSL_U, simd_bsl, 0)
+ BUILTIN_VALLDIF (BSL_S, simd_bsl, 0)
+
+ /* Implemented by aarch64_crypto_aes<op><mode>. */
+ VAR1 (BINOPU, crypto_aese, 0, v16qi)
+ VAR1 (BINOPU, crypto_aesd, 0, v16qi)
+ VAR1 (UNOPU, crypto_aesmc, 0, v16qi)
+ VAR1 (UNOPU, crypto_aesimc, 0, v16qi)
+
+ /* Implemented by aarch64_crypto_sha1<op><mode>. */
+ VAR1 (UNOPU, crypto_sha1h, 0, si)
+ VAR1 (BINOPU, crypto_sha1su1, 0, v4si)
+ VAR1 (TERNOPU, crypto_sha1c, 0, v4si)
+ VAR1 (TERNOPU, crypto_sha1m, 0, v4si)
+ VAR1 (TERNOPU, crypto_sha1p, 0, v4si)
+ VAR1 (TERNOPU, crypto_sha1su0, 0, v4si)
+
+ /* Implemented by aarch64_crypto_sha256<op><mode>. */
+ VAR1 (TERNOPU, crypto_sha256h, 0, v4si)
+ VAR1 (TERNOPU, crypto_sha256h2, 0, v4si)
+ VAR1 (BINOPU, crypto_sha256su0, 0, v4si)
+ VAR1 (TERNOPU, crypto_sha256su1, 0, v4si)
+
+ /* Implemented by aarch64_crypto_pmull<mode>. */
+ VAR1 (BINOPP, crypto_pmull, 0, di)
+ VAR1 (BINOPP, crypto_pmull, 0, v2di)
diff --git a/gcc-4.9/gcc/config/aarch64/aarch64-simd.md b/gcc-4.9/gcc/config/aarch64/aarch64-simd.md
new file mode 100644
index 000000000..6048d605c
--- /dev/null
+++ b/gcc-4.9/gcc/config/aarch64/aarch64-simd.md
@@ -0,0 +1,4363 @@
+;; Machine description for AArch64 AdvSIMD architecture.
+;; Copyright (C) 2011-2014 Free Software Foundation, Inc.
+;; Contributed by ARM Ltd.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful, but
+;; WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;; General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+(define_expand "mov<mode>"
+ [(set (match_operand:VALL 0 "aarch64_simd_nonimmediate_operand" "")
+ (match_operand:VALL 1 "aarch64_simd_general_operand" ""))]
+ "TARGET_SIMD"
+ "
+ if (GET_CODE (operands[0]) == MEM)
+ operands[1] = force_reg (<MODE>mode, operands[1]);
+ "
+)
+
+(define_expand "movmisalign<mode>"
+ [(set (match_operand:VALL 0 "aarch64_simd_nonimmediate_operand" "")
+ (match_operand:VALL 1 "aarch64_simd_general_operand" ""))]
+ "TARGET_SIMD"
+{
+ /* This pattern is not permitted to fail during expansion: if both arguments
+ are non-registers (e.g. memory := constant, which can be created by the
+ auto-vectorizer), force operand 1 into a register. */
+ if (!register_operand (operands[0], <MODE>mode)
+ && !register_operand (operands[1], <MODE>mode))
+ operands[1] = force_reg (<MODE>mode, operands[1]);
+})
+
+(define_insn "aarch64_simd_dup<mode>"
+ [(set (match_operand:VDQ 0 "register_operand" "=w, w")
+ (vec_duplicate:VDQ (match_operand:<VEL> 1 "register_operand" "r, w")))]
+ "TARGET_SIMD"
+ "@
+ dup\\t%0.<Vtype>, %<vw>1
+ dup\\t%0.<Vtype>, %1.<Vetype>[0]"
+ [(set_attr "type" "neon_from_gp<q>, neon_dup<q>")]
+)
+
+(define_insn "aarch64_simd_dup<mode>"
+ [(set (match_operand:VDQF 0 "register_operand" "=w")
+ (vec_duplicate:VDQF (match_operand:<VEL> 1 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "dup\\t%0.<Vtype>, %1.<Vetype>[0]"
+ [(set_attr "type" "neon_dup<q>")]
+)
+
+(define_insn "aarch64_dup_lane<mode>"
+ [(set (match_operand:VALL 0 "register_operand" "=w")
+ (vec_duplicate:VALL
+ (vec_select:<VEL>
+ (match_operand:VALL 1 "register_operand" "w")
+ (parallel [(match_operand:SI 2 "immediate_operand" "i")])
+ )))]
+ "TARGET_SIMD"
+ {
+ operands[2] = GEN_INT (ENDIAN_LANE_N (<MODE>mode, INTVAL (operands[2])));
+ return "dup\\t%0.<Vtype>, %1.<Vetype>[%2]";
+ }
+ [(set_attr "type" "neon_dup<q>")]
+)
+
+(define_insn "aarch64_dup_lane_<vswap_width_name><mode>"
+ [(set (match_operand:VALL 0 "register_operand" "=w")
+ (vec_duplicate:VALL
+ (vec_select:<VEL>
+ (match_operand:<VSWAP_WIDTH> 1 "register_operand" "w")
+ (parallel [(match_operand:SI 2 "immediate_operand" "i")])
+ )))]
+ "TARGET_SIMD"
+ {
+ operands[2] = GEN_INT (ENDIAN_LANE_N (<VSWAP_WIDTH>mode,
+ INTVAL (operands[2])));
+ return "dup\\t%0.<Vtype>, %1.<Vetype>[%2]";
+ }
+ [(set_attr "type" "neon_dup<q>")]
+)
+
+(define_insn "*aarch64_simd_mov<mode>"
+ [(set (match_operand:VD 0 "aarch64_simd_nonimmediate_operand"
+ "=w, m, w, ?r, ?w, ?r, w")
+ (match_operand:VD 1 "aarch64_simd_general_operand"
+ "m, w, w, w, r, r, Dn"))]
+ "TARGET_SIMD
+ && (register_operand (operands[0], <MODE>mode)
+ || register_operand (operands[1], <MODE>mode))"
+{
+ switch (which_alternative)
+ {
+ case 0: return "ldr\\t%d0, %1";
+ case 1: return "str\\t%d1, %0";
+ case 2: return "orr\t%0.<Vbtype>, %1.<Vbtype>, %1.<Vbtype>";
+ case 3: return "umov\t%0, %1.d[0]";
+ case 4: return "ins\t%0.d[0], %1";
+ case 5: return "mov\t%0, %1";
+ case 6:
+ return aarch64_output_simd_mov_immediate (operands[1],
+ <MODE>mode, 64);
+ default: gcc_unreachable ();
+ }
+}
+ [(set_attr "type" "neon_load1_1reg<q>, neon_store1_1reg<q>,\
+ neon_logic<q>, neon_to_gp<q>, neon_from_gp<q>,\
+ mov_reg, neon_move<q>")]
+)
+
+(define_insn "*aarch64_simd_mov<mode>"
+ [(set (match_operand:VQ 0 "aarch64_simd_nonimmediate_operand"
+ "=w, m, w, ?r, ?w, ?r, w")
+ (match_operand:VQ 1 "aarch64_simd_general_operand"
+ "m, w, w, w, r, r, Dn"))]
+ "TARGET_SIMD
+ && (register_operand (operands[0], <MODE>mode)
+ || register_operand (operands[1], <MODE>mode))"
+{
+ switch (which_alternative)
+ {
+ case 0:
+ return "ldr\\t%q0, %1";
+ case 1:
+ return "str\\t%q1, %0";
+ case 2:
+ return "orr\t%0.<Vbtype>, %1.<Vbtype>, %1.<Vbtype>";
+ case 3:
+ case 4:
+ case 5:
+ return "#";
+ case 6:
+ return aarch64_output_simd_mov_immediate (operands[1], <MODE>mode, 128);
+ default:
+ gcc_unreachable ();
+ }
+}
+ [(set_attr "type" "neon_load1_1reg<q>, neon_store1_1reg<q>,\
+ neon_logic<q>, multiple, multiple, multiple,\
+ neon_move<q>")
+ (set_attr "length" "4,4,4,8,8,8,4")]
+)
+
+(define_split
+ [(set (match_operand:VQ 0 "register_operand" "")
+ (match_operand:VQ 1 "register_operand" ""))]
+ "TARGET_SIMD && reload_completed
+ && GP_REGNUM_P (REGNO (operands[0]))
+ && GP_REGNUM_P (REGNO (operands[1]))"
+ [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 2) (match_dup 3))]
+{
+ int rdest = REGNO (operands[0]);
+ int rsrc = REGNO (operands[1]);
+ rtx dest[2], src[2];
+
+ dest[0] = gen_rtx_REG (DImode, rdest);
+ src[0] = gen_rtx_REG (DImode, rsrc);
+ dest[1] = gen_rtx_REG (DImode, rdest + 1);
+ src[1] = gen_rtx_REG (DImode, rsrc + 1);
+
+ aarch64_simd_disambiguate_copy (operands, dest, src, 2);
+})
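The split above breaks a 128-bit GP-to-GP copy into two 64-bit moves, and aarch64_simd_disambiguate_copy orders them so the first move does not clobber a register the second move still needs to read. The helper below is a hedged sketch of that ordering problem with assumed semantics; it is not the GCC routine.

/* Sketch: order a register-pair copy so sources are read before they
   are overwritten.  */
#include <stdio.h>

static void
order_pair_copy (int dest[2], int src[2], const char *out[2])
{
  /* If the first destination register is also the second source,
     emit the copies in reverse order.  */
  static char buf[2][32];
  int swap = (dest[0] == src[1]);
  for (int i = 0; i < 2; i++)
    {
      int j = swap ? 1 - i : i;
      snprintf (buf[i], sizeof buf[i], "mov x%d, x%d", dest[j], src[j]);
      out[i] = buf[i];
    }
}

int main (void)
{
  int dest[2] = { 3, 4 }, src[2] = { 2, 3 };   /* x3:x4 <- x2:x3 */
  const char *insns[2];
  order_pair_copy (dest, src, insns);
  printf ("%s\n%s\n", insns[0], insns[1]);     /* mov x4, x3 comes first */
  return 0;
}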
+
+(define_split
+ [(set (match_operand:VQ 0 "register_operand" "")
+ (match_operand:VQ 1 "register_operand" ""))]
+ "TARGET_SIMD && reload_completed
+ && ((FP_REGNUM_P (REGNO (operands[0])) && GP_REGNUM_P (REGNO (operands[1])))
+ || (GP_REGNUM_P (REGNO (operands[0])) && FP_REGNUM_P (REGNO (operands[1]))))"
+ [(const_int 0)]
+{
+ aarch64_split_simd_move (operands[0], operands[1]);
+ DONE;
+})
+
+(define_expand "aarch64_split_simd_mov<mode>"
+ [(set (match_operand:VQ 0)
+ (match_operand:VQ 1))]
+ "TARGET_SIMD"
+ {
+ rtx dst = operands[0];
+ rtx src = operands[1];
+
+ if (GP_REGNUM_P (REGNO (src)))
+ {
+ rtx src_low_part = gen_lowpart (<VHALF>mode, src);
+ rtx src_high_part = gen_highpart (<VHALF>mode, src);
+
+ emit_insn
+ (gen_move_lo_quad_<mode> (dst, src_low_part));
+ emit_insn
+ (gen_move_hi_quad_<mode> (dst, src_high_part));
+ }
+
+ else
+ {
+ rtx dst_low_part = gen_lowpart (<VHALF>mode, dst);
+ rtx dst_high_part = gen_highpart (<VHALF>mode, dst);
+ rtx lo = aarch64_simd_vect_par_cnst_half (<MODE>mode, false);
+ rtx hi = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
+
+ emit_insn
+ (gen_aarch64_simd_mov_from_<mode>low (dst_low_part, src, lo));
+ emit_insn
+ (gen_aarch64_simd_mov_from_<mode>high (dst_high_part, src, hi));
+ }
+ DONE;
+ }
+)
+
+(define_insn "aarch64_simd_mov_from_<mode>low"
+ [(set (match_operand:<VHALF> 0 "register_operand" "=r")
+ (vec_select:<VHALF>
+ (match_operand:VQ 1 "register_operand" "w")
+ (match_operand:VQ 2 "vect_par_cnst_lo_half" "")))]
+ "TARGET_SIMD && reload_completed"
+ "umov\t%0, %1.d[0]"
+ [(set_attr "type" "neon_to_gp<q>")
+ (set_attr "length" "4")
+ ])
+
+(define_insn "aarch64_simd_mov_from_<mode>high"
+ [(set (match_operand:<VHALF> 0 "register_operand" "=r")
+ (vec_select:<VHALF>
+ (match_operand:VQ 1 "register_operand" "w")
+ (match_operand:VQ 2 "vect_par_cnst_hi_half" "")))]
+ "TARGET_SIMD && reload_completed"
+ "umov\t%0, %1.d[1]"
+ [(set_attr "type" "neon_to_gp<q>")
+ (set_attr "length" "4")
+ ])
+
+(define_insn "orn<mode>3"
+ [(set (match_operand:VDQ 0 "register_operand" "=w")
+ (ior:VDQ (not:VDQ (match_operand:VDQ 1 "register_operand" "w"))
+ (match_operand:VDQ 2 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "orn\t%0.<Vbtype>, %2.<Vbtype>, %1.<Vbtype>"
+ [(set_attr "type" "neon_logic<q>")]
+)
+
+(define_insn "bic<mode>3"
+ [(set (match_operand:VDQ 0 "register_operand" "=w")
+ (and:VDQ (not:VDQ (match_operand:VDQ 1 "register_operand" "w"))
+ (match_operand:VDQ 2 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "bic\t%0.<Vbtype>, %2.<Vbtype>, %1.<Vbtype>"
+ [(set_attr "type" "neon_logic<q>")]
+)
+
+(define_insn "add<mode>3"
+ [(set (match_operand:VDQ 0 "register_operand" "=w")
+ (plus:VDQ (match_operand:VDQ 1 "register_operand" "w")
+ (match_operand:VDQ 2 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "add\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
+ [(set_attr "type" "neon_add<q>")]
+)
+
+(define_insn "sub<mode>3"
+ [(set (match_operand:VDQ 0 "register_operand" "=w")
+ (minus:VDQ (match_operand:VDQ 1 "register_operand" "w")
+ (match_operand:VDQ 2 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "sub\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
+ [(set_attr "type" "neon_sub<q>")]
+)
+
+(define_insn "mul<mode>3"
+ [(set (match_operand:VDQM 0 "register_operand" "=w")
+ (mult:VDQM (match_operand:VDQM 1 "register_operand" "w")
+ (match_operand:VDQM 2 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "mul\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
+ [(set_attr "type" "neon_mul_<Vetype><q>")]
+)
+
+(define_insn "*aarch64_mul3_elt<mode>"
+ [(set (match_operand:VMUL 0 "register_operand" "=w")
+ (mult:VMUL
+ (vec_duplicate:VMUL
+ (vec_select:<VEL>
+ (match_operand:VMUL 1 "register_operand" "<h_con>")
+ (parallel [(match_operand:SI 2 "immediate_operand")])))
+ (match_operand:VMUL 3 "register_operand" "w")))]
+ "TARGET_SIMD"
+ {
+ operands[2] = GEN_INT (ENDIAN_LANE_N (<MODE>mode, INTVAL (operands[2])));
+ return "<f>mul\\t%0.<Vtype>, %3.<Vtype>, %1.<Vetype>[%2]";
+ }
+ [(set_attr "type" "neon<fp>_mul_<Vetype>_scalar<q>")]
+)
+
+(define_insn "*aarch64_mul3_elt_<vswap_width_name><mode>"
+ [(set (match_operand:VMUL_CHANGE_NLANES 0 "register_operand" "=w")
+ (mult:VMUL_CHANGE_NLANES
+ (vec_duplicate:VMUL_CHANGE_NLANES
+ (vec_select:<VEL>
+ (match_operand:<VSWAP_WIDTH> 1 "register_operand" "<h_con>")
+ (parallel [(match_operand:SI 2 "immediate_operand")])))
+ (match_operand:VMUL_CHANGE_NLANES 3 "register_operand" "w")))]
+ "TARGET_SIMD"
+ {
+ operands[2] = GEN_INT (ENDIAN_LANE_N (<VSWAP_WIDTH>mode,
+ INTVAL (operands[2])));
+ return "<f>mul\\t%0.<Vtype>, %3.<Vtype>, %1.<Vetype>[%2]";
+ }
+ [(set_attr "type" "neon<fp>_mul_<Vetype>_scalar<q>")]
+)
+
+(define_insn "*aarch64_mul3_elt_to_128df"
+ [(set (match_operand:V2DF 0 "register_operand" "=w")
+ (mult:V2DF
+ (vec_duplicate:V2DF
+ (match_operand:DF 2 "register_operand" "w"))
+ (match_operand:V2DF 1 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "fmul\\t%0.2d, %1.2d, %2.d[0]"
+ [(set_attr "type" "neon_fp_mul_d_scalar_q")]
+)
+
+(define_insn "*aarch64_mul3_elt_to_64v2df"
+ [(set (match_operand:DF 0 "register_operand" "=w")
+ (mult:DF
+ (vec_select:DF
+ (match_operand:V2DF 1 "register_operand" "w")
+ (parallel [(match_operand:SI 2 "immediate_operand")]))
+ (match_operand:DF 3 "register_operand" "w")))]
+ "TARGET_SIMD"
+ {
+ operands[2] = GEN_INT (ENDIAN_LANE_N (V2DFmode, INTVAL (operands[2])));
+ return "fmul\\t%0.2d, %3.2d, %1.d[%2]";
+ }
+ [(set_attr "type" "neon_fp_mul_d_scalar_q")]
+)
+
+(define_insn "neg<mode>2"
+ [(set (match_operand:VDQ 0 "register_operand" "=w")
+ (neg:VDQ (match_operand:VDQ 1 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "neg\t%0.<Vtype>, %1.<Vtype>"
+ [(set_attr "type" "neon_neg<q>")]
+)
+
+(define_insn "abs<mode>2"
+ [(set (match_operand:VDQ 0 "register_operand" "=w")
+ (abs:VDQ (match_operand:VDQ 1 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "abs\t%0.<Vtype>, %1.<Vtype>"
+ [(set_attr "type" "neon_abs<q>")]
+)
+
+(define_insn "abd<mode>_3"
+ [(set (match_operand:VDQ_BHSI 0 "register_operand" "=w")
+ (abs:VDQ_BHSI (minus:VDQ_BHSI
+ (match_operand:VDQ_BHSI 1 "register_operand" "w")
+ (match_operand:VDQ_BHSI 2 "register_operand" "w"))))]
+ "TARGET_SIMD"
+ "sabd\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
+ [(set_attr "type" "neon_abd<q>")]
+)
+
+(define_insn "aba<mode>_3"
+ [(set (match_operand:VDQ_BHSI 0 "register_operand" "=w")
+ (plus:VDQ_BHSI (abs:VDQ_BHSI (minus:VDQ_BHSI
+ (match_operand:VDQ_BHSI 1 "register_operand" "w")
+ (match_operand:VDQ_BHSI 2 "register_operand" "w")))
+ (match_operand:VDQ_BHSI 3 "register_operand" "0")))]
+ "TARGET_SIMD"
+ "saba\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
+ [(set_attr "type" "neon_arith_acc<q>")]
+)
+
+(define_insn "fabd<mode>_3"
+ [(set (match_operand:VDQF 0 "register_operand" "=w")
+ (abs:VDQF (minus:VDQF
+ (match_operand:VDQF 1 "register_operand" "w")
+ (match_operand:VDQF 2 "register_operand" "w"))))]
+ "TARGET_SIMD"
+ "fabd\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
+ [(set_attr "type" "neon_fp_abd_<Vetype><q>")]
+)
+
+(define_insn "*fabd_scalar<mode>3"
+ [(set (match_operand:GPF 0 "register_operand" "=w")
+ (abs:GPF (minus:GPF
+ (match_operand:GPF 1 "register_operand" "w")
+ (match_operand:GPF 2 "register_operand" "w"))))]
+ "TARGET_SIMD"
+ "fabd\t%<s>0, %<s>1, %<s>2"
+ [(set_attr "type" "neon_fp_abd_<Vetype><q>")]
+)
+
+(define_insn "and<mode>3"
+ [(set (match_operand:VDQ 0 "register_operand" "=w")
+ (and:VDQ (match_operand:VDQ 1 "register_operand" "w")
+ (match_operand:VDQ 2 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "and\t%0.<Vbtype>, %1.<Vbtype>, %2.<Vbtype>"
+ [(set_attr "type" "neon_logic<q>")]
+)
+
+(define_insn "ior<mode>3"
+ [(set (match_operand:VDQ 0 "register_operand" "=w")
+ (ior:VDQ (match_operand:VDQ 1 "register_operand" "w")
+ (match_operand:VDQ 2 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "orr\t%0.<Vbtype>, %1.<Vbtype>, %2.<Vbtype>"
+ [(set_attr "type" "neon_logic<q>")]
+)
+
+(define_insn "xor<mode>3"
+ [(set (match_operand:VDQ 0 "register_operand" "=w")
+ (xor:VDQ (match_operand:VDQ 1 "register_operand" "w")
+ (match_operand:VDQ 2 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "eor\t%0.<Vbtype>, %1.<Vbtype>, %2.<Vbtype>"
+ [(set_attr "type" "neon_logic<q>")]
+)
+
+(define_insn "one_cmpl<mode>2"
+ [(set (match_operand:VDQ 0 "register_operand" "=w")
+ (not:VDQ (match_operand:VDQ 1 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "not\t%0.<Vbtype>, %1.<Vbtype>"
+ [(set_attr "type" "neon_logic<q>")]
+)
+
+(define_insn "aarch64_simd_vec_set<mode>"
+ [(set (match_operand:VQ_S 0 "register_operand" "=w,w")
+ (vec_merge:VQ_S
+ (vec_duplicate:VQ_S
+ (match_operand:<VEL> 1 "register_operand" "r,w"))
+ (match_operand:VQ_S 3 "register_operand" "0,0")
+ (match_operand:SI 2 "immediate_operand" "i,i")))]
+ "TARGET_SIMD"
+ {
+ int elt = ENDIAN_LANE_N (<MODE>mode, exact_log2 (INTVAL (operands[2])));
+ operands[2] = GEN_INT ((HOST_WIDE_INT) 1 << elt);
+ switch (which_alternative)
+ {
+ case 0:
+ return "ins\\t%0.<Vetype>[%p2], %w1";
+ case 1:
+ return "ins\\t%0.<Vetype>[%p2], %1.<Vetype>[0]";
+ default:
+ gcc_unreachable ();
+ }
+ }
+ [(set_attr "type" "neon_from_gp<q>, neon_ins<q>")]
+)
+
+(define_insn "aarch64_simd_lshr<mode>"
+ [(set (match_operand:VDQ 0 "register_operand" "=w")
+ (lshiftrt:VDQ (match_operand:VDQ 1 "register_operand" "w")
+ (match_operand:VDQ 2 "aarch64_simd_rshift_imm" "Dr")))]
+ "TARGET_SIMD"
+ "ushr\t%0.<Vtype>, %1.<Vtype>, %2"
+ [(set_attr "type" "neon_shift_imm<q>")]
+)
+
+(define_insn "aarch64_simd_ashr<mode>"
+ [(set (match_operand:VDQ 0 "register_operand" "=w")
+ (ashiftrt:VDQ (match_operand:VDQ 1 "register_operand" "w")
+ (match_operand:VDQ 2 "aarch64_simd_rshift_imm" "Dr")))]
+ "TARGET_SIMD"
+ "sshr\t%0.<Vtype>, %1.<Vtype>, %2"
+ [(set_attr "type" "neon_shift_imm<q>")]
+)
+
+(define_insn "aarch64_simd_imm_shl<mode>"
+ [(set (match_operand:VDQ 0 "register_operand" "=w")
+ (ashift:VDQ (match_operand:VDQ 1 "register_operand" "w")
+ (match_operand:VDQ 2 "aarch64_simd_lshift_imm" "Dl")))]
+ "TARGET_SIMD"
+ "shl\t%0.<Vtype>, %1.<Vtype>, %2"
+ [(set_attr "type" "neon_shift_imm<q>")]
+)
+
+(define_insn "aarch64_simd_reg_sshl<mode>"
+ [(set (match_operand:VDQ 0 "register_operand" "=w")
+ (ashift:VDQ (match_operand:VDQ 1 "register_operand" "w")
+ (match_operand:VDQ 2 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "sshl\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
+ [(set_attr "type" "neon_shift_reg<q>")]
+)
+
+(define_insn "aarch64_simd_reg_shl<mode>_unsigned"
+ [(set (match_operand:VDQ 0 "register_operand" "=w")
+ (unspec:VDQ [(match_operand:VDQ 1 "register_operand" "w")
+ (match_operand:VDQ 2 "register_operand" "w")]
+ UNSPEC_ASHIFT_UNSIGNED))]
+ "TARGET_SIMD"
+ "ushl\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
+ [(set_attr "type" "neon_shift_reg<q>")]
+)
+
+(define_insn "aarch64_simd_reg_shl<mode>_signed"
+ [(set (match_operand:VDQ 0 "register_operand" "=w")
+ (unspec:VDQ [(match_operand:VDQ 1 "register_operand" "w")
+ (match_operand:VDQ 2 "register_operand" "w")]
+ UNSPEC_ASHIFT_SIGNED))]
+ "TARGET_SIMD"
+ "sshl\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
+ [(set_attr "type" "neon_shift_reg<q>")]
+)
+
+(define_expand "ashl<mode>3"
+ [(match_operand:VDQ 0 "register_operand" "")
+ (match_operand:VDQ 1 "register_operand" "")
+ (match_operand:SI 2 "general_operand" "")]
+ "TARGET_SIMD"
+{
+ int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
+ int shift_amount;
+
+ if (CONST_INT_P (operands[2]))
+ {
+ shift_amount = INTVAL (operands[2]);
+ if (shift_amount >= 0 && shift_amount < bit_width)
+ {
+ rtx tmp = aarch64_simd_gen_const_vector_dup (<MODE>mode,
+ shift_amount);
+ emit_insn (gen_aarch64_simd_imm_shl<mode> (operands[0],
+ operands[1],
+ tmp));
+ DONE;
+ }
+ else
+ {
+ operands[2] = force_reg (SImode, operands[2]);
+ }
+ }
+ else if (MEM_P (operands[2]))
+ {
+ operands[2] = force_reg (SImode, operands[2]);
+ }
+
+ if (REG_P (operands[2]))
+ {
+ rtx tmp = gen_reg_rtx (<MODE>mode);
+ emit_insn (gen_aarch64_simd_dup<mode> (tmp,
+ convert_to_mode (<VEL>mode,
+ operands[2],
+ 0)));
+ emit_insn (gen_aarch64_simd_reg_sshl<mode> (operands[0], operands[1],
+ tmp));
+ DONE;
+ }
+ else
+ FAIL;
+}
+)
+
+(define_expand "lshr<mode>3"
+ [(match_operand:VDQ 0 "register_operand" "")
+ (match_operand:VDQ 1 "register_operand" "")
+ (match_operand:SI 2 "general_operand" "")]
+ "TARGET_SIMD"
+{
+ int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
+ int shift_amount;
+
+ if (CONST_INT_P (operands[2]))
+ {
+ shift_amount = INTVAL (operands[2]);
+ if (shift_amount > 0 && shift_amount <= bit_width)
+ {
+ rtx tmp = aarch64_simd_gen_const_vector_dup (<MODE>mode,
+ shift_amount);
+ emit_insn (gen_aarch64_simd_lshr<mode> (operands[0],
+ operands[1],
+ tmp));
+ DONE;
+ }
+ else
+ operands[2] = force_reg (SImode, operands[2]);
+ }
+ else if (MEM_P (operands[2]))
+ {
+ operands[2] = force_reg (SImode, operands[2]);
+ }
+
+ if (REG_P (operands[2]))
+ {
+ rtx tmp = gen_reg_rtx (SImode);
+ rtx tmp1 = gen_reg_rtx (<MODE>mode);
+ emit_insn (gen_negsi2 (tmp, operands[2]));
+ emit_insn (gen_aarch64_simd_dup<mode> (tmp1,
+ convert_to_mode (<VEL>mode,
+ tmp, 0)));
+ emit_insn (gen_aarch64_simd_reg_shl<mode>_unsigned (operands[0],
+ operands[1],
+ tmp1));
+ DONE;
+ }
+ else
+ FAIL;
+}
+)
+
+(define_expand "ashr<mode>3"
+ [(match_operand:VDQ 0 "register_operand" "")
+ (match_operand:VDQ 1 "register_operand" "")
+ (match_operand:SI 2 "general_operand" "")]
+ "TARGET_SIMD"
+{
+ int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
+ int shift_amount;
+
+ if (CONST_INT_P (operands[2]))
+ {
+ shift_amount = INTVAL (operands[2]);
+ if (shift_amount > 0 && shift_amount <= bit_width)
+ {
+ rtx tmp = aarch64_simd_gen_const_vector_dup (<MODE>mode,
+ shift_amount);
+ emit_insn (gen_aarch64_simd_ashr<mode> (operands[0],
+ operands[1],
+ tmp));
+ DONE;
+ }
+ else
+ operands[2] = force_reg (SImode, operands[2]);
+ }
+ else if (MEM_P (operands[2]))
+ {
+ operands[2] = force_reg (SImode, operands[2]);
+ }
+
+ if (REG_P (operands[2]))
+ {
+ rtx tmp = gen_reg_rtx (SImode);
+ rtx tmp1 = gen_reg_rtx (<MODE>mode);
+ emit_insn (gen_negsi2 (tmp, operands[2]));
+ emit_insn (gen_aarch64_simd_dup<mode> (tmp1,
+ convert_to_mode (<VEL>mode,
+ tmp, 0)));
+ emit_insn (gen_aarch64_simd_reg_shl<mode>_signed (operands[0],
+ operands[1],
+ tmp1));
+ DONE;
+ }
+ else
+ FAIL;
+}
+)
+
+(define_expand "vashl<mode>3"
+ [(match_operand:VDQ 0 "register_operand" "")
+ (match_operand:VDQ 1 "register_operand" "")
+ (match_operand:VDQ 2 "register_operand" "")]
+ "TARGET_SIMD"
+{
+ emit_insn (gen_aarch64_simd_reg_sshl<mode> (operands[0], operands[1],
+ operands[2]));
+ DONE;
+})
+
+;; Using mode VQ_S as there is no V2DImode neg!
+;; Negating individual lanes would most certainly offset the
+;; gain from vectorization.
+(define_expand "vashr<mode>3"
+ [(match_operand:VQ_S 0 "register_operand" "")
+ (match_operand:VQ_S 1 "register_operand" "")
+ (match_operand:VQ_S 2 "register_operand" "")]
+ "TARGET_SIMD"
+{
+ rtx neg = gen_reg_rtx (<MODE>mode);
+ emit (gen_neg<mode>2 (neg, operands[2]));
+ emit_insn (gen_aarch64_simd_reg_shl<mode>_signed (operands[0], operands[1],
+ neg));
+ DONE;
+})
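The lshr/ashr and vlshr/vashr expanders above all negate a register shift amount before using the register-shift patterns, because the AdvSIMD USHL/SSHL instructions shift left for positive element amounts and right for negative ones, so "x >> n" is emitted as an sshl/ushl by -n. The scalar C model below illustrates that behaviour for one 32-bit lane; it assumes the usual arithmetic behaviour of ">>" on negative values and ignores C's shift corner cases, so it is a behavioural sketch, not the full insn semantics.

/* Scalar model of the negate-and-shift-left trick used above.  */
#include <stdint.h>
#include <stdio.h>

static int32_t
sshl_lane_model (int32_t x, int8_t amount)
{
  return amount >= 0 ? x << amount : x >> -amount;
}

int
main (void)
{
  int32_t x = -64;
  int8_t n = 3;
  /* Same result as the arithmetic right shift x >> 3, i.e. -8.  */
  printf ("%d\n", sshl_lane_model (x, (int8_t) -n));
  return 0;
}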
+
+;; DI vector shift
+(define_expand "aarch64_ashr_simddi"
+ [(match_operand:DI 0 "register_operand" "=w")
+ (match_operand:DI 1 "register_operand" "w")
+ (match_operand:SI 2 "aarch64_shift_imm64_di" "")]
+ "TARGET_SIMD"
+ {
+ if (INTVAL (operands[2]) == 64)
+ emit_insn (gen_aarch64_sshr_simddi (operands[0], operands[1]));
+ else
+ emit_insn (gen_ashrdi3 (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+)
+
+;; SIMD shift by 64.  This pattern is a special case, as the standard pattern
+;; does not handle NEON shifts by 64.
+(define_insn "aarch64_sshr_simddi"
+ [(set (match_operand:DI 0 "register_operand" "=w")
+ (unspec:DI
+ [(match_operand:DI 1 "register_operand" "w")] UNSPEC_SSHR64))]
+ "TARGET_SIMD"
+ "sshr\t%d0, %d1, 64"
+ [(set_attr "type" "neon_shift_imm")]
+)
+
+(define_expand "vlshr<mode>3"
+ [(match_operand:VQ_S 0 "register_operand" "")
+ (match_operand:VQ_S 1 "register_operand" "")
+ (match_operand:VQ_S 2 "register_operand" "")]
+ "TARGET_SIMD"
+{
+ rtx neg = gen_reg_rtx (<MODE>mode);
+ emit (gen_neg<mode>2 (neg, operands[2]));
+ emit_insn (gen_aarch64_simd_reg_shl<mode>_unsigned (operands[0], operands[1],
+ neg));
+ DONE;
+})
+
+(define_expand "aarch64_lshr_simddi"
+ [(match_operand:DI 0 "register_operand" "=w")
+ (match_operand:DI 1 "register_operand" "w")
+ (match_operand:SI 2 "aarch64_shift_imm64_di" "")]
+ "TARGET_SIMD"
+ {
+ if (INTVAL (operands[2]) == 64)
+ emit_insn (gen_aarch64_ushr_simddi (operands[0], operands[1]));
+ else
+ emit_insn (gen_lshrdi3 (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+)
+
+;; SIMD shift by 64.  This pattern is a special case, as the standard pattern
+;; does not handle NEON shifts by 64.
+(define_insn "aarch64_ushr_simddi"
+ [(set (match_operand:DI 0 "register_operand" "=w")
+ (unspec:DI
+ [(match_operand:DI 1 "register_operand" "w")] UNSPEC_USHR64))]
+ "TARGET_SIMD"
+ "ushr\t%d0, %d1, 64"
+ [(set_attr "type" "neon_shift_imm")]
+)
+
+(define_expand "vec_set<mode>"
+ [(match_operand:VQ_S 0 "register_operand")
+ (match_operand:<VEL> 1 "register_operand")
+ (match_operand:SI 2 "immediate_operand")]
+ "TARGET_SIMD"
+ {
+ HOST_WIDE_INT elem = (HOST_WIDE_INT) 1 << INTVAL (operands[2]);
+ emit_insn (gen_aarch64_simd_vec_set<mode> (operands[0], operands[1],
+ GEN_INT (elem), operands[0]));
+ DONE;
+ }
+)
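The vec_set expander above encodes the lane index as a one-hot vec_merge mask (1 << lane), and the matching insn recovers the index with exact_log2 before remapping it for big-endian with ENDIAN_LANE_N. The stand-alone sketch below only demonstrates the mask/exact_log2 round trip; it is not GCC code and leaves the endian remapping out.

/* Round trip of the one-hot lane encoding used by vec_set.  */
#include <stdio.h>

static int
exact_log2_sketch (unsigned long long x)
{
  /* Return log2(x) if x is a power of two, else -1.  */
  if (x == 0 || (x & (x - 1)) != 0)
    return -1;
  int n = 0;
  while (x >>= 1)
    n++;
  return n;
}

int main (void)
{
  int lane = 2;
  unsigned long long mask = 1ull << lane;   /* what the expander passes */
  printf ("mask=%llu lane=%d\n", mask, exact_log2_sketch (mask));
  return 0;
}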
+
+(define_insn "aarch64_simd_vec_setv2di"
+ [(set (match_operand:V2DI 0 "register_operand" "=w,w")
+ (vec_merge:V2DI
+ (vec_duplicate:V2DI
+ (match_operand:DI 1 "register_operand" "r,w"))
+ (match_operand:V2DI 3 "register_operand" "0,0")
+ (match_operand:SI 2 "immediate_operand" "i,i")))]
+ "TARGET_SIMD"
+ {
+ int elt = ENDIAN_LANE_N (V2DImode, exact_log2 (INTVAL (operands[2])));
+ operands[2] = GEN_INT ((HOST_WIDE_INT) 1 << elt);
+ switch (which_alternative)
+ {
+ case 0:
+ return "ins\\t%0.d[%p2], %1";
+ case 1:
+ return "ins\\t%0.d[%p2], %1.d[0]";
+ default:
+ gcc_unreachable ();
+ }
+ }
+ [(set_attr "type" "neon_from_gp, neon_ins_q")]
+)
+
+(define_expand "vec_setv2di"
+ [(match_operand:V2DI 0 "register_operand")
+ (match_operand:DI 1 "register_operand")
+ (match_operand:SI 2 "immediate_operand")]
+ "TARGET_SIMD"
+ {
+ HOST_WIDE_INT elem = (HOST_WIDE_INT) 1 << INTVAL (operands[2]);
+ emit_insn (gen_aarch64_simd_vec_setv2di (operands[0], operands[1],
+ GEN_INT (elem), operands[0]));
+ DONE;
+ }
+)
+
+(define_insn "aarch64_simd_vec_set<mode>"
+ [(set (match_operand:VDQF 0 "register_operand" "=w")
+ (vec_merge:VDQF
+ (vec_duplicate:VDQF
+ (match_operand:<VEL> 1 "register_operand" "w"))
+ (match_operand:VDQF 3 "register_operand" "0")
+ (match_operand:SI 2 "immediate_operand" "i")))]
+ "TARGET_SIMD"
+ {
+ int elt = ENDIAN_LANE_N (<MODE>mode, exact_log2 (INTVAL (operands[2])));
+
+ operands[2] = GEN_INT ((HOST_WIDE_INT)1 << elt);
+ return "ins\t%0.<Vetype>[%p2], %1.<Vetype>[0]";
+ }
+ [(set_attr "type" "neon_ins<q>")]
+)
+
+(define_expand "vec_set<mode>"
+ [(match_operand:VDQF 0 "register_operand" "+w")
+ (match_operand:<VEL> 1 "register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "")]
+ "TARGET_SIMD"
+ {
+ HOST_WIDE_INT elem = (HOST_WIDE_INT) 1 << INTVAL (operands[2]);
+ emit_insn (gen_aarch64_simd_vec_set<mode> (operands[0], operands[1],
+ GEN_INT (elem), operands[0]));
+ DONE;
+ }
+)
+
+
+(define_insn "aarch64_mla<mode>"
+ [(set (match_operand:VQ_S 0 "register_operand" "=w")
+ (plus:VQ_S (mult:VQ_S (match_operand:VQ_S 2 "register_operand" "w")
+ (match_operand:VQ_S 3 "register_operand" "w"))
+ (match_operand:VQ_S 1 "register_operand" "0")))]
+ "TARGET_SIMD"
+ "mla\t%0.<Vtype>, %2.<Vtype>, %3.<Vtype>"
+ [(set_attr "type" "neon_mla_<Vetype><q>")]
+)
+
+(define_insn "*aarch64_mla_elt<mode>"
+ [(set (match_operand:VDQHS 0 "register_operand" "=w")
+ (plus:VDQHS
+ (mult:VDQHS
+ (vec_duplicate:VDQHS
+ (vec_select:<VEL>
+ (match_operand:VDQHS 1 "register_operand" "<h_con>")
+ (parallel [(match_operand:SI 2 "immediate_operand")])))
+ (match_operand:VDQHS 3 "register_operand" "w"))
+ (match_operand:VDQHS 4 "register_operand" "0")))]
+ "TARGET_SIMD"
+ {
+ operands[2] = GEN_INT (ENDIAN_LANE_N (<MODE>mode, INTVAL (operands[2])));
+ return "mla\t%0.<Vtype>, %3.<Vtype>, %1.<Vtype>[%2]";
+ }
+ [(set_attr "type" "neon_mla_<Vetype>_scalar<q>")]
+)
+
+(define_insn "*aarch64_mla_elt_<vswap_width_name><mode>"
+ [(set (match_operand:VDQHS 0 "register_operand" "=w")
+ (plus:VDQHS
+ (mult:VDQHS
+ (vec_duplicate:VDQHS
+ (vec_select:<VEL>
+ (match_operand:<VSWAP_WIDTH> 1 "register_operand" "<h_con>")
+ (parallel [(match_operand:SI 2 "immediate_operand")])))
+ (match_operand:VDQHS 3 "register_operand" "w"))
+ (match_operand:VDQHS 4 "register_operand" "0")))]
+ "TARGET_SIMD"
+ {
+ operands[2] = GEN_INT (ENDIAN_LANE_N (<VSWAP_WIDTH>mode,
+ INTVAL (operands[2])));
+ return "mla\t%0.<Vtype>, %3.<Vtype>, %1.<Vtype>[%2]";
+ }
+ [(set_attr "type" "neon_mla_<Vetype>_scalar<q>")]
+)
+
+(define_insn "aarch64_mls<mode>"
+ [(set (match_operand:VQ_S 0 "register_operand" "=w")
+ (minus:VQ_S (match_operand:VQ_S 1 "register_operand" "0")
+ (mult:VQ_S (match_operand:VQ_S 2 "register_operand" "w")
+ (match_operand:VQ_S 3 "register_operand" "w"))))]
+ "TARGET_SIMD"
+ "mls\t%0.<Vtype>, %2.<Vtype>, %3.<Vtype>"
+ [(set_attr "type" "neon_mla_<Vetype><q>")]
+)
+
+(define_insn "*aarch64_mls_elt<mode>"
+ [(set (match_operand:VDQHS 0 "register_operand" "=w")
+ (minus:VDQHS
+ (match_operand:VDQHS 4 "register_operand" "0")
+ (mult:VDQHS
+ (vec_duplicate:VDQHS
+ (vec_select:<VEL>
+ (match_operand:VDQHS 1 "register_operand" "<h_con>")
+ (parallel [(match_operand:SI 2 "immediate_operand")])))
+ (match_operand:VDQHS 3 "register_operand" "w"))))]
+ "TARGET_SIMD"
+ {
+ operands[2] = GEN_INT (ENDIAN_LANE_N (<MODE>mode, INTVAL (operands[2])));
+ return "mls\t%0.<Vtype>, %3.<Vtype>, %1.<Vtype>[%2]";
+ }
+ [(set_attr "type" "neon_mla_<Vetype>_scalar<q>")]
+)
+
+(define_insn "*aarch64_mls_elt_<vswap_width_name><mode>"
+ [(set (match_operand:VDQHS 0 "register_operand" "=w")
+ (minus:VDQHS
+ (match_operand:VDQHS 4 "register_operand" "0")
+ (mult:VDQHS
+ (vec_duplicate:VDQHS
+ (vec_select:<VEL>
+ (match_operand:<VSWAP_WIDTH> 1 "register_operand" "<h_con>")
+ (parallel [(match_operand:SI 2 "immediate_operand")])))
+ (match_operand:VDQHS 3 "register_operand" "w"))))]
+ "TARGET_SIMD"
+ {
+ operands[2] = GEN_INT (ENDIAN_LANE_N (<VSWAP_WIDTH>mode,
+ INTVAL (operands[2])));
+ return "mls\t%0.<Vtype>, %3.<Vtype>, %1.<Vtype>[%2]";
+ }
+ [(set_attr "type" "neon_mla_<Vetype>_scalar<q>")]
+)
+
+;; Max/Min operations.
+(define_insn "<su><maxmin><mode>3"
+ [(set (match_operand:VQ_S 0 "register_operand" "=w")
+ (MAXMIN:VQ_S (match_operand:VQ_S 1 "register_operand" "w")
+ (match_operand:VQ_S 2 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "<su><maxmin>\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
+ [(set_attr "type" "neon_minmax<q>")]
+)
+
+;; Move into the low half, clearing the high half to 0.
+
+(define_insn "move_lo_quad_<mode>"
+ [(set (match_operand:VQ 0 "register_operand" "=w,w,w")
+ (vec_concat:VQ
+ (match_operand:<VHALF> 1 "register_operand" "w,r,r")
+ (vec_duplicate:<VHALF> (const_int 0))))]
+ "TARGET_SIMD"
+ "@
+ dup\\t%d0, %1.d[0]
+ fmov\\t%d0, %1
+ dup\\t%d0, %1"
+ [(set_attr "type" "neon_dup<q>,fmov,neon_dup<q>")
+ (set_attr "simd" "yes,*,yes")
+ (set_attr "fp" "*,yes,*")
+ (set_attr "length" "4")]
+)
+
+;; Move into the high half.
+
+(define_insn "aarch64_simd_move_hi_quad_<mode>"
+ [(set (match_operand:VQ 0 "register_operand" "+w,w")
+ (vec_concat:VQ
+ (vec_select:<VHALF>
+ (match_dup 0)
+ (match_operand:VQ 2 "vect_par_cnst_lo_half" ""))
+ (match_operand:<VHALF> 1 "register_operand" "w,r")))]
+ "TARGET_SIMD"
+ "@
+ ins\\t%0.d[1], %1.d[0]
+ ins\\t%0.d[1], %1"
+ [(set_attr "type" "neon_ins")
+ (set_attr "length" "4")]
+)
+
+(define_expand "move_hi_quad_<mode>"
+ [(match_operand:VQ 0 "register_operand" "")
+ (match_operand:<VHALF> 1 "register_operand" "")]
+ "TARGET_SIMD"
+{
+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, false);
+ emit_insn (gen_aarch64_simd_move_hi_quad_<mode> (operands[0],
+ operands[1], p));
+ DONE;
+})
+
+;; Narrowing operations.
+
+;; For doubles.
+(define_insn "aarch64_simd_vec_pack_trunc_<mode>"
+ [(set (match_operand:<VNARROWQ> 0 "register_operand" "=w")
+ (truncate:<VNARROWQ> (match_operand:VQN 1 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "xtn\\t%0.<Vntype>, %1.<Vtype>"
+ [(set_attr "type" "neon_shift_imm_narrow_q")]
+)
+
+(define_expand "vec_pack_trunc_<mode>"
+ [(match_operand:<VNARROWD> 0 "register_operand" "")
+ (match_operand:VDN 1 "register_operand" "")
+ (match_operand:VDN 2 "register_operand" "")]
+ "TARGET_SIMD"
+{
+ rtx tempreg = gen_reg_rtx (<VDBL>mode);
+ int lo = BYTES_BIG_ENDIAN ? 2 : 1;
+ int hi = BYTES_BIG_ENDIAN ? 1 : 2;
+
+ emit_insn (gen_move_lo_quad_<Vdbl> (tempreg, operands[lo]));
+ emit_insn (gen_move_hi_quad_<Vdbl> (tempreg, operands[hi]));
+ emit_insn (gen_aarch64_simd_vec_pack_trunc_<Vdbl> (operands[0], tempreg));
+ DONE;
+})
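For the "doubles" case above, the expander first assembles a Q-register temporary from the two D-register operands (move_lo_quad/move_hi_quad, with the halves swapped on big-endian) and then narrows it with XTN. The C model below only shows the element-level effect, truncating two 2 x 32-bit inputs into one 4 x 16-bit result; lane ordering across endiannesses is handled by the expander, not modelled here.

/* Element-level model of vec_pack_trunc for a V2SI -> V4HI pack.  */
#include <stdint.h>
#include <stdio.h>

static void
pack_trunc_v2si_to_v4hi (const int32_t lo[2], const int32_t hi[2], int16_t out[4])
{
  for (int i = 0; i < 2; i++)
    {
      out[i]     = (int16_t) lo[i];   /* keep the low 16 bits */
      out[i + 2] = (int16_t) hi[i];
    }
}

int main (void)
{
  int32_t a[2] = { 0x12345, -7 }, b[2] = { 70000, 42 };
  int16_t r[4];
  pack_trunc_v2si_to_v4hi (a, b, r);
  for (int i = 0; i < 4; i++)
    printf ("%d ", r[i]);
  printf ("\n");
  return 0;
}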
+
+;; For quads.
+
+(define_insn "vec_pack_trunc_<mode>"
+ [(set (match_operand:<VNARROWQ2> 0 "register_operand" "+&w")
+ (vec_concat:<VNARROWQ2>
+ (truncate:<VNARROWQ> (match_operand:VQN 1 "register_operand" "w"))
+ (truncate:<VNARROWQ> (match_operand:VQN 2 "register_operand" "w"))))]
+ "TARGET_SIMD"
+ {
+ if (BYTES_BIG_ENDIAN)
+ return "xtn\\t%0.<Vntype>, %2.<Vtype>\;xtn2\\t%0.<V2ntype>, %1.<Vtype>";
+ else
+ return "xtn\\t%0.<Vntype>, %1.<Vtype>\;xtn2\\t%0.<V2ntype>, %2.<Vtype>";
+ }
+ [(set_attr "type" "multiple")
+ (set_attr "length" "8")]
+)
+
+;; Widening operations.
+
+(define_insn "aarch64_simd_vec_unpack<su>_lo_<mode>"
+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+ (ANY_EXTEND:<VWIDE> (vec_select:<VHALF>
+ (match_operand:VQW 1 "register_operand" "w")
+ (match_operand:VQW 2 "vect_par_cnst_lo_half" "")
+ )))]
+ "TARGET_SIMD"
+ "<su>shll %0.<Vwtype>, %1.<Vhalftype>, 0"
+ [(set_attr "type" "neon_shift_imm_long")]
+)
+
+(define_insn "aarch64_simd_vec_unpack<su>_hi_<mode>"
+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+ (ANY_EXTEND:<VWIDE> (vec_select:<VHALF>
+ (match_operand:VQW 1 "register_operand" "w")
+ (match_operand:VQW 2 "vect_par_cnst_hi_half" "")
+ )))]
+ "TARGET_SIMD"
+ "<su>shll2 %0.<Vwtype>, %1.<Vtype>, 0"
+ [(set_attr "type" "neon_shift_imm_long")]
+)
+
+(define_expand "vec_unpack<su>_hi_<mode>"
+ [(match_operand:<VWIDE> 0 "register_operand" "")
+ (ANY_EXTEND:<VWIDE> (match_operand:VQW 1 "register_operand"))]
+ "TARGET_SIMD"
+ {
+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
+ emit_insn (gen_aarch64_simd_vec_unpack<su>_hi_<mode> (operands[0],
+ operands[1], p));
+ DONE;
+ }
+)
+
+(define_expand "vec_unpack<su>_lo_<mode>"
+ [(match_operand:<VWIDE> 0 "register_operand" "")
+ (ANY_EXTEND:<VWIDE> (match_operand:VQW 1 "register_operand" ""))]
+ "TARGET_SIMD"
+ {
+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, false);
+ emit_insn (gen_aarch64_simd_vec_unpack<su>_lo_<mode> (operands[0],
+ operands[1], p));
+ DONE;
+ }
+)
+
+;; Widening arithmetic.
+
+(define_insn "*aarch64_<su>mlal_lo<mode>"
+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+ (plus:<VWIDE>
+ (mult:<VWIDE>
+ (ANY_EXTEND:<VWIDE> (vec_select:<VHALF>
+ (match_operand:VQW 2 "register_operand" "w")
+ (match_operand:VQW 3 "vect_par_cnst_lo_half" "")))
+ (ANY_EXTEND:<VWIDE> (vec_select:<VHALF>
+ (match_operand:VQW 4 "register_operand" "w")
+ (match_dup 3))))
+ (match_operand:<VWIDE> 1 "register_operand" "0")))]
+ "TARGET_SIMD"
+ "<su>mlal\t%0.<Vwtype>, %2.<Vhalftype>, %4.<Vhalftype>"
+ [(set_attr "type" "neon_mla_<Vetype>_long")]
+)
+
+(define_insn "*aarch64_<su>mlal_hi<mode>"
+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+ (plus:<VWIDE>
+ (mult:<VWIDE>
+ (ANY_EXTEND:<VWIDE> (vec_select:<VHALF>
+ (match_operand:VQW 2 "register_operand" "w")
+ (match_operand:VQW 3 "vect_par_cnst_hi_half" "")))
+ (ANY_EXTEND:<VWIDE> (vec_select:<VHALF>
+ (match_operand:VQW 4 "register_operand" "w")
+ (match_dup 3))))
+ (match_operand:<VWIDE> 1 "register_operand" "0")))]
+ "TARGET_SIMD"
+ "<su>mlal2\t%0.<Vwtype>, %2.<Vtype>, %4.<Vtype>"
+ [(set_attr "type" "neon_mla_<Vetype>_long")]
+)
+
+(define_insn "*aarch64_<su>mlsl_lo<mode>"
+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+ (minus:<VWIDE>
+ (match_operand:<VWIDE> 1 "register_operand" "0")
+ (mult:<VWIDE>
+ (ANY_EXTEND:<VWIDE> (vec_select:<VHALF>
+ (match_operand:VQW 2 "register_operand" "w")
+ (match_operand:VQW 3 "vect_par_cnst_lo_half" "")))
+ (ANY_EXTEND:<VWIDE> (vec_select:<VHALF>
+ (match_operand:VQW 4 "register_operand" "w")
+ (match_dup 3))))))]
+ "TARGET_SIMD"
+ "<su>mlsl\t%0.<Vwtype>, %2.<Vhalftype>, %4.<Vhalftype>"
+ [(set_attr "type" "neon_mla_<Vetype>_long")]
+)
+
+(define_insn "*aarch64_<su>mlsl_hi<mode>"
+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+ (minus:<VWIDE>
+ (match_operand:<VWIDE> 1 "register_operand" "0")
+ (mult:<VWIDE>
+ (ANY_EXTEND:<VWIDE> (vec_select:<VHALF>
+ (match_operand:VQW 2 "register_operand" "w")
+ (match_operand:VQW 3 "vect_par_cnst_hi_half" "")))
+ (ANY_EXTEND:<VWIDE> (vec_select:<VHALF>
+ (match_operand:VQW 4 "register_operand" "w")
+ (match_dup 3))))))]
+ "TARGET_SIMD"
+ "<su>mlsl2\t%0.<Vwtype>, %2.<Vtype>, %4.<Vtype>"
+ [(set_attr "type" "neon_mla_<Vetype>_long")]
+)
+
+(define_insn "*aarch64_<su>mlal<mode>"
+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+ (plus:<VWIDE>
+ (mult:<VWIDE>
+ (ANY_EXTEND:<VWIDE>
+ (match_operand:VDW 1 "register_operand" "w"))
+ (ANY_EXTEND:<VWIDE>
+ (match_operand:VDW 2 "register_operand" "w")))
+ (match_operand:<VWIDE> 3 "register_operand" "0")))]
+ "TARGET_SIMD"
+ "<su>mlal\t%0.<Vwtype>, %1.<Vtype>, %2.<Vtype>"
+ [(set_attr "type" "neon_mla_<Vetype>_long")]
+)
+
+(define_insn "*aarch64_<su>mlsl<mode>"
+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+ (minus:<VWIDE>
+ (match_operand:<VWIDE> 1 "register_operand" "0")
+ (mult:<VWIDE>
+ (ANY_EXTEND:<VWIDE>
+ (match_operand:VDW 2 "register_operand" "w"))
+ (ANY_EXTEND:<VWIDE>
+ (match_operand:VDW 3 "register_operand" "w")))))]
+ "TARGET_SIMD"
+ "<su>mlsl\t%0.<Vwtype>, %2.<Vtype>, %3.<Vtype>"
+ [(set_attr "type" "neon_mla_<Vetype>_long")]
+)
+
+(define_insn "aarch64_simd_vec_<su>mult_lo_<mode>"
+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+ (mult:<VWIDE> (ANY_EXTEND:<VWIDE> (vec_select:<VHALF>
+ (match_operand:VQW 1 "register_operand" "w")
+ (match_operand:VQW 3 "vect_par_cnst_lo_half" "")))
+ (ANY_EXTEND:<VWIDE> (vec_select:<VHALF>
+ (match_operand:VQW 2 "register_operand" "w")
+ (match_dup 3)))))]
+ "TARGET_SIMD"
+ "<su>mull\\t%0.<Vwtype>, %1.<Vhalftype>, %2.<Vhalftype>"
+ [(set_attr "type" "neon_mul_<Vetype>_long")]
+)
+
+(define_expand "vec_widen_<su>mult_lo_<mode>"
+ [(match_operand:<VWIDE> 0 "register_operand" "")
+ (ANY_EXTEND:<VWIDE> (match_operand:VQW 1 "register_operand" ""))
+ (ANY_EXTEND:<VWIDE> (match_operand:VQW 2 "register_operand" ""))]
+ "TARGET_SIMD"
+ {
+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, false);
+ emit_insn (gen_aarch64_simd_vec_<su>mult_lo_<mode> (operands[0],
+ operands[1],
+ operands[2], p));
+ DONE;
+ }
+)
+
+(define_insn "aarch64_simd_vec_<su>mult_hi_<mode>"
+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+ (mult:<VWIDE> (ANY_EXTEND:<VWIDE> (vec_select:<VHALF>
+ (match_operand:VQW 1 "register_operand" "w")
+ (match_operand:VQW 3 "vect_par_cnst_hi_half" "")))
+ (ANY_EXTEND:<VWIDE> (vec_select:<VHALF>
+ (match_operand:VQW 2 "register_operand" "w")
+ (match_dup 3)))))]
+ "TARGET_SIMD"
+ "<su>mull2\\t%0.<Vwtype>, %1.<Vtype>, %2.<Vtype>"
+ [(set_attr "type" "neon_mul_<Vetype>_long")]
+)
+
+(define_expand "vec_widen_<su>mult_hi_<mode>"
+ [(match_operand:<VWIDE> 0 "register_operand" "")
+ (ANY_EXTEND:<VWIDE> (match_operand:VQW 1 "register_operand" ""))
+ (ANY_EXTEND:<VWIDE> (match_operand:VQW 2 "register_operand" ""))]
+ "TARGET_SIMD"
+ {
+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
+ emit_insn (gen_aarch64_simd_vec_<su>mult_hi_<mode> (operands[0],
+ operands[1],
+ operands[2], p));
+ DONE;
+
+ }
+)
+
+;; FP vector operations.
+;; AArch64 AdvSIMD supports single-precision (32-bit) and
+;; double-precision (64-bit) floating-point data types and arithmetic as
+;; defined by the IEEE 754-2008 standard. This makes them vectorizable
+;; without the need for -ffast-math or -funsafe-math-optimizations.
+;;
+;; Floating-point operations can raise an exception. Vectorizing such
+;; operations is safe for the reasons explained below.
+;;
+;; ARMv8 permits an extension to enable trapped floating-point
+;; exception handling, however this is an optional feature. In the
+;; event of a floating-point exception being raised by vectorised
+;; code then:
+;; 1. If trapped floating-point exceptions are available, then a trap
+;; will be taken when any lane raises an enabled exception. A trap
+;; handler may determine which lane raised the exception.
+;; 2. Alternatively a sticky exception flag is set in the
+;; floating-point status register (FPSR).  Software may explicitly
+;; test the exception flags; such tests either prevent vectorisation,
+;; allowing precise identification of the failing operation, or, when
+;; performed outside of vectorisable regions, do not need to identify
+;; the specific operation and lane.
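+;;
+;; Illustrative sketch (the function name and a vectorising -O2/-O3
+;; compile are assumptions, not taken from these sources): a plain
+;; IEEE-conforming loop such as
+;;
+;;   void vadd (float *restrict c, const float *a, const float *b, int n)
+;;   {
+;;     for (int i = 0; i < n; i++)
+;;       c[i] = a[i] + b[i];
+;;   }
+;;
+;; can be vectorised onto the add<mode>3 pattern below without
+;; -ffast-math or -funsafe-math-optimizations; any exceptions raised by
+;; the vector lanes simply accumulate as sticky flags in the FPSR as
+;; described above.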
+
+;; FP arithmetic operations.
+
+(define_insn "add<mode>3"
+ [(set (match_operand:VDQF 0 "register_operand" "=w")
+ (plus:VDQF (match_operand:VDQF 1 "register_operand" "w")
+ (match_operand:VDQF 2 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "fadd\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
+ [(set_attr "type" "neon_fp_addsub_<Vetype><q>")]
+)
+
+(define_insn "sub<mode>3"
+ [(set (match_operand:VDQF 0 "register_operand" "=w")
+ (minus:VDQF (match_operand:VDQF 1 "register_operand" "w")
+ (match_operand:VDQF 2 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "fsub\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
+ [(set_attr "type" "neon_fp_addsub_<Vetype><q>")]
+)
+
+(define_insn "mul<mode>3"
+ [(set (match_operand:VDQF 0 "register_operand" "=w")
+ (mult:VDQF (match_operand:VDQF 1 "register_operand" "w")
+ (match_operand:VDQF 2 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "fmul\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
+ [(set_attr "type" "neon_fp_mul_<Vetype><q>")]
+)
+
+(define_insn "div<mode>3"
+ [(set (match_operand:VDQF 0 "register_operand" "=w")
+ (div:VDQF (match_operand:VDQF 1 "register_operand" "w")
+ (match_operand:VDQF 2 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "fdiv\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
+ [(set_attr "type" "neon_fp_div_<Vetype><q>")]
+)
+
+(define_insn "neg<mode>2"
+ [(set (match_operand:VDQF 0 "register_operand" "=w")
+ (neg:VDQF (match_operand:VDQF 1 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "fneg\\t%0.<Vtype>, %1.<Vtype>"
+ [(set_attr "type" "neon_fp_neg_<Vetype><q>")]
+)
+
+(define_insn "abs<mode>2"
+ [(set (match_operand:VDQF 0 "register_operand" "=w")
+ (abs:VDQF (match_operand:VDQF 1 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "fabs\\t%0.<Vtype>, %1.<Vtype>"
+ [(set_attr "type" "neon_fp_abs_<Vetype><q>")]
+)
+
+(define_insn "fma<mode>4"
+ [(set (match_operand:VDQF 0 "register_operand" "=w")
+ (fma:VDQF (match_operand:VDQF 1 "register_operand" "w")
+ (match_operand:VDQF 2 "register_operand" "w")
+ (match_operand:VDQF 3 "register_operand" "0")))]
+ "TARGET_SIMD"
+ "fmla\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
+ [(set_attr "type" "neon_fp_mla_<Vetype><q>")]
+)
+
+(define_insn "*aarch64_fma4_elt<mode>"
+ [(set (match_operand:VDQF 0 "register_operand" "=w")
+ (fma:VDQF
+ (vec_duplicate:VDQF
+ (vec_select:<VEL>
+ (match_operand:VDQF 1 "register_operand" "<h_con>")
+ (parallel [(match_operand:SI 2 "immediate_operand")])))
+ (match_operand:VDQF 3 "register_operand" "w")
+ (match_operand:VDQF 4 "register_operand" "0")))]
+ "TARGET_SIMD"
+ {
+ operands[2] = GEN_INT (ENDIAN_LANE_N (<MODE>mode, INTVAL (operands[2])));
+ return "fmla\\t%0.<Vtype>, %3.<Vtype>, %1.<Vtype>[%2]";
+ }
+ [(set_attr "type" "neon_fp_mla_<Vetype>_scalar<q>")]
+)
+
+(define_insn "*aarch64_fma4_elt_<vswap_width_name><mode>"
+ [(set (match_operand:VDQSF 0 "register_operand" "=w")
+ (fma:VDQSF
+ (vec_duplicate:VDQSF
+ (vec_select:<VEL>
+ (match_operand:<VSWAP_WIDTH> 1 "register_operand" "<h_con>")
+ (parallel [(match_operand:SI 2 "immediate_operand")])))
+ (match_operand:VDQSF 3 "register_operand" "w")
+ (match_operand:VDQSF 4 "register_operand" "0")))]
+ "TARGET_SIMD"
+ {
+ operands[2] = GEN_INT (ENDIAN_LANE_N (<VSWAP_WIDTH>mode,
+ INTVAL (operands[2])));
+ return "fmla\\t%0.<Vtype>, %3.<Vtype>, %1.<Vtype>[%2]";
+ }
+ [(set_attr "type" "neon_fp_mla_<Vetype>_scalar<q>")]
+)
+
+(define_insn "*aarch64_fma4_elt_to_128df"
+ [(set (match_operand:V2DF 0 "register_operand" "=w")
+ (fma:V2DF
+ (vec_duplicate:V2DF
+ (match_operand:DF 1 "register_operand" "w"))
+ (match_operand:V2DF 2 "register_operand" "w")
+ (match_operand:V2DF 3 "register_operand" "0")))]
+ "TARGET_SIMD"
+ "fmla\\t%0.2d, %2.2d, %1.2d[0]"
+ [(set_attr "type" "neon_fp_mla_d_scalar_q")]
+)
+
+(define_insn "*aarch64_fma4_elt_to_64v2df"
+ [(set (match_operand:DF 0 "register_operand" "=w")
+ (fma:DF
+ (vec_select:DF
+ (match_operand:V2DF 1 "register_operand" "w")
+ (parallel [(match_operand:SI 2 "immediate_operand")]))
+ (match_operand:DF 3 "register_operand" "w")
+ (match_operand:DF 4 "register_operand" "0")))]
+ "TARGET_SIMD"
+ {
+ operands[2] = GEN_INT (ENDIAN_LANE_N (V2DFmode, INTVAL (operands[2])));
+ return "fmla\\t%0.2d, %3.2d, %1.2d[%2]";
+ }
+ [(set_attr "type" "neon_fp_mla_d_scalar_q")]
+)
+
+(define_insn "fnma<mode>4"
+ [(set (match_operand:VDQF 0 "register_operand" "=w")
+ (fma:VDQF
+ (match_operand:VDQF 1 "register_operand" "w")
+ (neg:VDQF
+ (match_operand:VDQF 2 "register_operand" "w"))
+ (match_operand:VDQF 3 "register_operand" "0")))]
+ "TARGET_SIMD"
+ "fmls\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
+ [(set_attr "type" "neon_fp_mla_<Vetype><q>")]
+)
+
+(define_insn "*aarch64_fnma4_elt<mode>"
+ [(set (match_operand:VDQF 0 "register_operand" "=w")
+ (fma:VDQF
+ (neg:VDQF
+ (match_operand:VDQF 3 "register_operand" "w"))
+ (vec_duplicate:VDQF
+ (vec_select:<VEL>
+ (match_operand:VDQF 1 "register_operand" "<h_con>")
+ (parallel [(match_operand:SI 2 "immediate_operand")])))
+ (match_operand:VDQF 4 "register_operand" "0")))]
+ "TARGET_SIMD"
+ {
+ operands[2] = GEN_INT (ENDIAN_LANE_N (<MODE>mode, INTVAL (operands[2])));
+ return "fmls\\t%0.<Vtype>, %3.<Vtype>, %1.<Vtype>[%2]";
+ }
+ [(set_attr "type" "neon_fp_mla_<Vetype>_scalar<q>")]
+)
+
+(define_insn "*aarch64_fnma4_elt_<vswap_width_name><mode>"
+ [(set (match_operand:VDQSF 0 "register_operand" "=w")
+ (fma:VDQSF
+ (neg:VDQSF
+ (match_operand:VDQSF 3 "register_operand" "w"))
+ (vec_duplicate:VDQSF
+ (vec_select:<VEL>
+ (match_operand:<VSWAP_WIDTH> 1 "register_operand" "<h_con>")
+ (parallel [(match_operand:SI 2 "immediate_operand")])))
+ (match_operand:VDQSF 4 "register_operand" "0")))]
+ "TARGET_SIMD"
+ {
+ operands[2] = GEN_INT (ENDIAN_LANE_N (<VSWAP_WIDTH>mode,
+ INTVAL (operands[2])));
+ return "fmls\\t%0.<Vtype>, %3.<Vtype>, %1.<Vtype>[%2]";
+ }
+ [(set_attr "type" "neon_fp_mla_<Vetype>_scalar<q>")]
+)
+
+(define_insn "*aarch64_fnma4_elt_to_128df"
+ [(set (match_operand:V2DF 0 "register_operand" "=w")
+ (fma:V2DF
+ (neg:V2DF
+ (match_operand:V2DF 2 "register_operand" "w"))
+ (vec_duplicate:V2DF
+ (match_operand:DF 1 "register_operand" "w"))
+ (match_operand:V2DF 3 "register_operand" "0")))]
+ "TARGET_SIMD"
+ "fmls\\t%0.2d, %2.2d, %1.2d[0]"
+ [(set_attr "type" "neon_fp_mla_d_scalar_q")]
+)
+
+(define_insn "*aarch64_fnma4_elt_to_64v2df"
+ [(set (match_operand:DF 0 "register_operand" "=w")
+ (fma:DF
+ (vec_select:DF
+ (match_operand:V2DF 1 "register_operand" "w")
+ (parallel [(match_operand:SI 2 "immediate_operand")]))
+ (neg:DF
+ (match_operand:DF 3 "register_operand" "w"))
+ (match_operand:DF 4 "register_operand" "0")))]
+ "TARGET_SIMD"
+ {
+ operands[2] = GEN_INT (ENDIAN_LANE_N (V2DFmode, INTVAL (operands[2])));
+ return "fmls\\t%0.2d, %3.2d, %1.2d[%2]";
+ }
+ [(set_attr "type" "neon_fp_mla_d_scalar_q")]
+)
+
+;; Vector versions of the floating-point frint patterns.
+;; Expands to btrunc, ceil, floor, nearbyint, rint, round.
+(define_insn "<frint_pattern><mode>2"
+ [(set (match_operand:VDQF 0 "register_operand" "=w")
+ (unspec:VDQF [(match_operand:VDQF 1 "register_operand" "w")]
+ FRINT))]
+ "TARGET_SIMD"
+ "frint<frint_suffix>\\t%0.<Vtype>, %1.<Vtype>"
+ [(set_attr "type" "neon_fp_round_<Vetype><q>")]
+)
+
+;; Vector versions of the fcvt standard patterns.
+;; Expands to lbtrunc, lround, lceil, lfloor
+(define_insn "l<fcvt_pattern><su_optab><VDQF:mode><fcvt_target>2"
+ [(set (match_operand:<FCVT_TARGET> 0 "register_operand" "=w")
+ (FIXUORS:<FCVT_TARGET> (unspec:<FCVT_TARGET>
+ [(match_operand:VDQF 1 "register_operand" "w")]
+ FCVT)))]
+ "TARGET_SIMD"
+ "fcvt<frint_suffix><su>\\t%0.<Vtype>, %1.<Vtype>"
+ [(set_attr "type" "neon_fp_to_int_<Vetype><q>")]
+)
+
+(define_expand "<optab><VDQF:mode><fcvt_target>2"
+ [(set (match_operand:<FCVT_TARGET> 0 "register_operand")
+ (FIXUORS:<FCVT_TARGET> (unspec:<FCVT_TARGET>
+ [(match_operand:VDQF 1 "register_operand")]
+ UNSPEC_FRINTZ)))]
+ "TARGET_SIMD"
+ {})
+
+(define_expand "<fix_trunc_optab><VDQF:mode><fcvt_target>2"
+ [(set (match_operand:<FCVT_TARGET> 0 "register_operand")
+ (FIXUORS:<FCVT_TARGET> (unspec:<FCVT_TARGET>
+ [(match_operand:VDQF 1 "register_operand")]
+ UNSPEC_FRINTZ)))]
+ "TARGET_SIMD"
+ {})
+
+(define_expand "ftrunc<VDQF:mode>2"
+ [(set (match_operand:VDQF 0 "register_operand")
+ (unspec:VDQF [(match_operand:VDQF 1 "register_operand")]
+ UNSPEC_FRINTZ))]
+ "TARGET_SIMD"
+ {})
+
+(define_insn "<optab><fcvt_target><VDQF:mode>2"
+ [(set (match_operand:VDQF 0 "register_operand" "=w")
+ (FLOATUORS:VDQF
+ (match_operand:<FCVT_TARGET> 1 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "<su_optab>cvtf\\t%0.<Vtype>, %1.<Vtype>"
+ [(set_attr "type" "neon_int_to_fp_<Vetype><q>")]
+)
+
+;; Conversions between vectors of floats and doubles.
+;; Contains a mix of patterns to match standard pattern names
+;; and those for intrinsics.
+
+;; Float widening operations.
+
+(define_insn "vec_unpacks_lo_v4sf"
+ [(set (match_operand:V2DF 0 "register_operand" "=w")
+ (float_extend:V2DF
+ (vec_select:V2SF
+ (match_operand:V4SF 1 "register_operand" "w")
+ (parallel [(const_int 0) (const_int 1)])
+ )))]
+ "TARGET_SIMD"
+ "fcvtl\\t%0.2d, %1.2s"
+ [(set_attr "type" "neon_fp_cvt_widen_s")]
+)
+
+(define_insn "aarch64_float_extend_lo_v2df"
+ [(set (match_operand:V2DF 0 "register_operand" "=w")
+ (float_extend:V2DF
+ (match_operand:V2SF 1 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "fcvtl\\t%0.2d, %1.2s"
+ [(set_attr "type" "neon_fp_cvt_widen_s")]
+)
+
+(define_insn "vec_unpacks_hi_v4sf"
+ [(set (match_operand:V2DF 0 "register_operand" "=w")
+ (float_extend:V2DF
+ (vec_select:V2SF
+ (match_operand:V4SF 1 "register_operand" "w")
+ (parallel [(const_int 2) (const_int 3)])
+ )))]
+ "TARGET_SIMD"
+ "fcvtl2\\t%0.2d, %1.4s"
+ [(set_attr "type" "neon_fp_cvt_widen_s")]
+)
+
+;; Float narrowing operations.
+
+(define_insn "aarch64_float_truncate_lo_v2sf"
+ [(set (match_operand:V2SF 0 "register_operand" "=w")
+ (float_truncate:V2SF
+ (match_operand:V2DF 1 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "fcvtn\\t%0.2s, %1.2d"
+ [(set_attr "type" "neon_fp_cvt_narrow_d_q")]
+)
+
+(define_insn "aarch64_float_truncate_hi_v4sf"
+ [(set (match_operand:V4SF 0 "register_operand" "=w")
+ (vec_concat:V4SF
+ (match_operand:V2SF 1 "register_operand" "0")
+ (float_truncate:V2SF
+ (match_operand:V2DF 2 "register_operand" "w"))))]
+ "TARGET_SIMD"
+ "fcvtn2\\t%0.4s, %2.2d"
+ [(set_attr "type" "neon_fp_cvt_narrow_d_q")]
+)
+
+(define_expand "vec_pack_trunc_v2df"
+ [(set (match_operand:V4SF 0 "register_operand")
+ (vec_concat:V4SF
+ (float_truncate:V2SF
+ (match_operand:V2DF 1 "register_operand"))
+ (float_truncate:V2SF
+ (match_operand:V2DF 2 "register_operand"))
+ ))]
+ "TARGET_SIMD"
+ {
+ rtx tmp = gen_reg_rtx (V2SFmode);
+ int lo = BYTES_BIG_ENDIAN ? 2 : 1;
+ int hi = BYTES_BIG_ENDIAN ? 1 : 2;
+
+ emit_insn (gen_aarch64_float_truncate_lo_v2sf (tmp, operands[lo]));
+ emit_insn (gen_aarch64_float_truncate_hi_v4sf (operands[0],
+ tmp, operands[hi]));
+ DONE;
+ }
+)
+
+(define_expand "vec_pack_trunc_df"
+ [(set (match_operand:V2SF 0 "register_operand")
+ (vec_concat:V2SF
+ (float_truncate:SF
+ (match_operand:DF 1 "register_operand"))
+ (float_truncate:SF
+ (match_operand:DF 2 "register_operand"))
+ ))]
+ "TARGET_SIMD"
+ {
+ rtx tmp = gen_reg_rtx (V2SFmode);
+ int lo = BYTES_BIG_ENDIAN ? 2 : 1;
+ int hi = BYTES_BIG_ENDIAN ? 1 : 2;
+
+ emit_insn (gen_move_lo_quad_v2df (tmp, operands[lo]));
+ emit_insn (gen_move_hi_quad_v2df (tmp, operands[hi]));
+ emit_insn (gen_aarch64_float_truncate_lo_v2sf (operands[0], tmp));
+ DONE;
+ }
+)
+
+(define_insn "aarch64_vmls<mode>"
+ [(set (match_operand:VDQF 0 "register_operand" "=w")
+ (minus:VDQF (match_operand:VDQF 1 "register_operand" "0")
+ (mult:VDQF (match_operand:VDQF 2 "register_operand" "w")
+ (match_operand:VDQF 3 "register_operand" "w"))))]
+ "TARGET_SIMD"
+ "fmls\\t%0.<Vtype>, %2.<Vtype>, %3.<Vtype>"
+ [(set_attr "type" "neon_fp_mla_<Vetype>_scalar<q>")]
+)
+
+;; FP Max/Min
+;; Max/Min are introduced by idiom recognition in GCC's mid-end. An
+;; expression like:
+;; a = (b < c) ? b : c;
+;; is idiom-matched as MIN_EXPR<b,c> only if -ffinite-math-only is enabled
+;; either explicitly or indirectly via -ffast-math.
+;;
+;; MIN_EXPR and MAX_EXPR eventually map to 'smin' and 'smax' in RTL.
+;; The 'smax' and 'smin' RTL standard pattern names do not specify which
+;; operand will be returned when both operands are zero (i.e. they may not
+;; honour signed zeroes), or when either operand is NaN. Therefore GCC
+;; only introduces MIN_EXPR/MAX_EXPR in fast math mode or when not honouring
+;; NaNs.
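+;;
+;; Illustrative sketch (the function name and a vectorising
+;; -O3 -ffast-math compile are assumptions, not taken from these
+;; sources):
+;;
+;;   void vmin (float *restrict d, const float *a, const float *b, int n)
+;;   {
+;;     for (int i = 0; i < n; i++)
+;;       d[i] = (a[i] < b[i]) ? a[i] : b[i];
+;;   }
+;;
+;; Only with -ffinite-math-only in effect (here via -ffast-math) is the
+;; conditional recognised as MIN_EXPR; vectorisation then yields the
+;; smin RTL matched by the f<maxmin>nm pattern below.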
+
+(define_insn "<su><maxmin><mode>3"
+ [(set (match_operand:VDQF 0 "register_operand" "=w")
+ (FMAXMIN:VDQF (match_operand:VDQF 1 "register_operand" "w")
+ (match_operand:VDQF 2 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "f<maxmin>nm\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
+ [(set_attr "type" "neon_fp_minmax_<Vetype><q>")]
+)
+
+(define_insn "<maxmin_uns><mode>3"
+ [(set (match_operand:VDQF 0 "register_operand" "=w")
+ (unspec:VDQF [(match_operand:VDQF 1 "register_operand" "w")
+ (match_operand:VDQF 2 "register_operand" "w")]
+ FMAXMIN_UNS))]
+ "TARGET_SIMD"
+ "<maxmin_uns_op>\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
+ [(set_attr "type" "neon_fp_minmax_<Vetype><q>")]
+)
+
+;; 'across lanes' add.
+
+(define_insn "reduc_<sur>plus_<mode>"
+ [(set (match_operand:VDQV 0 "register_operand" "=w")
+ (unspec:VDQV [(match_operand:VDQV 1 "register_operand" "w")]
+ SUADDV))]
+ "TARGET_SIMD"
+ "add<VDQV:vp>\\t%<Vetype>0, %1.<Vtype>"
+ [(set_attr "type" "neon_reduc_add<q>")]
+)
+
+(define_insn "reduc_<sur>plus_v2si"
+ [(set (match_operand:V2SI 0 "register_operand" "=w")
+ (unspec:V2SI [(match_operand:V2SI 1 "register_operand" "w")]
+ SUADDV))]
+ "TARGET_SIMD"
+ "addp\\t%0.2s, %1.2s, %1.2s"
+ [(set_attr "type" "neon_reduc_add")]
+)
+
+(define_insn "reduc_splus_<mode>"
+ [(set (match_operand:V2F 0 "register_operand" "=w")
+ (unspec:V2F [(match_operand:V2F 1 "register_operand" "w")]
+ UNSPEC_FADDV))]
+ "TARGET_SIMD"
+ "faddp\\t%<Vetype>0, %1.<Vtype>"
+ [(set_attr "type" "neon_fp_reduc_add_<Vetype><q>")]
+)
+
+(define_insn "aarch64_addpv4sf"
+ [(set (match_operand:V4SF 0 "register_operand" "=w")
+ (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "w")]
+ UNSPEC_FADDV))]
+ "TARGET_SIMD"
+ "faddp\\t%0.4s, %1.4s, %1.4s"
+ [(set_attr "type" "neon_fp_reduc_add_s_q")]
+)
+
+(define_expand "reduc_splus_v4sf"
+ [(set (match_operand:V4SF 0 "register_operand")
+ (unspec:V4SF [(match_operand:V4SF 1 "register_operand")]
+ UNSPEC_FADDV))]
+ "TARGET_SIMD"
+{
+ emit_insn (gen_aarch64_addpv4sf (operands[0], operands[1]));
+ emit_insn (gen_aarch64_addpv4sf (operands[0], operands[0]));
+ DONE;
+})
+
+(define_insn "clz<mode>2"
+ [(set (match_operand:VDQ_BHSI 0 "register_operand" "=w")
+ (clz:VDQ_BHSI (match_operand:VDQ_BHSI 1 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "clz\\t%0.<Vtype>, %1.<Vtype>"
+ [(set_attr "type" "neon_cls<q>")]
+)
+
+;; 'across lanes' max and min ops.
+
+(define_insn "reduc_<maxmin_uns>_<mode>"
+ [(set (match_operand:VDQV_S 0 "register_operand" "=w")
+ (unspec:VDQV_S [(match_operand:VDQV_S 1 "register_operand" "w")]
+ MAXMINV))]
+ "TARGET_SIMD"
+ "<maxmin_uns_op>v\\t%<Vetype>0, %1.<Vtype>"
+ [(set_attr "type" "neon_reduc_minmax<q>")]
+)
+
+(define_insn "reduc_<maxmin_uns>_v2si"
+ [(set (match_operand:V2SI 0 "register_operand" "=w")
+ (unspec:V2SI [(match_operand:V2SI 1 "register_operand" "w")]
+ MAXMINV))]
+ "TARGET_SIMD"
+ "<maxmin_uns_op>p\\t%0.2s, %1.2s, %1.2s"
+ [(set_attr "type" "neon_reduc_minmax")]
+)
+
+(define_insn "reduc_<maxmin_uns>_<mode>"
+ [(set (match_operand:V2F 0 "register_operand" "=w")
+ (unspec:V2F [(match_operand:V2F 1 "register_operand" "w")]
+ FMAXMINV))]
+ "TARGET_SIMD"
+ "<maxmin_uns_op>p\\t%<Vetype>0, %1.<Vtype>"
+ [(set_attr "type" "neon_fp_reduc_minmax_<Vetype><q>")]
+)
+
+(define_insn "reduc_<maxmin_uns>_v4sf"
+ [(set (match_operand:V4SF 0 "register_operand" "=w")
+ (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "w")]
+ FMAXMINV))]
+ "TARGET_SIMD"
+ "<maxmin_uns_op>v\\t%s0, %1.4s"
+ [(set_attr "type" "neon_fp_reduc_minmax_s_q")]
+)
+
+;; aarch64_simd_bsl may compile to any of bsl/bif/bit depending on register
+;; allocation.
+;; Operand 1 is the mask, operands 2 and 3 are the bitfields from which
+;; to select.
+;;
+;; Thus our BSL is of the form:
+;; op0 = bsl (mask, op2, op3)
+;; We can use any of:
+;;
+;;   if (op0 = mask)
+;;     bsl  mask, op2, op3
+;;   if (op0 = op3) (so 1-bits in mask choose bits from op2, else op0 keeps op3)
+;;     bit  op0, op2, mask
+;;   if (op0 = op2) (so 0-bits in mask choose bits from op3, else op0 keeps op2)
+;;     bif  op0, op3, mask
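+;;
+;; Illustrative sketch of the underlying bitwise select (names
+;; hypothetical, not taken from these sources).  All three instructions
+;; compute the same value and differ only in which input the result
+;; overwrites:
+;;
+;;   #include <stdint.h>
+;;
+;;   uint64_t bsl64 (uint64_t mask, uint64_t op2, uint64_t op3)
+;;   {
+;;     return (mask & op2) | (~mask & op3);
+;;   }
+;;
+;; which is exactly the (ior (and ...) (and (not ...) ...)) form matched
+;; by the pattern below.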
+
+(define_insn "aarch64_simd_bsl<mode>_internal"
+ [(set (match_operand:VALLDIF 0 "register_operand" "=w,w,w")
+ (ior:VALLDIF
+ (and:VALLDIF
+ (match_operand:<V_cmp_result> 1 "register_operand" " 0,w,w")
+ (match_operand:VALLDIF 2 "register_operand" " w,w,0"))
+ (and:VALLDIF
+ (not:<V_cmp_result>
+ (match_dup:<V_cmp_result> 1))
+ (match_operand:VALLDIF 3 "register_operand" " w,0,w"))
+ ))]
+ "TARGET_SIMD"
+ "@
+ bsl\\t%0.<Vbtype>, %2.<Vbtype>, %3.<Vbtype>
+ bit\\t%0.<Vbtype>, %2.<Vbtype>, %1.<Vbtype>
+ bif\\t%0.<Vbtype>, %3.<Vbtype>, %1.<Vbtype>"
+ [(set_attr "type" "neon_bsl<q>")]
+)
+
+(define_expand "aarch64_simd_bsl<mode>"
+ [(match_operand:VALLDIF 0 "register_operand")
+ (match_operand:<V_cmp_result> 1 "register_operand")
+ (match_operand:VALLDIF 2 "register_operand")
+ (match_operand:VALLDIF 3 "register_operand")]
+ "TARGET_SIMD"
+{
+ /* We can't alias operands together if they have different modes. */
+ operands[1] = gen_lowpart (<V_cmp_result>mode, operands[1]);
+ emit_insn (gen_aarch64_simd_bsl<mode>_internal (operands[0], operands[1],
+ operands[2], operands[3]));
+ DONE;
+})
+
+(define_expand "aarch64_vcond_internal<mode><mode>"
+ [(set (match_operand:VDQ 0 "register_operand")
+ (if_then_else:VDQ
+ (match_operator 3 "comparison_operator"
+ [(match_operand:VDQ 4 "register_operand")
+ (match_operand:VDQ 5 "nonmemory_operand")])
+ (match_operand:VDQ 1 "nonmemory_operand")
+ (match_operand:VDQ 2 "nonmemory_operand")))]
+ "TARGET_SIMD"
+{
+ int inverse = 0, has_zero_imm_form = 0;
+ rtx op1 = operands[1];
+ rtx op2 = operands[2];
+ rtx mask = gen_reg_rtx (<MODE>mode);
+
+ switch (GET_CODE (operands[3]))
+ {
+ case LE:
+ case LT:
+ case NE:
+ inverse = 1;
+ /* Fall through. */
+ case GE:
+ case GT:
+ case EQ:
+ has_zero_imm_form = 1;
+ break;
+ case LEU:
+ case LTU:
+ inverse = 1;
+ break;
+ default:
+ break;
+ }
+
+ if (!REG_P (operands[5])
+ && (operands[5] != CONST0_RTX (<MODE>mode) || !has_zero_imm_form))
+ operands[5] = force_reg (<MODE>mode, operands[5]);
+
+ switch (GET_CODE (operands[3]))
+ {
+ case LT:
+ case GE:
+ emit_insn (gen_aarch64_cmge<mode> (mask, operands[4], operands[5]));
+ break;
+
+ case LE:
+ case GT:
+ emit_insn (gen_aarch64_cmgt<mode> (mask, operands[4], operands[5]));
+ break;
+
+ case LTU:
+ case GEU:
+ emit_insn (gen_aarch64_cmgeu<mode> (mask, operands[4], operands[5]));
+ break;
+
+ case LEU:
+ case GTU:
+ emit_insn (gen_aarch64_cmgtu<mode> (mask, operands[4], operands[5]));
+ break;
+
+ case NE:
+ case EQ:
+ emit_insn (gen_aarch64_cmeq<mode> (mask, operands[4], operands[5]));
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ if (inverse)
+ {
+ op1 = operands[2];
+ op2 = operands[1];
+ }
+
+  /* If we have (a = (b CMP c) ? -1 : 0), then we can simply move
+     the generated mask.  */
+
+ if (op1 == CONSTM1_RTX (<V_cmp_result>mode)
+ && op2 == CONST0_RTX (<V_cmp_result>mode))
+ emit_move_insn (operands[0], mask);
+ else
+ {
+ if (!REG_P (op1))
+ op1 = force_reg (<MODE>mode, op1);
+ if (!REG_P (op2))
+ op2 = force_reg (<MODE>mode, op2);
+ emit_insn (gen_aarch64_simd_bsl<mode> (operands[0], mask,
+ op1, op2));
+ }
+
+ DONE;
+})
+
+(define_expand "aarch64_vcond_internal<VDQF_COND:mode><VDQF:mode>"
+ [(set (match_operand:VDQF_COND 0 "register_operand")
+ (if_then_else:VDQF
+ (match_operator 3 "comparison_operator"
+ [(match_operand:VDQF 4 "register_operand")
+ (match_operand:VDQF 5 "nonmemory_operand")])
+ (match_operand:VDQF_COND 1 "nonmemory_operand")
+ (match_operand:VDQF_COND 2 "nonmemory_operand")))]
+ "TARGET_SIMD"
+{
+ int inverse = 0;
+ int use_zero_form = 0;
+ int swap_bsl_operands = 0;
+ rtx op1 = operands[1];
+ rtx op2 = operands[2];
+ rtx mask = gen_reg_rtx (<VDQF_COND:V_cmp_result>mode);
+ rtx tmp = gen_reg_rtx (<VDQF_COND:V_cmp_result>mode);
+
+ rtx (*base_comparison) (rtx, rtx, rtx);
+ rtx (*complimentary_comparison) (rtx, rtx, rtx);
+
+ switch (GET_CODE (operands[3]))
+ {
+ case GE:
+ case GT:
+ case LE:
+ case LT:
+ case EQ:
+ if (operands[5] == CONST0_RTX (<MODE>mode))
+ {
+ use_zero_form = 1;
+ break;
+ }
+ /* Fall through. */
+ default:
+ if (!REG_P (operands[5]))
+ operands[5] = force_reg (<VDQF:MODE>mode, operands[5]);
+ }
+
+ switch (GET_CODE (operands[3]))
+ {
+ case LT:
+ case UNLT:
+ inverse = 1;
+ /* Fall through. */
+ case GE:
+ case UNGE:
+ case ORDERED:
+ case UNORDERED:
+ base_comparison = gen_aarch64_cmge<VDQF:mode>;
+ complimentary_comparison = gen_aarch64_cmgt<VDQF:mode>;
+ break;
+ case LE:
+ case UNLE:
+ inverse = 1;
+ /* Fall through. */
+ case GT:
+ case UNGT:
+ base_comparison = gen_aarch64_cmgt<VDQF:mode>;
+ complimentary_comparison = gen_aarch64_cmge<VDQF:mode>;
+ break;
+ case EQ:
+ case NE:
+ case UNEQ:
+ base_comparison = gen_aarch64_cmeq<VDQF:mode>;
+ complimentary_comparison = gen_aarch64_cmeq<VDQF:mode>;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+
+ switch (GET_CODE (operands[3]))
+ {
+ case LT:
+ case LE:
+ case GT:
+ case GE:
+ case EQ:
+ /* The easy case. Here we emit one of FCMGE, FCMGT or FCMEQ.
+	 Since a LT b <=> b GT a and a LE b <=> b GE a, our transformations are:
+ a GE b -> a GE b
+ a GT b -> a GT b
+ a LE b -> b GE a
+ a LT b -> b GT a
+ a EQ b -> a EQ b
+ Note that there also exist direct comparison against 0 forms,
+ so catch those as a special case. */
+ if (use_zero_form)
+ {
+ inverse = 0;
+ switch (GET_CODE (operands[3]))
+ {
+ case LT:
+ base_comparison = gen_aarch64_cmlt<VDQF:mode>;
+ break;
+ case LE:
+ base_comparison = gen_aarch64_cmle<VDQF:mode>;
+ break;
+ default:
+ /* Do nothing, other zero form cases already have the correct
+ base_comparison. */
+ break;
+ }
+ }
+
+ if (!inverse)
+ emit_insn (base_comparison (mask, operands[4], operands[5]));
+ else
+ emit_insn (complimentary_comparison (mask, operands[5], operands[4]));
+ break;
+ case UNLT:
+ case UNLE:
+ case UNGT:
+ case UNGE:
+ case NE:
+     /* FCM returns false for lanes which are unordered, so if we emit
+	the inverse of the comparison we actually want and then swap the
+	operands to BSL, we end up with the correct result.
+ Note that a NE NaN and NaN NE b are true for all a, b.
+
+ Our transformations are:
+ a GE b -> !(b GT a)
+ a GT b -> !(b GE a)
+ a LE b -> !(a GT b)
+ a LT b -> !(a GE b)
+ a NE b -> !(a EQ b) */
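+      /* Illustrative worked case (an assumption for exposition, not
+	 from these sources): for a UNGE b, i.e. (a >= b || unordered),
+	 the table above selects b GT a.  FCMGT sets a lane only when
+	 b > a and the operands are ordered, so the mask is exactly the
+	 complement of a UNGE b; swapping the operands to BSL then picks
+	 operands[1] in precisely the lanes where a UNGE b holds.  */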
+
+ if (inverse)
+ emit_insn (base_comparison (mask, operands[4], operands[5]));
+ else
+ emit_insn (complimentary_comparison (mask, operands[5], operands[4]));
+
+ swap_bsl_operands = 1;
+ break;
+ case UNEQ:
+      /* We check (a > b || b > a).  Combining these comparisons gives
+	 us a mask that is true iff (a != b && a ORDERED b); swapping
+	 the operands to BSL will then give us (a == b || a UNORDERED b)
+	 as intended.  */
+
+ emit_insn (gen_aarch64_cmgt<VDQF:mode> (mask, operands[4], operands[5]));
+ emit_insn (gen_aarch64_cmgt<VDQF:mode> (tmp, operands[5], operands[4]));
+ emit_insn (gen_ior<VDQF_COND:v_cmp_result>3 (mask, mask, tmp));
+ swap_bsl_operands = 1;
+ break;
+ case UNORDERED:
+ /* Operands are ORDERED iff (a > b || b >= a).
+ Swapping the operands to BSL will give the UNORDERED case. */
+ swap_bsl_operands = 1;
+ /* Fall through. */
+ case ORDERED:
+ emit_insn (gen_aarch64_cmgt<VDQF:mode> (tmp, operands[4], operands[5]));
+ emit_insn (gen_aarch64_cmge<VDQF:mode> (mask, operands[5], operands[4]));
+ emit_insn (gen_ior<VDQF_COND:v_cmp_result>3 (mask, mask, tmp));
+ break;
+ default:
+ gcc_unreachable ();
+ }
+
+ if (swap_bsl_operands)
+ {
+ op1 = operands[2];
+ op2 = operands[1];
+ }
+
+  /* If we have (a = (b CMP c) ? -1 : 0), then we can simply move
+     the generated mask.  */
+
+ if (op1 == CONSTM1_RTX (<VDQF_COND:V_cmp_result>mode)
+ && op2 == CONST0_RTX (<VDQF_COND:V_cmp_result>mode))
+ emit_move_insn (operands[0], mask);
+ else
+ {
+ if (!REG_P (op1))
+ op1 = force_reg (<VDQF_COND:MODE>mode, op1);
+ if (!REG_P (op2))
+ op2 = force_reg (<VDQF_COND:MODE>mode, op2);
+ emit_insn (gen_aarch64_simd_bsl<VDQF_COND:mode> (operands[0], mask,
+ op1, op2));
+ }
+
+ DONE;
+})
+
+(define_expand "vcond<mode><mode>"
+ [(set (match_operand:VALL 0 "register_operand")
+ (if_then_else:VALL
+ (match_operator 3 "comparison_operator"
+ [(match_operand:VALL 4 "register_operand")
+ (match_operand:VALL 5 "nonmemory_operand")])
+ (match_operand:VALL 1 "nonmemory_operand")
+ (match_operand:VALL 2 "nonmemory_operand")))]
+ "TARGET_SIMD"
+{
+ emit_insn (gen_aarch64_vcond_internal<mode><mode> (operands[0], operands[1],
+ operands[2], operands[3],
+ operands[4], operands[5]));
+ DONE;
+})
+
+(define_expand "vcond<v_cmp_result><mode>"
+ [(set (match_operand:<V_cmp_result> 0 "register_operand")
+ (if_then_else:<V_cmp_result>
+ (match_operator 3 "comparison_operator"
+ [(match_operand:VDQF 4 "register_operand")
+ (match_operand:VDQF 5 "nonmemory_operand")])
+ (match_operand:<V_cmp_result> 1 "nonmemory_operand")
+ (match_operand:<V_cmp_result> 2 "nonmemory_operand")))]
+ "TARGET_SIMD"
+{
+ emit_insn (gen_aarch64_vcond_internal<v_cmp_result><mode> (
+ operands[0], operands[1],
+ operands[2], operands[3],
+ operands[4], operands[5]));
+ DONE;
+})
+
+(define_expand "vcondu<mode><mode>"
+ [(set (match_operand:VDQ 0 "register_operand")
+ (if_then_else:VDQ
+ (match_operator 3 "comparison_operator"
+ [(match_operand:VDQ 4 "register_operand")
+ (match_operand:VDQ 5 "nonmemory_operand")])
+ (match_operand:VDQ 1 "nonmemory_operand")
+ (match_operand:VDQ 2 "nonmemory_operand")))]
+ "TARGET_SIMD"
+{
+ emit_insn (gen_aarch64_vcond_internal<mode><mode> (operands[0], operands[1],
+ operands[2], operands[3],
+ operands[4], operands[5]));
+ DONE;
+})
+
+;; Patterns for AArch64 SIMD Intrinsics.
+
+(define_expand "aarch64_create<mode>"
+ [(match_operand:VD_RE 0 "register_operand" "")
+ (match_operand:DI 1 "general_operand" "")]
+ "TARGET_SIMD"
+{
+ rtx src = gen_lowpart (<MODE>mode, operands[1]);
+ emit_move_insn (operands[0], src);
+ DONE;
+})
+
+;; Lane extraction with sign extension to general purpose register.
+(define_insn "*aarch64_get_lane_extend<GPI:mode><VDQQH:mode>"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (sign_extend:GPI
+ (vec_select:<VEL>
+ (match_operand:VDQQH 1 "register_operand" "w")
+ (parallel [(match_operand:SI 2 "immediate_operand" "i")]))))]
+ "TARGET_SIMD"
+ {
+ operands[2] = GEN_INT (ENDIAN_LANE_N (<MODE>mode, INTVAL (operands[2])));
+ return "smov\\t%<GPI:w>0, %1.<VDQQH:Vetype>[%2]";
+ }
+ [(set_attr "type" "neon_to_gp<q>")]
+)
+
+(define_insn "*aarch64_get_lane_zero_extendsi<mode>"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (zero_extend:SI
+ (vec_select:<VEL>
+ (match_operand:VDQQH 1 "register_operand" "w")
+ (parallel [(match_operand:SI 2 "immediate_operand" "i")]))))]
+ "TARGET_SIMD"
+ {
+ operands[2] = GEN_INT (ENDIAN_LANE_N (<MODE>mode, INTVAL (operands[2])));
+ return "umov\\t%w0, %1.<Vetype>[%2]";
+ }
+ [(set_attr "type" "neon_to_gp<q>")]
+)
+
+(define_expand "aarch64_be_checked_get_lane<mode>"
+ [(match_operand:<VEL> 0 "aarch64_simd_nonimmediate_operand")
+ (match_operand:VALL 1 "register_operand")
+ (match_operand:SI 2 "immediate_operand")]
+ "TARGET_SIMD"
+ {
+ operands[2] = GEN_INT (ENDIAN_LANE_N (<MODE>mode, INTVAL (operands[2])));
+ emit_insn (gen_aarch64_get_lane<mode> (operands[0],
+ operands[1],
+ operands[2]));
+ DONE;
+ }
+)
+
+;; Lane extraction of a value; neither sign nor zero extension
+;; is guaranteed, so the upper bits should be considered undefined.
+(define_insn "aarch64_get_lane<mode>"
+ [(set (match_operand:<VEL> 0 "aarch64_simd_nonimmediate_operand" "=r, w, Utv")
+ (vec_select:<VEL>
+ (match_operand:VALL 1 "register_operand" "w, w, w")
+ (parallel [(match_operand:SI 2 "immediate_operand" "i, i, i")])))]
+ "TARGET_SIMD"
+ {
+ operands[2] = GEN_INT (ENDIAN_LANE_N (<MODE>mode, INTVAL (operands[2])));
+ switch (which_alternative)
+ {
+ case 0:
+ return "umov\\t%<vwcore>0, %1.<Vetype>[%2]";
+ case 1:
+ return "dup\\t%<Vetype>0, %1.<Vetype>[%2]";
+ case 2:
+ return "st1\\t{%1.<Vetype>}[%2], %0";
+ default:
+ gcc_unreachable ();
+ }
+ }
+ [(set_attr "type" "neon_to_gp<q>, neon_dup<q>, neon_store1_one_lane<q>")]
+)
+
+(define_expand "aarch64_get_lanedi"
+ [(match_operand:DI 0 "register_operand")
+ (match_operand:DI 1 "register_operand")
+ (match_operand:SI 2 "immediate_operand")]
+ "TARGET_SIMD"
+{
+ aarch64_simd_lane_bounds (operands[2], 0, 1);
+ emit_move_insn (operands[0], operands[1]);
+ DONE;
+})
+
+(define_expand "aarch64_reinterpretv8qi<mode>"
+ [(match_operand:V8QI 0 "register_operand" "")
+ (match_operand:VDC 1 "register_operand" "")]
+ "TARGET_SIMD"
+{
+ aarch64_simd_reinterpret (operands[0], operands[1]);
+ DONE;
+})
+
+(define_expand "aarch64_reinterpretv4hi<mode>"
+ [(match_operand:V4HI 0 "register_operand" "")
+ (match_operand:VDC 1 "register_operand" "")]
+ "TARGET_SIMD"
+{
+ aarch64_simd_reinterpret (operands[0], operands[1]);
+ DONE;
+})
+
+(define_expand "aarch64_reinterpretv2si<mode>"
+ [(match_operand:V2SI 0 "register_operand" "")
+ (match_operand:VDC 1 "register_operand" "")]
+ "TARGET_SIMD"
+{
+ aarch64_simd_reinterpret (operands[0], operands[1]);
+ DONE;
+})
+
+(define_expand "aarch64_reinterpretv2sf<mode>"
+ [(match_operand:V2SF 0 "register_operand" "")
+ (match_operand:VDC 1 "register_operand" "")]
+ "TARGET_SIMD"
+{
+ aarch64_simd_reinterpret (operands[0], operands[1]);
+ DONE;
+})
+
+(define_expand "aarch64_reinterpretdi<mode>"
+ [(match_operand:DI 0 "register_operand" "")
+ (match_operand:VD_RE 1 "register_operand" "")]
+ "TARGET_SIMD"
+{
+ aarch64_simd_reinterpret (operands[0], operands[1]);
+ DONE;
+})
+
+(define_expand "aarch64_reinterpretv16qi<mode>"
+ [(match_operand:V16QI 0 "register_operand" "")
+ (match_operand:VQ 1 "register_operand" "")]
+ "TARGET_SIMD"
+{
+ aarch64_simd_reinterpret (operands[0], operands[1]);
+ DONE;
+})
+
+(define_expand "aarch64_reinterpretv8hi<mode>"
+ [(match_operand:V8HI 0 "register_operand" "")
+ (match_operand:VQ 1 "register_operand" "")]
+ "TARGET_SIMD"
+{
+ aarch64_simd_reinterpret (operands[0], operands[1]);
+ DONE;
+})
+
+(define_expand "aarch64_reinterpretv4si<mode>"
+ [(match_operand:V4SI 0 "register_operand" "")
+ (match_operand:VQ 1 "register_operand" "")]
+ "TARGET_SIMD"
+{
+ aarch64_simd_reinterpret (operands[0], operands[1]);
+ DONE;
+})
+
+(define_expand "aarch64_reinterpretv4sf<mode>"
+ [(match_operand:V4SF 0 "register_operand" "")
+ (match_operand:VQ 1 "register_operand" "")]
+ "TARGET_SIMD"
+{
+ aarch64_simd_reinterpret (operands[0], operands[1]);
+ DONE;
+})
+
+(define_expand "aarch64_reinterpretv2di<mode>"
+ [(match_operand:V2DI 0 "register_operand" "")
+ (match_operand:VQ 1 "register_operand" "")]
+ "TARGET_SIMD"
+{
+ aarch64_simd_reinterpret (operands[0], operands[1]);
+ DONE;
+})
+
+(define_expand "aarch64_reinterpretv2df<mode>"
+ [(match_operand:V2DF 0 "register_operand" "")
+ (match_operand:VQ 1 "register_operand" "")]
+ "TARGET_SIMD"
+{
+ aarch64_simd_reinterpret (operands[0], operands[1]);
+ DONE;
+})
+
+;; In this insn, operand 1 should be the low part and operand 2 the high
+;; part of the destination vector.
+
+(define_insn "*aarch64_combinez<mode>"
+ [(set (match_operand:<VDBL> 0 "register_operand" "=&w")
+ (vec_concat:<VDBL>
+ (match_operand:VDIC 1 "register_operand" "w")
+ (match_operand:VDIC 2 "aarch64_simd_imm_zero" "Dz")))]
+ "TARGET_SIMD"
+ "mov\\t%0.8b, %1.8b"
+ [(set_attr "type" "neon_move<q>")]
+)
+
+(define_insn_and_split "aarch64_combine<mode>"
+ [(set (match_operand:<VDBL> 0 "register_operand" "=&w")
+ (vec_concat:<VDBL> (match_operand:VDC 1 "register_operand" "w")
+ (match_operand:VDC 2 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+{
+ aarch64_split_simd_combine (operands[0], operands[1], operands[2]);
+ DONE;
+}
+[(set_attr "type" "multiple")]
+)
+
+(define_expand "aarch64_simd_combine<mode>"
+ [(set (match_operand:<VDBL> 0 "register_operand" "=&w")
+ (vec_concat:<VDBL> (match_operand:VDC 1 "register_operand" "w")
+ (match_operand:VDC 2 "register_operand" "w")))]
+ "TARGET_SIMD"
+ {
+ emit_insn (gen_move_lo_quad_<Vdbl> (operands[0], operands[1]));
+ emit_insn (gen_move_hi_quad_<Vdbl> (operands[0], operands[2]));
+ DONE;
+ }
+[(set_attr "type" "multiple")]
+)
+
+;; <su><addsub>l<q>.
+
+(define_insn "aarch64_<ANY_EXTEND:su><ADDSUB:optab>l<mode>_hi_internal"
+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+ (ADDSUB:<VWIDE> (ANY_EXTEND:<VWIDE> (vec_select:<VHALF>
+ (match_operand:VQW 1 "register_operand" "w")
+ (match_operand:VQW 3 "vect_par_cnst_hi_half" "")))
+ (ANY_EXTEND:<VWIDE> (vec_select:<VHALF>
+ (match_operand:VQW 2 "register_operand" "w")
+ (match_dup 3)))))]
+ "TARGET_SIMD"
+ "<ANY_EXTEND:su><ADDSUB:optab>l2\t%0.<Vwtype>, %1.<Vtype>, %2.<Vtype>"
+ [(set_attr "type" "neon_<ADDSUB:optab>_long")]
+)
+
+(define_insn "aarch64_<ANY_EXTEND:su><ADDSUB:optab>l<mode>_lo_internal"
+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+ (ADDSUB:<VWIDE> (ANY_EXTEND:<VWIDE> (vec_select:<VHALF>
+ (match_operand:VQW 1 "register_operand" "w")
+ (match_operand:VQW 3 "vect_par_cnst_lo_half" "")))
+ (ANY_EXTEND:<VWIDE> (vec_select:<VHALF>
+ (match_operand:VQW 2 "register_operand" "w")
+ (match_dup 3)))))]
+ "TARGET_SIMD"
+ "<ANY_EXTEND:su><ADDSUB:optab>l\t%0.<Vwtype>, %1.<Vhalftype>, %2.<Vhalftype>"
+ [(set_attr "type" "neon_<ADDSUB:optab>_long")]
+)
+
+
+(define_expand "aarch64_saddl2<mode>"
+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
+ (match_operand:VQW 1 "register_operand" "w")
+ (match_operand:VQW 2 "register_operand" "w")]
+ "TARGET_SIMD"
+{
+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
+ emit_insn (gen_aarch64_saddl<mode>_hi_internal (operands[0], operands[1],
+ operands[2], p));
+ DONE;
+})
+
+(define_expand "aarch64_uaddl2<mode>"
+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
+ (match_operand:VQW 1 "register_operand" "w")
+ (match_operand:VQW 2 "register_operand" "w")]
+ "TARGET_SIMD"
+{
+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
+ emit_insn (gen_aarch64_uaddl<mode>_hi_internal (operands[0], operands[1],
+ operands[2], p));
+ DONE;
+})
+
+(define_expand "aarch64_ssubl2<mode>"
+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
+ (match_operand:VQW 1 "register_operand" "w")
+ (match_operand:VQW 2 "register_operand" "w")]
+ "TARGET_SIMD"
+{
+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
+ emit_insn (gen_aarch64_ssubl<mode>_hi_internal (operands[0], operands[1],
+ operands[2], p));
+ DONE;
+})
+
+(define_expand "aarch64_usubl2<mode>"
+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
+ (match_operand:VQW 1 "register_operand" "w")
+ (match_operand:VQW 2 "register_operand" "w")]
+ "TARGET_SIMD"
+{
+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
+ emit_insn (gen_aarch64_usubl<mode>_hi_internal (operands[0], operands[1],
+ operands[2], p));
+ DONE;
+})
+
+(define_insn "aarch64_<ANY_EXTEND:su><ADDSUB:optab>l<mode>"
+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+ (ADDSUB:<VWIDE> (ANY_EXTEND:<VWIDE>
+ (match_operand:VDW 1 "register_operand" "w"))
+ (ANY_EXTEND:<VWIDE>
+ (match_operand:VDW 2 "register_operand" "w"))))]
+ "TARGET_SIMD"
+ "<ANY_EXTEND:su><ADDSUB:optab>l %0.<Vwtype>, %1.<Vtype>, %2.<Vtype>"
+ [(set_attr "type" "neon_<ADDSUB:optab>_long")]
+)
+
+;; <su><addsub>w<q>.
+
+(define_insn "aarch64_<ANY_EXTEND:su><ADDSUB:optab>w<mode>"
+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+ (ADDSUB:<VWIDE> (match_operand:<VWIDE> 1 "register_operand" "w")
+ (ANY_EXTEND:<VWIDE>
+ (match_operand:VDW 2 "register_operand" "w"))))]
+ "TARGET_SIMD"
+ "<ANY_EXTEND:su><ADDSUB:optab>w\\t%0.<Vwtype>, %1.<Vwtype>, %2.<Vtype>"
+ [(set_attr "type" "neon_<ADDSUB:optab>_widen")]
+)
+
+(define_insn "aarch64_<ANY_EXTEND:su><ADDSUB:optab>w2<mode>_internal"
+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+ (ADDSUB:<VWIDE> (match_operand:<VWIDE> 1 "register_operand" "w")
+ (ANY_EXTEND:<VWIDE>
+ (vec_select:<VHALF>
+ (match_operand:VQW 2 "register_operand" "w")
+ (match_operand:VQW 3 "vect_par_cnst_hi_half" "")))))]
+ "TARGET_SIMD"
+ "<ANY_EXTEND:su><ADDSUB:optab>w2\\t%0.<Vwtype>, %1.<Vwtype>, %2.<Vtype>"
+ [(set_attr "type" "neon_<ADDSUB:optab>_widen")]
+)
+
+(define_expand "aarch64_saddw2<mode>"
+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
+ (match_operand:<VWIDE> 1 "register_operand" "w")
+ (match_operand:VQW 2 "register_operand" "w")]
+ "TARGET_SIMD"
+{
+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
+ emit_insn (gen_aarch64_saddw2<mode>_internal (operands[0], operands[1],
+ operands[2], p));
+ DONE;
+})
+
+(define_expand "aarch64_uaddw2<mode>"
+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
+ (match_operand:<VWIDE> 1 "register_operand" "w")
+ (match_operand:VQW 2 "register_operand" "w")]
+ "TARGET_SIMD"
+{
+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
+ emit_insn (gen_aarch64_uaddw2<mode>_internal (operands[0], operands[1],
+ operands[2], p));
+ DONE;
+})
+
+
+(define_expand "aarch64_ssubw2<mode>"
+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
+ (match_operand:<VWIDE> 1 "register_operand" "w")
+ (match_operand:VQW 2 "register_operand" "w")]
+ "TARGET_SIMD"
+{
+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
+ emit_insn (gen_aarch64_ssubw2<mode>_internal (operands[0], operands[1],
+ operands[2], p));
+ DONE;
+})
+
+(define_expand "aarch64_usubw2<mode>"
+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
+ (match_operand:<VWIDE> 1 "register_operand" "w")
+ (match_operand:VQW 2 "register_operand" "w")]
+ "TARGET_SIMD"
+{
+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
+ emit_insn (gen_aarch64_usubw2<mode>_internal (operands[0], operands[1],
+ operands[2], p));
+ DONE;
+})
+
+;; <su><r>h<addsub>.
+
+(define_insn "aarch64_<sur>h<addsub><mode>"
+ [(set (match_operand:VQ_S 0 "register_operand" "=w")
+ (unspec:VQ_S [(match_operand:VQ_S 1 "register_operand" "w")
+ (match_operand:VQ_S 2 "register_operand" "w")]
+ HADDSUB))]
+ "TARGET_SIMD"
+ "<sur>h<addsub>\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
+ [(set_attr "type" "neon_<addsub>_halve<q>")]
+)
+
+;; <r><addsub>hn<q>.
+
+(define_insn "aarch64_<sur><addsub>hn<mode>"
+ [(set (match_operand:<VNARROWQ> 0 "register_operand" "=w")
+ (unspec:<VNARROWQ> [(match_operand:VQN 1 "register_operand" "w")
+ (match_operand:VQN 2 "register_operand" "w")]
+ ADDSUBHN))]
+ "TARGET_SIMD"
+ "<sur><addsub>hn\\t%0.<Vntype>, %1.<Vtype>, %2.<Vtype>"
+ [(set_attr "type" "neon_<addsub>_halve_narrow_q")]
+)
+
+(define_insn "aarch64_<sur><addsub>hn2<mode>"
+ [(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
+ (unspec:<VNARROWQ2> [(match_operand:<VNARROWQ> 1 "register_operand" "0")
+ (match_operand:VQN 2 "register_operand" "w")
+ (match_operand:VQN 3 "register_operand" "w")]
+ ADDSUBHN2))]
+ "TARGET_SIMD"
+ "<sur><addsub>hn2\\t%0.<V2ntype>, %2.<Vtype>, %3.<Vtype>"
+ [(set_attr "type" "neon_<addsub>_halve_narrow_q")]
+)
+
+;; pmul.
+
+(define_insn "aarch64_pmul<mode>"
+ [(set (match_operand:VB 0 "register_operand" "=w")
+ (unspec:VB [(match_operand:VB 1 "register_operand" "w")
+ (match_operand:VB 2 "register_operand" "w")]
+ UNSPEC_PMUL))]
+ "TARGET_SIMD"
+ "pmul\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
+ [(set_attr "type" "neon_mul_<Vetype><q>")]
+)
+
+;; <su>q<addsub>
+
+(define_insn "aarch64_<su_optab><optab><mode>"
+ [(set (match_operand:VSDQ_I 0 "register_operand" "=w")
+ (BINQOPS:VSDQ_I (match_operand:VSDQ_I 1 "register_operand" "w")
+ (match_operand:VSDQ_I 2 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "<su_optab><optab>\\t%<v>0<Vmtype>, %<v>1<Vmtype>, %<v>2<Vmtype>"
+ [(set_attr "type" "neon_<optab><q>")]
+)
+
+;; suqadd and usqadd
+
+(define_insn "aarch64_<sur>qadd<mode>"
+ [(set (match_operand:VSDQ_I 0 "register_operand" "=w")
+ (unspec:VSDQ_I [(match_operand:VSDQ_I 1 "register_operand" "0")
+ (match_operand:VSDQ_I 2 "register_operand" "w")]
+ USSUQADD))]
+ "TARGET_SIMD"
+ "<sur>qadd\\t%<v>0<Vmtype>, %<v>2<Vmtype>"
+ [(set_attr "type" "neon_qadd<q>")]
+)
+
+;; sqmovun
+
+(define_insn "aarch64_sqmovun<mode>"
+ [(set (match_operand:<VNARROWQ> 0 "register_operand" "=w")
+ (unspec:<VNARROWQ> [(match_operand:VSQN_HSDI 1 "register_operand" "w")]
+ UNSPEC_SQXTUN))]
+ "TARGET_SIMD"
+ "sqxtun\\t%<vn2>0<Vmntype>, %<v>1<Vmtype>"
+ [(set_attr "type" "neon_sat_shift_imm_narrow_q")]
+ )
+
+;; sqmovn and uqmovn
+
+(define_insn "aarch64_<sur>qmovn<mode>"
+ [(set (match_operand:<VNARROWQ> 0 "register_operand" "=w")
+ (unspec:<VNARROWQ> [(match_operand:VSQN_HSDI 1 "register_operand" "w")]
+ SUQMOVN))]
+ "TARGET_SIMD"
+ "<sur>qxtn\\t%<vn2>0<Vmntype>, %<v>1<Vmtype>"
+ [(set_attr "type" "neon_sat_shift_imm_narrow_q")]
+ )
+
+;; <su>q<absneg>
+
+(define_insn "aarch64_s<optab><mode>"
+ [(set (match_operand:VSDQ_I_BHSI 0 "register_operand" "=w")
+ (UNQOPS:VSDQ_I_BHSI
+ (match_operand:VSDQ_I_BHSI 1 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "s<optab>\\t%<v>0<Vmtype>, %<v>1<Vmtype>"
+ [(set_attr "type" "neon_<optab><q>")]
+)
+
+;; sq<r>dmulh.
+
+(define_insn "aarch64_sq<r>dmulh<mode>"
+ [(set (match_operand:VSDQ_HSI 0 "register_operand" "=w")
+ (unspec:VSDQ_HSI
+ [(match_operand:VSDQ_HSI 1 "register_operand" "w")
+ (match_operand:VSDQ_HSI 2 "register_operand" "w")]
+ VQDMULH))]
+ "TARGET_SIMD"
+ "sq<r>dmulh\\t%<v>0<Vmtype>, %<v>1<Vmtype>, %<v>2<Vmtype>"
+ [(set_attr "type" "neon_sat_mul_<Vetype><q>")]
+)
+
+;; sq<r>dmulh_lane
+
+(define_insn "aarch64_sq<r>dmulh_lane<mode>"
+ [(set (match_operand:VDQHS 0 "register_operand" "=w")
+ (unspec:VDQHS
+ [(match_operand:VDQHS 1 "register_operand" "w")
+ (vec_select:<VEL>
+ (match_operand:<VCOND> 2 "register_operand" "<vwx>")
+ (parallel [(match_operand:SI 3 "immediate_operand" "i")]))]
+ VQDMULH))]
+ "TARGET_SIMD"
+ "*
+ aarch64_simd_lane_bounds (operands[3], 0, GET_MODE_NUNITS (<VCOND>mode));
+ operands[3] = GEN_INT (ENDIAN_LANE_N (<VCOND>mode, INTVAL (operands[3])));
+ return \"sq<r>dmulh\\t%0.<Vtype>, %1.<Vtype>, %2.<Vetype>[%3]\";"
+ [(set_attr "type" "neon_sat_mul_<Vetype>_scalar<q>")]
+)
+
+(define_insn "aarch64_sq<r>dmulh_laneq<mode>"
+ [(set (match_operand:VDQHS 0 "register_operand" "=w")
+ (unspec:VDQHS
+ [(match_operand:VDQHS 1 "register_operand" "w")
+ (vec_select:<VEL>
+ (match_operand:<VCONQ> 2 "register_operand" "<vwx>")
+ (parallel [(match_operand:SI 3 "immediate_operand" "i")]))]
+ VQDMULH))]
+ "TARGET_SIMD"
+ "*
+ aarch64_simd_lane_bounds (operands[3], 0, GET_MODE_NUNITS (<VCONQ>mode));
+ operands[3] = GEN_INT (ENDIAN_LANE_N (<VCONQ>mode, INTVAL (operands[3])));
+ return \"sq<r>dmulh\\t%0.<Vtype>, %1.<Vtype>, %2.<Vetype>[%3]\";"
+ [(set_attr "type" "neon_sat_mul_<Vetype>_scalar<q>")]
+)
+
+(define_insn "aarch64_sq<r>dmulh_lane<mode>"
+ [(set (match_operand:SD_HSI 0 "register_operand" "=w")
+ (unspec:SD_HSI
+ [(match_operand:SD_HSI 1 "register_operand" "w")
+ (vec_select:<VEL>
+ (match_operand:<VCONQ> 2 "register_operand" "<vwx>")
+ (parallel [(match_operand:SI 3 "immediate_operand" "i")]))]
+ VQDMULH))]
+ "TARGET_SIMD"
+ "*
+ aarch64_simd_lane_bounds (operands[3], 0, GET_MODE_NUNITS (<VCONQ>mode));
+ operands[3] = GEN_INT (ENDIAN_LANE_N (<VCONQ>mode, INTVAL (operands[3])));
+ return \"sq<r>dmulh\\t%<v>0, %<v>1, %2.<v>[%3]\";"
+ [(set_attr "type" "neon_sat_mul_<Vetype>_scalar<q>")]
+)
+
+;; vqdml[sa]l
+
+(define_insn "aarch64_sqdml<SBINQOPS:as>l<mode>"
+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+ (SBINQOPS:<VWIDE>
+ (match_operand:<VWIDE> 1 "register_operand" "0")
+ (ss_ashift:<VWIDE>
+ (mult:<VWIDE>
+ (sign_extend:<VWIDE>
+ (match_operand:VSD_HSI 2 "register_operand" "w"))
+ (sign_extend:<VWIDE>
+ (match_operand:VSD_HSI 3 "register_operand" "w")))
+ (const_int 1))))]
+ "TARGET_SIMD"
+ "sqdml<SBINQOPS:as>l\\t%<vw2>0<Vmwtype>, %<v>2<Vmtype>, %<v>3<Vmtype>"
+ [(set_attr "type" "neon_sat_mla_<Vetype>_long")]
+)
+
+;; vqdml[sa]l_lane
+
+(define_insn "aarch64_sqdml<SBINQOPS:as>l_lane<mode>_internal"
+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+ (SBINQOPS:<VWIDE>
+ (match_operand:<VWIDE> 1 "register_operand" "0")
+ (ss_ashift:<VWIDE>
+ (mult:<VWIDE>
+ (sign_extend:<VWIDE>
+ (match_operand:VD_HSI 2 "register_operand" "w"))
+ (sign_extend:<VWIDE>
+ (vec_duplicate:VD_HSI
+ (vec_select:<VEL>
+ (match_operand:<VCON> 3 "register_operand" "<vwx>")
+ (parallel [(match_operand:SI 4 "immediate_operand" "i")])))
+ ))
+ (const_int 1))))]
+ "TARGET_SIMD"
+ {
+ operands[4] = GEN_INT (ENDIAN_LANE_N (<VCONQ>mode, INTVAL (operands[4])));
+ return
+ "sqdml<SBINQOPS:as>l\\t%<vw2>0<Vmwtype>, %<v>2<Vmtype>, %3.<Vetype>[%4]";
+ }
+ [(set_attr "type" "neon_sat_mla_<Vetype>_scalar_long")]
+)
+
+(define_insn "aarch64_sqdml<SBINQOPS:as>l_lane<mode>_internal"
+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+ (SBINQOPS:<VWIDE>
+ (match_operand:<VWIDE> 1 "register_operand" "0")
+ (ss_ashift:<VWIDE>
+ (mult:<VWIDE>
+ (sign_extend:<VWIDE>
+ (match_operand:SD_HSI 2 "register_operand" "w"))
+ (sign_extend:<VWIDE>
+ (vec_select:<VEL>
+ (match_operand:<VCON> 3 "register_operand" "<vwx>")
+ (parallel [(match_operand:SI 4 "immediate_operand" "i")])))
+ )
+ (const_int 1))))]
+ "TARGET_SIMD"
+ {
+ operands[4] = GEN_INT (ENDIAN_LANE_N (<VCONQ>mode, INTVAL (operands[4])));
+ return
+ "sqdml<SBINQOPS:as>l\\t%<vw2>0<Vmwtype>, %<v>2<Vmtype>, %3.<Vetype>[%4]";
+ }
+ [(set_attr "type" "neon_sat_mla_<Vetype>_scalar_long")]
+)
+
+(define_expand "aarch64_sqdmlal_lane<mode>"
+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
+ (match_operand:<VWIDE> 1 "register_operand" "0")
+ (match_operand:VSD_HSI 2 "register_operand" "w")
+ (match_operand:<VCON> 3 "register_operand" "<vwx>")
+ (match_operand:SI 4 "immediate_operand" "i")]
+ "TARGET_SIMD"
+{
+ aarch64_simd_lane_bounds (operands[4], 0, GET_MODE_NUNITS (<VCON>mode) / 2);
+ emit_insn (gen_aarch64_sqdmlal_lane<mode>_internal (operands[0], operands[1],
+ operands[2], operands[3],
+ operands[4]));
+ DONE;
+})
+
+(define_expand "aarch64_sqdmlal_laneq<mode>"
+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
+ (match_operand:<VWIDE> 1 "register_operand" "0")
+ (match_operand:VSD_HSI 2 "register_operand" "w")
+ (match_operand:<VCON> 3 "register_operand" "<vwx>")
+ (match_operand:SI 4 "immediate_operand" "i")]
+ "TARGET_SIMD"
+{
+ aarch64_simd_lane_bounds (operands[4], 0, GET_MODE_NUNITS (<VCON>mode));
+ emit_insn (gen_aarch64_sqdmlal_lane<mode>_internal (operands[0], operands[1],
+ operands[2], operands[3],
+ operands[4]));
+ DONE;
+})
+
+(define_expand "aarch64_sqdmlsl_lane<mode>"
+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
+ (match_operand:<VWIDE> 1 "register_operand" "0")
+ (match_operand:VSD_HSI 2 "register_operand" "w")
+ (match_operand:<VCON> 3 "register_operand" "<vwx>")
+ (match_operand:SI 4 "immediate_operand" "i")]
+ "TARGET_SIMD"
+{
+ aarch64_simd_lane_bounds (operands[4], 0, GET_MODE_NUNITS (<VCON>mode) / 2);
+ emit_insn (gen_aarch64_sqdmlsl_lane<mode>_internal (operands[0], operands[1],
+ operands[2], operands[3],
+ operands[4]));
+ DONE;
+})
+
+(define_expand "aarch64_sqdmlsl_laneq<mode>"
+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
+ (match_operand:<VWIDE> 1 "register_operand" "0")
+ (match_operand:VSD_HSI 2 "register_operand" "w")
+ (match_operand:<VCON> 3 "register_operand" "<vwx>")
+ (match_operand:SI 4 "immediate_operand" "i")]
+ "TARGET_SIMD"
+{
+ aarch64_simd_lane_bounds (operands[4], 0, GET_MODE_NUNITS (<VCON>mode));
+ emit_insn (gen_aarch64_sqdmlsl_lane<mode>_internal (operands[0], operands[1],
+ operands[2], operands[3],
+ operands[4]));
+ DONE;
+})
+
+;; vqdml[sa]l_n
+
+(define_insn "aarch64_sqdml<SBINQOPS:as>l_n<mode>"
+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+ (SBINQOPS:<VWIDE>
+ (match_operand:<VWIDE> 1 "register_operand" "0")
+ (ss_ashift:<VWIDE>
+ (mult:<VWIDE>
+ (sign_extend:<VWIDE>
+ (match_operand:VD_HSI 2 "register_operand" "w"))
+ (sign_extend:<VWIDE>
+ (vec_duplicate:VD_HSI
+ (match_operand:<VEL> 3 "register_operand" "<vwx>"))))
+ (const_int 1))))]
+ "TARGET_SIMD"
+ "sqdml<SBINQOPS:as>l\\t%<vw2>0<Vmwtype>, %<v>2<Vmtype>, %3.<Vetype>[0]"
+ [(set_attr "type" "neon_sat_mla_<Vetype>_scalar_long")]
+)
+
+;; sqdml[as]l2
+
+(define_insn "aarch64_sqdml<SBINQOPS:as>l2<mode>_internal"
+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+ (SBINQOPS:<VWIDE>
+ (match_operand:<VWIDE> 1 "register_operand" "0")
+ (ss_ashift:<VWIDE>
+ (mult:<VWIDE>
+ (sign_extend:<VWIDE>
+ (vec_select:<VHALF>
+ (match_operand:VQ_HSI 2 "register_operand" "w")
+ (match_operand:VQ_HSI 4 "vect_par_cnst_hi_half" "")))
+ (sign_extend:<VWIDE>
+ (vec_select:<VHALF>
+ (match_operand:VQ_HSI 3 "register_operand" "w")
+ (match_dup 4))))
+ (const_int 1))))]
+ "TARGET_SIMD"
+ "sqdml<SBINQOPS:as>l2\\t%<vw2>0<Vmwtype>, %<v>2<Vmtype>, %<v>3<Vmtype>"
+ [(set_attr "type" "neon_sat_mla_<Vetype>_scalar_long")]
+)
+
+(define_expand "aarch64_sqdmlal2<mode>"
+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
+ (match_operand:<VWIDE> 1 "register_operand" "w")
+ (match_operand:VQ_HSI 2 "register_operand" "w")
+ (match_operand:VQ_HSI 3 "register_operand" "w")]
+ "TARGET_SIMD"
+{
+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
+ emit_insn (gen_aarch64_sqdmlal2<mode>_internal (operands[0], operands[1],
+ operands[2], operands[3], p));
+ DONE;
+})
+
+(define_expand "aarch64_sqdmlsl2<mode>"
+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
+ (match_operand:<VWIDE> 1 "register_operand" "w")
+ (match_operand:VQ_HSI 2 "register_operand" "w")
+ (match_operand:VQ_HSI 3 "register_operand" "w")]
+ "TARGET_SIMD"
+{
+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
+ emit_insn (gen_aarch64_sqdmlsl2<mode>_internal (operands[0], operands[1],
+ operands[2], operands[3], p));
+ DONE;
+})
+
+;; vqdml[sa]l2_lane
+
+(define_insn "aarch64_sqdml<SBINQOPS:as>l2_lane<mode>_internal"
+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+ (SBINQOPS:<VWIDE>
+ (match_operand:<VWIDE> 1 "register_operand" "0")
+ (ss_ashift:<VWIDE>
+ (mult:<VWIDE>
+ (sign_extend:<VWIDE>
+ (vec_select:<VHALF>
+ (match_operand:VQ_HSI 2 "register_operand" "w")
+ (match_operand:VQ_HSI 5 "vect_par_cnst_hi_half" "")))
+ (sign_extend:<VWIDE>
+ (vec_duplicate:<VHALF>
+ (vec_select:<VEL>
+ (match_operand:<VCON> 3 "register_operand" "<vwx>")
+ (parallel [(match_operand:SI 4 "immediate_operand" "i")])
+ ))))
+ (const_int 1))))]
+ "TARGET_SIMD"
+ {
+ operands[4] = GEN_INT (ENDIAN_LANE_N (<VCONQ>mode, INTVAL (operands[4])));
+ return
+ "sqdml<SBINQOPS:as>l2\\t%<vw2>0<Vmwtype>, %<v>2<Vmtype>, %3.<Vetype>[%4]";
+ }
+ [(set_attr "type" "neon_sat_mla_<Vetype>_scalar_long")]
+)
+
+(define_expand "aarch64_sqdmlal2_lane<mode>"
+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
+ (match_operand:<VWIDE> 1 "register_operand" "w")
+ (match_operand:VQ_HSI 2 "register_operand" "w")
+ (match_operand:<VCON> 3 "register_operand" "<vwx>")
+ (match_operand:SI 4 "immediate_operand" "i")]
+ "TARGET_SIMD"
+{
+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
+ aarch64_simd_lane_bounds (operands[4], 0, GET_MODE_NUNITS (<MODE>mode) / 2);
+ emit_insn (gen_aarch64_sqdmlal2_lane<mode>_internal (operands[0], operands[1],
+ operands[2], operands[3],
+ operands[4], p));
+ DONE;
+})
+
+(define_expand "aarch64_sqdmlal2_laneq<mode>"
+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
+ (match_operand:<VWIDE> 1 "register_operand" "w")
+ (match_operand:VQ_HSI 2 "register_operand" "w")
+ (match_operand:<VCON> 3 "register_operand" "<vwx>")
+ (match_operand:SI 4 "immediate_operand" "i")]
+ "TARGET_SIMD"
+{
+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
+ aarch64_simd_lane_bounds (operands[4], 0, GET_MODE_NUNITS (<MODE>mode));
+ emit_insn (gen_aarch64_sqdmlal2_lane<mode>_internal (operands[0], operands[1],
+ operands[2], operands[3],
+ operands[4], p));
+ DONE;
+})
+
+(define_expand "aarch64_sqdmlsl2_lane<mode>"
+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
+ (match_operand:<VWIDE> 1 "register_operand" "w")
+ (match_operand:VQ_HSI 2 "register_operand" "w")
+ (match_operand:<VCON> 3 "register_operand" "<vwx>")
+ (match_operand:SI 4 "immediate_operand" "i")]
+ "TARGET_SIMD"
+{
+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
+ aarch64_simd_lane_bounds (operands[4], 0, GET_MODE_NUNITS (<MODE>mode) / 2);
+ emit_insn (gen_aarch64_sqdmlsl2_lane<mode>_internal (operands[0], operands[1],
+ operands[2], operands[3],
+ operands[4], p));
+ DONE;
+})
+
+(define_expand "aarch64_sqdmlsl2_laneq<mode>"
+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
+ (match_operand:<VWIDE> 1 "register_operand" "w")
+ (match_operand:VQ_HSI 2 "register_operand" "w")
+ (match_operand:<VCON> 3 "register_operand" "<vwx>")
+ (match_operand:SI 4 "immediate_operand" "i")]
+ "TARGET_SIMD"
+{
+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
+ aarch64_simd_lane_bounds (operands[4], 0, GET_MODE_NUNITS (<MODE>mode));
+ emit_insn (gen_aarch64_sqdmlsl2_lane<mode>_internal (operands[0], operands[1],
+ operands[2], operands[3],
+ operands[4], p));
+ DONE;
+})
+
+(define_insn "aarch64_sqdml<SBINQOPS:as>l2_n<mode>_internal"
+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+ (SBINQOPS:<VWIDE>
+ (match_operand:<VWIDE> 1 "register_operand" "0")
+ (ss_ashift:<VWIDE>
+ (mult:<VWIDE>
+ (sign_extend:<VWIDE>
+ (vec_select:<VHALF>
+ (match_operand:VQ_HSI 2 "register_operand" "w")
+ (match_operand:VQ_HSI 4 "vect_par_cnst_hi_half" "")))
+ (sign_extend:<VWIDE>
+ (vec_duplicate:<VHALF>
+ (match_operand:<VEL> 3 "register_operand" "<vwx>"))))
+ (const_int 1))))]
+ "TARGET_SIMD"
+ "sqdml<SBINQOPS:as>l2\\t%<vw2>0<Vmwtype>, %<v>2<Vmtype>, %3.<Vetype>[0]"
+ [(set_attr "type" "neon_sat_mla_<Vetype>_scalar_long")]
+)
+
+(define_expand "aarch64_sqdmlal2_n<mode>"
+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
+ (match_operand:<VWIDE> 1 "register_operand" "w")
+ (match_operand:VQ_HSI 2 "register_operand" "w")
+ (match_operand:<VEL> 3 "register_operand" "w")]
+ "TARGET_SIMD"
+{
+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
+ emit_insn (gen_aarch64_sqdmlal2_n<mode>_internal (operands[0], operands[1],
+ operands[2], operands[3],
+ p));
+ DONE;
+})
+
+(define_expand "aarch64_sqdmlsl2_n<mode>"
+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
+ (match_operand:<VWIDE> 1 "register_operand" "w")
+ (match_operand:VQ_HSI 2 "register_operand" "w")
+ (match_operand:<VEL> 3 "register_operand" "w")]
+ "TARGET_SIMD"
+{
+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
+ emit_insn (gen_aarch64_sqdmlsl2_n<mode>_internal (operands[0], operands[1],
+ operands[2], operands[3],
+ p));
+ DONE;
+})
+
+;; vqdmull
+
+(define_insn "aarch64_sqdmull<mode>"
+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+ (ss_ashift:<VWIDE>
+ (mult:<VWIDE>
+ (sign_extend:<VWIDE>
+ (match_operand:VSD_HSI 1 "register_operand" "w"))
+ (sign_extend:<VWIDE>
+ (match_operand:VSD_HSI 2 "register_operand" "w")))
+ (const_int 1)))]
+ "TARGET_SIMD"
+ "sqdmull\\t%<vw2>0<Vmwtype>, %<v>1<Vmtype>, %<v>2<Vmtype>"
+ [(set_attr "type" "neon_sat_mul_<Vetype>_long")]
+)
+
+;; vqdmull_lane
+
+(define_insn "aarch64_sqdmull_lane<mode>_internal"
+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+ (ss_ashift:<VWIDE>
+ (mult:<VWIDE>
+ (sign_extend:<VWIDE>
+ (match_operand:VD_HSI 1 "register_operand" "w"))
+ (sign_extend:<VWIDE>
+ (vec_duplicate:VD_HSI
+ (vec_select:<VEL>
+ (match_operand:<VCON> 2 "register_operand" "<vwx>")
+ (parallel [(match_operand:SI 3 "immediate_operand" "i")])))
+ ))
+ (const_int 1)))]
+ "TARGET_SIMD"
+ {
+ operands[3] = GEN_INT (ENDIAN_LANE_N (<VCONQ>mode, INTVAL (operands[3])));
+ return "sqdmull\\t%<vw2>0<Vmwtype>, %<v>1<Vmtype>, %2.<Vetype>[%3]";
+ }
+ [(set_attr "type" "neon_sat_mul_<Vetype>_scalar_long")]
+)
+
+(define_insn "aarch64_sqdmull_lane<mode>_internal"
+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+ (ss_ashift:<VWIDE>
+ (mult:<VWIDE>
+ (sign_extend:<VWIDE>
+ (match_operand:SD_HSI 1 "register_operand" "w"))
+ (sign_extend:<VWIDE>
+ (vec_select:<VEL>
+ (match_operand:<VCON> 2 "register_operand" "<vwx>")
+ (parallel [(match_operand:SI 3 "immediate_operand" "i")]))
+ ))
+ (const_int 1)))]
+ "TARGET_SIMD"
+ {
+ operands[3] = GEN_INT (ENDIAN_LANE_N (<VCONQ>mode, INTVAL (operands[3])));
+ return "sqdmull\\t%<vw2>0<Vmwtype>, %<v>1<Vmtype>, %2.<Vetype>[%3]";
+ }
+ [(set_attr "type" "neon_sat_mul_<Vetype>_scalar_long")]
+)
+
+(define_expand "aarch64_sqdmull_lane<mode>"
+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
+ (match_operand:VSD_HSI 1 "register_operand" "w")
+ (match_operand:<VCON> 2 "register_operand" "<vwx>")
+ (match_operand:SI 3 "immediate_operand" "i")]
+ "TARGET_SIMD"
+{
+ aarch64_simd_lane_bounds (operands[3], 0, GET_MODE_NUNITS (<VCON>mode) / 2);
+ emit_insn (gen_aarch64_sqdmull_lane<mode>_internal (operands[0], operands[1],
+ operands[2], operands[3]));
+ DONE;
+})
+
+(define_expand "aarch64_sqdmull_laneq<mode>"
+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
+ (match_operand:VD_HSI 1 "register_operand" "w")
+ (match_operand:<VCON> 2 "register_operand" "<vwx>")
+ (match_operand:SI 3 "immediate_operand" "i")]
+ "TARGET_SIMD"
+{
+ aarch64_simd_lane_bounds (operands[3], 0, GET_MODE_NUNITS (<VCON>mode));
+ emit_insn (gen_aarch64_sqdmull_lane<mode>_internal
+ (operands[0], operands[1], operands[2], operands[3]));
+ DONE;
+})
+
+;; vqdmull_n
+
+(define_insn "aarch64_sqdmull_n<mode>"
+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+ (ss_ashift:<VWIDE>
+ (mult:<VWIDE>
+ (sign_extend:<VWIDE>
+ (match_operand:VD_HSI 1 "register_operand" "w"))
+ (sign_extend:<VWIDE>
+ (vec_duplicate:VD_HSI
+ (match_operand:<VEL> 2 "register_operand" "<vwx>")))
+ )
+ (const_int 1)))]
+ "TARGET_SIMD"
+ "sqdmull\\t%<vw2>0<Vmwtype>, %<v>1<Vmtype>, %2.<Vetype>[0]"
+ [(set_attr "type" "neon_sat_mul_<Vetype>_scalar_long")]
+)
+
+;; vqdmull2
+
+(define_insn "aarch64_sqdmull2<mode>_internal"
+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+ (ss_ashift:<VWIDE>
+ (mult:<VWIDE>
+ (sign_extend:<VWIDE>
+ (vec_select:<VHALF>
+ (match_operand:VQ_HSI 1 "register_operand" "w")
+ (match_operand:VQ_HSI 3 "vect_par_cnst_hi_half" "")))
+ (sign_extend:<VWIDE>
+ (vec_select:<VHALF>
+ (match_operand:VQ_HSI 2 "register_operand" "w")
+ (match_dup 3)))
+ )
+ (const_int 1)))]
+ "TARGET_SIMD"
+ "sqdmull2\\t%<vw2>0<Vmwtype>, %<v>1<Vmtype>, %<v>2<Vmtype>"
+ [(set_attr "type" "neon_sat_mul_<Vetype>_scalar_long")]
+)
+
+(define_expand "aarch64_sqdmull2<mode>"
+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
+ (match_operand:VQ_HSI 1 "register_operand" "w")
+ (match_operand:<VCON> 2 "register_operand" "w")]
+ "TARGET_SIMD"
+{
+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
+ emit_insn (gen_aarch64_sqdmull2<mode>_internal (operands[0], operands[1],
+ operands[2], p));
+ DONE;
+})
+
+;; vqdmull2_lane
+
+(define_insn "aarch64_sqdmull2_lane<mode>_internal"
+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+ (ss_ashift:<VWIDE>
+ (mult:<VWIDE>
+ (sign_extend:<VWIDE>
+ (vec_select:<VHALF>
+ (match_operand:VQ_HSI 1 "register_operand" "w")
+ (match_operand:VQ_HSI 4 "vect_par_cnst_hi_half" "")))
+ (sign_extend:<VWIDE>
+ (vec_duplicate:<VHALF>
+ (vec_select:<VEL>
+ (match_operand:<VCON> 2 "register_operand" "<vwx>")
+ (parallel [(match_operand:SI 3 "immediate_operand" "i")])))
+ ))
+ (const_int 1)))]
+ "TARGET_SIMD"
+ {
+ operands[3] = GEN_INT (ENDIAN_LANE_N (<VCONQ>mode, INTVAL (operands[3])));
+ return "sqdmull2\\t%<vw2>0<Vmwtype>, %<v>1<Vmtype>, %2.<Vetype>[%3]";
+ }
+ [(set_attr "type" "neon_sat_mul_<Vetype>_scalar_long")]
+)
+
+(define_expand "aarch64_sqdmull2_lane<mode>"
+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
+ (match_operand:VQ_HSI 1 "register_operand" "w")
+ (match_operand:<VCON> 2 "register_operand" "<vwx>")
+ (match_operand:SI 3 "immediate_operand" "i")]
+ "TARGET_SIMD"
+{
+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
+ aarch64_simd_lane_bounds (operands[3], 0, GET_MODE_NUNITS (<MODE>mode) / 2);
+ emit_insn (gen_aarch64_sqdmull2_lane<mode>_internal (operands[0], operands[1],
+ operands[2], operands[3],
+ p));
+ DONE;
+})
+
+(define_expand "aarch64_sqdmull2_laneq<mode>"
+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
+ (match_operand:VQ_HSI 1 "register_operand" "w")
+ (match_operand:<VCON> 2 "register_operand" "<vwx>")
+ (match_operand:SI 3 "immediate_operand" "i")]
+ "TARGET_SIMD"
+{
+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
+ aarch64_simd_lane_bounds (operands[3], 0, GET_MODE_NUNITS (<MODE>mode));
+ emit_insn (gen_aarch64_sqdmull2_lane<mode>_internal (operands[0], operands[1],
+ operands[2], operands[3],
+ p));
+ DONE;
+})
+
+;; vqdmull2_n
+
+(define_insn "aarch64_sqdmull2_n<mode>_internal"
+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+ (ss_ashift:<VWIDE>
+ (mult:<VWIDE>
+ (sign_extend:<VWIDE>
+ (vec_select:<VHALF>
+ (match_operand:VQ_HSI 1 "register_operand" "w")
+ (match_operand:VQ_HSI 3 "vect_par_cnst_hi_half" "")))
+ (sign_extend:<VWIDE>
+ (vec_duplicate:<VHALF>
+ (match_operand:<VEL> 2 "register_operand" "<vwx>")))
+ )
+ (const_int 1)))]
+ "TARGET_SIMD"
+ "sqdmull2\\t%<vw2>0<Vmwtype>, %<v>1<Vmtype>, %2.<Vetype>[0]"
+ [(set_attr "type" "neon_sat_mul_<Vetype>_scalar_long")]
+)
+
+(define_expand "aarch64_sqdmull2_n<mode>"
+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
+ (match_operand:VQ_HSI 1 "register_operand" "w")
+ (match_operand:<VEL> 2 "register_operand" "w")]
+ "TARGET_SIMD"
+{
+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
+ emit_insn (gen_aarch64_sqdmull2_n<mode>_internal (operands[0], operands[1],
+ operands[2], p));
+ DONE;
+})
+
+;; vshl
+
+(define_insn "aarch64_<sur>shl<mode>"
+ [(set (match_operand:VSDQ_I_DI 0 "register_operand" "=w")
+ (unspec:VSDQ_I_DI
+ [(match_operand:VSDQ_I_DI 1 "register_operand" "w")
+ (match_operand:VSDQ_I_DI 2 "register_operand" "w")]
+ VSHL))]
+ "TARGET_SIMD"
+ "<sur>shl\\t%<v>0<Vmtype>, %<v>1<Vmtype>, %<v>2<Vmtype>"
+ [(set_attr "type" "neon_shift_reg<q>")]
+)
+
+
+;; vqshl
+
+(define_insn "aarch64_<sur>q<r>shl<mode>"
+ [(set (match_operand:VSDQ_I 0 "register_operand" "=w")
+ (unspec:VSDQ_I
+ [(match_operand:VSDQ_I 1 "register_operand" "w")
+ (match_operand:VSDQ_I 2 "register_operand" "w")]
+ VQSHL))]
+ "TARGET_SIMD"
+ "<sur>q<r>shl\\t%<v>0<Vmtype>, %<v>1<Vmtype>, %<v>2<Vmtype>"
+ [(set_attr "type" "neon_sat_shift_reg<q>")]
+)
+
+;; vshll_n
+
+(define_insn "aarch64_<sur>shll_n<mode>"
+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+ (unspec:<VWIDE> [(match_operand:VDW 1 "register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "i")]
+ VSHLL))]
+ "TARGET_SIMD"
+ "*
+ int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
+ aarch64_simd_const_bounds (operands[2], 0, bit_width + 1);
+ if (INTVAL (operands[2]) == bit_width)
+ {
+ return \"shll\\t%0.<Vwtype>, %1.<Vtype>, %2\";
+ }
+ else {
+ return \"<sur>shll\\t%0.<Vwtype>, %1.<Vtype>, %2\";
+ }"
+ [(set_attr "type" "neon_shift_imm_long")]
+)
+
+;; vshll_high_n
+
+(define_insn "aarch64_<sur>shll2_n<mode>"
+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+ (unspec:<VWIDE> [(match_operand:VQW 1 "register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "i")]
+ VSHLL))]
+ "TARGET_SIMD"
+ "*
+ int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
+ aarch64_simd_const_bounds (operands[2], 0, bit_width + 1);
+ if (INTVAL (operands[2]) == bit_width)
+ {
+ return \"shll2\\t%0.<Vwtype>, %1.<Vtype>, %2\";
+ }
+ else {
+ return \"<sur>shll2\\t%0.<Vwtype>, %1.<Vtype>, %2\";
+ }"
+ [(set_attr "type" "neon_shift_imm_long")]
+)
+
+;; vrshr_n
+
+(define_insn "aarch64_<sur>shr_n<mode>"
+ [(set (match_operand:VSDQ_I_DI 0 "register_operand" "=w")
+ (unspec:VSDQ_I_DI [(match_operand:VSDQ_I_DI 1 "register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "i")]
+ VRSHR_N))]
+ "TARGET_SIMD"
+ "*
+ int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
+ aarch64_simd_const_bounds (operands[2], 1, bit_width + 1);
+ return \"<sur>shr\\t%<v>0<Vmtype>, %<v>1<Vmtype>, %2\";"
+ [(set_attr "type" "neon_sat_shift_imm<q>")]
+)
+
+;; v(r)sra_n
+
+(define_insn "aarch64_<sur>sra_n<mode>"
+ [(set (match_operand:VSDQ_I_DI 0 "register_operand" "=w")
+ (unspec:VSDQ_I_DI [(match_operand:VSDQ_I_DI 1 "register_operand" "0")
+ (match_operand:VSDQ_I_DI 2 "register_operand" "w")
+ (match_operand:SI 3 "immediate_operand" "i")]
+ VSRA))]
+ "TARGET_SIMD"
+ "*
+ int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
+ aarch64_simd_const_bounds (operands[3], 1, bit_width + 1);
+ return \"<sur>sra\\t%<v>0<Vmtype>, %<v>2<Vmtype>, %3\";"
+ [(set_attr "type" "neon_shift_acc<q>")]
+)
+
+;; vs<lr>i_n
+
+(define_insn "aarch64_<sur>s<lr>i_n<mode>"
+ [(set (match_operand:VSDQ_I_DI 0 "register_operand" "=w")
+ (unspec:VSDQ_I_DI [(match_operand:VSDQ_I_DI 1 "register_operand" "0")
+ (match_operand:VSDQ_I_DI 2 "register_operand" "w")
+ (match_operand:SI 3 "immediate_operand" "i")]
+ VSLRI))]
+ "TARGET_SIMD"
+ "*
+ int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
+ aarch64_simd_const_bounds (operands[3], 1 - <VSLRI:offsetlr>,
+ bit_width - <VSLRI:offsetlr> + 1);
+ return \"s<lr>i\\t%<v>0<Vmtype>, %<v>2<Vmtype>, %3\";"
+ [(set_attr "type" "neon_shift_imm<q>")]
+)
+
+;; vqshl(u)
+
+(define_insn "aarch64_<sur>qshl<u>_n<mode>"
+ [(set (match_operand:VSDQ_I 0 "register_operand" "=w")
+ (unspec:VSDQ_I [(match_operand:VSDQ_I 1 "register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "i")]
+ VQSHL_N))]
+ "TARGET_SIMD"
+ "*
+ int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
+ aarch64_simd_const_bounds (operands[2], 0, bit_width);
+ return \"<sur>qshl<u>\\t%<v>0<Vmtype>, %<v>1<Vmtype>, %2\";"
+ [(set_attr "type" "neon_sat_shift_imm<q>")]
+)
+
+
+;; vq(r)shr(u)n_n
+
+(define_insn "aarch64_<sur>q<r>shr<u>n_n<mode>"
+ [(set (match_operand:<VNARROWQ> 0 "register_operand" "=w")
+ (unspec:<VNARROWQ> [(match_operand:VSQN_HSDI 1 "register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "i")]
+ VQSHRN_N))]
+ "TARGET_SIMD"
+ "*
+ int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
+ aarch64_simd_const_bounds (operands[2], 1, bit_width + 1);
+ return \"<sur>q<r>shr<u>n\\t%<vn2>0<Vmntype>, %<v>1<Vmtype>, %2\";"
+ [(set_attr "type" "neon_sat_shift_imm_narrow_q")]
+)
+
+
+;; cm(eq|ge|gt|lt|le)
+;; Note, we have constraints for Dz and Z as different expanders
+;; have different ideas of what should be passed to this pattern.
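+;; (Assuming the definitions in constraints.md: "Z" accepts the integer
+;; constant zero and "Dz" a vector of zeros, so the second alternative
+;; selects the compare-against-#0 form of the instruction.)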
+
+(define_insn "aarch64_cm<optab><mode>"
+ [(set (match_operand:<V_cmp_result> 0 "register_operand" "=w,w")
+ (neg:<V_cmp_result>
+ (COMPARISONS:<V_cmp_result>
+ (match_operand:VDQ 1 "register_operand" "w,w")
+ (match_operand:VDQ 2 "aarch64_simd_reg_or_zero" "w,ZDz")
+ )))]
+ "TARGET_SIMD"
+ "@
+ cm<n_optab>\t%<v>0<Vmtype>, %<v><cmp_1><Vmtype>, %<v><cmp_2><Vmtype>
+ cm<optab>\t%<v>0<Vmtype>, %<v>1<Vmtype>, #0"
+ [(set_attr "type" "neon_compare<q>, neon_compare_zero<q>")]
+)
+
+(define_insn_and_split "aarch64_cm<optab>di"
+ [(set (match_operand:DI 0 "register_operand" "=w,w,r")
+ (neg:DI
+ (COMPARISONS:DI
+ (match_operand:DI 1 "register_operand" "w,w,r")
+ (match_operand:DI 2 "aarch64_simd_reg_or_zero" "w,ZDz,r")
+ )))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_SIMD"
+ "@
+ cm<n_optab>\t%d0, %d<cmp_1>, %d<cmp_2>
+ cm<optab>\t%d0, %d1, #0
+ #"
+ "reload_completed
+ /* We need to prevent the split from
+ happening in the 'w' constraint cases. */
+ && GP_REGNUM_P (REGNO (operands[0]))
+ && GP_REGNUM_P (REGNO (operands[1]))"
+ [(const_int 0)]
+ {
+ enum machine_mode mode = SELECT_CC_MODE (<CMP>, operands[1], operands[2]);
+ rtx cc_reg = aarch64_gen_compare_reg (<CMP>, operands[1], operands[2]);
+ rtx comparison = gen_rtx_<CMP> (mode, operands[1], operands[2]);
+ emit_insn (gen_cstoredi_neg (operands[0], comparison, cc_reg));
+ DONE;
+ }
+ [(set_attr "type" "neon_compare, neon_compare_zero, multiple")]
+)
+
+;; cm(hs|hi)
+
+(define_insn "aarch64_cm<optab><mode>"
+ [(set (match_operand:<V_cmp_result> 0 "register_operand" "=w")
+ (neg:<V_cmp_result>
+ (UCOMPARISONS:<V_cmp_result>
+ (match_operand:VDQ 1 "register_operand" "w")
+ (match_operand:VDQ 2 "register_operand" "w")
+ )))]
+ "TARGET_SIMD"
+ "cm<n_optab>\t%<v>0<Vmtype>, %<v><cmp_1><Vmtype>, %<v><cmp_2><Vmtype>"
+ [(set_attr "type" "neon_compare<q>")]
+)
+
+(define_insn_and_split "aarch64_cm<optab>di"
+ [(set (match_operand:DI 0 "register_operand" "=w,r")
+ (neg:DI
+ (UCOMPARISONS:DI
+ (match_operand:DI 1 "register_operand" "w,r")
+ (match_operand:DI 2 "aarch64_simd_reg_or_zero" "w,r")
+ )))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_SIMD"
+ "@
+ cm<n_optab>\t%d0, %d<cmp_1>, %d<cmp_2>
+ #"
+ "reload_completed
+ /* We need to prevent the split from
+ happening in the 'w' constraint cases. */
+ && GP_REGNUM_P (REGNO (operands[0]))
+ && GP_REGNUM_P (REGNO (operands[1]))"
+ [(const_int 0)]
+ {
+ enum machine_mode mode = CCmode;
+ rtx cc_reg = aarch64_gen_compare_reg (<CMP>, operands[1], operands[2]);
+ rtx comparison = gen_rtx_<CMP> (mode, operands[1], operands[2]);
+ emit_insn (gen_cstoredi_neg (operands[0], comparison, cc_reg));
+ DONE;
+ }
+ [(set_attr "type" "neon_compare, neon_compare_zero")]
+)
+
+;; cmtst
+
+(define_insn "aarch64_cmtst<mode>"
+ [(set (match_operand:<V_cmp_result> 0 "register_operand" "=w")
+ (neg:<V_cmp_result>
+ (ne:<V_cmp_result>
+ (and:VDQ
+ (match_operand:VDQ 1 "register_operand" "w")
+ (match_operand:VDQ 2 "register_operand" "w"))
+ (vec_duplicate:<V_cmp_result> (const_int 0)))))]
+ "TARGET_SIMD"
+ "cmtst\t%<v>0<Vmtype>, %<v>1<Vmtype>, %<v>2<Vmtype>"
+ [(set_attr "type" "neon_tst<q>")]
+)
+
+(define_insn_and_split "aarch64_cmtstdi"
+ [(set (match_operand:DI 0 "register_operand" "=w,r")
+ (neg:DI
+ (ne:DI
+ (and:DI
+ (match_operand:DI 1 "register_operand" "w,r")
+ (match_operand:DI 2 "register_operand" "w,r"))
+ (const_int 0))))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_SIMD"
+ "@
+ cmtst\t%d0, %d1, %d2
+ #"
+ "reload_completed
+ /* We need to prevent the split from
+ happening in the 'w' constraint cases. */
+ && GP_REGNUM_P (REGNO (operands[0]))
+ && GP_REGNUM_P (REGNO (operands[1]))"
+ [(const_int 0)]
+ {
+ rtx and_tree = gen_rtx_AND (DImode, operands[1], operands[2]);
+ enum machine_mode mode = SELECT_CC_MODE (NE, and_tree, const0_rtx);
+ rtx cc_reg = aarch64_gen_compare_reg (NE, and_tree, const0_rtx);
+ rtx comparison = gen_rtx_NE (mode, and_tree, const0_rtx);
+ emit_insn (gen_cstoredi_neg (operands[0], comparison, cc_reg));
+ DONE;
+ }
+ [(set_attr "type" "neon_tst")]
+)
+
+;; fcm(eq|ge|gt|le|lt)
+
+(define_insn "aarch64_cm<optab><mode>"
+ [(set (match_operand:<V_cmp_result> 0 "register_operand" "=w,w")
+ (neg:<V_cmp_result>
+ (COMPARISONS:<V_cmp_result>
+ (match_operand:VALLF 1 "register_operand" "w,w")
+ (match_operand:VALLF 2 "aarch64_simd_reg_or_zero" "w,YDz")
+ )))]
+ "TARGET_SIMD"
+ "@
+ fcm<n_optab>\t%<v>0<Vmtype>, %<v><cmp_1><Vmtype>, %<v><cmp_2><Vmtype>
+ fcm<optab>\t%<v>0<Vmtype>, %<v>1<Vmtype>, 0"
+ [(set_attr "type" "neon_fp_compare_<Vetype><q>")]
+)
+
+;; fac(ge|gt)
+;; Note we can also handle what would be fac(le|lt) by
+;; generating fac(ge|gt).
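+;; For example, a facle comparison of (a, b) is emitted as facge of (b, a);
+;; the <cmp_1>/<cmp_2> attributes in the template below select the operand
+;; order for each comparison code.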
+
+(define_insn "*aarch64_fac<optab><mode>"
+ [(set (match_operand:<V_cmp_result> 0 "register_operand" "=w")
+ (neg:<V_cmp_result>
+ (FAC_COMPARISONS:<V_cmp_result>
+ (abs:VALLF (match_operand:VALLF 1 "register_operand" "w"))
+ (abs:VALLF (match_operand:VALLF 2 "register_operand" "w"))
+ )))]
+ "TARGET_SIMD"
+ "fac<n_optab>\t%<v>0<Vmtype>, %<v><cmp_1><Vmtype>, %<v><cmp_2><Vmtype>"
+ [(set_attr "type" "neon_fp_compare_<Vetype><q>")]
+)
+
+;; addp
+
+(define_insn "aarch64_addp<mode>"
+ [(set (match_operand:VD_BHSI 0 "register_operand" "=w")
+ (unspec:VD_BHSI
+ [(match_operand:VD_BHSI 1 "register_operand" "w")
+ (match_operand:VD_BHSI 2 "register_operand" "w")]
+ UNSPEC_ADDP))]
+ "TARGET_SIMD"
+ "addp\t%<v>0<Vmtype>, %<v>1<Vmtype>, %<v>2<Vmtype>"
+ [(set_attr "type" "neon_reduc_add<q>")]
+)
+
+(define_insn "aarch64_addpdi"
+ [(set (match_operand:DI 0 "register_operand" "=w")
+ (unspec:DI
+ [(match_operand:V2DI 1 "register_operand" "w")]
+ UNSPEC_ADDP))]
+ "TARGET_SIMD"
+ "addp\t%d0, %1.2d"
+ [(set_attr "type" "neon_reduc_add")]
+)
+
+;; sqrt
+
+(define_insn "sqrt<mode>2"
+ [(set (match_operand:VDQF 0 "register_operand" "=w")
+ (sqrt:VDQF (match_operand:VDQF 1 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "fsqrt\\t%0.<Vtype>, %1.<Vtype>"
+ [(set_attr "type" "neon_fp_sqrt_<Vetype><q>")]
+)
+
+;; Patterns for vector struct loads and stores.
+
+(define_insn "vec_load_lanesoi<mode>"
+ [(set (match_operand:OI 0 "register_operand" "=w")
+ (unspec:OI [(match_operand:OI 1 "aarch64_simd_struct_operand" "Utv")
+ (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+ UNSPEC_LD2))]
+ "TARGET_SIMD"
+ "ld2\\t{%S0.<Vtype> - %T0.<Vtype>}, %1"
+ [(set_attr "type" "neon_load2_2reg<q>")]
+)
+
+(define_insn "vec_store_lanesoi<mode>"
+ [(set (match_operand:OI 0 "aarch64_simd_struct_operand" "=Utv")
+ (unspec:OI [(match_operand:OI 1 "register_operand" "w")
+ (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+ UNSPEC_ST2))]
+ "TARGET_SIMD"
+ "st2\\t{%S1.<Vtype> - %T1.<Vtype>}, %0"
+ [(set_attr "type" "neon_store2_2reg<q>")]
+)
+
+(define_insn "vec_load_lanesci<mode>"
+ [(set (match_operand:CI 0 "register_operand" "=w")
+ (unspec:CI [(match_operand:CI 1 "aarch64_simd_struct_operand" "Utv")
+ (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+ UNSPEC_LD3))]
+ "TARGET_SIMD"
+ "ld3\\t{%S0.<Vtype> - %U0.<Vtype>}, %1"
+ [(set_attr "type" "neon_load3_3reg<q>")]
+)
+
+(define_insn "vec_store_lanesci<mode>"
+ [(set (match_operand:CI 0 "aarch64_simd_struct_operand" "=Utv")
+ (unspec:CI [(match_operand:CI 1 "register_operand" "w")
+ (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+ UNSPEC_ST3))]
+ "TARGET_SIMD"
+ "st3\\t{%S1.<Vtype> - %U1.<Vtype>}, %0"
+ [(set_attr "type" "neon_store3_3reg<q>")]
+)
+
+(define_insn "vec_load_lanesxi<mode>"
+ [(set (match_operand:XI 0 "register_operand" "=w")
+ (unspec:XI [(match_operand:XI 1 "aarch64_simd_struct_operand" "Utv")
+ (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+ UNSPEC_LD4))]
+ "TARGET_SIMD"
+ "ld4\\t{%S0.<Vtype> - %V0.<Vtype>}, %1"
+ [(set_attr "type" "neon_load4_4reg<q>")]
+)
+
+(define_insn "vec_store_lanesxi<mode>"
+ [(set (match_operand:XI 0 "aarch64_simd_struct_operand" "=Utv")
+ (unspec:XI [(match_operand:XI 1 "register_operand" "w")
+ (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+ UNSPEC_ST4))]
+ "TARGET_SIMD"
+ "st4\\t{%S1.<Vtype> - %V1.<Vtype>}, %0"
+ [(set_attr "type" "neon_store4_4reg<q>")]
+)
+
+;; Reload patterns for AdvSIMD register list operands.
+
+(define_expand "mov<mode>"
+ [(set (match_operand:VSTRUCT 0 "aarch64_simd_nonimmediate_operand" "")
+ (match_operand:VSTRUCT 1 "aarch64_simd_general_operand" ""))]
+ "TARGET_SIMD"
+{
+ if (can_create_pseudo_p ())
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (<MODE>mode, operands[1]);
+ }
+})
+
+(define_insn "*aarch64_mov<mode>"
+ [(set (match_operand:VSTRUCT 0 "aarch64_simd_nonimmediate_operand" "=w,Utv,w")
+ (match_operand:VSTRUCT 1 "aarch64_simd_general_operand" " w,w,Utv"))]
+ "TARGET_SIMD
+ && (register_operand (operands[0], <MODE>mode)
+ || register_operand (operands[1], <MODE>mode))"
+{
+ switch (which_alternative)
+ {
+ case 0: return "#";
+ case 1: return "st1\\t{%S1.16b - %<Vendreg>1.16b}, %0";
+ case 2: return "ld1\\t{%S0.16b - %<Vendreg>0.16b}, %1";
+ default: gcc_unreachable ();
+ }
+}
+ [(set_attr "type" "neon_move,neon_store<nregs>_<nregs>reg_q,\
+ neon_load<nregs>_<nregs>reg_q")
+ (set (attr "length") (symbol_ref "aarch64_simd_attr_length_move (insn)"))]
+)
+
+(define_insn "aarch64_be_ld1<mode>"
+ [(set (match_operand:VALLDI 0 "register_operand" "=w")
+ (unspec:VALLDI [(match_operand:VALLDI 1 "aarch64_simd_struct_operand" "Utv")]
+ UNSPEC_LD1))]
+ "TARGET_SIMD"
+ "ld1\\t{%0<Vmtype>}, %1"
+ [(set_attr "type" "neon_load1_1reg<q>")]
+)
+
+(define_insn "aarch64_be_st1<mode>"
+ [(set (match_operand:VALLDI 0 "aarch64_simd_struct_operand" "=Utv")
+ (unspec:VALLDI [(match_operand:VALLDI 1 "register_operand" "w")]
+ UNSPEC_ST1))]
+ "TARGET_SIMD"
+ "st1\\t{%1<Vmtype>}, %0"
+ [(set_attr "type" "neon_store1_1reg<q>")]
+)
+
+(define_split
+ [(set (match_operand:OI 0 "register_operand" "")
+ (match_operand:OI 1 "register_operand" ""))]
+ "TARGET_SIMD && reload_completed"
+ [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 2) (match_dup 3))]
+{
+ int rdest = REGNO (operands[0]);
+ int rsrc = REGNO (operands[1]);
+ rtx dest[2], src[2];
+
+ dest[0] = gen_rtx_REG (TFmode, rdest);
+ src[0] = gen_rtx_REG (TFmode, rsrc);
+ dest[1] = gen_rtx_REG (TFmode, rdest + 1);
+ src[1] = gen_rtx_REG (TFmode, rsrc + 1);
+
+ aarch64_simd_disambiguate_copy (operands, dest, src, 2);
+})
+
+(define_split
+ [(set (match_operand:CI 0 "register_operand" "")
+ (match_operand:CI 1 "register_operand" ""))]
+ "TARGET_SIMD && reload_completed"
+ [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 2) (match_dup 3))
+ (set (match_dup 4) (match_dup 5))]
+{
+ int rdest = REGNO (operands[0]);
+ int rsrc = REGNO (operands[1]);
+ rtx dest[3], src[3];
+
+ dest[0] = gen_rtx_REG (TFmode, rdest);
+ src[0] = gen_rtx_REG (TFmode, rsrc);
+ dest[1] = gen_rtx_REG (TFmode, rdest + 1);
+ src[1] = gen_rtx_REG (TFmode, rsrc + 1);
+ dest[2] = gen_rtx_REG (TFmode, rdest + 2);
+ src[2] = gen_rtx_REG (TFmode, rsrc + 2);
+
+ aarch64_simd_disambiguate_copy (operands, dest, src, 3);
+})
+
+(define_split
+ [(set (match_operand:XI 0 "register_operand" "")
+ (match_operand:XI 1 "register_operand" ""))]
+ "TARGET_SIMD && reload_completed"
+ [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 2) (match_dup 3))
+ (set (match_dup 4) (match_dup 5))
+ (set (match_dup 6) (match_dup 7))]
+{
+ int rdest = REGNO (operands[0]);
+ int rsrc = REGNO (operands[1]);
+ rtx dest[4], src[4];
+
+ dest[0] = gen_rtx_REG (TFmode, rdest);
+ src[0] = gen_rtx_REG (TFmode, rsrc);
+ dest[1] = gen_rtx_REG (TFmode, rdest + 1);
+ src[1] = gen_rtx_REG (TFmode, rsrc + 1);
+ dest[2] = gen_rtx_REG (TFmode, rdest + 2);
+ src[2] = gen_rtx_REG (TFmode, rsrc + 2);
+ dest[3] = gen_rtx_REG (TFmode, rdest + 3);
+ src[3] = gen_rtx_REG (TFmode, rsrc + 3);
+
+ aarch64_simd_disambiguate_copy (operands, dest, src, 4);
+})
+
+(define_insn "aarch64_ld2<mode>_dreg"
+ [(set (match_operand:OI 0 "register_operand" "=w")
+ (subreg:OI
+ (vec_concat:<VRL2>
+ (vec_concat:<VDBL>
+ (unspec:VD [(match_operand:TI 1 "aarch64_simd_struct_operand" "Utv")]
+ UNSPEC_LD2)
+ (vec_duplicate:VD (const_int 0)))
+ (vec_concat:<VDBL>
+ (unspec:VD [(match_dup 1)]
+ UNSPEC_LD2)
+ (vec_duplicate:VD (const_int 0)))) 0))]
+ "TARGET_SIMD"
+ "ld2\\t{%S0.<Vtype> - %T0.<Vtype>}, %1"
+ [(set_attr "type" "neon_load2_2reg<q>")]
+)
+
+(define_insn "aarch64_ld2<mode>_dreg"
+ [(set (match_operand:OI 0 "register_operand" "=w")
+ (subreg:OI
+ (vec_concat:<VRL2>
+ (vec_concat:<VDBL>
+ (unspec:DX [(match_operand:TI 1 "aarch64_simd_struct_operand" "Utv")]
+ UNSPEC_LD2)
+ (const_int 0))
+ (vec_concat:<VDBL>
+ (unspec:DX [(match_dup 1)]
+ UNSPEC_LD2)
+ (const_int 0))) 0))]
+ "TARGET_SIMD"
+ "ld1\\t{%S0.1d - %T0.1d}, %1"
+ [(set_attr "type" "neon_load1_2reg<q>")]
+)
+
+(define_insn "aarch64_ld3<mode>_dreg"
+ [(set (match_operand:CI 0 "register_operand" "=w")
+ (subreg:CI
+ (vec_concat:<VRL3>
+ (vec_concat:<VRL2>
+ (vec_concat:<VDBL>
+ (unspec:VD [(match_operand:EI 1 "aarch64_simd_struct_operand" "Utv")]
+ UNSPEC_LD3)
+ (vec_duplicate:VD (const_int 0)))
+ (vec_concat:<VDBL>
+ (unspec:VD [(match_dup 1)]
+ UNSPEC_LD3)
+ (vec_duplicate:VD (const_int 0))))
+ (vec_concat:<VDBL>
+ (unspec:VD [(match_dup 1)]
+ UNSPEC_LD3)
+ (vec_duplicate:VD (const_int 0)))) 0))]
+ "TARGET_SIMD"
+ "ld3\\t{%S0.<Vtype> - %U0.<Vtype>}, %1"
+ [(set_attr "type" "neon_load3_3reg<q>")]
+)
+
+(define_insn "aarch64_ld3<mode>_dreg"
+ [(set (match_operand:CI 0 "register_operand" "=w")
+ (subreg:CI
+ (vec_concat:<VRL3>
+ (vec_concat:<VRL2>
+ (vec_concat:<VDBL>
+ (unspec:DX [(match_operand:EI 1 "aarch64_simd_struct_operand" "Utv")]
+ UNSPEC_LD3)
+ (const_int 0))
+ (vec_concat:<VDBL>
+ (unspec:DX [(match_dup 1)]
+ UNSPEC_LD3)
+ (const_int 0)))
+ (vec_concat:<VDBL>
+ (unspec:DX [(match_dup 1)]
+ UNSPEC_LD3)
+ (const_int 0))) 0))]
+ "TARGET_SIMD"
+ "ld1\\t{%S0.1d - %U0.1d}, %1"
+ [(set_attr "type" "neon_load1_3reg<q>")]
+)
+
+(define_insn "aarch64_ld4<mode>_dreg"
+ [(set (match_operand:XI 0 "register_operand" "=w")
+ (subreg:XI
+ (vec_concat:<VRL4>
+ (vec_concat:<VRL2>
+ (vec_concat:<VDBL>
+ (unspec:VD [(match_operand:OI 1 "aarch64_simd_struct_operand" "Utv")]
+ UNSPEC_LD4)
+ (vec_duplicate:VD (const_int 0)))
+ (vec_concat:<VDBL>
+ (unspec:VD [(match_dup 1)]
+ UNSPEC_LD4)
+ (vec_duplicate:VD (const_int 0))))
+ (vec_concat:<VRL2>
+ (vec_concat:<VDBL>
+ (unspec:VD [(match_dup 1)]
+ UNSPEC_LD4)
+ (vec_duplicate:VD (const_int 0)))
+ (vec_concat:<VDBL>
+ (unspec:VD [(match_dup 1)]
+ UNSPEC_LD4)
+ (vec_duplicate:VD (const_int 0))))) 0))]
+ "TARGET_SIMD"
+ "ld4\\t{%S0.<Vtype> - %V0.<Vtype>}, %1"
+ [(set_attr "type" "neon_load4_4reg<q>")]
+)
+
+(define_insn "aarch64_ld4<mode>_dreg"
+ [(set (match_operand:XI 0 "register_operand" "=w")
+ (subreg:XI
+ (vec_concat:<VRL4>
+ (vec_concat:<VRL2>
+ (vec_concat:<VDBL>
+ (unspec:DX [(match_operand:OI 1 "aarch64_simd_struct_operand" "Utv")]
+ UNSPEC_LD4)
+ (const_int 0))
+ (vec_concat:<VDBL>
+ (unspec:DX [(match_dup 1)]
+ UNSPEC_LD4)
+ (const_int 0)))
+ (vec_concat:<VRL2>
+ (vec_concat:<VDBL>
+ (unspec:DX [(match_dup 1)]
+ UNSPEC_LD4)
+ (const_int 0))
+ (vec_concat:<VDBL>
+ (unspec:DX [(match_dup 1)]
+ UNSPEC_LD4)
+ (const_int 0)))) 0))]
+ "TARGET_SIMD"
+ "ld1\\t{%S0.1d - %V0.1d}, %1"
+ [(set_attr "type" "neon_load1_4reg<q>")]
+)
+
+(define_expand "aarch64_ld<VSTRUCT:nregs><VDC:mode>"
+ [(match_operand:VSTRUCT 0 "register_operand" "=w")
+ (match_operand:DI 1 "register_operand" "r")
+ (unspec:VDC [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+ "TARGET_SIMD"
+{
+ enum machine_mode mode = <VSTRUCT:VSTRUCT_DREG>mode;
+ rtx mem = gen_rtx_MEM (mode, operands[1]);
+
+ emit_insn (gen_aarch64_ld<VSTRUCT:nregs><VDC:mode>_dreg (operands[0], mem));
+ DONE;
+})
+
+(define_expand "aarch64_ld1<VALL:mode>"
+ [(match_operand:VALL 0 "register_operand")
+ (match_operand:DI 1 "register_operand")]
+ "TARGET_SIMD"
+{
+ enum machine_mode mode = <VALL:MODE>mode;
+ rtx mem = gen_rtx_MEM (mode, operands[1]);
+
+ if (BYTES_BIG_ENDIAN)
+ emit_insn (gen_aarch64_be_ld1<VALL:mode> (operands[0], mem));
+ else
+ emit_move_insn (operands[0], mem);
+ DONE;
+})
+
+(define_expand "aarch64_ld<VSTRUCT:nregs><VQ:mode>"
+ [(match_operand:VSTRUCT 0 "register_operand" "=w")
+ (match_operand:DI 1 "register_operand" "r")
+ (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+ "TARGET_SIMD"
+{
+ enum machine_mode mode = <VSTRUCT:MODE>mode;
+ rtx mem = gen_rtx_MEM (mode, operands[1]);
+
+ emit_insn (gen_vec_load_lanes<VSTRUCT:mode><VQ:mode> (operands[0], mem));
+ DONE;
+})
+
+;; Expanders for builtins to extract vector registers from large
+;; opaque integer modes.
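+;; An OImode, CImode or XImode value holds 2, 3 or 4 Q-registers
+;; respectively, so part N of a register list is the subreg at byte
+;; offset N * 16.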
+
+;; D-register list.
+
+(define_expand "aarch64_get_dreg<VSTRUCT:mode><VDC:mode>"
+ [(match_operand:VDC 0 "register_operand" "=w")
+ (match_operand:VSTRUCT 1 "register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "i")]
+ "TARGET_SIMD"
+{
+ int part = INTVAL (operands[2]);
+ rtx temp = gen_reg_rtx (<VDC:VDBL>mode);
+ int offset = part * 16;
+
+ emit_move_insn (temp, gen_rtx_SUBREG (<VDC:VDBL>mode, operands[1], offset));
+ emit_move_insn (operands[0], gen_lowpart (<VDC:MODE>mode, temp));
+ DONE;
+})
+
+;; Q-register list.
+
+(define_expand "aarch64_get_qreg<VSTRUCT:mode><VQ:mode>"
+ [(match_operand:VQ 0 "register_operand" "=w")
+ (match_operand:VSTRUCT 1 "register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "i")]
+ "TARGET_SIMD"
+{
+ int part = INTVAL (operands[2]);
+ int offset = part * 16;
+
+ emit_move_insn (operands[0],
+ gen_rtx_SUBREG (<VQ:MODE>mode, operands[1], offset));
+ DONE;
+})
+
+;; Permuted-store expanders for neon intrinsics.
+
+;; Permute instructions
+
+;; vec_perm support
+
+(define_expand "vec_perm_const<mode>"
+ [(match_operand:VALL 0 "register_operand")
+ (match_operand:VALL 1 "register_operand")
+ (match_operand:VALL 2 "register_operand")
+ (match_operand:<V_cmp_result> 3)]
+ "TARGET_SIMD"
+{
+ if (aarch64_expand_vec_perm_const (operands[0], operands[1],
+ operands[2], operands[3]))
+ DONE;
+ else
+ FAIL;
+})
+
+(define_expand "vec_perm<mode>"
+ [(match_operand:VB 0 "register_operand")
+ (match_operand:VB 1 "register_operand")
+ (match_operand:VB 2 "register_operand")
+ (match_operand:VB 3 "register_operand")]
+ "TARGET_SIMD && !BYTES_BIG_ENDIAN"
+{
+ aarch64_expand_vec_perm (operands[0], operands[1],
+ operands[2], operands[3]);
+ DONE;
+})
+
+(define_insn "aarch64_tbl1<mode>"
+ [(set (match_operand:VB 0 "register_operand" "=w")
+ (unspec:VB [(match_operand:V16QI 1 "register_operand" "w")
+ (match_operand:VB 2 "register_operand" "w")]
+ UNSPEC_TBL))]
+ "TARGET_SIMD"
+ "tbl\\t%0.<Vtype>, {%1.16b}, %2.<Vtype>"
+ [(set_attr "type" "neon_tbl1<q>")]
+)
+
+;; Two source registers.
+
+(define_insn "aarch64_tbl2v16qi"
+ [(set (match_operand:V16QI 0 "register_operand" "=w")
+ (unspec:V16QI [(match_operand:OI 1 "register_operand" "w")
+ (match_operand:V16QI 2 "register_operand" "w")]
+ UNSPEC_TBL))]
+ "TARGET_SIMD"
+ "tbl\\t%0.16b, {%S1.16b - %T1.16b}, %2.16b"
+ [(set_attr "type" "neon_tbl2_q")]
+)
+
+(define_insn_and_split "aarch64_combinev16qi"
+ [(set (match_operand:OI 0 "register_operand" "=w")
+ (unspec:OI [(match_operand:V16QI 1 "register_operand" "w")
+ (match_operand:V16QI 2 "register_operand" "w")]
+ UNSPEC_CONCAT))]
+ "TARGET_SIMD"
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+{
+ aarch64_split_combinev16qi (operands);
+ DONE;
+}
+[(set_attr "type" "multiple")]
+)
+
+(define_insn "aarch64_<PERMUTE:perm_insn><PERMUTE:perm_hilo><mode>"
+ [(set (match_operand:VALL 0 "register_operand" "=w")
+ (unspec:VALL [(match_operand:VALL 1 "register_operand" "w")
+ (match_operand:VALL 2 "register_operand" "w")]
+ PERMUTE))]
+ "TARGET_SIMD"
+ "<PERMUTE:perm_insn><PERMUTE:perm_hilo>\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
+ [(set_attr "type" "neon_permute<q>")]
+)
+
+(define_insn "aarch64_st2<mode>_dreg"
+ [(set (match_operand:TI 0 "aarch64_simd_struct_operand" "=Utv")
+ (unspec:TI [(match_operand:OI 1 "register_operand" "w")
+ (unspec:VD [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+ UNSPEC_ST2))]
+ "TARGET_SIMD"
+ "st2\\t{%S1.<Vtype> - %T1.<Vtype>}, %0"
+ [(set_attr "type" "neon_store2_2reg")]
+)
+
+(define_insn "aarch64_st2<mode>_dreg"
+ [(set (match_operand:TI 0 "aarch64_simd_struct_operand" "=Utv")
+ (unspec:TI [(match_operand:OI 1 "register_operand" "w")
+ (unspec:DX [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+ UNSPEC_ST2))]
+ "TARGET_SIMD"
+ "st1\\t{%S1.1d - %T1.1d}, %0"
+ [(set_attr "type" "neon_store1_2reg")]
+)
+
+(define_insn "aarch64_st3<mode>_dreg"
+ [(set (match_operand:EI 0 "aarch64_simd_struct_operand" "=Utv")
+ (unspec:EI [(match_operand:CI 1 "register_operand" "w")
+ (unspec:VD [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+ UNSPEC_ST3))]
+ "TARGET_SIMD"
+ "st3\\t{%S1.<Vtype> - %U1.<Vtype>}, %0"
+ [(set_attr "type" "neon_store3_3reg")]
+)
+
+(define_insn "aarch64_st3<mode>_dreg"
+ [(set (match_operand:EI 0 "aarch64_simd_struct_operand" "=Utv")
+ (unspec:EI [(match_operand:CI 1 "register_operand" "w")
+ (unspec:DX [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+ UNSPEC_ST3))]
+ "TARGET_SIMD"
+ "st1\\t{%S1.1d - %U1.1d}, %0"
+ [(set_attr "type" "neon_store1_3reg")]
+)
+
+(define_insn "aarch64_st4<mode>_dreg"
+ [(set (match_operand:OI 0 "aarch64_simd_struct_operand" "=Utv")
+ (unspec:OI [(match_operand:XI 1 "register_operand" "w")
+ (unspec:VD [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+ UNSPEC_ST4))]
+ "TARGET_SIMD"
+ "st4\\t{%S1.<Vtype> - %V1.<Vtype>}, %0"
+ [(set_attr "type" "neon_store4_4reg")]
+)
+
+(define_insn "aarch64_st4<mode>_dreg"
+ [(set (match_operand:OI 0 "aarch64_simd_struct_operand" "=Utv")
+ (unspec:OI [(match_operand:XI 1 "register_operand" "w")
+ (unspec:DX [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+ UNSPEC_ST4))]
+ "TARGET_SIMD"
+ "st1\\t{%S1.1d - %V1.1d}, %0"
+ [(set_attr "type" "neon_store1_4reg")]
+)
+
+(define_expand "aarch64_st<VSTRUCT:nregs><VDC:mode>"
+ [(match_operand:DI 0 "register_operand" "r")
+ (match_operand:VSTRUCT 1 "register_operand" "w")
+ (unspec:VDC [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+ "TARGET_SIMD"
+{
+ enum machine_mode mode = <VSTRUCT:VSTRUCT_DREG>mode;
+ rtx mem = gen_rtx_MEM (mode, operands[0]);
+
+ emit_insn (gen_aarch64_st<VSTRUCT:nregs><VDC:mode>_dreg (mem, operands[1]));
+ DONE;
+})
+
+(define_expand "aarch64_st<VSTRUCT:nregs><VQ:mode>"
+ [(match_operand:DI 0 "register_operand" "r")
+ (match_operand:VSTRUCT 1 "register_operand" "w")
+ (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+ "TARGET_SIMD"
+{
+ enum machine_mode mode = <VSTRUCT:MODE>mode;
+ rtx mem = gen_rtx_MEM (mode, operands[0]);
+
+ emit_insn (gen_vec_store_lanes<VSTRUCT:mode><VQ:mode> (mem, operands[1]));
+ DONE;
+})
+
+(define_expand "aarch64_st1<VALL:mode>"
+ [(match_operand:DI 0 "register_operand")
+ (match_operand:VALL 1 "register_operand")]
+ "TARGET_SIMD"
+{
+ enum machine_mode mode = <VALL:MODE>mode;
+ rtx mem = gen_rtx_MEM (mode, operands[0]);
+
+ if (BYTES_BIG_ENDIAN)
+ emit_insn (gen_aarch64_be_st1<VALL:mode> (mem, operands[1]));
+ else
+ emit_move_insn (mem, operands[1]);
+ DONE;
+})
+
+;; Expander for builtins to insert vector registers into large
+;; opaque integer modes.
+
+;; Q-register list. We don't need a D-reg inserter as we zero
+;; extend them in arm_neon.h and insert the resulting Q-regs.
+
+(define_expand "aarch64_set_qreg<VSTRUCT:mode><VQ:mode>"
+ [(match_operand:VSTRUCT 0 "register_operand" "+w")
+ (match_operand:VSTRUCT 1 "register_operand" "0")
+ (match_operand:VQ 2 "register_operand" "w")
+ (match_operand:SI 3 "immediate_operand" "i")]
+ "TARGET_SIMD"
+{
+ int part = INTVAL (operands[3]);
+ int offset = part * 16;
+
+ emit_move_insn (operands[0], operands[1]);
+ emit_move_insn (gen_rtx_SUBREG (<VQ:MODE>mode, operands[0], offset),
+ operands[2]);
+ DONE;
+})
+
+;; Standard pattern name vec_init<mode>.
+
+(define_expand "vec_init<mode>"
+ [(match_operand:VALL 0 "register_operand" "")
+ (match_operand 1 "" "")]
+ "TARGET_SIMD"
+{
+ aarch64_expand_vector_init (operands[0], operands[1]);
+ DONE;
+})
+
+(define_insn "*aarch64_simd_ld1r<mode>"
+ [(set (match_operand:VALLDI 0 "register_operand" "=w")
+ (vec_duplicate:VALLDI
+ (match_operand:<VEL> 1 "aarch64_simd_struct_operand" "Utv")))]
+ "TARGET_SIMD"
+ "ld1r\\t{%0.<Vtype>}, %1"
+ [(set_attr "type" "neon_load1_all_lanes")]
+)
+
+(define_insn "aarch64_frecpe<mode>"
+ [(set (match_operand:VDQF 0 "register_operand" "=w")
+ (unspec:VDQF [(match_operand:VDQF 1 "register_operand" "w")]
+ UNSPEC_FRECPE))]
+ "TARGET_SIMD"
+ "frecpe\\t%0.<Vtype>, %1.<Vtype>"
+ [(set_attr "type" "neon_fp_recpe_<Vetype><q>")]
+)
+
+(define_insn "aarch64_frecp<FRECP:frecp_suffix><mode>"
+ [(set (match_operand:GPF 0 "register_operand" "=w")
+ (unspec:GPF [(match_operand:GPF 1 "register_operand" "w")]
+ FRECP))]
+ "TARGET_SIMD"
+ "frecp<FRECP:frecp_suffix>\\t%<s>0, %<s>1"
+ [(set_attr "type" "neon_fp_recp<FRECP:frecp_suffix>_<GPF:Vetype><GPF:q>")]
+)
+
+(define_insn "aarch64_frecps<mode>"
+ [(set (match_operand:VALLF 0 "register_operand" "=w")
+ (unspec:VALLF [(match_operand:VALLF 1 "register_operand" "w")
+ (match_operand:VALLF 2 "register_operand" "w")]
+ UNSPEC_FRECPS))]
+ "TARGET_SIMD"
+ "frecps\\t%<v>0<Vmtype>, %<v>1<Vmtype>, %<v>2<Vmtype>"
+ [(set_attr "type" "neon_fp_recps_<Vetype><q>")]
+)
+
+;; Standard pattern name vec_extract<mode>.
+
+(define_expand "vec_extract<mode>"
+ [(match_operand:<VEL> 0 "aarch64_simd_nonimmediate_operand" "")
+ (match_operand:VALL 1 "register_operand" "")
+ (match_operand:SI 2 "immediate_operand" "")]
+ "TARGET_SIMD"
+{
+ emit_insn
+ (gen_aarch64_get_lane<mode> (operands[0], operands[1], operands[2]));
+ DONE;
+})
+
+;; aes
+
+(define_insn "aarch64_crypto_aes<aes_op>v16qi"
+ [(set (match_operand:V16QI 0 "register_operand" "=w")
+ (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "0")
+ (match_operand:V16QI 2 "register_operand" "w")]
+ CRYPTO_AES))]
+ "TARGET_SIMD && TARGET_CRYPTO"
+ "aes<aes_op>\\t%0.16b, %2.16b"
+ [(set_attr "type" "crypto_aes")]
+)
+
+(define_insn "aarch64_crypto_aes<aesmc_op>v16qi"
+ [(set (match_operand:V16QI 0 "register_operand" "=w")
+ (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "w")]
+ CRYPTO_AESMC))]
+ "TARGET_SIMD && TARGET_CRYPTO"
+ "aes<aesmc_op>\\t%0.16b, %1.16b"
+ [(set_attr "type" "crypto_aes")]
+)
+
+;; sha1
+
+(define_insn "aarch64_crypto_sha1hsi"
+ [(set (match_operand:SI 0 "register_operand" "=w")
+ (unspec:SI [(match_operand:SI 1
+ "register_operand" "w")]
+ UNSPEC_SHA1H))]
+ "TARGET_SIMD && TARGET_CRYPTO"
+ "sha1h\\t%s0, %s1"
+ [(set_attr "type" "crypto_sha1_fast")]
+)
+
+(define_insn "aarch64_crypto_sha1su1v4si"
+ [(set (match_operand:V4SI 0 "register_operand" "=w")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "0")
+ (match_operand:V4SI 2 "register_operand" "w")]
+ UNSPEC_SHA1SU1))]
+ "TARGET_SIMD && TARGET_CRYPTO"
+ "sha1su1\\t%0.4s, %2.4s"
+ [(set_attr "type" "crypto_sha1_fast")]
+)
+
+(define_insn "aarch64_crypto_sha1<sha1_op>v4si"
+ [(set (match_operand:V4SI 0 "register_operand" "=w")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "0")
+ (match_operand:SI 2 "register_operand" "w")
+ (match_operand:V4SI 3 "register_operand" "w")]
+ CRYPTO_SHA1))]
+ "TARGET_SIMD && TARGET_CRYPTO"
+ "sha1<sha1_op>\\t%q0, %s2, %3.4s"
+ [(set_attr "type" "crypto_sha1_slow")]
+)
+
+(define_insn "aarch64_crypto_sha1su0v4si"
+ [(set (match_operand:V4SI 0 "register_operand" "=w")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "0")
+ (match_operand:V4SI 2 "register_operand" "w")
+ (match_operand:V4SI 3 "register_operand" "w")]
+ UNSPEC_SHA1SU0))]
+ "TARGET_SIMD && TARGET_CRYPTO"
+ "sha1su0\\t%0.4s, %2.4s, %3.4s"
+ [(set_attr "type" "crypto_sha1_xor")]
+)
+
+;; sha256
+
+(define_insn "aarch64_crypto_sha256h<sha256_op>v4si"
+ [(set (match_operand:V4SI 0 "register_operand" "=w")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "0")
+ (match_operand:V4SI 2 "register_operand" "w")
+ (match_operand:V4SI 3 "register_operand" "w")]
+ CRYPTO_SHA256))]
+ "TARGET_SIMD && TARGET_CRYPTO"
+ "sha256h<sha256_op>\\t%q0, %q2, %3.4s"
+ [(set_attr "type" "crypto_sha256_slow")]
+)
+
+(define_insn "aarch64_crypto_sha256su0v4si"
+ [(set (match_operand:V4SI 0 "register_operand" "=w")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "0")
+ (match_operand:V4SI 2 "register_operand" "w")]
+ UNSPEC_SHA256SU0))]
+ "TARGET_SIMD && TARGET_CRYPTO"
+ "sha256su0\\t%0.4s, %2.4s"
+ [(set_attr "type" "crypto_sha256_fast")]
+)
+
+(define_insn "aarch64_crypto_sha256su1v4si"
+ [(set (match_operand:V4SI 0 "register_operand" "=w")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "0")
+ (match_operand:V4SI 2 "register_operand" "w")
+ (match_operand:V4SI 3 "register_operand" "w")]
+ UNSPEC_SHA256SU1))]
+ "TARGET_SIMD && TARGET_CRYPTO"
+ "sha256su1\\t%0.4s, %2.4s, %3.4s"
+ [(set_attr "type" "crypto_sha256_slow")]
+)
+
+;; pmull
+
+(define_insn "aarch64_crypto_pmulldi"
+ [(set (match_operand:TI 0 "register_operand" "=w")
+ (unspec:TI [(match_operand:DI 1 "register_operand" "w")
+ (match_operand:DI 2 "register_operand" "w")]
+ UNSPEC_PMULL))]
+ "TARGET_SIMD && TARGET_CRYPTO"
+ "pmull\\t%0.1q, %1.1d, %2.1d"
+ [(set_attr "type" "neon_mul_d_long")]
+)
+
+(define_insn "aarch64_crypto_pmullv2di"
+ [(set (match_operand:TI 0 "register_operand" "=w")
+ (unspec:TI [(match_operand:V2DI 1 "register_operand" "w")
+ (match_operand:V2DI 2 "register_operand" "w")]
+ UNSPEC_PMULL2))]
+ "TARGET_SIMD && TARGET_CRYPTO"
+ "pmull2\\t%0.1q, %1.2d, %2.2d"
+ [(set_attr "type" "neon_mul_d_long")]
+)
diff --git a/gcc-4.9/gcc/config/aarch64/aarch64-tune.md b/gcc-4.9/gcc/config/aarch64/aarch64-tune.md
new file mode 100644
index 000000000..b7e40e0b5
--- /dev/null
+++ b/gcc-4.9/gcc/config/aarch64/aarch64-tune.md
@@ -0,0 +1,5 @@
+;; -*- buffer-read-only: t -*-
+;; Generated automatically by gentune.sh from aarch64-cores.def
+(define_attr "tune"
+ "cortexa53,cortexa15,cortexa57cortexa53"
+ (const (symbol_ref "((enum attr_tune) aarch64_tune)")))
diff --git a/gcc-4.9/gcc/config/aarch64/aarch64.c b/gcc-4.9/gcc/config/aarch64/aarch64.c
new file mode 100644
index 000000000..ebd58c007
--- /dev/null
+++ b/gcc-4.9/gcc/config/aarch64/aarch64.c
@@ -0,0 +1,8544 @@
+/* Machine description for AArch64 architecture.
+ Copyright (C) 2009-2014 Free Software Foundation, Inc.
+ Contributed by ARM Ltd.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "insn-codes.h"
+#include "rtl.h"
+#include "insn-attr.h"
+#include "tree.h"
+#include "stringpool.h"
+#include "stor-layout.h"
+#include "calls.h"
+#include "varasm.h"
+#include "regs.h"
+#include "df.h"
+#include "hard-reg-set.h"
+#include "output.h"
+#include "expr.h"
+#include "reload.h"
+#include "toplev.h"
+#include "target.h"
+#include "target-def.h"
+#include "targhooks.h"
+#include "ggc.h"
+#include "function.h"
+#include "tm_p.h"
+#include "recog.h"
+#include "langhooks.h"
+#include "diagnostic-core.h"
+#include "pointer-set.h"
+#include "hash-table.h"
+#include "vec.h"
+#include "basic-block.h"
+#include "tree-ssa-alias.h"
+#include "internal-fn.h"
+#include "gimple-fold.h"
+#include "tree-eh.h"
+#include "gimple-expr.h"
+#include "is-a.h"
+#include "gimple.h"
+#include "gimplify.h"
+#include "optabs.h"
+#include "dwarf2.h"
+#include "cfgloop.h"
+#include "tree-vectorizer.h"
+#include "config/arm/aarch-cost-tables.h"
+
+/* Defined for convenience. */
+#define POINTER_BYTES (POINTER_SIZE / BITS_PER_UNIT)
+
+/* Classifies an address.
+
+ ADDRESS_REG_IMM
+ A simple base register plus immediate offset.
+
+ ADDRESS_REG_WB
+ A base register indexed by immediate offset with writeback.
+
+ ADDRESS_REG_REG
+ A base register indexed by (optionally scaled) register.
+
+ ADDRESS_REG_UXTW
+ A base register indexed by (optionally scaled) zero-extended register.
+
+ ADDRESS_REG_SXTW
+ A base register indexed by (optionally scaled) sign-extended register.
+
+ ADDRESS_LO_SUM
+ A LO_SUM rtx with a base register and "LO12" symbol relocation.
+
+ ADDRESS_SYMBOLIC
+ A constant symbolic address, in pc-relative literal pool. */
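+/* For example, in AArch64 assembly syntax an operand such as [x0, #16] is
+ ADDRESS_REG_IMM, [x0, x1, lsl #3] is ADDRESS_REG_REG and [x0, w1, sxtw #2]
+ is ADDRESS_REG_SXTW. */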
+
+enum aarch64_address_type {
+ ADDRESS_REG_IMM,
+ ADDRESS_REG_WB,
+ ADDRESS_REG_REG,
+ ADDRESS_REG_UXTW,
+ ADDRESS_REG_SXTW,
+ ADDRESS_LO_SUM,
+ ADDRESS_SYMBOLIC
+};
+
+struct aarch64_address_info {
+ enum aarch64_address_type type;
+ rtx base;
+ rtx offset;
+ int shift;
+ enum aarch64_symbol_type symbol_type;
+};
+
+struct simd_immediate_info
+{
+ rtx value;
+ int shift;
+ int element_width;
+ bool mvn;
+ bool msl;
+};
+
+/* The current code model. */
+enum aarch64_code_model aarch64_cmodel;
+
+#ifdef HAVE_AS_TLS
+#undef TARGET_HAVE_TLS
+#define TARGET_HAVE_TLS 1
+#endif
+
+static bool aarch64_lra_p (void);
+static bool aarch64_composite_type_p (const_tree, enum machine_mode);
+static bool aarch64_vfp_is_call_or_return_candidate (enum machine_mode,
+ const_tree,
+ enum machine_mode *, int *,
+ bool *);
+static void aarch64_elf_asm_constructor (rtx, int) ATTRIBUTE_UNUSED;
+static void aarch64_elf_asm_destructor (rtx, int) ATTRIBUTE_UNUSED;
+static void aarch64_override_options_after_change (void);
+static bool aarch64_vector_mode_supported_p (enum machine_mode);
+static unsigned bit_count (unsigned HOST_WIDE_INT);
+static bool aarch64_const_vec_all_same_int_p (rtx,
+ HOST_WIDE_INT, HOST_WIDE_INT);
+
+static bool aarch64_vectorize_vec_perm_const_ok (enum machine_mode vmode,
+ const unsigned char *sel);
+
+/* The processor for which instructions should be scheduled. */
+enum aarch64_processor aarch64_tune = cortexa53;
+
+/* The current tuning set. */
+const struct tune_params *aarch64_tune_params;
+
+/* Mask to specify which instructions we are allowed to generate. */
+unsigned long aarch64_isa_flags = 0;
+
+/* Mask to specify which instruction scheduling options should be used. */
+unsigned long aarch64_tune_flags = 0;
+
+/* Tuning parameters. */
+
+#if HAVE_DESIGNATED_INITIALIZERS
+#define NAMED_PARAM(NAME, VAL) .NAME = (VAL)
+#else
+#define NAMED_PARAM(NAME, VAL) (VAL)
+#endif
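+/* For example, with designated initializers NAMED_PARAM (issue_rate, 2)
+ expands to ".issue_rate = 2"; otherwise it expands to plain "2", so the
+ initializers below must stay in field-declaration order. */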
+
+#if HAVE_DESIGNATED_INITIALIZERS && GCC_VERSION >= 2007
+__extension__
+#endif
+static const struct cpu_addrcost_table generic_addrcost_table =
+{
+ NAMED_PARAM (pre_modify, 0),
+ NAMED_PARAM (post_modify, 0),
+ NAMED_PARAM (register_offset, 0),
+ NAMED_PARAM (register_extend, 0),
+ NAMED_PARAM (imm_offset, 0)
+};
+
+#if HAVE_DESIGNATED_INITIALIZERS && GCC_VERSION >= 2007
+__extension__
+#endif
+static const struct cpu_regmove_cost generic_regmove_cost =
+{
+ NAMED_PARAM (GP2GP, 1),
+ NAMED_PARAM (GP2FP, 2),
+ NAMED_PARAM (FP2GP, 2),
+ /* We currently do not provide direct support for TFmode Q->Q move.
+ Therefore we need to raise the cost above 2 in order to have
+ reload handle the situation. */
+ NAMED_PARAM (FP2FP, 4)
+};
+
+/* Generic costs for vector insn classes. */
+#if HAVE_DESIGNATED_INITIALIZERS && GCC_VERSION >= 2007
+__extension__
+#endif
+static const struct cpu_vector_cost generic_vector_cost =
+{
+ NAMED_PARAM (scalar_stmt_cost, 1),
+ NAMED_PARAM (scalar_load_cost, 1),
+ NAMED_PARAM (scalar_store_cost, 1),
+ NAMED_PARAM (vec_stmt_cost, 1),
+ NAMED_PARAM (vec_to_scalar_cost, 1),
+ NAMED_PARAM (scalar_to_vec_cost, 1),
+ NAMED_PARAM (vec_align_load_cost, 1),
+ NAMED_PARAM (vec_unalign_load_cost, 1),
+ NAMED_PARAM (vec_unalign_store_cost, 1),
+ NAMED_PARAM (vec_store_cost, 1),
+ NAMED_PARAM (cond_taken_branch_cost, 3),
+ NAMED_PARAM (cond_not_taken_branch_cost, 1)
+};
+
+#if HAVE_DESIGNATED_INITIALIZERS && GCC_VERSION >= 2007
+__extension__
+#endif
+static const struct tune_params generic_tunings =
+{
+ &cortexa57_extra_costs,
+ &generic_addrcost_table,
+ &generic_regmove_cost,
+ &generic_vector_cost,
+ NAMED_PARAM (memmov_cost, 4),
+ NAMED_PARAM (issue_rate, 2)
+};
+
+static const struct tune_params cortexa53_tunings =
+{
+ &cortexa53_extra_costs,
+ &generic_addrcost_table,
+ &generic_regmove_cost,
+ &generic_vector_cost,
+ NAMED_PARAM (memmov_cost, 4),
+ NAMED_PARAM (issue_rate, 2)
+};
+
+static const struct tune_params cortexa57_tunings =
+{
+ &cortexa57_extra_costs,
+ &generic_addrcost_table,
+ &generic_regmove_cost,
+ &generic_vector_cost,
+ NAMED_PARAM (memmov_cost, 4),
+ NAMED_PARAM (issue_rate, 3)
+};
+
+/* A processor implementing AArch64. */
+struct processor
+{
+ const char *const name;
+ enum aarch64_processor core;
+ const char *arch;
+ const unsigned long flags;
+ const struct tune_params *const tune;
+};
+
+/* Processor cores implementing AArch64. */
+static const struct processor all_cores[] =
+{
+#define AARCH64_CORE(NAME, X, IDENT, ARCH, FLAGS, COSTS) \
+ {NAME, IDENT, #ARCH, FLAGS | AARCH64_FL_FOR_ARCH##ARCH, &COSTS##_tunings},
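+/* For illustration, a hypothetical entry
+ AARCH64_CORE ("cortex-a53", cortexa53, cortexa53, 8, FLAGS, cortexa53)
+ in aarch64-cores.def expands here to
+ {"cortex-a53", cortexa53, "8", FLAGS | AARCH64_FL_FOR_ARCH8,
+ &cortexa53_tunings}. */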
+#include "aarch64-cores.def"
+#undef AARCH64_CORE
+ {"generic", cortexa53, "8", AARCH64_FL_FPSIMD | AARCH64_FL_FOR_ARCH8, &generic_tunings},
+ {NULL, aarch64_none, NULL, 0, NULL}
+};
+
+/* Architectures implementing AArch64. */
+static const struct processor all_architectures[] =
+{
+#define AARCH64_ARCH(NAME, CORE, ARCH, FLAGS) \
+ {NAME, CORE, #ARCH, FLAGS, NULL},
+#include "aarch64-arches.def"
+#undef AARCH64_ARCH
+ {NULL, aarch64_none, NULL, 0, NULL}
+};
+
+/* Target specification. These are populated as command-line arguments
+ are processed, or NULL if not specified. */
+static const struct processor *selected_arch;
+static const struct processor *selected_cpu;
+static const struct processor *selected_tune;
+
+#define AARCH64_CPU_DEFAULT_FLAGS ((selected_cpu) ? selected_cpu->flags : 0)
+
+/* An ISA extension in the co-processor and main instruction set space. */
+struct aarch64_option_extension
+{
+ const char *const name;
+ const unsigned long flags_on;
+ const unsigned long flags_off;
+};
+
+/* ISA extensions in AArch64. */
+static const struct aarch64_option_extension all_extensions[] =
+{
+#define AARCH64_OPT_EXTENSION(NAME, FLAGS_ON, FLAGS_OFF) \
+ {NAME, FLAGS_ON, FLAGS_OFF},
+#include "aarch64-option-extensions.def"
+#undef AARCH64_OPT_EXTENSION
+ {NULL, 0, 0}
+};
+
+/* Used to track the size of an address when generating a pre/post
+ increment address. */
+static enum machine_mode aarch64_memory_reference_mode;
+
+/* Used to force GTY into this file. */
+static GTY(()) int gty_dummy;
+
+/* A table of valid AArch64 "bitmask immediate" values for
+ logical instructions. */
+
+#define AARCH64_NUM_BITMASKS 5334
+static unsigned HOST_WIDE_INT aarch64_bitmasks[AARCH64_NUM_BITMASKS];
+
+/* Did we set flag_omit_frame_pointer just so
+ aarch64_frame_pointer_required would be called? */
+static bool faked_omit_frame_pointer;
+
+typedef enum aarch64_cond_code
+{
+ AARCH64_EQ = 0, AARCH64_NE, AARCH64_CS, AARCH64_CC, AARCH64_MI, AARCH64_PL,
+ AARCH64_VS, AARCH64_VC, AARCH64_HI, AARCH64_LS, AARCH64_GE, AARCH64_LT,
+ AARCH64_GT, AARCH64_LE, AARCH64_AL, AARCH64_NV
+}
+aarch64_cc;
+
+#define AARCH64_INVERSE_CONDITION_CODE(X) ((aarch64_cc) (((int) X) ^ 1))
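+/* For example, AARCH64_EQ (0) and AARCH64_NE (1) are inverses, as are
+ AARCH64_GE (10) and AARCH64_LT (11): each condition is paired with its
+ inverse in adjacent encodings, so flipping bit 0 inverts the condition. */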
+
+/* The condition codes of the processor, and the inverse function. */
+static const char * const aarch64_condition_codes[] =
+{
+ "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
+ "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"
+};
+
+/* Provide a mapping from gcc register numbers to dwarf register numbers. */
+unsigned
+aarch64_dbx_register_number (unsigned regno)
+{
+ if (GP_REGNUM_P (regno))
+ return AARCH64_DWARF_R0 + regno - R0_REGNUM;
+ else if (regno == SP_REGNUM)
+ return AARCH64_DWARF_SP;
+ else if (FP_REGNUM_P (regno))
+ return AARCH64_DWARF_V0 + regno - V0_REGNUM;
+
+ /* Return values >= DWARF_FRAME_REGISTERS indicate that there is no
+ equivalent DWARF register. */
+ return DWARF_FRAME_REGISTERS;
+}
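+
+/* For example, with the DWARF numbering defined in aarch64.h
+ (AARCH64_DWARF_R0 == 0, AARCH64_DWARF_SP == 31, AARCH64_DWARF_V0 == 64),
+ x5 maps to 5, sp to 31 and v3 to 67; registers with no DWARF
+ equivalent, such as CC, map to DWARF_FRAME_REGISTERS. */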
+
+/* Return TRUE if MODE is any of the large INT modes. */
+static bool
+aarch64_vect_struct_mode_p (enum machine_mode mode)
+{
+ return mode == OImode || mode == CImode || mode == XImode;
+}
+
+/* Return TRUE if MODE is any of the vector modes. */
+static bool
+aarch64_vector_mode_p (enum machine_mode mode)
+{
+ return aarch64_vector_mode_supported_p (mode)
+ || aarch64_vect_struct_mode_p (mode);
+}
+
+/* Implement target hook TARGET_ARRAY_MODE_SUPPORTED_P. */
+static bool
+aarch64_array_mode_supported_p (enum machine_mode mode,
+ unsigned HOST_WIDE_INT nelems)
+{
+ if (TARGET_SIMD
+ && AARCH64_VALID_SIMD_QREG_MODE (mode)
+ && (nelems >= 2 && nelems <= 4))
+ return true;
+
+ return false;
+}
+
+/* Implement HARD_REGNO_NREGS. */
+
+int
+aarch64_hard_regno_nregs (unsigned regno, enum machine_mode mode)
+{
+ switch (aarch64_regno_regclass (regno))
+ {
+ case FP_REGS:
+ case FP_LO_REGS:
+ return (GET_MODE_SIZE (mode) + UNITS_PER_VREG - 1) / UNITS_PER_VREG;
+ default:
+ return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
+ }
+ gcc_unreachable ();
+}
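+
+/* For instance, a 128-bit TImode value occupies two X registers
+ (UNITS_PER_WORD is 8 bytes) but only one V register (UNITS_PER_VREG
+ is 16 bytes), while the 64-byte XImode structure mode needs four V
+ registers. */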
+
+/* Implement HARD_REGNO_MODE_OK. */
+
+int
+aarch64_hard_regno_mode_ok (unsigned regno, enum machine_mode mode)
+{
+ if (GET_MODE_CLASS (mode) == MODE_CC)
+ return regno == CC_REGNUM;
+
+ if (regno == SP_REGNUM)
+ /* The purpose of comparing with ptr_mode is to support the
+ global register variable associated with the stack pointer
+ register via the syntax of asm ("wsp") in ILP32. */
+ return mode == Pmode || mode == ptr_mode;
+
+ if (regno == FRAME_POINTER_REGNUM || regno == ARG_POINTER_REGNUM)
+ return mode == Pmode;
+
+ if (GP_REGNUM_P (regno) && ! aarch64_vect_struct_mode_p (mode))
+ return 1;
+
+ if (FP_REGNUM_P (regno))
+ {
+ if (aarch64_vect_struct_mode_p (mode))
+ return
+ (regno + aarch64_hard_regno_nregs (regno, mode) - 1) <= V31_REGNUM;
+ else
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Return true if calls to DECL should be treated as
+ long-calls (ie called via a register). */
+static bool
+aarch64_decl_is_long_call_p (const_tree decl ATTRIBUTE_UNUSED)
+{
+ return false;
+}
+
+/* Return true if calls to symbol-ref SYM should be treated as
+ long-calls (ie called via a register). */
+bool
+aarch64_is_long_call_p (rtx sym)
+{
+ return aarch64_decl_is_long_call_p (SYMBOL_REF_DECL (sym));
+}
+
+/* Return true if the offsets to a zero/sign-extract operation
+ represent an expression that matches an extend operation. The
+ operands represent the parameters from
+
+ (extract (mult (reg) (mult_imm)) (extract_imm) (const_int 0)). */
+bool
+aarch64_is_extend_from_extract (enum machine_mode mode, rtx mult_imm,
+ rtx extract_imm)
+{
+ HOST_WIDE_INT mult_val, extract_val;
+
+ if (! CONST_INT_P (mult_imm) || ! CONST_INT_P (extract_imm))
+ return false;
+
+ mult_val = INTVAL (mult_imm);
+ extract_val = INTVAL (extract_imm);
+
+ if (extract_val > 8
+ && extract_val < GET_MODE_BITSIZE (mode)
+ && exact_log2 (extract_val & ~7) > 0
+ && (extract_val & 7) <= 4
+ && mult_val == (1 << (extract_val & 7)))
+ return true;
+
+ return false;
+}
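+
+/* As an example of the test above, in DImode a MULT_IMM of 4 together
+ with an EXTRACT_IMM of 34 is accepted: 34 & ~7 == 32 selects a 32-bit
+ extract, 34 & 7 == 2 gives the shift, and the multiplier 4 equals
+ 1 << 2. This is the kind of form combine produces when it rewrites
+ an extend-then-shift operand. */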
+
+/* Emit an insn that's a simple single-set. Both the operands must be
+ known to be valid. */
+inline static rtx
+emit_set_insn (rtx x, rtx y)
+{
+ return emit_insn (gen_rtx_SET (VOIDmode, x, y));
+}
+
+/* X and Y are two things to compare using CODE. Emit the compare insn and
+ return the rtx for register 0 in the proper mode. */
+rtx
+aarch64_gen_compare_reg (RTX_CODE code, rtx x, rtx y)
+{
+ enum machine_mode mode = SELECT_CC_MODE (code, x, y);
+ rtx cc_reg = gen_rtx_REG (mode, CC_REGNUM);
+
+ emit_set_insn (cc_reg, gen_rtx_COMPARE (mode, x, y));
+ return cc_reg;
+}
+
+/* Build the SYMBOL_REF for __tls_get_addr. */
+
+static GTY(()) rtx tls_get_addr_libfunc;
+
+rtx
+aarch64_tls_get_addr (void)
+{
+ if (!tls_get_addr_libfunc)
+ tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
+ return tls_get_addr_libfunc;
+}
+
+/* Return the TLS model to use for ADDR. */
+
+static enum tls_model
+tls_symbolic_operand_type (rtx addr)
+{
+ enum tls_model tls_kind = TLS_MODEL_NONE;
+ rtx sym, addend;
+
+ if (GET_CODE (addr) == CONST)
+ {
+ split_const (addr, &sym, &addend);
+ if (GET_CODE (sym) == SYMBOL_REF)
+ tls_kind = SYMBOL_REF_TLS_MODEL (sym);
+ }
+ else if (GET_CODE (addr) == SYMBOL_REF)
+ tls_kind = SYMBOL_REF_TLS_MODEL (addr);
+
+ return tls_kind;
+}
+
+/* We allow lo_sum expressions in our legitimate addresses so that
+ combine can take care of combining addresses where necessary, but
+ for generation purposes we generate the address as:
+ RTL Absolute
+ tmp = hi (symbol_ref); adrp x1, foo
+ dest = lo_sum (tmp, symbol_ref); add dest, x1, :lo_12:foo
+ nop
+
+ PIC TLS
+ adrp x1, :got:foo adrp tmp, :tlsgd:foo
+ ldr x1, [:got_lo12:foo] add dest, tmp, :tlsgd_lo12:foo
+ bl __tls_get_addr
+ nop
+
+ Load TLS symbol, depending on TLS mechanism and TLS access model.
+
+ Global Dynamic - Traditional TLS:
+ adrp tmp, :tlsgd:imm
+ add dest, tmp, #:tlsgd_lo12:imm
+ bl __tls_get_addr
+
+ Global Dynamic - TLS Descriptors:
+ adrp dest, :tlsdesc:imm
+ ldr tmp, [dest, #:tlsdesc_lo12:imm]
+ add dest, dest, #:tlsdesc_lo12:imm
+ blr tmp
+ mrs tp, tpidr_el0
+ add dest, dest, tp
+
+ Initial Exec:
+ mrs tp, tpidr_el0
+ adrp tmp, :gottprel:imm
+ ldr dest, [tmp, #:gottprel_lo12:imm]
+ add dest, dest, tp
+
+ Local Exec:
+ mrs tp, tpidr_el0
+ add t0, tp, #:tprel_hi12:imm
+ add t0, #:tprel_lo12_nc:imm
+*/
+
+static void
+aarch64_load_symref_appropriately (rtx dest, rtx imm,
+ enum aarch64_symbol_type type)
+{
+ switch (type)
+ {
+ case SYMBOL_SMALL_ABSOLUTE:
+ {
+ /* In ILP32, the mode of dest can be either SImode or DImode. */
+ rtx tmp_reg = dest;
+ enum machine_mode mode = GET_MODE (dest);
+
+ gcc_assert (mode == Pmode || mode == ptr_mode);
+
+ if (can_create_pseudo_p ())
+ tmp_reg = gen_reg_rtx (mode);
+
+ emit_move_insn (tmp_reg, gen_rtx_HIGH (mode, imm));
+ emit_insn (gen_add_losym (dest, tmp_reg, imm));
+ return;
+ }
+
+ case SYMBOL_TINY_ABSOLUTE:
+ emit_insn (gen_rtx_SET (Pmode, dest, imm));
+ return;
+
+ case SYMBOL_SMALL_GOT:
+ {
+ /* In ILP32, the mode of dest can be either SImode or DImode,
+ while the got entry is always of SImode size. The mode of
+ dest depends on how dest is used: if dest is assigned to a
+ pointer (e.g. stored to memory), it has SImode; it may have
+ DImode if dest is dereferenced to access memory.
+ This is why we have to handle three different ldr_got_small
+ patterns here (two patterns for ILP32). */
+ rtx tmp_reg = dest;
+ enum machine_mode mode = GET_MODE (dest);
+
+ if (can_create_pseudo_p ())
+ tmp_reg = gen_reg_rtx (mode);
+
+ emit_move_insn (tmp_reg, gen_rtx_HIGH (mode, imm));
+ if (mode == ptr_mode)
+ {
+ if (mode == DImode)
+ emit_insn (gen_ldr_got_small_di (dest, tmp_reg, imm));
+ else
+ emit_insn (gen_ldr_got_small_si (dest, tmp_reg, imm));
+ }
+ else
+ {
+ gcc_assert (mode == Pmode);
+ emit_insn (gen_ldr_got_small_sidi (dest, tmp_reg, imm));
+ }
+
+ return;
+ }
+
+ case SYMBOL_SMALL_TLSGD:
+ {
+ rtx insns;
+ rtx result = gen_rtx_REG (Pmode, R0_REGNUM);
+
+ start_sequence ();
+ emit_call_insn (gen_tlsgd_small (result, imm));
+ insns = get_insns ();
+ end_sequence ();
+
+ RTL_CONST_CALL_P (insns) = 1;
+ emit_libcall_block (insns, dest, result, imm);
+ return;
+ }
+
+ case SYMBOL_SMALL_TLSDESC:
+ {
+ rtx x0 = gen_rtx_REG (Pmode, R0_REGNUM);
+ rtx tp;
+
+ emit_insn (gen_tlsdesc_small (imm));
+ tp = aarch64_load_tp (NULL);
+ emit_insn (gen_rtx_SET (Pmode, dest, gen_rtx_PLUS (Pmode, tp, x0)));
+ set_unique_reg_note (get_last_insn (), REG_EQUIV, imm);
+ return;
+ }
+
+ case SYMBOL_SMALL_GOTTPREL:
+ {
+ rtx tmp_reg = gen_reg_rtx (Pmode);
+ rtx tp = aarch64_load_tp (NULL);
+ emit_insn (gen_tlsie_small (tmp_reg, imm));
+ emit_insn (gen_rtx_SET (Pmode, dest, gen_rtx_PLUS (Pmode, tp, tmp_reg)));
+ set_unique_reg_note (get_last_insn (), REG_EQUIV, imm);
+ return;
+ }
+
+ case SYMBOL_SMALL_TPREL:
+ {
+ rtx tp = aarch64_load_tp (NULL);
+ emit_insn (gen_tlsle_small (dest, tp, imm));
+ set_unique_reg_note (get_last_insn (), REG_EQUIV, imm);
+ return;
+ }
+
+ case SYMBOL_TINY_GOT:
+ emit_insn (gen_ldr_got_tiny (dest, imm));
+ return;
+
+ default:
+ gcc_unreachable ();
+ }
+}
+
+/* Emit a move from SRC to DEST. Assume that the move expanders can
+ handle all moves if !can_create_pseudo_p (). The distinction is
+ important because, unlike emit_move_insn, the move expanders know
+ how to force Pmode objects into the constant pool even when the
+ constant pool address is not itself legitimate. */
+static rtx
+aarch64_emit_move (rtx dest, rtx src)
+{
+ return (can_create_pseudo_p ()
+ ? emit_move_insn (dest, src)
+ : emit_move_insn_1 (dest, src));
+}
+
+/* Split a 128-bit move operation into two 64-bit move operations,
+ taking care to handle partial overlap of register to register
+ copies. Special cases are needed when moving between GP regs and
+ FP regs. SRC can be a register, constant or memory; DST a register
+ or memory. If either operand is memory it must not have any side
+ effects. */
+void
+aarch64_split_128bit_move (rtx dst, rtx src)
+{
+ rtx dst_lo, dst_hi;
+ rtx src_lo, src_hi;
+
+ enum machine_mode mode = GET_MODE (dst);
+
+ gcc_assert (mode == TImode || mode == TFmode);
+ gcc_assert (!(side_effects_p (src) || side_effects_p (dst)));
+ gcc_assert (mode == GET_MODE (src) || GET_MODE (src) == VOIDmode);
+
+ if (REG_P (dst) && REG_P (src))
+ {
+ int src_regno = REGNO (src);
+ int dst_regno = REGNO (dst);
+
+ /* Handle FP <-> GP regs. */
+ if (FP_REGNUM_P (dst_regno) && GP_REGNUM_P (src_regno))
+ {
+ src_lo = gen_lowpart (word_mode, src);
+ src_hi = gen_highpart (word_mode, src);
+
+ if (mode == TImode)
+ {
+ emit_insn (gen_aarch64_movtilow_di (dst, src_lo));
+ emit_insn (gen_aarch64_movtihigh_di (dst, src_hi));
+ }
+ else
+ {
+ emit_insn (gen_aarch64_movtflow_di (dst, src_lo));
+ emit_insn (gen_aarch64_movtfhigh_di (dst, src_hi));
+ }
+ return;
+ }
+ else if (GP_REGNUM_P (dst_regno) && FP_REGNUM_P (src_regno))
+ {
+ dst_lo = gen_lowpart (word_mode, dst);
+ dst_hi = gen_highpart (word_mode, dst);
+
+ if (mode == TImode)
+ {
+ emit_insn (gen_aarch64_movdi_tilow (dst_lo, src));
+ emit_insn (gen_aarch64_movdi_tihigh (dst_hi, src));
+ }
+ else
+ {
+ emit_insn (gen_aarch64_movdi_tflow (dst_lo, src));
+ emit_insn (gen_aarch64_movdi_tfhigh (dst_hi, src));
+ }
+ return;
+ }
+ }
+
+ dst_lo = gen_lowpart (word_mode, dst);
+ dst_hi = gen_highpart (word_mode, dst);
+ src_lo = gen_lowpart (word_mode, src);
+ src_hi = gen_highpart_mode (word_mode, mode, src);
+
+ /* At most one pairing may overlap. */
+ if (reg_overlap_mentioned_p (dst_lo, src_hi))
+ {
+ aarch64_emit_move (dst_hi, src_hi);
+ aarch64_emit_move (dst_lo, src_lo);
+ }
+ else
+ {
+ aarch64_emit_move (dst_lo, src_lo);
+ aarch64_emit_move (dst_hi, src_hi);
+ }
+}
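+
+/* For example, a TImode copy from {x4, x5} into {x2, x3} becomes two
+ DImode moves; if the halves overlap (say a copy into {x5, x6}), the
+ check above copies the high half first so the overlapping input is
+ read before it is overwritten. */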
+
+bool
+aarch64_split_128bit_move_p (rtx dst, rtx src)
+{
+ return (! REG_P (src)
+ || ! (FP_REGNUM_P (REGNO (dst)) && FP_REGNUM_P (REGNO (src))));
+}
+
+/* Split a complex SIMD combine. */
+
+void
+aarch64_split_simd_combine (rtx dst, rtx src1, rtx src2)
+{
+ enum machine_mode src_mode = GET_MODE (src1);
+ enum machine_mode dst_mode = GET_MODE (dst);
+
+ gcc_assert (VECTOR_MODE_P (dst_mode));
+
+ if (REG_P (dst) && REG_P (src1) && REG_P (src2))
+ {
+ rtx (*gen) (rtx, rtx, rtx);
+
+ switch (src_mode)
+ {
+ case V8QImode:
+ gen = gen_aarch64_simd_combinev8qi;
+ break;
+ case V4HImode:
+ gen = gen_aarch64_simd_combinev4hi;
+ break;
+ case V2SImode:
+ gen = gen_aarch64_simd_combinev2si;
+ break;
+ case V2SFmode:
+ gen = gen_aarch64_simd_combinev2sf;
+ break;
+ case DImode:
+ gen = gen_aarch64_simd_combinedi;
+ break;
+ case DFmode:
+ gen = gen_aarch64_simd_combinedf;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+
+ emit_insn (gen (dst, src1, src2));
+ return;
+ }
+}
+
+/* Split a complex SIMD move. */
+
+void
+aarch64_split_simd_move (rtx dst, rtx src)
+{
+ enum machine_mode src_mode = GET_MODE (src);
+ enum machine_mode dst_mode = GET_MODE (dst);
+
+ gcc_assert (VECTOR_MODE_P (dst_mode));
+
+ if (REG_P (dst) && REG_P (src))
+ {
+ rtx (*gen) (rtx, rtx);
+
+ gcc_assert (VECTOR_MODE_P (src_mode));
+
+ switch (src_mode)
+ {
+ case V16QImode:
+ gen = gen_aarch64_split_simd_movv16qi;
+ break;
+ case V8HImode:
+ gen = gen_aarch64_split_simd_movv8hi;
+ break;
+ case V4SImode:
+ gen = gen_aarch64_split_simd_movv4si;
+ break;
+ case V2DImode:
+ gen = gen_aarch64_split_simd_movv2di;
+ break;
+ case V4SFmode:
+ gen = gen_aarch64_split_simd_movv4sf;
+ break;
+ case V2DFmode:
+ gen = gen_aarch64_split_simd_movv2df;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+
+ emit_insn (gen (dst, src));
+ return;
+ }
+}
+
+static rtx
+aarch64_force_temporary (enum machine_mode mode, rtx x, rtx value)
+{
+ if (can_create_pseudo_p ())
+ return force_reg (mode, value);
+ else
+ {
+ x = aarch64_emit_move (x, value);
+ return x;
+ }
+}
+
+
+static rtx
+aarch64_add_offset (enum machine_mode mode, rtx temp, rtx reg, HOST_WIDE_INT offset)
+{
+ if (!aarch64_plus_immediate (GEN_INT (offset), mode))
+ {
+ rtx high;
+ /* Load the full offset into a register. This
+ might be improvable in the future. */
+ high = GEN_INT (offset);
+ offset = 0;
+ high = aarch64_force_temporary (mode, temp, high);
+ reg = aarch64_force_temporary (mode, temp,
+ gen_rtx_PLUS (mode, high, reg));
+ }
+ return plus_constant (mode, reg, offset);
+}
+
+void
+aarch64_expand_mov_immediate (rtx dest, rtx imm)
+{
+ enum machine_mode mode = GET_MODE (dest);
+ unsigned HOST_WIDE_INT mask;
+ int i;
+ bool first;
+ unsigned HOST_WIDE_INT val;
+ bool subtargets;
+ rtx subtarget;
+ int one_match, zero_match;
+
+ gcc_assert (mode == SImode || mode == DImode);
+
+ /* Check on what type of symbol it is. */
+ if (GET_CODE (imm) == SYMBOL_REF
+ || GET_CODE (imm) == LABEL_REF
+ || GET_CODE (imm) == CONST)
+ {
+ rtx mem, base, offset;
+ enum aarch64_symbol_type sty;
+
+ /* If we have (const (plus symbol offset)), separate out the offset
+ before we start classifying the symbol. */
+ split_const (imm, &base, &offset);
+
+ sty = aarch64_classify_symbol (base, SYMBOL_CONTEXT_ADR);
+ switch (sty)
+ {
+ case SYMBOL_FORCE_TO_MEM:
+ if (offset != const0_rtx
+ && targetm.cannot_force_const_mem (mode, imm))
+ {
+ gcc_assert (can_create_pseudo_p ());
+ base = aarch64_force_temporary (mode, dest, base);
+ base = aarch64_add_offset (mode, NULL, base, INTVAL (offset));
+ aarch64_emit_move (dest, base);
+ return;
+ }
+ mem = force_const_mem (ptr_mode, imm);
+ gcc_assert (mem);
+ if (mode != ptr_mode)
+ mem = gen_rtx_ZERO_EXTEND (mode, mem);
+ emit_insn (gen_rtx_SET (VOIDmode, dest, mem));
+ return;
+
+ case SYMBOL_SMALL_TLSGD:
+ case SYMBOL_SMALL_TLSDESC:
+ case SYMBOL_SMALL_GOTTPREL:
+ case SYMBOL_SMALL_GOT:
+ case SYMBOL_TINY_GOT:
+ if (offset != const0_rtx)
+ {
+ gcc_assert(can_create_pseudo_p ());
+ base = aarch64_force_temporary (mode, dest, base);
+ base = aarch64_add_offset (mode, NULL, base, INTVAL (offset));
+ aarch64_emit_move (dest, base);
+ return;
+ }
+ /* FALLTHRU */
+
+ case SYMBOL_SMALL_TPREL:
+ case SYMBOL_SMALL_ABSOLUTE:
+ case SYMBOL_TINY_ABSOLUTE:
+ aarch64_load_symref_appropriately (dest, imm, sty);
+ return;
+
+ default:
+ gcc_unreachable ();
+ }
+ }
+
+ if (CONST_INT_P (imm) && aarch64_move_imm (INTVAL (imm), mode))
+ {
+ emit_insn (gen_rtx_SET (VOIDmode, dest, imm));
+ return;
+ }
+
+ if (!CONST_INT_P (imm))
+ {
+ if (GET_CODE (imm) == HIGH)
+ emit_insn (gen_rtx_SET (VOIDmode, dest, imm));
+ else
+ {
+ rtx mem = force_const_mem (mode, imm);
+ gcc_assert (mem);
+ emit_insn (gen_rtx_SET (VOIDmode, dest, mem));
+ }
+
+ return;
+ }
+
+ if (mode == SImode)
+ {
+ /* We know we can't do this in 1 insn, and we must be able to do it
+ in two; so don't mess around looking for sequences that don't buy
+ us anything. */
+ emit_insn (gen_rtx_SET (VOIDmode, dest, GEN_INT (INTVAL (imm) & 0xffff)));
+ emit_insn (gen_insv_immsi (dest, GEN_INT (16),
+ GEN_INT ((INTVAL (imm) >> 16) & 0xffff)));
+ return;
+ }
+
+ /* Remaining cases are all for DImode. */
+
+ val = INTVAL (imm);
+ subtargets = optimize && can_create_pseudo_p ();
+
+ one_match = 0;
+ zero_match = 0;
+ mask = 0xffff;
+
+ for (i = 0; i < 64; i += 16, mask <<= 16)
+ {
+ if ((val & mask) == 0)
+ zero_match++;
+ else if ((val & mask) == mask)
+ one_match++;
+ }
+
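+ /* For example, 0xffff00001234ffff gives one_match == 2 and is handled
+ below as a MOVN-class move of 0xffff0000ffffffff followed by one
+ MOVK inserting 0x1234 at bit 16, while 0x0000123400005678 gives
+ zero_match == 2 and jumps to the MOVZ/MOVK code at simple_sequence. */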
+ if (one_match == 2)
+ {
+ mask = 0xffff;
+ for (i = 0; i < 64; i += 16, mask <<= 16)
+ {
+ if ((val & mask) != mask)
+ {
+ emit_insn (gen_rtx_SET (VOIDmode, dest, GEN_INT (val | mask)));
+ emit_insn (gen_insv_immdi (dest, GEN_INT (i),
+ GEN_INT ((val >> i) & 0xffff)));
+ return;
+ }
+ }
+ gcc_unreachable ();
+ }
+
+ if (zero_match == 2)
+ goto simple_sequence;
+
+ mask = 0x0ffff0000UL;
+ for (i = 16; i < 64; i += 16, mask <<= 16)
+ {
+ HOST_WIDE_INT comp = mask & ~(mask - 1);
+
+ if (aarch64_uimm12_shift (val - (val & mask)))
+ {
+ subtarget = subtargets ? gen_reg_rtx (DImode) : dest;
+
+ emit_insn (gen_rtx_SET (VOIDmode, subtarget, GEN_INT (val & mask)));
+ emit_insn (gen_adddi3 (dest, subtarget,
+ GEN_INT (val - (val & mask))));
+ return;
+ }
+ else if (aarch64_uimm12_shift (-(val - ((val + comp) & mask))))
+ {
+ subtarget = subtargets ? gen_reg_rtx (DImode) : dest;
+
+ emit_insn (gen_rtx_SET (VOIDmode, subtarget,
+ GEN_INT ((val + comp) & mask)));
+ emit_insn (gen_adddi3 (dest, subtarget,
+ GEN_INT (val - ((val + comp) & mask))));
+ return;
+ }
+ else if (aarch64_uimm12_shift (val - ((val - comp) | ~mask)))
+ {
+ subtarget = subtargets ? gen_reg_rtx (DImode) : dest;
+
+ emit_insn (gen_rtx_SET (VOIDmode, subtarget,
+ GEN_INT ((val - comp) | ~mask)));
+ emit_insn (gen_adddi3 (dest, subtarget,
+ GEN_INT (val - ((val - comp) | ~mask))));
+ return;
+ }
+ else if (aarch64_uimm12_shift (-(val - (val | ~mask))))
+ {
+ subtarget = subtargets ? gen_reg_rtx (DImode) : dest;
+
+ emit_insn (gen_rtx_SET (VOIDmode, subtarget,
+ GEN_INT (val | ~mask)));
+ emit_insn (gen_adddi3 (dest, subtarget,
+ GEN_INT (val - (val | ~mask))));
+ return;
+ }
+ }
+
+ /* See if we can do it by arithmetically combining two
+ immediates. */
+ for (i = 0; i < AARCH64_NUM_BITMASKS; i++)
+ {
+ int j;
+ mask = 0xffff;
+
+ if (aarch64_uimm12_shift (val - aarch64_bitmasks[i])
+ || aarch64_uimm12_shift (-val + aarch64_bitmasks[i]))
+ {
+ subtarget = subtargets ? gen_reg_rtx (DImode) : dest;
+ emit_insn (gen_rtx_SET (VOIDmode, subtarget,
+ GEN_INT (aarch64_bitmasks[i])));
+ emit_insn (gen_adddi3 (dest, subtarget,
+ GEN_INT (val - aarch64_bitmasks[i])));
+ return;
+ }
+
+ for (j = 0; j < 64; j += 16, mask <<= 16)
+ {
+ if ((aarch64_bitmasks[i] & ~mask) == (val & ~mask))
+ {
+ emit_insn (gen_rtx_SET (VOIDmode, dest,
+ GEN_INT (aarch64_bitmasks[i])));
+ emit_insn (gen_insv_immdi (dest, GEN_INT (j),
+ GEN_INT ((val >> j) & 0xffff)));
+ return;
+ }
+ }
+ }
+
+ /* See if we can do it by logically combining two immediates. */
+ for (i = 0; i < AARCH64_NUM_BITMASKS; i++)
+ {
+ if ((aarch64_bitmasks[i] & val) == aarch64_bitmasks[i])
+ {
+ int j;
+
+ for (j = i + 1; j < AARCH64_NUM_BITMASKS; j++)
+ if (val == (aarch64_bitmasks[i] | aarch64_bitmasks[j]))
+ {
+ subtarget = subtargets ? gen_reg_rtx (mode) : dest;
+ emit_insn (gen_rtx_SET (VOIDmode, subtarget,
+ GEN_INT (aarch64_bitmasks[i])));
+ emit_insn (gen_iordi3 (dest, subtarget,
+ GEN_INT (aarch64_bitmasks[j])));
+ return;
+ }
+ }
+ else if ((val & aarch64_bitmasks[i]) == val)
+ {
+ int j;
+
+ for (j = i + 1; j < AARCH64_NUM_BITMASKS; j++)
+ if (val == (aarch64_bitmasks[j] & aarch64_bitmasks[i]))
+ {
+
+ subtarget = subtargets ? gen_reg_rtx (mode) : dest;
+ emit_insn (gen_rtx_SET (VOIDmode, subtarget,
+ GEN_INT (aarch64_bitmasks[j])));
+ emit_insn (gen_anddi3 (dest, subtarget,
+ GEN_INT (aarch64_bitmasks[i])));
+ return;
+ }
+ }
+ }
+
+ simple_sequence:
+ first = true;
+ mask = 0xffff;
+ for (i = 0; i < 64; i += 16, mask <<= 16)
+ {
+ if ((val & mask) != 0)
+ {
+ if (first)
+ {
+ emit_insn (gen_rtx_SET (VOIDmode, dest,
+ GEN_INT (val & mask)));
+ first = false;
+ }
+ else
+ emit_insn (gen_insv_immdi (dest, GEN_INT (i),
+ GEN_INT ((val >> i) & 0xffff)));
+ }
+ }
+}
+
+static bool
+aarch64_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
+{
+ /* Indirect calls are not currently supported. */
+ if (decl == NULL)
+ return false;
+
+ /* Cannot tail-call to long-calls, since these are outside of the
+ range of a branch instruction (we could handle this if we added
+ support for indirect tail-calls). */
+ if (aarch64_decl_is_long_call_p (decl))
+ return false;
+
+ return true;
+}
+
+/* Implement TARGET_PASS_BY_REFERENCE. */
+
+static bool
+aarch64_pass_by_reference (cumulative_args_t pcum ATTRIBUTE_UNUSED,
+ enum machine_mode mode,
+ const_tree type,
+ bool named ATTRIBUTE_UNUSED)
+{
+ HOST_WIDE_INT size;
+ enum machine_mode dummymode;
+ int nregs;
+
+ /* GET_MODE_SIZE (BLKmode) is useless since it is 0. */
+ size = (mode == BLKmode && type)
+ ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
+
+ /* Aggregates are passed by reference based on their size. */
+ if (type && AGGREGATE_TYPE_P (type))
+ {
+ size = int_size_in_bytes (type);
+ }
+
+ /* Variable-sized arguments are always passed by reference. */
+ if (size < 0)
+ return true;
+
+ /* Can this be a candidate to be passed in fp/simd register(s)? */
+ if (aarch64_vfp_is_call_or_return_candidate (mode, type,
+ &dummymode, &nregs,
+ NULL))
+ return false;
+
+ /* Arguments which are variable sized or larger than 2 registers are
+ passed by reference unless they are a homogeneous floating-point
+ aggregate. */
+ return size > 2 * UNITS_PER_WORD;
+}
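+
+/* For instance, a 24-byte structure of three doubles is an HFA, so the
+ candidate check above keeps it out of this path and it is passed by
+ value in SIMD registers, whereas a 24-byte structure of three
+ pointers exceeds 2 * UNITS_PER_WORD and is passed by reference. */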
+
+/* Return TRUE if VALTYPE is padded to its least significant bits. */
+static bool
+aarch64_return_in_msb (const_tree valtype)
+{
+ enum machine_mode dummy_mode;
+ int dummy_int;
+
+ /* Never happens in little-endian mode. */
+ if (!BYTES_BIG_ENDIAN)
+ return false;
+
+ /* Only composite types of at most 16 bytes can potentially be
+ returned in registers. */
+ if (!aarch64_composite_type_p (valtype, TYPE_MODE (valtype))
+ || int_size_in_bytes (valtype) <= 0
+ || int_size_in_bytes (valtype) > 16)
+ return false;
+
+ /* But not a composite that is an HFA (Homogeneous Floating-point Aggregate)
+ or an HVA (Homogeneous Short-Vector Aggregate); such a special composite
+ is always passed/returned in the least significant bits of fp/simd
+ register(s). */
+ if (aarch64_vfp_is_call_or_return_candidate (TYPE_MODE (valtype), valtype,
+ &dummy_mode, &dummy_int, NULL))
+ return false;
+
+ return true;
+}
+
+/* Implement TARGET_FUNCTION_VALUE.
+ Define how to find the value returned by a function. */
+
+static rtx
+aarch64_function_value (const_tree type, const_tree func,
+ bool outgoing ATTRIBUTE_UNUSED)
+{
+ enum machine_mode mode;
+ int unsignedp;
+ int count;
+ enum machine_mode ag_mode;
+
+ mode = TYPE_MODE (type);
+ if (INTEGRAL_TYPE_P (type))
+ mode = promote_function_mode (type, mode, &unsignedp, func, 1);
+
+ if (aarch64_return_in_msb (type))
+ {
+ HOST_WIDE_INT size = int_size_in_bytes (type);
+
+ if (size % UNITS_PER_WORD != 0)
+ {
+ size += UNITS_PER_WORD - size % UNITS_PER_WORD;
+ mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
+ }
+ }
+
+ if (aarch64_vfp_is_call_or_return_candidate (mode, type,
+ &ag_mode, &count, NULL))
+ {
+ if (!aarch64_composite_type_p (type, mode))
+ {
+ gcc_assert (count == 1 && mode == ag_mode);
+ return gen_rtx_REG (mode, V0_REGNUM);
+ }
+ else
+ {
+ int i;
+ rtx par;
+
+ par = gen_rtx_PARALLEL (mode, rtvec_alloc (count));
+ for (i = 0; i < count; i++)
+ {
+ rtx tmp = gen_rtx_REG (ag_mode, V0_REGNUM + i);
+ tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp,
+ GEN_INT (i * GET_MODE_SIZE (ag_mode)));
+ XVECEXP (par, 0, i) = tmp;
+ }
+ return par;
+ }
+ }
+ else
+ return gen_rtx_REG (mode, R0_REGNUM);
+}
+
+/* Implements TARGET_FUNCTION_VALUE_REGNO_P.
+ Return true if REGNO is the number of a hard register in which the values
+ of called function may come back. */
+
+static bool
+aarch64_function_value_regno_p (const unsigned int regno)
+{
+ /* A maximum of 16 bytes can be returned in the general registers. Examples
+ of 16-byte return values are: 128-bit integers and 16-byte small
+ structures (excluding homogeneous floating-point aggregates). */
+ if (regno == R0_REGNUM || regno == R1_REGNUM)
+ return true;
+
+ /* Up to four fp/simd registers can return a function value, e.g. a
+ homogeneous floating-point aggregate having four members. */
+ if (regno >= V0_REGNUM && regno < V0_REGNUM + HA_MAX_NUM_FLDS)
+ return !TARGET_GENERAL_REGS_ONLY;
+
+ return false;
+}
+
+/* Implement TARGET_RETURN_IN_MEMORY.
+
+ If the type T of the result of a function is such that
+ void func (T arg)
+ would require that arg be passed as a value in a register (or set of
+ registers) according to the parameter passing rules, then the result
+ is returned in the same registers as would be used for such an
+ argument. */
+
+static bool
+aarch64_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
+{
+ HOST_WIDE_INT size;
+ enum machine_mode ag_mode;
+ int count;
+
+ if (!AGGREGATE_TYPE_P (type)
+ && TREE_CODE (type) != COMPLEX_TYPE
+ && TREE_CODE (type) != VECTOR_TYPE)
+ /* Simple scalar types are always returned in registers. */
+ return false;
+
+ if (aarch64_vfp_is_call_or_return_candidate (TYPE_MODE (type),
+ type,
+ &ag_mode,
+ &count,
+ NULL))
+ return false;
+
+ /* Types larger than 2 registers are returned in memory. */
+ size = int_size_in_bytes (type);
+ return (size < 0 || size > 2 * UNITS_PER_WORD);
+}
+
+static bool
+aarch64_vfp_is_call_candidate (cumulative_args_t pcum_v, enum machine_mode mode,
+ const_tree type, int *nregs)
+{
+ CUMULATIVE_ARGS *pcum = get_cumulative_args (pcum_v);
+ return aarch64_vfp_is_call_or_return_candidate (mode,
+ type,
+ &pcum->aapcs_vfp_rmode,
+ nregs,
+ NULL);
+}
+
+/* Given MODE and TYPE of a function argument, return the alignment in
+ bits. The idea is to suppress any stronger alignment requested by
+ the user and opt for the natural alignment (specified in AAPCS64 \S 4.1).
+ This is a helper function for local use only. */
+
+static unsigned int
+aarch64_function_arg_alignment (enum machine_mode mode, const_tree type)
+{
+ unsigned int alignment;
+
+ if (type)
+ {
+ if (!integer_zerop (TYPE_SIZE (type)))
+ {
+ if (TYPE_MODE (type) == mode)
+ alignment = TYPE_ALIGN (type);
+ else
+ alignment = GET_MODE_ALIGNMENT (mode);
+ }
+ else
+ alignment = 0;
+ }
+ else
+ alignment = GET_MODE_ALIGNMENT (mode);
+
+ return alignment;
+}
+
+/* Layout a function argument according to the AAPCS64 rules. The rule
+ numbers refer to the rule numbers in the AAPCS64. */
+
+static void
+aarch64_layout_arg (cumulative_args_t pcum_v, enum machine_mode mode,
+ const_tree type,
+ bool named ATTRIBUTE_UNUSED)
+{
+ CUMULATIVE_ARGS *pcum = get_cumulative_args (pcum_v);
+ int ncrn, nvrn, nregs;
+ bool allocate_ncrn, allocate_nvrn;
+
+ /* We need to do this once per argument. */
+ if (pcum->aapcs_arg_processed)
+ return;
+
+ pcum->aapcs_arg_processed = true;
+
+ allocate_ncrn = (type) ? !(FLOAT_TYPE_P (type)) : !FLOAT_MODE_P (mode);
+ allocate_nvrn = aarch64_vfp_is_call_candidate (pcum_v,
+ mode,
+ type,
+ &nregs);
+
+ /* allocate_ncrn may be a false positive, but allocate_nvrn is quite reliable.
+ The following code thus handles passing by SIMD/FP registers first. */
+
+ nvrn = pcum->aapcs_nvrn;
+
+ /* C1 - C5 for floating point, homogeneous floating-point aggregates (HFA)
+ and homogeneous short-vector aggregates (HVA). */
+ if (allocate_nvrn)
+ {
+ if (nvrn + nregs <= NUM_FP_ARG_REGS)
+ {
+ pcum->aapcs_nextnvrn = nvrn + nregs;
+ if (!aarch64_composite_type_p (type, mode))
+ {
+ gcc_assert (nregs == 1);
+ pcum->aapcs_reg = gen_rtx_REG (mode, V0_REGNUM + nvrn);
+ }
+ else
+ {
+ rtx par;
+ int i;
+ par = gen_rtx_PARALLEL (mode, rtvec_alloc (nregs));
+ for (i = 0; i < nregs; i++)
+ {
+ rtx tmp = gen_rtx_REG (pcum->aapcs_vfp_rmode,
+ V0_REGNUM + nvrn + i);
+ tmp = gen_rtx_EXPR_LIST
+ (VOIDmode, tmp,
+ GEN_INT (i * GET_MODE_SIZE (pcum->aapcs_vfp_rmode)));
+ XVECEXP (par, 0, i) = tmp;
+ }
+ pcum->aapcs_reg = par;
+ }
+ return;
+ }
+ else
+ {
+ /* C.3 NSRN is set to 8. */
+ pcum->aapcs_nextnvrn = NUM_FP_ARG_REGS;
+ goto on_stack;
+ }
+ }
+
+ ncrn = pcum->aapcs_ncrn;
+ nregs = ((type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode))
+ + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
+
+
+ /* C6 - C9, though the sign and zero extension semantics are
+ handled elsewhere. This is the case where the argument fits
+ entirely in general registers. */
+ if (allocate_ncrn && (ncrn + nregs <= NUM_ARG_REGS))
+ {
+ unsigned int alignment = aarch64_function_arg_alignment (mode, type);
+
+ gcc_assert (nregs == 0 || nregs == 1 || nregs == 2);
+
+ /* C.8 if the argument has an alignment of 16 then the NGRN is
+ rounded up to the next even number. */
+ if (nregs == 2 && alignment == 16 * BITS_PER_UNIT && ncrn % 2)
+ {
+ ++ncrn;
+ gcc_assert (ncrn + nregs <= NUM_ARG_REGS);
+ }
+ /* NREGS can be 0 when e.g. an empty structure is to be passed.
+ A reg is still generated for it, but the caller should be smart
+ enough not to use it. */
+ if (nregs == 0 || nregs == 1 || GET_MODE_CLASS (mode) == MODE_INT)
+ {
+ pcum->aapcs_reg = gen_rtx_REG (mode, R0_REGNUM + ncrn);
+ }
+ else
+ {
+ rtx par;
+ int i;
+
+ par = gen_rtx_PARALLEL (mode, rtvec_alloc (nregs));
+ for (i = 0; i < nregs; i++)
+ {
+ rtx tmp = gen_rtx_REG (word_mode, R0_REGNUM + ncrn + i);
+ tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp,
+ GEN_INT (i * UNITS_PER_WORD));
+ XVECEXP (par, 0, i) = tmp;
+ }
+ pcum->aapcs_reg = par;
+ }
+
+ pcum->aapcs_nextncrn = ncrn + nregs;
+ return;
+ }
+
+ /* C.11 */
+ pcum->aapcs_nextncrn = NUM_ARG_REGS;
+
+ /* The argument is passed on stack; record the needed number of words for
+ this argument (we can re-use NREGS) and align the total size if
+ necessary. */
+on_stack:
+ pcum->aapcs_stack_words = nregs;
+ if (aarch64_function_arg_alignment (mode, type) == 16 * BITS_PER_UNIT)
+ pcum->aapcs_stack_size = AARCH64_ROUND_UP (pcum->aapcs_stack_size,
+ 16 / UNITS_PER_WORD) + 1;
+ return;
+}
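+
+/* As an example of rule C.8 above, for a call f (int, __int128) the int
+ is passed in w0, and because the __int128 needs two registers and has
+ 16-byte alignment the NGRN is rounded up, so it is passed in x2/x3
+ rather than x1/x2. */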
+
+/* Implement TARGET_FUNCTION_ARG. */
+
+static rtx
+aarch64_function_arg (cumulative_args_t pcum_v, enum machine_mode mode,
+ const_tree type, bool named)
+{
+ CUMULATIVE_ARGS *pcum = get_cumulative_args (pcum_v);
+ gcc_assert (pcum->pcs_variant == ARM_PCS_AAPCS64);
+
+ if (mode == VOIDmode)
+ return NULL_RTX;
+
+ aarch64_layout_arg (pcum_v, mode, type, named);
+ return pcum->aapcs_reg;
+}
+
+void
+aarch64_init_cumulative_args (CUMULATIVE_ARGS *pcum,
+ const_tree fntype ATTRIBUTE_UNUSED,
+ rtx libname ATTRIBUTE_UNUSED,
+ const_tree fndecl ATTRIBUTE_UNUSED,
+ unsigned n_named ATTRIBUTE_UNUSED)
+{
+ pcum->aapcs_ncrn = 0;
+ pcum->aapcs_nvrn = 0;
+ pcum->aapcs_nextncrn = 0;
+ pcum->aapcs_nextnvrn = 0;
+ pcum->pcs_variant = ARM_PCS_AAPCS64;
+ pcum->aapcs_reg = NULL_RTX;
+ pcum->aapcs_arg_processed = false;
+ pcum->aapcs_stack_words = 0;
+ pcum->aapcs_stack_size = 0;
+
+ return;
+}
+
+static void
+aarch64_function_arg_advance (cumulative_args_t pcum_v,
+ enum machine_mode mode,
+ const_tree type,
+ bool named)
+{
+ CUMULATIVE_ARGS *pcum = get_cumulative_args (pcum_v);
+ if (pcum->pcs_variant == ARM_PCS_AAPCS64)
+ {
+ aarch64_layout_arg (pcum_v, mode, type, named);
+ gcc_assert ((pcum->aapcs_reg != NULL_RTX)
+ != (pcum->aapcs_stack_words != 0));
+ pcum->aapcs_arg_processed = false;
+ pcum->aapcs_ncrn = pcum->aapcs_nextncrn;
+ pcum->aapcs_nvrn = pcum->aapcs_nextnvrn;
+ pcum->aapcs_stack_size += pcum->aapcs_stack_words;
+ pcum->aapcs_stack_words = 0;
+ pcum->aapcs_reg = NULL_RTX;
+ }
+}
+
+bool
+aarch64_function_arg_regno_p (unsigned regno)
+{
+ return ((GP_REGNUM_P (regno) && regno < R0_REGNUM + NUM_ARG_REGS)
+ || (FP_REGNUM_P (regno) && regno < V0_REGNUM + NUM_FP_ARG_REGS));
+}
+
+/* Implement FUNCTION_ARG_BOUNDARY. Every parameter gets at least
+ PARM_BOUNDARY bits of alignment, but will be given anything up
+ to STACK_BOUNDARY bits if the type requires it. This makes sure
+ that both before and after the layout of each argument, the Next
+ Stacked Argument Address (NSAA) will have a minimum alignment of
+ 8 bytes. */
+
+static unsigned int
+aarch64_function_arg_boundary (enum machine_mode mode, const_tree type)
+{
+ unsigned int alignment = aarch64_function_arg_alignment (mode, type);
+
+ if (alignment < PARM_BOUNDARY)
+ alignment = PARM_BOUNDARY;
+ if (alignment > STACK_BOUNDARY)
+ alignment = STACK_BOUNDARY;
+ return alignment;
+}
+
+/* For use by FUNCTION_ARG_PADDING (MODE, TYPE).
+
+ Return true if an argument passed on the stack should be padded upwards,
+ i.e. if the least-significant byte of the stack slot has useful data.
+
+ Small aggregate types are placed in the lowest memory address.
+
+ The related parameter passing rules are B.4, C.3, C.5 and C.14. */
+
+bool
+aarch64_pad_arg_upward (enum machine_mode mode, const_tree type)
+{
+ /* On little-endian targets, the least significant byte of every stack
+ argument is passed at the lowest byte address of the stack slot. */
+ if (!BYTES_BIG_ENDIAN)
+ return true;
+
+ /* Otherwise, integral, floating-point and pointer types are padded downward:
+ the least significant byte of a stack argument is passed at the highest
+ byte address of the stack slot. */
+ if (type
+ ? (INTEGRAL_TYPE_P (type) || SCALAR_FLOAT_TYPE_P (type)
+ || POINTER_TYPE_P (type))
+ : (SCALAR_INT_MODE_P (mode) || SCALAR_FLOAT_MODE_P (mode)))
+ return false;
+
+ /* Everything else padded upward, i.e. data in first byte of stack slot. */
+ return true;
+}
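+
+/* Concretely, on a big-endian target a short passed on the stack ends
+ up at the highest byte addresses of its 8-byte slot (padded downward),
+ while a small aggregate occupies the lowest byte addresses of the
+ slot (padded upward). */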
+
+/* Similarly, for use by BLOCK_REG_PADDING (MODE, TYPE, FIRST).
+
+ It specifies padding for the last (possibly the only)
+ element of a block move between registers and memory.
+ Assuming the block is in memory, padding upward means that
+ the last element is padded after its most significant byte,
+ while with downward padding the last element is padded on
+ its least significant byte side.
+
+ Small aggregates and small complex types are always padded
+ upwards.
+
+ We don't need to worry about homogeneous floating-point or
+ short-vector aggregates; their move is not affected by the
+ padding direction determined here. Regardless of endianness,
+ each element of such an aggregate is put in the least
+ significant bits of a fp/simd register.
+
+ Return !BYTES_BIG_ENDIAN if the least significant byte of the
+ register has useful data, and return the opposite if the most
+ significant byte does. */
+
+bool
+aarch64_pad_reg_upward (enum machine_mode mode, const_tree type,
+ bool first ATTRIBUTE_UNUSED)
+{
+
+ /* Small composite types are always padded upward. */
+ if (BYTES_BIG_ENDIAN && aarch64_composite_type_p (type, mode))
+ {
+ HOST_WIDE_INT size = (type ? int_size_in_bytes (type)
+ : GET_MODE_SIZE (mode));
+ if (size < 2 * UNITS_PER_WORD)
+ return true;
+ }
+
+ /* Otherwise, use the default padding. */
+ return !BYTES_BIG_ENDIAN;
+}
+
+static enum machine_mode
+aarch64_libgcc_cmp_return_mode (void)
+{
+ return SImode;
+}
+
+static bool
+aarch64_frame_pointer_required (void)
+{
+ /* If the function contains dynamic stack allocations, we need to
+ use the frame pointer to access the static parts of the frame. */
+ if (cfun->calls_alloca)
+ return true;
+
+ /* We may have turned flag_omit_frame_pointer on in order to have this
+ function called; if we did, we also set the 'faked_omit_frame_pointer' flag
+ and we'll check it here.
+ If we really did set flag_omit_frame_pointer normally, then we return false
+ (no frame pointer required) in all cases. */
+
+ if (flag_omit_frame_pointer && !faked_omit_frame_pointer)
+ return false;
+ else if (flag_omit_leaf_frame_pointer)
+ return !crtl->is_leaf || df_regs_ever_live_p (LR_REGNUM);
+ return true;
+}
+
+/* Mark the registers that need to be saved by the callee and calculate
+ the size of the callee-saved registers area and frame record (both FP
+ and LR may be omitted). */
+static void
+aarch64_layout_frame (void)
+{
+ HOST_WIDE_INT offset = 0;
+ int regno;
+
+ if (reload_completed && cfun->machine->frame.laid_out)
+ return;
+
+ cfun->machine->frame.fp_lr_offset = 0;
+
+ /* First mark all the registers that really need to be saved... */
+ for (regno = R0_REGNUM; regno <= R30_REGNUM; regno++)
+ cfun->machine->frame.reg_offset[regno] = -1;
+
+ for (regno = V0_REGNUM; regno <= V31_REGNUM; regno++)
+ cfun->machine->frame.reg_offset[regno] = -1;
+
+ /* ... that includes the eh data registers (if needed)... */
+ if (crtl->calls_eh_return)
+ for (regno = 0; EH_RETURN_DATA_REGNO (regno) != INVALID_REGNUM; regno++)
+ cfun->machine->frame.reg_offset[EH_RETURN_DATA_REGNO (regno)] = 0;
+
+ /* ... and any callee saved register that dataflow says is live. */
+ for (regno = R0_REGNUM; regno <= R30_REGNUM; regno++)
+ if (df_regs_ever_live_p (regno)
+ && !call_used_regs[regno])
+ cfun->machine->frame.reg_offset[regno] = 0;
+
+ for (regno = V0_REGNUM; regno <= V31_REGNUM; regno++)
+ if (df_regs_ever_live_p (regno)
+ && !call_used_regs[regno])
+ cfun->machine->frame.reg_offset[regno] = 0;
+
+ if (frame_pointer_needed)
+ {
+ cfun->machine->frame.reg_offset[R30_REGNUM] = 0;
+ cfun->machine->frame.reg_offset[R29_REGNUM] = 0;
+ cfun->machine->frame.hardfp_offset = 2 * UNITS_PER_WORD;
+ }
+
+ /* Now assign stack slots for them. */
+ for (regno = R0_REGNUM; regno <= R28_REGNUM; regno++)
+ if (cfun->machine->frame.reg_offset[regno] != -1)
+ {
+ cfun->machine->frame.reg_offset[regno] = offset;
+ offset += UNITS_PER_WORD;
+ }
+
+ for (regno = V0_REGNUM; regno <= V31_REGNUM; regno++)
+ if (cfun->machine->frame.reg_offset[regno] != -1)
+ {
+ cfun->machine->frame.reg_offset[regno] = offset;
+ offset += UNITS_PER_WORD;
+ }
+
+ if (frame_pointer_needed)
+ {
+ cfun->machine->frame.reg_offset[R29_REGNUM] = offset;
+ offset += UNITS_PER_WORD;
+ cfun->machine->frame.fp_lr_offset = UNITS_PER_WORD;
+ }
+
+ if (cfun->machine->frame.reg_offset[R30_REGNUM] != -1)
+ {
+ cfun->machine->frame.reg_offset[R30_REGNUM] = offset;
+ offset += UNITS_PER_WORD;
+ cfun->machine->frame.fp_lr_offset += UNITS_PER_WORD;
+ }
+
+ cfun->machine->frame.padding0 =
+ (AARCH64_ROUND_UP (offset, STACK_BOUNDARY / BITS_PER_UNIT) - offset);
+ offset = AARCH64_ROUND_UP (offset, STACK_BOUNDARY / BITS_PER_UNIT);
+
+ cfun->machine->frame.saved_regs_size = offset;
+ cfun->machine->frame.laid_out = true;
+}
+
+/* Make the last instruction frame-related and note that it performs
+ the operation described by FRAME_PATTERN. */
+
+static void
+aarch64_set_frame_expr (rtx frame_pattern)
+{
+ rtx insn;
+
+ insn = get_last_insn ();
+ RTX_FRAME_RELATED_P (insn) = 1;
+ RTX_FRAME_RELATED_P (frame_pattern) = 1;
+ REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR,
+ frame_pattern,
+ REG_NOTES (insn));
+}
+
+static bool
+aarch64_register_saved_on_entry (int regno)
+{
+ return cfun->machine->frame.reg_offset[regno] != -1;
+}
+
+
+static void
+aarch64_save_or_restore_fprs (int start_offset, int increment,
+ bool restore, rtx base_rtx)
+
+{
+ unsigned regno;
+ unsigned regno2;
+ rtx insn;
+ rtx (*gen_mem_ref)(enum machine_mode, rtx)
+ = (frame_pointer_needed)? gen_frame_mem : gen_rtx_MEM;
+
+
+ for (regno = V0_REGNUM; regno <= V31_REGNUM; regno++)
+ {
+ if (aarch64_register_saved_on_entry (regno))
+ {
+ rtx mem;
+ mem = gen_mem_ref (DFmode,
+ plus_constant (Pmode,
+ base_rtx,
+ start_offset));
+
+ for (regno2 = regno + 1;
+ regno2 <= V31_REGNUM
+ && !aarch64_register_saved_on_entry (regno2);
+ regno2++)
+ {
+ /* Empty loop. */
+ }
+ if (regno2 <= V31_REGNUM
+ && aarch64_register_saved_on_entry (regno2))
+ {
+ rtx mem2;
+ /* Next highest register to be saved. */
+ mem2 = gen_mem_ref (DFmode,
+ plus_constant
+ (Pmode,
+ base_rtx,
+ start_offset + increment));
+ if (restore == false)
+ {
+ insn = emit_insn
+ ( gen_store_pairdf (mem, gen_rtx_REG (DFmode, regno),
+ mem2, gen_rtx_REG (DFmode, regno2)));
+
+ }
+ else
+ {
+ insn = emit_insn
+ ( gen_load_pairdf (gen_rtx_REG (DFmode, regno), mem,
+ gen_rtx_REG (DFmode, regno2), mem2));
+
+ add_reg_note (insn, REG_CFA_RESTORE,
+ gen_rtx_REG (DFmode, regno));
+ add_reg_note (insn, REG_CFA_RESTORE,
+ gen_rtx_REG (DFmode, regno2));
+ }
+
+ /* The first part of a frame-related parallel insn
+ is always assumed to be relevant to the frame
+ calculations; subsequent parts are only
+ frame-related if explicitly marked. */
+ RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
+ regno = regno2;
+ start_offset += increment * 2;
+ }
+ else
+ {
+ if (restore == false)
+ insn = emit_move_insn (mem, gen_rtx_REG (DFmode, regno));
+ else
+ {
+ insn = emit_move_insn (gen_rtx_REG (DFmode, regno), mem);
+ add_reg_note (insn, REG_CFA_RESTORE,
+ gen_rtx_REG (DImode, regno));
+ }
+ start_offset += increment;
+ }
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+ }
+
+}
+
+
+/* OFFSET is the offset from the stack pointer at which the saves and
+ restores have to happen. */
+static void
+aarch64_save_or_restore_callee_save_registers (HOST_WIDE_INT offset,
+ bool restore)
+{
+ rtx insn;
+ rtx base_rtx = stack_pointer_rtx;
+ HOST_WIDE_INT start_offset = offset;
+ HOST_WIDE_INT increment = UNITS_PER_WORD;
+ rtx (*gen_mem_ref)(enum machine_mode, rtx)
+ = (frame_pointer_needed)? gen_frame_mem : gen_rtx_MEM;
+ unsigned limit = (frame_pointer_needed)? R28_REGNUM: R30_REGNUM;
+ unsigned regno;
+ unsigned regno2;
+
+ for (regno = R0_REGNUM; regno <= limit; regno++)
+ {
+ if (aarch64_register_saved_on_entry (regno))
+ {
+ rtx mem;
+ mem = gen_mem_ref (Pmode,
+ plus_constant (Pmode,
+ base_rtx,
+ start_offset));
+
+ for (regno2 = regno + 1;
+ regno2 <= limit
+ && !aarch64_register_saved_on_entry (regno2);
+ regno2++)
+ {
+ /* Empty loop. */
+ }
+ if (regno2 <= limit
+ && aarch64_register_saved_on_entry (regno2))
+ {
+ rtx mem2;
+ /* Next highest register to be saved. */
+ mem2 = gen_mem_ref (Pmode,
+ plus_constant
+ (Pmode,
+ base_rtx,
+ start_offset + increment));
+ if (restore == false)
+ {
+ insn = emit_insn
+ ( gen_store_pairdi (mem, gen_rtx_REG (DImode, regno),
+ mem2, gen_rtx_REG (DImode, regno2)));
+
+ }
+ else
+ {
+ insn = emit_insn
+ ( gen_load_pairdi (gen_rtx_REG (DImode, regno), mem,
+ gen_rtx_REG (DImode, regno2), mem2));
+
+ add_reg_note (insn, REG_CFA_RESTORE, gen_rtx_REG (DImode, regno));
+ add_reg_note (insn, REG_CFA_RESTORE, gen_rtx_REG (DImode, regno2));
+ }
+
+ /* The first part of a frame-related parallel insn
+ is always assumed to be relevant to the frame
+ calculations; subsequent parts are only
+ frame-related if explicitly marked. */
+ RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
+ regno = regno2;
+ start_offset += increment * 2;
+ }
+ else
+ {
+ if (restore == false)
+ insn = emit_move_insn (mem, gen_rtx_REG (DImode, regno));
+ else
+ {
+ insn = emit_move_insn (gen_rtx_REG (DImode, regno), mem);
+ add_reg_note (insn, REG_CFA_RESTORE, gen_rtx_REG (DImode, regno));
+ }
+ start_offset += increment;
+ }
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+ }
+
+ aarch64_save_or_restore_fprs (start_offset, increment, restore, base_rtx);
+
+}
+
+/* AArch64 stack frames generated by this compiler look like:
+
+ +-------------------------------+
+ | |
+ | incoming stack arguments |
+ | |
+ +-------------------------------+ <-- arg_pointer_rtx
+ | |
+ | callee-allocated save area |
+ | for register varargs |
+ | |
+ +-------------------------------+ <-- frame_pointer_rtx
+ | |
+ | local variables |
+ | |
+ +-------------------------------+
+ | padding0 | \
+ +-------------------------------+ |
+ | | |
+ | | |
+ | callee-saved registers | | frame.saved_regs_size
+ | | |
+ +-------------------------------+ |
+ | LR' | |
+ +-------------------------------+ |
+ | FP' | /
+ P +-------------------------------+ <-- hard_frame_pointer_rtx
+ | dynamic allocation |
+ +-------------------------------+
+ | |
+ | outgoing stack arguments |
+ | |
+ +-------------------------------+ <-- stack_pointer_rtx
+
+ Dynamic stack allocations such as alloca insert data at point P.
+ They decrease stack_pointer_rtx but leave frame_pointer_rtx and
+ hard_frame_pointer_rtx unchanged. */
+
+/* Generate the prologue instructions for entry into a function.
+ Establish the stack frame by decreasing the stack pointer with a
+ properly calculated size and, if necessary, create a frame record
+ filled with the values of LR and previous frame pointer. The
+ current FP is also set up if it is in use. */
+
+void
+aarch64_expand_prologue (void)
+{
+ /* sub sp, sp, #<frame_size>
+ stp {fp, lr}, [sp, #<frame_size> - 16]
+ add fp, sp, #<frame_size> - hardfp_offset
+ stp {cs_reg}, [fp, #-16] etc.
+
+ sub sp, sp, <final_adjustment_if_any>
+ */
+ HOST_WIDE_INT original_frame_size; /* local variables + vararg save */
+ HOST_WIDE_INT frame_size, offset;
+ HOST_WIDE_INT fp_offset; /* FP offset from SP */
+ rtx insn;
+
+ aarch64_layout_frame ();
+ original_frame_size = get_frame_size () + cfun->machine->saved_varargs_size;
+ gcc_assert ((!cfun->machine->saved_varargs_size || cfun->stdarg)
+ && (cfun->stdarg || !cfun->machine->saved_varargs_size));
+ frame_size = (original_frame_size + cfun->machine->frame.saved_regs_size
+ + crtl->outgoing_args_size);
+ offset = frame_size = AARCH64_ROUND_UP (frame_size,
+ STACK_BOUNDARY / BITS_PER_UNIT);
+
+ if (flag_stack_usage_info)
+ current_function_static_stack_size = frame_size;
+
+ fp_offset = (offset
+ - original_frame_size
+ - cfun->machine->frame.saved_regs_size);
+
+ /* Store pairs and load pairs have a range only -512 to 504. */
+ if (offset >= 512)
+ {
+ /* When the frame has a large size, an initial decrease is done on
+ the stack pointer to jump over the callee-allocated save area for
+ register varargs, the local variable area and/or the callee-saved
+ register area. This will allow the pre-index write-back
+ store pair instructions to be used for setting up the stack frame
+ efficiently. */
+ offset = original_frame_size + cfun->machine->frame.saved_regs_size;
+ if (offset >= 512)
+ offset = cfun->machine->frame.saved_regs_size;
+
+ frame_size -= (offset + crtl->outgoing_args_size);
+ fp_offset = 0;
+
+ if (frame_size >= 0x1000000)
+ {
+ rtx op0 = gen_rtx_REG (Pmode, IP0_REGNUM);
+ emit_move_insn (op0, GEN_INT (-frame_size));
+ emit_insn (gen_add2_insn (stack_pointer_rtx, op0));
+ aarch64_set_frame_expr (gen_rtx_SET
+ (Pmode, stack_pointer_rtx,
+ plus_constant (Pmode,
+ stack_pointer_rtx,
+ -frame_size)));
+ }
+ else if (frame_size > 0)
+ {
+ if ((frame_size & 0xfff) != frame_size)
+ {
+ insn = emit_insn (gen_add2_insn
+ (stack_pointer_rtx,
+ GEN_INT (-(frame_size
+ & ~(HOST_WIDE_INT)0xfff))));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+ if ((frame_size & 0xfff) != 0)
+ {
+ insn = emit_insn (gen_add2_insn
+ (stack_pointer_rtx,
+ GEN_INT (-(frame_size
+ & (HOST_WIDE_INT)0xfff))));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+ }
+ }
+ else
+ frame_size = -1;
+
+ if (offset > 0)
+ {
+ /* If the frame pointer is needed, save it and LR first. Make the
+ frame pointer point to the location of the old frame pointer on
+ the stack. */
+ if (frame_pointer_needed)
+ {
+ rtx mem_fp, mem_lr;
+
+ if (fp_offset)
+ {
+ insn = emit_insn (gen_add2_insn (stack_pointer_rtx,
+ GEN_INT (-offset)));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ aarch64_set_frame_expr (gen_rtx_SET
+ (Pmode, stack_pointer_rtx,
+ gen_rtx_MINUS (Pmode,
+ stack_pointer_rtx,
+ GEN_INT (offset))));
+ mem_fp = gen_frame_mem (DImode,
+ plus_constant (Pmode,
+ stack_pointer_rtx,
+ fp_offset));
+ mem_lr = gen_frame_mem (DImode,
+ plus_constant (Pmode,
+ stack_pointer_rtx,
+ fp_offset
+ + UNITS_PER_WORD));
+ insn = emit_insn (gen_store_pairdi (mem_fp,
+ hard_frame_pointer_rtx,
+ mem_lr,
+ gen_rtx_REG (DImode,
+ LR_REGNUM)));
+ }
+ else
+ {
+ insn = emit_insn (gen_storewb_pairdi_di
+ (stack_pointer_rtx, stack_pointer_rtx,
+ hard_frame_pointer_rtx,
+ gen_rtx_REG (DImode, LR_REGNUM),
+ GEN_INT (-offset),
+ GEN_INT (GET_MODE_SIZE (DImode) - offset)));
+ RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 2)) = 1;
+ }
+
+ /* The first part of a frame-related parallel insn is always
+ assumed to be relevant to the frame calculations;
+ subsequent parts are only frame-related if explicitly
+ marked. */
+ RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
+ RTX_FRAME_RELATED_P (insn) = 1;
+
+ /* Set up frame pointer to point to the location of the
+ previous frame pointer on the stack. */
+ insn = emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
+ stack_pointer_rtx,
+ GEN_INT (fp_offset)));
+ aarch64_set_frame_expr (gen_rtx_SET
+ (Pmode, hard_frame_pointer_rtx,
+ plus_constant (Pmode,
+ stack_pointer_rtx,
+ fp_offset)));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ insn = emit_insn (gen_stack_tie (stack_pointer_rtx,
+ hard_frame_pointer_rtx));
+ }
+ else
+ {
+ insn = emit_insn (gen_add2_insn (stack_pointer_rtx,
+ GEN_INT (-offset)));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+
+ aarch64_save_or_restore_callee_save_registers
+ (fp_offset + cfun->machine->frame.hardfp_offset, 0);
+ }
+
+ /* When offset >= 512,
+ sub sp, sp, #<outgoing_args_size> */
+ if (frame_size > -1)
+ {
+ if (crtl->outgoing_args_size > 0)
+ {
+ insn = emit_insn (gen_add2_insn
+ (stack_pointer_rtx,
+ GEN_INT (- crtl->outgoing_args_size)));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+ }
+}
+
+/* Generate the epilogue instructions for returning from a function. */
+void
+aarch64_expand_epilogue (bool for_sibcall)
+{
+ HOST_WIDE_INT original_frame_size, frame_size, offset;
+ HOST_WIDE_INT fp_offset;
+ rtx insn;
+ rtx cfa_reg;
+
+ aarch64_layout_frame ();
+ original_frame_size = get_frame_size () + cfun->machine->saved_varargs_size;
+ frame_size = (original_frame_size + cfun->machine->frame.saved_regs_size
+ + crtl->outgoing_args_size);
+ offset = frame_size = AARCH64_ROUND_UP (frame_size,
+ STACK_BOUNDARY / BITS_PER_UNIT);
+
+ fp_offset = (offset
+ - original_frame_size
+ - cfun->machine->frame.saved_regs_size);
+
+ cfa_reg = frame_pointer_needed ? hard_frame_pointer_rtx : stack_pointer_rtx;
+
+ /* Store pairs and load pairs have a range only -512 to 504. */
+ if (offset >= 512)
+ {
+ offset = original_frame_size + cfun->machine->frame.saved_regs_size;
+ if (offset >= 512)
+ offset = cfun->machine->frame.saved_regs_size;
+
+ frame_size -= (offset + crtl->outgoing_args_size);
+ fp_offset = 0;
+ if (!frame_pointer_needed && crtl->outgoing_args_size > 0)
+ {
+ insn = emit_insn (gen_add2_insn
+ (stack_pointer_rtx,
+ GEN_INT (crtl->outgoing_args_size)));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+ }
+ else
+ frame_size = -1;
+
+ /* If there were outgoing arguments or we've done dynamic stack
+ allocation, then restore the stack pointer from the frame
+ pointer. This is at most one insn and more efficient than using
+ GCC's internal mechanism. */
+ if (frame_pointer_needed
+ && (crtl->outgoing_args_size || cfun->calls_alloca))
+ {
+ insn = emit_insn (gen_add3_insn (stack_pointer_rtx,
+ hard_frame_pointer_rtx,
+ GEN_INT (- fp_offset)));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ /* As SP is set to (FP - fp_offset), according to the rules in
+ dwarf2cfi.c:dwarf2out_frame_debug_expr, CFA should be calculated
+ from the value of SP from now on. */
+ cfa_reg = stack_pointer_rtx;
+ }
+
+ aarch64_save_or_restore_callee_save_registers
+ (fp_offset + cfun->machine->frame.hardfp_offset, 1);
+
+ /* Restore the frame pointer and lr if the frame pointer is needed. */
+ if (offset > 0)
+ {
+ if (frame_pointer_needed)
+ {
+ rtx mem_fp, mem_lr;
+
+ if (fp_offset)
+ {
+ mem_fp = gen_frame_mem (DImode,
+ plus_constant (Pmode,
+ stack_pointer_rtx,
+ fp_offset));
+ mem_lr = gen_frame_mem (DImode,
+ plus_constant (Pmode,
+ stack_pointer_rtx,
+ fp_offset
+ + UNITS_PER_WORD));
+ insn = emit_insn (gen_load_pairdi (hard_frame_pointer_rtx,
+ mem_fp,
+ gen_rtx_REG (DImode,
+ LR_REGNUM),
+ mem_lr));
+ }
+ else
+ {
+ insn = emit_insn (gen_loadwb_pairdi_di
+ (stack_pointer_rtx,
+ stack_pointer_rtx,
+ hard_frame_pointer_rtx,
+ gen_rtx_REG (DImode, LR_REGNUM),
+ GEN_INT (offset),
+ GEN_INT (GET_MODE_SIZE (DImode) + offset)));
+ RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 2)) = 1;
+ add_reg_note (insn, REG_CFA_ADJUST_CFA,
+ (gen_rtx_SET (Pmode, stack_pointer_rtx,
+ plus_constant (Pmode, cfa_reg,
+ offset))));
+ }
+
+ /* The first part of a frame-related parallel insn
+ is always assumed to be relevant to the frame
+ calculations; subsequent parts are only
+ frame-related if explicitly marked. */
+ RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
+ RTX_FRAME_RELATED_P (insn) = 1;
+ add_reg_note (insn, REG_CFA_RESTORE, hard_frame_pointer_rtx);
+ add_reg_note (insn, REG_CFA_RESTORE,
+ gen_rtx_REG (DImode, LR_REGNUM));
+
+ if (fp_offset)
+ {
+ insn = emit_insn (gen_add2_insn (stack_pointer_rtx,
+ GEN_INT (offset)));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+ }
+ else
+ {
+ insn = emit_insn (gen_add2_insn (stack_pointer_rtx,
+ GEN_INT (offset)));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+ }
+
+ /* Stack adjustment for exception handler. */
+ if (crtl->calls_eh_return)
+ {
+ /* We need to unwind the stack by the offset computed by
+ EH_RETURN_STACKADJ_RTX. However, at this point the CFA is
+ based on SP. Ideally we would update the SP and define the
+ CFA along the lines of:
+
+ SP = SP + EH_RETURN_STACKADJ_RTX
+ (regnote CFA = SP - EH_RETURN_STACKADJ_RTX)
+
+ However the dwarf emitter only understands a constant
+ register offset.
+
+ The solution chosen here is to use the otherwise unused IP0
+ as a temporary register to hold the current SP value. The
+ CFA is described using IP0 and then SP is modified. */
+
+ rtx ip0 = gen_rtx_REG (DImode, IP0_REGNUM);
+
+ insn = emit_move_insn (ip0, stack_pointer_rtx);
+ add_reg_note (insn, REG_CFA_DEF_CFA, ip0);
+ RTX_FRAME_RELATED_P (insn) = 1;
+
+ emit_insn (gen_add2_insn (stack_pointer_rtx, EH_RETURN_STACKADJ_RTX));
+
+ /* Ensure the assignment to IP0 does not get optimized away. */
+ emit_use (ip0);
+ }
+
+ if (frame_size > -1)
+ {
+ if (frame_size >= 0x1000000)
+ {
+ rtx op0 = gen_rtx_REG (Pmode, IP0_REGNUM);
+ emit_move_insn (op0, GEN_INT (frame_size));
+ emit_insn (gen_add2_insn (stack_pointer_rtx, op0));
+ aarch64_set_frame_expr (gen_rtx_SET
+ (Pmode, stack_pointer_rtx,
+ plus_constant (Pmode,
+ stack_pointer_rtx,
+ frame_size)));
+ }
+ else if (frame_size > 0)
+ {
+ if ((frame_size & 0xfff) != 0)
+ {
+ insn = emit_insn (gen_add2_insn
+ (stack_pointer_rtx,
+ GEN_INT ((frame_size
+ & (HOST_WIDE_INT) 0xfff))));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+ if ((frame_size & 0xfff) != frame_size)
+ {
+ insn = emit_insn (gen_add2_insn
+ (stack_pointer_rtx,
+ GEN_INT ((frame_size
+ & ~ (HOST_WIDE_INT) 0xfff))));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+ }
+
+ aarch64_set_frame_expr (gen_rtx_SET (Pmode, stack_pointer_rtx,
+ plus_constant (Pmode,
+ stack_pointer_rtx,
+ offset)));
+ }
+
+ emit_use (gen_rtx_REG (DImode, LR_REGNUM));
+ if (!for_sibcall)
+ emit_jump_insn (ret_rtx);
+}
+
+/* Return the place to copy the exception unwinding return address to.
+ This will probably be a stack slot, but could (in theory) be the
+ return register. */
+rtx
+aarch64_final_eh_return_addr (void)
+{
+ HOST_WIDE_INT original_frame_size, frame_size, offset, fp_offset;
+ aarch64_layout_frame ();
+ original_frame_size = get_frame_size () + cfun->machine->saved_varargs_size;
+ frame_size = (original_frame_size + cfun->machine->frame.saved_regs_size
+ + crtl->outgoing_args_size);
+ offset = frame_size = AARCH64_ROUND_UP (frame_size,
+ STACK_BOUNDARY / BITS_PER_UNIT);
+ fp_offset = offset
+ - original_frame_size
+ - cfun->machine->frame.saved_regs_size;
+
+ if (cfun->machine->frame.reg_offset[LR_REGNUM] < 0)
+ return gen_rtx_REG (DImode, LR_REGNUM);
+
+ /* DSE and CSELIB do not detect an alias between sp+k1 and fp+k2. This can
+ result in a store to save LR introduced by builtin_eh_return () being
+ incorrectly deleted because the alias is not detected.
+ So in the calculation of the address to copy the exception unwinding
+ return address to, we note 2 cases.
+ If FP is needed and the fp_offset is 0, it means that SP = FP and hence
+ we return a SP-relative location since all the addresses are SP-relative
+ in this case. This prevents the store from being optimized away.
+ If the fp_offset is not 0, then the addresses will be FP-relative and
+ therefore we return a FP-relative location. */
+
+ if (frame_pointer_needed)
+ {
+ if (fp_offset)
+ return gen_frame_mem (DImode,
+ plus_constant (Pmode, hard_frame_pointer_rtx, UNITS_PER_WORD));
+ else
+ return gen_frame_mem (DImode,
+ plus_constant (Pmode, stack_pointer_rtx, UNITS_PER_WORD));
+ }
+
+ /* If FP is not needed, we calculate the location of LR, which would be
+ at the top of the saved registers block. */
+
+ return gen_frame_mem (DImode,
+ plus_constant (Pmode,
+ stack_pointer_rtx,
+ fp_offset
+ + cfun->machine->frame.saved_regs_size
+ - 2 * UNITS_PER_WORD));
+}
+
+/* Output code to build up a constant in a register. */
+static void
+aarch64_build_constant (int regnum, HOST_WIDE_INT val)
+{
+ if (aarch64_bitmask_imm (val, DImode))
+ emit_move_insn (gen_rtx_REG (Pmode, regnum), GEN_INT (val));
+ else
+ {
+ int i;
+ int ncount = 0;
+ int zcount = 0;
+ HOST_WIDE_INT valp = val >> 16;
+ HOST_WIDE_INT valm;
+ HOST_WIDE_INT tval;
+
+ for (i = 16; i < 64; i += 16)
+ {
+ valm = (valp & 0xffff);
+
+ if (valm != 0)
+ ++ zcount;
+
+ if (valm != 0xffff)
+ ++ ncount;
+
+ valp >>= 16;
+ }
+
+ /* zcount contains the number of additional MOVK instructions
+ required if the constant is built up with an initial MOVZ instruction,
+ while ncount is the number of MOVK instructions required if starting
+	 with a MOVN instruction.  Choose the sequence that needs the fewer
+	 instructions, preferring the MOVZ sequence when the two counts are
+	 equal.  */
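+      /* For example, 0x0000ffff00000000 needs one MOVK after the initial
+	 MOVZ (zcount == 1) but two MOVKs after a MOVN (ncount == 2), so
+	 the MOVZ sequence is chosen.  */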
+ if (ncount < zcount)
+ {
+ emit_move_insn (gen_rtx_REG (Pmode, regnum),
+ GEN_INT (val | ~(HOST_WIDE_INT) 0xffff));
+ tval = 0xffff;
+ }
+ else
+ {
+ emit_move_insn (gen_rtx_REG (Pmode, regnum),
+ GEN_INT (val & 0xffff));
+ tval = 0;
+ }
+
+ val >>= 16;
+
+ for (i = 16; i < 64; i += 16)
+ {
+ if ((val & 0xffff) != tval)
+ emit_insn (gen_insv_immdi (gen_rtx_REG (Pmode, regnum),
+ GEN_INT (i), GEN_INT (val & 0xffff)));
+ val >>= 16;
+ }
+ }
+}
+
+static void
+aarch64_add_constant (int regnum, int scratchreg, HOST_WIDE_INT delta)
+{
+ HOST_WIDE_INT mdelta = delta;
+ rtx this_rtx = gen_rtx_REG (Pmode, regnum);
+ rtx scratch_rtx = gen_rtx_REG (Pmode, scratchreg);
+
+ if (mdelta < 0)
+ mdelta = -mdelta;
+
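+  /* Deltas below 4096 * 4096 are handled directly: the multiple of 4096
+     is moved into the scratch register and added (or subtracted) shifted
+     left by 12, and the signed remainder is added as an immediate.
+     Larger deltas are first built up in full in the scratch register.  */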
+ if (mdelta >= 4096 * 4096)
+ {
+ aarch64_build_constant (scratchreg, delta);
+ emit_insn (gen_add3_insn (this_rtx, this_rtx, scratch_rtx));
+ }
+ else if (mdelta > 0)
+ {
+ if (mdelta >= 4096)
+ {
+ emit_insn (gen_rtx_SET (Pmode, scratch_rtx, GEN_INT (mdelta / 4096)));
+ rtx shift = gen_rtx_ASHIFT (Pmode, scratch_rtx, GEN_INT (12));
+ if (delta < 0)
+ emit_insn (gen_rtx_SET (Pmode, this_rtx,
+ gen_rtx_MINUS (Pmode, this_rtx, shift)));
+ else
+ emit_insn (gen_rtx_SET (Pmode, this_rtx,
+ gen_rtx_PLUS (Pmode, this_rtx, shift)));
+ }
+ if (mdelta % 4096 != 0)
+ {
+ scratch_rtx = GEN_INT ((delta < 0 ? -1 : 1) * (mdelta % 4096));
+ emit_insn (gen_rtx_SET (Pmode, this_rtx,
+ gen_rtx_PLUS (Pmode, this_rtx, scratch_rtx)));
+ }
+ }
+}
+
+/* Output code to add DELTA to the first argument, and then jump
+ to FUNCTION. Used for C++ multiple inheritance. */
+static void
+aarch64_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
+ HOST_WIDE_INT delta,
+ HOST_WIDE_INT vcall_offset,
+ tree function)
+{
+ /* The this pointer is always in x0. Note that this differs from
+     Arm where the this pointer may be bumped to r1 if r0 is required
+ to return a pointer to an aggregate. On AArch64 a result value
+ pointer will be in x8. */
+ int this_regno = R0_REGNUM;
+ rtx this_rtx, temp0, temp1, addr, insn, funexp;
+
+ reload_completed = 1;
+ emit_note (NOTE_INSN_PROLOGUE_END);
+
+ if (vcall_offset == 0)
+ aarch64_add_constant (this_regno, IP1_REGNUM, delta);
+ else
+ {
+ gcc_assert ((vcall_offset & (POINTER_BYTES - 1)) == 0);
+
+ this_rtx = gen_rtx_REG (Pmode, this_regno);
+ temp0 = gen_rtx_REG (Pmode, IP0_REGNUM);
+ temp1 = gen_rtx_REG (Pmode, IP1_REGNUM);
+
+ addr = this_rtx;
+ if (delta != 0)
+ {
+ if (delta >= -256 && delta < 256)
+ addr = gen_rtx_PRE_MODIFY (Pmode, this_rtx,
+ plus_constant (Pmode, this_rtx, delta));
+ else
+ aarch64_add_constant (this_regno, IP1_REGNUM, delta);
+ }
+
+ if (Pmode == ptr_mode)
+ aarch64_emit_move (temp0, gen_rtx_MEM (ptr_mode, addr));
+ else
+ aarch64_emit_move (temp0,
+ gen_rtx_ZERO_EXTEND (Pmode,
+ gen_rtx_MEM (ptr_mode, addr)));
+
+ if (vcall_offset >= -256 && vcall_offset < 4096 * POINTER_BYTES)
+ addr = plus_constant (Pmode, temp0, vcall_offset);
+ else
+ {
+ aarch64_build_constant (IP1_REGNUM, vcall_offset);
+ addr = gen_rtx_PLUS (Pmode, temp0, temp1);
+ }
+
+ if (Pmode == ptr_mode)
+ aarch64_emit_move (temp1, gen_rtx_MEM (ptr_mode,addr));
+ else
+ aarch64_emit_move (temp1,
+ gen_rtx_SIGN_EXTEND (Pmode,
+ gen_rtx_MEM (ptr_mode, addr)));
+
+ emit_insn (gen_add2_insn (this_rtx, temp1));
+ }
+
+ /* Generate a tail call to the target function. */
+ if (!TREE_USED (function))
+ {
+ assemble_external (function);
+ TREE_USED (function) = 1;
+ }
+ funexp = XEXP (DECL_RTL (function), 0);
+ funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
+ insn = emit_call_insn (gen_sibcall (funexp, const0_rtx, NULL_RTX));
+ SIBLING_CALL_P (insn) = 1;
+
+ insn = get_insns ();
+ shorten_branches (insn);
+ final_start_function (insn, file, 1);
+ final (insn, file, 1);
+ final_end_function ();
+
+ /* Stop pretending to be a post-reload pass. */
+ reload_completed = 0;
+}
+
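+/* Helper for aarch64_tls_referenced_p, called via for_each_rtx.  */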
+static int
+aarch64_tls_operand_p_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
+{
+ if (GET_CODE (*x) == SYMBOL_REF)
+ return SYMBOL_REF_TLS_MODEL (*x) != 0;
+
+ /* Don't recurse into UNSPEC_TLS looking for TLS symbols; these are
+ TLS offsets, not real symbol references. */
+ if (GET_CODE (*x) == UNSPEC
+ && XINT (*x, 1) == UNSPEC_TLS)
+ return -1;
+
+ return 0;
+}
+
+static bool
+aarch64_tls_referenced_p (rtx x)
+{
+ if (!TARGET_HAVE_TLS)
+ return false;
+
+ return for_each_rtx (&x, aarch64_tls_operand_p_1, NULL);
+}
+
+
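+/* Comparison function used to qsort and bsearch the aarch64_bitmasks
+   table of valid bitmask immediates.  */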
+static int
+aarch64_bitmasks_cmp (const void *i1, const void *i2)
+{
+ const unsigned HOST_WIDE_INT *imm1 = (const unsigned HOST_WIDE_INT *) i1;
+ const unsigned HOST_WIDE_INT *imm2 = (const unsigned HOST_WIDE_INT *) i2;
+
+ if (*imm1 < *imm2)
+ return -1;
+ if (*imm1 > *imm2)
+ return +1;
+ return 0;
+}
+
+
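+/* Build the sorted table of immediates representable as a bitmask
+   (a replicated, rotated run of set bits).  The nested loops enumerate
+   every (element size, run length, rotation) triple, e * (e - 1) entries
+   for each element size e, 5334 in all (AARCH64_NUM_BITMASKS).  */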
+static void
+aarch64_build_bitmask_table (void)
+{
+ unsigned HOST_WIDE_INT mask, imm;
+ unsigned int log_e, e, s, r;
+ unsigned int nimms = 0;
+
+ for (log_e = 1; log_e <= 6; log_e++)
+ {
+ e = 1 << log_e;
+ if (e == 64)
+ mask = ~(HOST_WIDE_INT) 0;
+ else
+ mask = ((HOST_WIDE_INT) 1 << e) - 1;
+ for (s = 1; s < e; s++)
+ {
+ for (r = 0; r < e; r++)
+ {
+ /* set s consecutive bits to 1 (s < 64) */
+ imm = ((unsigned HOST_WIDE_INT)1 << s) - 1;
+ /* rotate right by r */
+ if (r != 0)
+ imm = ((imm >> r) | (imm << (e - r))) & mask;
+ /* replicate the constant depending on SIMD size */
+ switch (log_e) {
+ case 1: imm |= (imm << 2);
+ case 2: imm |= (imm << 4);
+ case 3: imm |= (imm << 8);
+ case 4: imm |= (imm << 16);
+ case 5: imm |= (imm << 32);
+ case 6:
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ gcc_assert (nimms < AARCH64_NUM_BITMASKS);
+ aarch64_bitmasks[nimms++] = imm;
+ }
+ }
+ }
+
+ gcc_assert (nimms == AARCH64_NUM_BITMASKS);
+ qsort (aarch64_bitmasks, nimms, sizeof (aarch64_bitmasks[0]),
+ aarch64_bitmasks_cmp);
+}
+
+
+/* Return true if val can be encoded as a 12-bit unsigned immediate with
+ a left shift of 0 or 12 bits. */
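+/* For example, 0xabc and 0xabc000 are accepted, but 0xabc001 is not.  */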
+bool
+aarch64_uimm12_shift (HOST_WIDE_INT val)
+{
+ return ((val & (((HOST_WIDE_INT) 0xfff) << 0)) == val
+ || (val & (((HOST_WIDE_INT) 0xfff) << 12)) == val
+ );
+}
+
+
+/* Return true if val is an immediate that can be loaded into a
+ register by a MOVZ instruction. */
+static bool
+aarch64_movw_imm (HOST_WIDE_INT val, enum machine_mode mode)
+{
+ if (GET_MODE_SIZE (mode) > 4)
+ {
+ if ((val & (((HOST_WIDE_INT) 0xffff) << 32)) == val
+ || (val & (((HOST_WIDE_INT) 0xffff) << 48)) == val)
+ return 1;
+ }
+ else
+ {
+ /* Ignore sign extension. */
+ val &= (HOST_WIDE_INT) 0xffffffff;
+ }
+ return ((val & (((HOST_WIDE_INT) 0xffff) << 0)) == val
+ || (val & (((HOST_WIDE_INT) 0xffff) << 16)) == val);
+}
+
+
+/* Return true if val is a valid bitmask immediate. */
+bool
+aarch64_bitmask_imm (HOST_WIDE_INT val, enum machine_mode mode)
+{
+ if (GET_MODE_SIZE (mode) < 8)
+ {
+ /* Replicate bit pattern. */
+ val &= (HOST_WIDE_INT) 0xffffffff;
+ val |= val << 32;
+ }
+ return bsearch (&val, aarch64_bitmasks, AARCH64_NUM_BITMASKS,
+ sizeof (aarch64_bitmasks[0]), aarch64_bitmasks_cmp) != NULL;
+}
+
+
+/* Return true if val is an immediate that can be loaded into a
+ register in a single instruction. */
+bool
+aarch64_move_imm (HOST_WIDE_INT val, enum machine_mode mode)
+{
+ if (aarch64_movw_imm (val, mode) || aarch64_movw_imm (~val, mode))
+ return 1;
+ return aarch64_bitmask_imm (val, mode);
+}
+
+static bool
+aarch64_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
+{
+ rtx base, offset;
+
+ if (GET_CODE (x) == HIGH)
+ return true;
+
+ split_const (x, &base, &offset);
+ if (GET_CODE (base) == SYMBOL_REF || GET_CODE (base) == LABEL_REF)
+ {
+ if (aarch64_classify_symbol (base, SYMBOL_CONTEXT_ADR)
+ != SYMBOL_FORCE_TO_MEM)
+ return true;
+ else
+ /* Avoid generating a 64-bit relocation in ILP32; leave
+ to aarch64_expand_mov_immediate to handle it properly. */
+ return mode != ptr_mode;
+ }
+
+ return aarch64_tls_referenced_p (x);
+}
+
+/* Return true if register REGNO is a valid index register.
+ STRICT_P is true if REG_OK_STRICT is in effect. */
+
+bool
+aarch64_regno_ok_for_index_p (int regno, bool strict_p)
+{
+ if (!HARD_REGISTER_NUM_P (regno))
+ {
+ if (!strict_p)
+ return true;
+
+ if (!reg_renumber)
+ return false;
+
+ regno = reg_renumber[regno];
+ }
+ return GP_REGNUM_P (regno);
+}
+
+/* Return true if register REGNO is a valid base register.
+ STRICT_P is true if REG_OK_STRICT is in effect. */
+
+bool
+aarch64_regno_ok_for_base_p (int regno, bool strict_p)
+{
+ if (!HARD_REGISTER_NUM_P (regno))
+ {
+ if (!strict_p)
+ return true;
+
+ if (!reg_renumber)
+ return false;
+
+ regno = reg_renumber[regno];
+ }
+
+ /* The fake registers will be eliminated to either the stack or
+ hard frame pointer, both of which are usually valid base registers.
+ Reload deals with the cases where the eliminated form isn't valid. */
+ return (GP_REGNUM_P (regno)
+ || regno == SP_REGNUM
+ || regno == FRAME_POINTER_REGNUM
+ || regno == ARG_POINTER_REGNUM);
+}
+
+/* Return true if X is a valid base register.
+ STRICT_P is true if REG_OK_STRICT is in effect. */
+
+static bool
+aarch64_base_register_rtx_p (rtx x, bool strict_p)
+{
+ if (!strict_p && GET_CODE (x) == SUBREG)
+ x = SUBREG_REG (x);
+
+ return (REG_P (x) && aarch64_regno_ok_for_base_p (REGNO (x), strict_p));
+}
+
+/* Return true if address offset is a valid index. If it is, fill in INFO
+ appropriately. STRICT_P is true if REG_OK_STRICT is in effect. */
+
+static bool
+aarch64_classify_index (struct aarch64_address_info *info, rtx x,
+ enum machine_mode mode, bool strict_p)
+{
+ enum aarch64_address_type type;
+ rtx index;
+ int shift;
+
+ /* (reg:P) */
+ if ((REG_P (x) || GET_CODE (x) == SUBREG)
+ && GET_MODE (x) == Pmode)
+ {
+ type = ADDRESS_REG_REG;
+ index = x;
+ shift = 0;
+ }
+ /* (sign_extend:DI (reg:SI)) */
+ else if ((GET_CODE (x) == SIGN_EXTEND
+ || GET_CODE (x) == ZERO_EXTEND)
+ && GET_MODE (x) == DImode
+ && GET_MODE (XEXP (x, 0)) == SImode)
+ {
+ type = (GET_CODE (x) == SIGN_EXTEND)
+ ? ADDRESS_REG_SXTW : ADDRESS_REG_UXTW;
+ index = XEXP (x, 0);
+ shift = 0;
+ }
+ /* (mult:DI (sign_extend:DI (reg:SI)) (const_int scale)) */
+ else if (GET_CODE (x) == MULT
+ && (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
+ || GET_CODE (XEXP (x, 0)) == ZERO_EXTEND)
+ && GET_MODE (XEXP (x, 0)) == DImode
+ && GET_MODE (XEXP (XEXP (x, 0), 0)) == SImode
+ && CONST_INT_P (XEXP (x, 1)))
+ {
+ type = (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
+ ? ADDRESS_REG_SXTW : ADDRESS_REG_UXTW;
+ index = XEXP (XEXP (x, 0), 0);
+ shift = exact_log2 (INTVAL (XEXP (x, 1)));
+ }
+ /* (ashift:DI (sign_extend:DI (reg:SI)) (const_int shift)) */
+ else if (GET_CODE (x) == ASHIFT
+ && (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
+ || GET_CODE (XEXP (x, 0)) == ZERO_EXTEND)
+ && GET_MODE (XEXP (x, 0)) == DImode
+ && GET_MODE (XEXP (XEXP (x, 0), 0)) == SImode
+ && CONST_INT_P (XEXP (x, 1)))
+ {
+ type = (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
+ ? ADDRESS_REG_SXTW : ADDRESS_REG_UXTW;
+ index = XEXP (XEXP (x, 0), 0);
+ shift = INTVAL (XEXP (x, 1));
+ }
+ /* (sign_extract:DI (mult:DI (reg:DI) (const_int scale)) 32+shift 0) */
+ else if ((GET_CODE (x) == SIGN_EXTRACT
+ || GET_CODE (x) == ZERO_EXTRACT)
+ && GET_MODE (x) == DImode
+ && GET_CODE (XEXP (x, 0)) == MULT
+ && GET_MODE (XEXP (XEXP (x, 0), 0)) == DImode
+ && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
+ {
+ type = (GET_CODE (x) == SIGN_EXTRACT)
+ ? ADDRESS_REG_SXTW : ADDRESS_REG_UXTW;
+ index = XEXP (XEXP (x, 0), 0);
+ shift = exact_log2 (INTVAL (XEXP (XEXP (x, 0), 1)));
+ if (INTVAL (XEXP (x, 1)) != 32 + shift
+ || INTVAL (XEXP (x, 2)) != 0)
+ shift = -1;
+ }
+ /* (and:DI (mult:DI (reg:DI) (const_int scale))
+ (const_int 0xffffffff<<shift)) */
+ else if (GET_CODE (x) == AND
+ && GET_MODE (x) == DImode
+ && GET_CODE (XEXP (x, 0)) == MULT
+ && GET_MODE (XEXP (XEXP (x, 0), 0)) == DImode
+ && CONST_INT_P (XEXP (XEXP (x, 0), 1))
+ && CONST_INT_P (XEXP (x, 1)))
+ {
+ type = ADDRESS_REG_UXTW;
+ index = XEXP (XEXP (x, 0), 0);
+ shift = exact_log2 (INTVAL (XEXP (XEXP (x, 0), 1)));
+ if (INTVAL (XEXP (x, 1)) != (HOST_WIDE_INT)0xffffffff << shift)
+ shift = -1;
+ }
+ /* (sign_extract:DI (ashift:DI (reg:DI) (const_int shift)) 32+shift 0) */
+ else if ((GET_CODE (x) == SIGN_EXTRACT
+ || GET_CODE (x) == ZERO_EXTRACT)
+ && GET_MODE (x) == DImode
+ && GET_CODE (XEXP (x, 0)) == ASHIFT
+ && GET_MODE (XEXP (XEXP (x, 0), 0)) == DImode
+ && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
+ {
+ type = (GET_CODE (x) == SIGN_EXTRACT)
+ ? ADDRESS_REG_SXTW : ADDRESS_REG_UXTW;
+ index = XEXP (XEXP (x, 0), 0);
+ shift = INTVAL (XEXP (XEXP (x, 0), 1));
+ if (INTVAL (XEXP (x, 1)) != 32 + shift
+ || INTVAL (XEXP (x, 2)) != 0)
+ shift = -1;
+ }
+ /* (and:DI (ashift:DI (reg:DI) (const_int shift))
+ (const_int 0xffffffff<<shift)) */
+ else if (GET_CODE (x) == AND
+ && GET_MODE (x) == DImode
+ && GET_CODE (XEXP (x, 0)) == ASHIFT
+ && GET_MODE (XEXP (XEXP (x, 0), 0)) == DImode
+ && CONST_INT_P (XEXP (XEXP (x, 0), 1))
+ && CONST_INT_P (XEXP (x, 1)))
+ {
+ type = ADDRESS_REG_UXTW;
+ index = XEXP (XEXP (x, 0), 0);
+ shift = INTVAL (XEXP (XEXP (x, 0), 1));
+ if (INTVAL (XEXP (x, 1)) != (HOST_WIDE_INT)0xffffffff << shift)
+ shift = -1;
+ }
+ /* (mult:P (reg:P) (const_int scale)) */
+ else if (GET_CODE (x) == MULT
+ && GET_MODE (x) == Pmode
+ && GET_MODE (XEXP (x, 0)) == Pmode
+ && CONST_INT_P (XEXP (x, 1)))
+ {
+ type = ADDRESS_REG_REG;
+ index = XEXP (x, 0);
+ shift = exact_log2 (INTVAL (XEXP (x, 1)));
+ }
+ /* (ashift:P (reg:P) (const_int shift)) */
+ else if (GET_CODE (x) == ASHIFT
+ && GET_MODE (x) == Pmode
+ && GET_MODE (XEXP (x, 0)) == Pmode
+ && CONST_INT_P (XEXP (x, 1)))
+ {
+ type = ADDRESS_REG_REG;
+ index = XEXP (x, 0);
+ shift = INTVAL (XEXP (x, 1));
+ }
+ else
+ return false;
+
+ if (GET_CODE (index) == SUBREG)
+ index = SUBREG_REG (index);
+
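+  /* Accept only an unscaled index, or one scaled by the size of the
+     access (a left shift of 1, 2 or 3).  */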
+ if ((shift == 0 ||
+ (shift > 0 && shift <= 3
+ && (1 << shift) == GET_MODE_SIZE (mode)))
+ && REG_P (index)
+ && aarch64_regno_ok_for_index_p (REGNO (index), strict_p))
+ {
+ info->type = type;
+ info->offset = index;
+ info->shift = shift;
+ return true;
+ }
+
+ return false;
+}
+
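+/* Return true if OFFSET is within the signed 7-bit scaled immediate
+   range used by load/store pair instructions for MODE.  */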
+static inline bool
+offset_7bit_signed_scaled_p (enum machine_mode mode, HOST_WIDE_INT offset)
+{
+ return (offset >= -64 * GET_MODE_SIZE (mode)
+ && offset < 64 * GET_MODE_SIZE (mode)
+ && offset % GET_MODE_SIZE (mode) == 0);
+}
+
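+/* Return true if OFFSET is within the signed 9-bit unscaled immediate
+   range (the LDUR/STUR form).  */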
+static inline bool
+offset_9bit_signed_unscaled_p (enum machine_mode mode ATTRIBUTE_UNUSED,
+ HOST_WIDE_INT offset)
+{
+ return offset >= -256 && offset < 256;
+}
+
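+/* Return true if OFFSET is within the unsigned 12-bit scaled immediate
+   range of a plain LDR/STR for MODE.  */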
+static inline bool
+offset_12bit_unsigned_scaled_p (enum machine_mode mode, HOST_WIDE_INT offset)
+{
+ return (offset >= 0
+ && offset < 4096 * GET_MODE_SIZE (mode)
+ && offset % GET_MODE_SIZE (mode) == 0);
+}
+
+/* Return true if X is a valid address for machine mode MODE. If it is,
+ fill in INFO appropriately. STRICT_P is true if REG_OK_STRICT is in
+ effect. OUTER_CODE is PARALLEL for a load/store pair. */
+
+static bool
+aarch64_classify_address (struct aarch64_address_info *info,
+ rtx x, enum machine_mode mode,
+ RTX_CODE outer_code, bool strict_p)
+{
+ enum rtx_code code = GET_CODE (x);
+ rtx op0, op1;
+ bool allow_reg_index_p =
+ outer_code != PARALLEL && GET_MODE_SIZE(mode) != 16;
+
+ /* Don't support anything other than POST_INC or REG addressing for
+ AdvSIMD. */
+ if (aarch64_vector_mode_p (mode)
+ && (code != POST_INC && code != REG))
+ return false;
+
+ switch (code)
+ {
+ case REG:
+ case SUBREG:
+ info->type = ADDRESS_REG_IMM;
+ info->base = x;
+ info->offset = const0_rtx;
+ return aarch64_base_register_rtx_p (x, strict_p);
+
+ case PLUS:
+ op0 = XEXP (x, 0);
+ op1 = XEXP (x, 1);
+ if (GET_MODE_SIZE (mode) != 0
+ && CONST_INT_P (op1)
+ && aarch64_base_register_rtx_p (op0, strict_p))
+ {
+ HOST_WIDE_INT offset = INTVAL (op1);
+
+ info->type = ADDRESS_REG_IMM;
+ info->base = op0;
+ info->offset = op1;
+
+ /* TImode and TFmode values are allowed in both pairs of X
+ registers and individual Q registers. The available
+ address modes are:
+ X,X: 7-bit signed scaled offset
+ Q: 9-bit signed offset
+ We conservatively require an offset representable in either mode.
+ */
+ if (mode == TImode || mode == TFmode)
+ return (offset_7bit_signed_scaled_p (mode, offset)
+ && offset_9bit_signed_unscaled_p (mode, offset));
+
+ if (outer_code == PARALLEL)
+ return ((GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8)
+ && offset_7bit_signed_scaled_p (mode, offset));
+ else
+ return (offset_9bit_signed_unscaled_p (mode, offset)
+ || offset_12bit_unsigned_scaled_p (mode, offset));
+ }
+
+ if (allow_reg_index_p)
+ {
+ /* Look for base + (scaled/extended) index register. */
+ if (aarch64_base_register_rtx_p (op0, strict_p)
+ && aarch64_classify_index (info, op1, mode, strict_p))
+ {
+ info->base = op0;
+ return true;
+ }
+ if (aarch64_base_register_rtx_p (op1, strict_p)
+ && aarch64_classify_index (info, op0, mode, strict_p))
+ {
+ info->base = op1;
+ return true;
+ }
+ }
+
+ return false;
+
+ case POST_INC:
+ case POST_DEC:
+ case PRE_INC:
+ case PRE_DEC:
+ info->type = ADDRESS_REG_WB;
+ info->base = XEXP (x, 0);
+ info->offset = NULL_RTX;
+ return aarch64_base_register_rtx_p (info->base, strict_p);
+
+ case POST_MODIFY:
+ case PRE_MODIFY:
+ info->type = ADDRESS_REG_WB;
+ info->base = XEXP (x, 0);
+ if (GET_CODE (XEXP (x, 1)) == PLUS
+ && CONST_INT_P (XEXP (XEXP (x, 1), 1))
+ && rtx_equal_p (XEXP (XEXP (x, 1), 0), info->base)
+ && aarch64_base_register_rtx_p (info->base, strict_p))
+ {
+ HOST_WIDE_INT offset;
+ info->offset = XEXP (XEXP (x, 1), 1);
+ offset = INTVAL (info->offset);
+
+ /* TImode and TFmode values are allowed in both pairs of X
+ registers and individual Q registers. The available
+ address modes are:
+ X,X: 7-bit signed scaled offset
+ Q: 9-bit signed offset
+ We conservatively require an offset representable in either mode.
+ */
+ if (mode == TImode || mode == TFmode)
+ return (offset_7bit_signed_scaled_p (mode, offset)
+ && offset_9bit_signed_unscaled_p (mode, offset));
+
+ if (outer_code == PARALLEL)
+ return ((GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8)
+ && offset_7bit_signed_scaled_p (mode, offset));
+ else
+ return offset_9bit_signed_unscaled_p (mode, offset);
+ }
+ return false;
+
+ case CONST:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ /* load literal: pc-relative constant pool entry. Only supported
+ for SI mode or larger. */
+ info->type = ADDRESS_SYMBOLIC;
+ if (outer_code != PARALLEL && GET_MODE_SIZE (mode) >= 4)
+ {
+ rtx sym, addend;
+
+ split_const (x, &sym, &addend);
+ return (GET_CODE (sym) == LABEL_REF
+ || (GET_CODE (sym) == SYMBOL_REF
+ && CONSTANT_POOL_ADDRESS_P (sym)));
+ }
+ return false;
+
+ case LO_SUM:
+ info->type = ADDRESS_LO_SUM;
+ info->base = XEXP (x, 0);
+ info->offset = XEXP (x, 1);
+ if (allow_reg_index_p
+ && aarch64_base_register_rtx_p (info->base, strict_p))
+ {
+ rtx sym, offs;
+ split_const (info->offset, &sym, &offs);
+ if (GET_CODE (sym) == SYMBOL_REF
+ && (aarch64_classify_symbol (sym, SYMBOL_CONTEXT_MEM)
+ == SYMBOL_SMALL_ABSOLUTE))
+ {
+ /* The symbol and offset must be aligned to the access size. */
+ unsigned int align;
+ unsigned int ref_size;
+
+ if (CONSTANT_POOL_ADDRESS_P (sym))
+ align = GET_MODE_ALIGNMENT (get_pool_mode (sym));
+ else if (TREE_CONSTANT_POOL_ADDRESS_P (sym))
+ {
+ tree exp = SYMBOL_REF_DECL (sym);
+ align = TYPE_ALIGN (TREE_TYPE (exp));
+ align = CONSTANT_ALIGNMENT (exp, align);
+ }
+ else if (SYMBOL_REF_DECL (sym))
+ align = DECL_ALIGN (SYMBOL_REF_DECL (sym));
+ else
+ align = BITS_PER_UNIT;
+
+ ref_size = GET_MODE_SIZE (mode);
+ if (ref_size == 0)
+ ref_size = GET_MODE_SIZE (DImode);
+
+ return ((INTVAL (offs) & (ref_size - 1)) == 0
+ && ((align / BITS_PER_UNIT) & (ref_size - 1)) == 0);
+ }
+ }
+ return false;
+
+ default:
+ return false;
+ }
+}
+
+bool
+aarch64_symbolic_address_p (rtx x)
+{
+ rtx offset;
+
+ split_const (x, &x, &offset);
+ return GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF;
+}
+
+/* Classify the base of symbolic expression X, given that X appears in
+ context CONTEXT. */
+
+enum aarch64_symbol_type
+aarch64_classify_symbolic_expression (rtx x,
+ enum aarch64_symbol_context context)
+{
+ rtx offset;
+
+ split_const (x, &x, &offset);
+ return aarch64_classify_symbol (x, context);
+}
+
+
+/* Return TRUE if X is a legitimate address for accessing memory in
+ mode MODE. */
+static bool
+aarch64_legitimate_address_hook_p (enum machine_mode mode, rtx x, bool strict_p)
+{
+ struct aarch64_address_info addr;
+
+ return aarch64_classify_address (&addr, x, mode, MEM, strict_p);
+}
+
+/* Return TRUE if X is a legitimate address for accessing memory in
+ mode MODE. OUTER_CODE will be PARALLEL if this is a load/store
+ pair operation. */
+bool
+aarch64_legitimate_address_p (enum machine_mode mode, rtx x,
+ RTX_CODE outer_code, bool strict_p)
+{
+ struct aarch64_address_info addr;
+
+ return aarch64_classify_address (&addr, x, mode, outer_code, strict_p);
+}
+
+/* Return TRUE if rtx X is immediate constant 0.0 */
+bool
+aarch64_float_const_zero_rtx_p (rtx x)
+{
+ REAL_VALUE_TYPE r;
+
+ if (GET_MODE (x) == VOIDmode)
+ return false;
+
+ REAL_VALUE_FROM_CONST_DOUBLE (r, x);
+ if (REAL_VALUE_MINUS_ZERO (r))
+ return !HONOR_SIGNED_ZEROS (GET_MODE (x));
+ return REAL_VALUES_EQUAL (r, dconst0);
+}
+
+/* Return the fixed registers used for condition codes. */
+
+static bool
+aarch64_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
+{
+ *p1 = CC_REGNUM;
+ *p2 = INVALID_REGNUM;
+ return true;
+}
+
+enum machine_mode
+aarch64_select_cc_mode (RTX_CODE code, rtx x, rtx y)
+{
+  /* Equality, unordered and LTGT floating point comparisons return
+     CCFPmode; the ordered LT, LE, GT and GE comparisons return
+     CCFPEmode.  */
+ if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
+ {
+ switch (code)
+ {
+ case EQ:
+ case NE:
+ case UNORDERED:
+ case ORDERED:
+ case UNLT:
+ case UNLE:
+ case UNGT:
+ case UNGE:
+ case UNEQ:
+ case LTGT:
+ return CCFPmode;
+
+ case LT:
+ case LE:
+ case GT:
+ case GE:
+ return CCFPEmode;
+
+ default:
+ gcc_unreachable ();
+ }
+ }
+
+ if ((GET_MODE (x) == SImode || GET_MODE (x) == DImode)
+ && y == const0_rtx
+ && (code == EQ || code == NE || code == LT || code == GE)
+ && (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS || GET_CODE (x) == AND
+ || GET_CODE (x) == NEG))
+ return CC_NZmode;
+
+ /* A compare with a shifted operand. Because of canonicalization,
+ the comparison will have to be swapped when we emit the assembly
+ code. */
+ if ((GET_MODE (x) == SImode || GET_MODE (x) == DImode)
+ && (GET_CODE (y) == REG || GET_CODE (y) == SUBREG)
+ && (GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
+ || GET_CODE (x) == LSHIFTRT
+ || GET_CODE (x) == ZERO_EXTEND || GET_CODE (x) == SIGN_EXTEND))
+ return CC_SWPmode;
+
+ /* Similarly for a negated operand, but we can only do this for
+ equalities. */
+ if ((GET_MODE (x) == SImode || GET_MODE (x) == DImode)
+ && (GET_CODE (y) == REG || GET_CODE (y) == SUBREG)
+ && (code == EQ || code == NE)
+ && GET_CODE (x) == NEG)
+ return CC_Zmode;
+
+ /* A compare of a mode narrower than SI mode against zero can be done
+ by extending the value in the comparison. */
+ if ((GET_MODE (x) == QImode || GET_MODE (x) == HImode)
+ && y == const0_rtx)
+ /* Only use sign-extension if we really need it. */
+ return ((code == GT || code == GE || code == LE || code == LT)
+ ? CC_SESWPmode : CC_ZESWPmode);
+
+ /* For everything else, return CCmode. */
+ return CCmode;
+}
+
+static unsigned
+aarch64_get_condition_code (rtx x)
+{
+ enum machine_mode mode = GET_MODE (XEXP (x, 0));
+ enum rtx_code comp_code = GET_CODE (x);
+
+ if (GET_MODE_CLASS (mode) != MODE_CC)
+ mode = SELECT_CC_MODE (comp_code, XEXP (x, 0), XEXP (x, 1));
+
+ switch (mode)
+ {
+ case CCFPmode:
+ case CCFPEmode:
+ switch (comp_code)
+ {
+ case GE: return AARCH64_GE;
+ case GT: return AARCH64_GT;
+ case LE: return AARCH64_LS;
+ case LT: return AARCH64_MI;
+ case NE: return AARCH64_NE;
+ case EQ: return AARCH64_EQ;
+ case ORDERED: return AARCH64_VC;
+ case UNORDERED: return AARCH64_VS;
+ case UNLT: return AARCH64_LT;
+ case UNLE: return AARCH64_LE;
+ case UNGT: return AARCH64_HI;
+ case UNGE: return AARCH64_PL;
+ default: gcc_unreachable ();
+ }
+ break;
+
+ case CCmode:
+ switch (comp_code)
+ {
+ case NE: return AARCH64_NE;
+ case EQ: return AARCH64_EQ;
+ case GE: return AARCH64_GE;
+ case GT: return AARCH64_GT;
+ case LE: return AARCH64_LE;
+ case LT: return AARCH64_LT;
+ case GEU: return AARCH64_CS;
+ case GTU: return AARCH64_HI;
+ case LEU: return AARCH64_LS;
+ case LTU: return AARCH64_CC;
+ default: gcc_unreachable ();
+ }
+ break;
+
+ case CC_SWPmode:
+ case CC_ZESWPmode:
+ case CC_SESWPmode:
+ switch (comp_code)
+ {
+ case NE: return AARCH64_NE;
+ case EQ: return AARCH64_EQ;
+ case GE: return AARCH64_LE;
+ case GT: return AARCH64_LT;
+ case LE: return AARCH64_GE;
+ case LT: return AARCH64_GT;
+ case GEU: return AARCH64_LS;
+ case GTU: return AARCH64_CC;
+ case LEU: return AARCH64_CS;
+ case LTU: return AARCH64_HI;
+ default: gcc_unreachable ();
+ }
+ break;
+
+ case CC_NZmode:
+ switch (comp_code)
+ {
+ case NE: return AARCH64_NE;
+ case EQ: return AARCH64_EQ;
+ case GE: return AARCH64_PL;
+ case LT: return AARCH64_MI;
+ default: gcc_unreachable ();
+ }
+ break;
+
+ case CC_Zmode:
+ switch (comp_code)
+ {
+ case NE: return AARCH64_NE;
+ case EQ: return AARCH64_EQ;
+ default: gcc_unreachable ();
+ }
+ break;
+
+ default:
+ gcc_unreachable ();
+ break;
+ }
+}
+
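+/* Return the number of set bits in VALUE; each loop iteration clears
+   the lowest set bit.  */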
+static unsigned
+bit_count (unsigned HOST_WIDE_INT value)
+{
+ unsigned count = 0;
+
+ while (value)
+ {
+ count++;
+ value &= value - 1;
+ }
+
+ return count;
+}
+
+void
+aarch64_print_operand (FILE *f, rtx x, char code)
+{
+ switch (code)
+ {
+ /* An integer or symbol address without a preceding # sign. */
+ case 'c':
+ switch (GET_CODE (x))
+ {
+ case CONST_INT:
+ fprintf (f, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
+ break;
+
+ case SYMBOL_REF:
+ output_addr_const (f, x);
+ break;
+
+ case CONST:
+ if (GET_CODE (XEXP (x, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF)
+ {
+ output_addr_const (f, x);
+ break;
+ }
+ /* Fall through. */
+
+ default:
+ output_operand_lossage ("Unsupported operand for code '%c'", code);
+ }
+ break;
+
+ case 'e':
+ /* Print the sign/zero-extend size as a character 8->b, 16->h, 32->w. */
+ {
+ int n;
+
+ if (GET_CODE (x) != CONST_INT
+ || (n = exact_log2 (INTVAL (x) & ~7)) <= 0)
+ {
+ output_operand_lossage ("invalid operand for '%%%c'", code);
+ return;
+ }
+
+ switch (n)
+ {
+ case 3:
+ fputc ('b', f);
+ break;
+ case 4:
+ fputc ('h', f);
+ break;
+ case 5:
+ fputc ('w', f);
+ break;
+ default:
+ output_operand_lossage ("invalid operand for '%%%c'", code);
+ return;
+ }
+ }
+ break;
+
+ case 'p':
+ {
+ int n;
+
+ /* Print N such that 2^N == X. */
+ if (GET_CODE (x) != CONST_INT || (n = exact_log2 (INTVAL (x))) < 0)
+ {
+ output_operand_lossage ("invalid operand for '%%%c'", code);
+ return;
+ }
+
+ asm_fprintf (f, "%d", n);
+ }
+ break;
+
+ case 'P':
+ /* Print the number of non-zero bits in X (a const_int). */
+ if (GET_CODE (x) != CONST_INT)
+ {
+ output_operand_lossage ("invalid operand for '%%%c'", code);
+ return;
+ }
+
+ asm_fprintf (f, "%u", bit_count (INTVAL (x)));
+ break;
+
+ case 'H':
+ /* Print the higher numbered register of a pair (TImode) of regs. */
+ if (GET_CODE (x) != REG || !GP_REGNUM_P (REGNO (x) + 1))
+ {
+ output_operand_lossage ("invalid operand for '%%%c'", code);
+ return;
+ }
+
+ asm_fprintf (f, "%s", reg_names [REGNO (x) + 1]);
+ break;
+
+ case 'm':
+ /* Print a condition (eq, ne, etc). */
+
+ /* CONST_TRUE_RTX means always -- that's the default. */
+ if (x == const_true_rtx)
+ return;
+
+ if (!COMPARISON_P (x))
+ {
+ output_operand_lossage ("invalid operand for '%%%c'", code);
+ return;
+ }
+
+ fputs (aarch64_condition_codes[aarch64_get_condition_code (x)], f);
+ break;
+
+ case 'M':
+ /* Print the inverse of a condition (eq <-> ne, etc). */
+
+ /* CONST_TRUE_RTX means never -- that's the default. */
+ if (x == const_true_rtx)
+ {
+ fputs ("nv", f);
+ return;
+ }
+
+ if (!COMPARISON_P (x))
+ {
+ output_operand_lossage ("invalid operand for '%%%c'", code);
+ return;
+ }
+
+ fputs (aarch64_condition_codes[AARCH64_INVERSE_CONDITION_CODE
+ (aarch64_get_condition_code (x))], f);
+ break;
+
+ case 'b':
+ case 'h':
+ case 's':
+ case 'd':
+ case 'q':
+ /* Print a scalar FP/SIMD register name. */
+ if (!REG_P (x) || !FP_REGNUM_P (REGNO (x)))
+ {
+ output_operand_lossage ("incompatible floating point / vector register operand for '%%%c'", code);
+ return;
+ }
+ asm_fprintf (f, "%c%d", code, REGNO (x) - V0_REGNUM);
+ break;
+
+ case 'S':
+ case 'T':
+ case 'U':
+ case 'V':
+ /* Print the first FP/SIMD register name in a list. */
+ if (!REG_P (x) || !FP_REGNUM_P (REGNO (x)))
+ {
+ output_operand_lossage ("incompatible floating point / vector register operand for '%%%c'", code);
+ return;
+ }
+ asm_fprintf (f, "v%d", REGNO (x) - V0_REGNUM + (code - 'S'));
+ break;
+
+ case 'X':
+ /* Print bottom 16 bits of integer constant in hex. */
+ if (GET_CODE (x) != CONST_INT)
+ {
+ output_operand_lossage ("invalid operand for '%%%c'", code);
+ return;
+ }
+ asm_fprintf (f, "0x%wx", UINTVAL (x) & 0xffff);
+ break;
+
+ case 'w':
+ case 'x':
+ /* Print a general register name or the zero register (32-bit or
+ 64-bit). */
+ if (x == const0_rtx
+ || (CONST_DOUBLE_P (x) && aarch64_float_const_zero_rtx_p (x)))
+ {
+ asm_fprintf (f, "%czr", code);
+ break;
+ }
+
+ if (REG_P (x) && GP_REGNUM_P (REGNO (x)))
+ {
+ asm_fprintf (f, "%c%d", code, REGNO (x) - R0_REGNUM);
+ break;
+ }
+
+ if (REG_P (x) && REGNO (x) == SP_REGNUM)
+ {
+ asm_fprintf (f, "%ssp", code == 'w' ? "w" : "");
+ break;
+ }
+
+ /* Fall through */
+
+ case 0:
+      /* Print a normal operand.  If it's a general register, then we
+	 assume DImode.  */
+ if (x == NULL)
+ {
+ output_operand_lossage ("missing operand");
+ return;
+ }
+
+ switch (GET_CODE (x))
+ {
+ case REG:
+ asm_fprintf (f, "%s", reg_names [REGNO (x)]);
+ break;
+
+ case MEM:
+ aarch64_memory_reference_mode = GET_MODE (x);
+ output_address (XEXP (x, 0));
+ break;
+
+ case LABEL_REF:
+ case SYMBOL_REF:
+ output_addr_const (asm_out_file, x);
+ break;
+
+ case CONST_INT:
+ asm_fprintf (f, "%wd", INTVAL (x));
+ break;
+
+ case CONST_VECTOR:
+ if (GET_MODE_CLASS (GET_MODE (x)) == MODE_VECTOR_INT)
+ {
+ gcc_assert (aarch64_const_vec_all_same_int_p (x,
+ HOST_WIDE_INT_MIN,
+ HOST_WIDE_INT_MAX));
+ asm_fprintf (f, "%wd", INTVAL (CONST_VECTOR_ELT (x, 0)));
+ }
+ else if (aarch64_simd_imm_zero_p (x, GET_MODE (x)))
+ {
+ fputc ('0', f);
+ }
+ else
+ gcc_unreachable ();
+ break;
+
+ case CONST_DOUBLE:
+ /* CONST_DOUBLE can represent a double-width integer.
+ In this case, the mode of x is VOIDmode. */
+ if (GET_MODE (x) == VOIDmode)
+ ; /* Do Nothing. */
+ else if (aarch64_float_const_zero_rtx_p (x))
+ {
+ fputc ('0', f);
+ break;
+ }
+ else if (aarch64_float_const_representable_p (x))
+ {
+#define buf_size 20
+ char float_buf[buf_size] = {'\0'};
+ REAL_VALUE_TYPE r;
+ REAL_VALUE_FROM_CONST_DOUBLE (r, x);
+ real_to_decimal_for_mode (float_buf, &r,
+ buf_size, buf_size,
+ 1, GET_MODE (x));
+ asm_fprintf (asm_out_file, "%s", float_buf);
+ break;
+#undef buf_size
+ }
+ output_operand_lossage ("invalid constant");
+ return;
+ default:
+ output_operand_lossage ("invalid operand");
+ return;
+ }
+ break;
+
+ case 'A':
+ if (GET_CODE (x) == HIGH)
+ x = XEXP (x, 0);
+
+ switch (aarch64_classify_symbolic_expression (x, SYMBOL_CONTEXT_ADR))
+ {
+ case SYMBOL_SMALL_GOT:
+ asm_fprintf (asm_out_file, ":got:");
+ break;
+
+ case SYMBOL_SMALL_TLSGD:
+ asm_fprintf (asm_out_file, ":tlsgd:");
+ break;
+
+ case SYMBOL_SMALL_TLSDESC:
+ asm_fprintf (asm_out_file, ":tlsdesc:");
+ break;
+
+ case SYMBOL_SMALL_GOTTPREL:
+ asm_fprintf (asm_out_file, ":gottprel:");
+ break;
+
+ case SYMBOL_SMALL_TPREL:
+ asm_fprintf (asm_out_file, ":tprel:");
+ break;
+
+ case SYMBOL_TINY_GOT:
+ gcc_unreachable ();
+ break;
+
+ default:
+ break;
+ }
+ output_addr_const (asm_out_file, x);
+ break;
+
+ case 'L':
+ switch (aarch64_classify_symbolic_expression (x, SYMBOL_CONTEXT_ADR))
+ {
+ case SYMBOL_SMALL_GOT:
+ asm_fprintf (asm_out_file, ":lo12:");
+ break;
+
+ case SYMBOL_SMALL_TLSGD:
+ asm_fprintf (asm_out_file, ":tlsgd_lo12:");
+ break;
+
+ case SYMBOL_SMALL_TLSDESC:
+ asm_fprintf (asm_out_file, ":tlsdesc_lo12:");
+ break;
+
+ case SYMBOL_SMALL_GOTTPREL:
+ asm_fprintf (asm_out_file, ":gottprel_lo12:");
+ break;
+
+ case SYMBOL_SMALL_TPREL:
+ asm_fprintf (asm_out_file, ":tprel_lo12_nc:");
+ break;
+
+ case SYMBOL_TINY_GOT:
+ asm_fprintf (asm_out_file, ":got:");
+ break;
+
+ default:
+ break;
+ }
+ output_addr_const (asm_out_file, x);
+ break;
+
+ case 'G':
+
+ switch (aarch64_classify_symbolic_expression (x, SYMBOL_CONTEXT_ADR))
+ {
+ case SYMBOL_SMALL_TPREL:
+ asm_fprintf (asm_out_file, ":tprel_hi12:");
+ break;
+ default:
+ break;
+ }
+ output_addr_const (asm_out_file, x);
+ break;
+
+ default:
+ output_operand_lossage ("invalid operand prefix '%%%c'", code);
+ return;
+ }
+}
+
+void
+aarch64_print_operand_address (FILE *f, rtx x)
+{
+ struct aarch64_address_info addr;
+
+ if (aarch64_classify_address (&addr, x, aarch64_memory_reference_mode,
+ MEM, true))
+ switch (addr.type)
+ {
+ case ADDRESS_REG_IMM:
+ if (addr.offset == const0_rtx)
+ asm_fprintf (f, "[%s]", reg_names [REGNO (addr.base)]);
+ else
+ asm_fprintf (f, "[%s,%wd]", reg_names [REGNO (addr.base)],
+ INTVAL (addr.offset));
+ return;
+
+ case ADDRESS_REG_REG:
+ if (addr.shift == 0)
+ asm_fprintf (f, "[%s,%s]", reg_names [REGNO (addr.base)],
+ reg_names [REGNO (addr.offset)]);
+ else
+ asm_fprintf (f, "[%s,%s,lsl %u]", reg_names [REGNO (addr.base)],
+ reg_names [REGNO (addr.offset)], addr.shift);
+ return;
+
+ case ADDRESS_REG_UXTW:
+ if (addr.shift == 0)
+ asm_fprintf (f, "[%s,w%d,uxtw]", reg_names [REGNO (addr.base)],
+ REGNO (addr.offset) - R0_REGNUM);
+ else
+ asm_fprintf (f, "[%s,w%d,uxtw %u]", reg_names [REGNO (addr.base)],
+ REGNO (addr.offset) - R0_REGNUM, addr.shift);
+ return;
+
+ case ADDRESS_REG_SXTW:
+ if (addr.shift == 0)
+ asm_fprintf (f, "[%s,w%d,sxtw]", reg_names [REGNO (addr.base)],
+ REGNO (addr.offset) - R0_REGNUM);
+ else
+ asm_fprintf (f, "[%s,w%d,sxtw %u]", reg_names [REGNO (addr.base)],
+ REGNO (addr.offset) - R0_REGNUM, addr.shift);
+ return;
+
+ case ADDRESS_REG_WB:
+ switch (GET_CODE (x))
+ {
+ case PRE_INC:
+ asm_fprintf (f, "[%s,%d]!", reg_names [REGNO (addr.base)],
+ GET_MODE_SIZE (aarch64_memory_reference_mode));
+ return;
+ case POST_INC:
+ asm_fprintf (f, "[%s],%d", reg_names [REGNO (addr.base)],
+ GET_MODE_SIZE (aarch64_memory_reference_mode));
+ return;
+ case PRE_DEC:
+ asm_fprintf (f, "[%s,-%d]!", reg_names [REGNO (addr.base)],
+ GET_MODE_SIZE (aarch64_memory_reference_mode));
+ return;
+ case POST_DEC:
+ asm_fprintf (f, "[%s],-%d", reg_names [REGNO (addr.base)],
+ GET_MODE_SIZE (aarch64_memory_reference_mode));
+ return;
+ case PRE_MODIFY:
+ asm_fprintf (f, "[%s,%wd]!", reg_names [REGNO (addr.base)],
+ INTVAL (addr.offset));
+ return;
+ case POST_MODIFY:
+ asm_fprintf (f, "[%s],%wd", reg_names [REGNO (addr.base)],
+ INTVAL (addr.offset));
+ return;
+ default:
+ break;
+ }
+ break;
+
+ case ADDRESS_LO_SUM:
+ asm_fprintf (f, "[%s,#:lo12:", reg_names [REGNO (addr.base)]);
+ output_addr_const (f, addr.offset);
+ asm_fprintf (f, "]");
+ return;
+
+ case ADDRESS_SYMBOLIC:
+ break;
+ }
+
+ output_addr_const (f, x);
+}
+
+bool
+aarch64_label_mentioned_p (rtx x)
+{
+ const char *fmt;
+ int i;
+
+ if (GET_CODE (x) == LABEL_REF)
+ return true;
+
+ /* UNSPEC_TLS entries for a symbol include a LABEL_REF for the
+ referencing instruction, but they are constant offsets, not
+ symbols. */
+ if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLS)
+ return false;
+
+ fmt = GET_RTX_FORMAT (GET_CODE (x));
+ for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'E')
+ {
+ int j;
+
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ if (aarch64_label_mentioned_p (XVECEXP (x, i, j)))
+ return 1;
+ }
+ else if (fmt[i] == 'e' && aarch64_label_mentioned_p (XEXP (x, i)))
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Implement REGNO_REG_CLASS. */
+
+enum reg_class
+aarch64_regno_regclass (unsigned regno)
+{
+ if (GP_REGNUM_P (regno))
+ return CORE_REGS;
+
+ if (regno == SP_REGNUM)
+ return STACK_REG;
+
+ if (regno == FRAME_POINTER_REGNUM
+ || regno == ARG_POINTER_REGNUM)
+ return POINTER_REGS;
+
+ if (FP_REGNUM_P (regno))
+ return FP_LO_REGNUM_P (regno) ? FP_LO_REGS : FP_REGS;
+
+ return NO_REGS;
+}
+
+/* Try a machine-dependent way of reloading an illegitimate address
+ operand. If we find one, push the reload and return the new rtx. */
+
+rtx
+aarch64_legitimize_reload_address (rtx *x_p,
+ enum machine_mode mode,
+ int opnum, int type,
+ int ind_levels ATTRIBUTE_UNUSED)
+{
+ rtx x = *x_p;
+
+ /* Do not allow mem (plus (reg, const)) if vector mode. */
+ if (aarch64_vector_mode_p (mode)
+ && GET_CODE (x) == PLUS
+ && REG_P (XEXP (x, 0))
+ && CONST_INT_P (XEXP (x, 1)))
+ {
+ rtx orig_rtx = x;
+ x = copy_rtx (x);
+ push_reload (orig_rtx, NULL_RTX, x_p, NULL,
+ BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
+ opnum, (enum reload_type) type);
+ return x;
+ }
+
+ /* We must recognize output that we have already generated ourselves. */
+ if (GET_CODE (x) == PLUS
+ && GET_CODE (XEXP (x, 0)) == PLUS
+ && REG_P (XEXP (XEXP (x, 0), 0))
+ && CONST_INT_P (XEXP (XEXP (x, 0), 1))
+ && CONST_INT_P (XEXP (x, 1)))
+ {
+ push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
+ BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
+ opnum, (enum reload_type) type);
+ return x;
+ }
+
+ /* We wish to handle large displacements off a base register by splitting
+ the addend across an add and the mem insn. This can cut the number of
+ extra insns needed from 3 to 1. It is only useful for load/store of a
+     single register with a 12-bit offset field.  */
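+  /* For example, a DImode access at base + 0x12340 is split into an add
+     of 0x12000 to the base register, with 0x340 left in the address of
+     the mem.  */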
+ if (GET_CODE (x) == PLUS
+ && REG_P (XEXP (x, 0))
+ && CONST_INT_P (XEXP (x, 1))
+ && HARD_REGISTER_P (XEXP (x, 0))
+ && mode != TImode
+ && mode != TFmode
+ && aarch64_regno_ok_for_base_p (REGNO (XEXP (x, 0)), true))
+ {
+ HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
+ HOST_WIDE_INT low = val & 0xfff;
+ HOST_WIDE_INT high = val - low;
+ HOST_WIDE_INT offs;
+ rtx cst;
+ enum machine_mode xmode = GET_MODE (x);
+
+ /* In ILP32, xmode can be either DImode or SImode. */
+ gcc_assert (xmode == DImode || xmode == SImode);
+
+ /* Reload non-zero BLKmode offsets. This is because we cannot ascertain
+ BLKmode alignment. */
+ if (GET_MODE_SIZE (mode) == 0)
+ return NULL_RTX;
+
+ offs = low % GET_MODE_SIZE (mode);
+
+ /* Align misaligned offset by adjusting high part to compensate. */
+ if (offs != 0)
+ {
+ if (aarch64_uimm12_shift (high + offs))
+ {
+ /* Align down. */
+ low = low - offs;
+ high = high + offs;
+ }
+ else
+ {
+ /* Align up. */
+ offs = GET_MODE_SIZE (mode) - offs;
+ low = low + offs;
+ high = high + (low & 0x1000) - offs;
+ low &= 0xfff;
+ }
+ }
+
+ /* Check for overflow. */
+ if (high + low != val)
+ return NULL_RTX;
+
+ cst = GEN_INT (high);
+ if (!aarch64_uimm12_shift (high))
+ cst = force_const_mem (xmode, cst);
+
+ /* Reload high part into base reg, leaving the low part
+ in the mem instruction.
+ Note that replacing this gen_rtx_PLUS with plus_constant is
+ wrong in this case because we rely on the
+ (plus (plus reg c1) c2) structure being preserved so that
+ XEXP (*p, 0) in push_reload below uses the correct term. */
+ x = gen_rtx_PLUS (xmode,
+ gen_rtx_PLUS (xmode, XEXP (x, 0), cst),
+ GEN_INT (low));
+
+ push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
+ BASE_REG_CLASS, xmode, VOIDmode, 0, 0,
+ opnum, (enum reload_type) type);
+ return x;
+ }
+
+ return NULL_RTX;
+}
+
+
+static reg_class_t
+aarch64_secondary_reload (bool in_p ATTRIBUTE_UNUSED, rtx x,
+ reg_class_t rclass,
+ enum machine_mode mode,
+ secondary_reload_info *sri)
+{
+ /* Without the TARGET_SIMD instructions we cannot move a Q register
+ to a Q register directly. We need a scratch. */
+ if (REG_P (x) && (mode == TFmode || mode == TImode) && mode == GET_MODE (x)
+ && FP_REGNUM_P (REGNO (x)) && !TARGET_SIMD
+ && reg_class_subset_p (rclass, FP_REGS))
+ {
+ if (mode == TFmode)
+ sri->icode = CODE_FOR_aarch64_reload_movtf;
+ else if (mode == TImode)
+ sri->icode = CODE_FOR_aarch64_reload_movti;
+ return NO_REGS;
+ }
+
+  /* A TFmode or TImode memory access should be handled via FP_REGS
+ because AArch64 has richer addressing modes for LDR/STR instructions
+ than LDP/STP instructions. */
+ if (!TARGET_GENERAL_REGS_ONLY && rclass == CORE_REGS
+ && GET_MODE_SIZE (mode) == 16 && MEM_P (x))
+ return FP_REGS;
+
+ if (rclass == FP_REGS && (mode == TImode || mode == TFmode) && CONSTANT_P(x))
+ return CORE_REGS;
+
+ return NO_REGS;
+}
+
+static bool
+aarch64_can_eliminate (const int from, const int to)
+{
+ /* If we need a frame pointer, we must eliminate FRAME_POINTER_REGNUM into
+ HARD_FRAME_POINTER_REGNUM and not into STACK_POINTER_REGNUM. */
+
+ if (frame_pointer_needed)
+ {
+ if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
+ return true;
+ if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
+ return false;
+ if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM
+ && !cfun->calls_alloca)
+ return true;
+ if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
+ return true;
+ return false;
+ }
+ else
+ {
+ /* If we decided that we didn't need a leaf frame pointer but then used
+ LR in the function, then we'll want a frame pointer after all, so
+ prevent this elimination to ensure a frame pointer is used.
+
+ NOTE: the original value of flag_omit_frame_pointer gets trashed
+ IFF flag_omit_leaf_frame_pointer is true, so we check the value
+ of faked_omit_frame_pointer here (which is true when we always
+ wish to keep non-leaf frame pointers but only wish to keep leaf frame
+ pointers when LR is clobbered). */
+ if (to == STACK_POINTER_REGNUM
+ && df_regs_ever_live_p (LR_REGNUM)
+ && faked_omit_frame_pointer)
+ return false;
+ }
+
+ return true;
+}
+
+HOST_WIDE_INT
+aarch64_initial_elimination_offset (unsigned from, unsigned to)
+{
+ HOST_WIDE_INT frame_size;
+ HOST_WIDE_INT offset;
+
+ aarch64_layout_frame ();
+ frame_size = (get_frame_size () + cfun->machine->frame.saved_regs_size
+ + crtl->outgoing_args_size
+ + cfun->machine->saved_varargs_size);
+
+ frame_size = AARCH64_ROUND_UP (frame_size, STACK_BOUNDARY / BITS_PER_UNIT);
+ offset = frame_size;
+
+ if (to == HARD_FRAME_POINTER_REGNUM)
+ {
+ if (from == ARG_POINTER_REGNUM)
+ return offset - crtl->outgoing_args_size;
+
+ if (from == FRAME_POINTER_REGNUM)
+ return cfun->machine->frame.saved_regs_size + get_frame_size ();
+ }
+
+ if (to == STACK_POINTER_REGNUM)
+ {
+ if (from == FRAME_POINTER_REGNUM)
+ {
+ HOST_WIDE_INT elim = crtl->outgoing_args_size
+ + cfun->machine->frame.saved_regs_size
+ + get_frame_size ()
+ - cfun->machine->frame.fp_lr_offset;
+ elim = AARCH64_ROUND_UP (elim, STACK_BOUNDARY / BITS_PER_UNIT);
+ return elim;
+ }
+ }
+
+ return offset;
+}
+
+
+/* Implement RETURN_ADDR_RTX. We do not support moving back to a
+ previous frame. */
+
+rtx
+aarch64_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
+{
+ if (count != 0)
+ return const0_rtx;
+ return get_hard_reg_initial_val (Pmode, LR_REGNUM);
+}
+
+
+static void
+aarch64_asm_trampoline_template (FILE *f)
+{
+ if (TARGET_ILP32)
+ {
+ asm_fprintf (f, "\tldr\tw%d, .+16\n", IP1_REGNUM - R0_REGNUM);
+ asm_fprintf (f, "\tldr\tw%d, .+16\n", STATIC_CHAIN_REGNUM - R0_REGNUM);
+ }
+ else
+ {
+ asm_fprintf (f, "\tldr\t%s, .+16\n", reg_names [IP1_REGNUM]);
+ asm_fprintf (f, "\tldr\t%s, .+20\n", reg_names [STATIC_CHAIN_REGNUM]);
+ }
+ asm_fprintf (f, "\tbr\t%s\n", reg_names [IP1_REGNUM]);
+ assemble_aligned_integer (4, const0_rtx);
+ assemble_aligned_integer (POINTER_BYTES, const0_rtx);
+ assemble_aligned_integer (POINTER_BYTES, const0_rtx);
+}
+
+static void
+aarch64_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
+{
+ rtx fnaddr, mem, a_tramp;
+ const int tramp_code_sz = 16;
+
+  /* Don't need to copy the trailing D-words; we fill those in below.  */
+ emit_block_move (m_tramp, assemble_trampoline_template (),
+ GEN_INT (tramp_code_sz), BLOCK_OP_NORMAL);
+ mem = adjust_address (m_tramp, ptr_mode, tramp_code_sz);
+ fnaddr = XEXP (DECL_RTL (fndecl), 0);
+ if (GET_MODE (fnaddr) != ptr_mode)
+ fnaddr = convert_memory_address (ptr_mode, fnaddr);
+ emit_move_insn (mem, fnaddr);
+
+ mem = adjust_address (m_tramp, ptr_mode, tramp_code_sz + POINTER_BYTES);
+ emit_move_insn (mem, chain_value);
+
+ /* XXX We should really define a "clear_cache" pattern and use
+ gen_clear_cache(). */
+ a_tramp = XEXP (m_tramp, 0);
+ emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__clear_cache"),
+ LCT_NORMAL, VOIDmode, 2, a_tramp, ptr_mode,
+ plus_constant (ptr_mode, a_tramp, TRAMPOLINE_SIZE),
+ ptr_mode);
+}
+
+static unsigned char
+aarch64_class_max_nregs (reg_class_t regclass, enum machine_mode mode)
+{
+ switch (regclass)
+ {
+ case CORE_REGS:
+ case POINTER_REGS:
+ case GENERAL_REGS:
+ case ALL_REGS:
+ case FP_REGS:
+ case FP_LO_REGS:
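+      /* Vector modes are counted in units of 128-bit Q registers, all
+	 other modes in units of 64-bit registers.  */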
+ return
+ aarch64_vector_mode_p (mode) ? (GET_MODE_SIZE (mode) + 15) / 16 :
+ (GET_MODE_SIZE (mode) + 7) / 8;
+ case STACK_REG:
+ return 1;
+
+ case NO_REGS:
+ return 0;
+
+ default:
+ break;
+ }
+ gcc_unreachable ();
+}
+
+static reg_class_t
+aarch64_preferred_reload_class (rtx x, reg_class_t regclass)
+{
+ if (regclass == POINTER_REGS)
+ return GENERAL_REGS;
+
+ if (regclass == STACK_REG)
+ {
+ if (REG_P(x)
+ && reg_class_subset_p (REGNO_REG_CLASS (REGNO (x)), POINTER_REGS))
+ return regclass;
+
+ return NO_REGS;
+ }
+
+ /* If it's an integer immediate that MOVI can't handle, then
+ FP_REGS is not an option, so we return NO_REGS instead. */
+ if (CONST_INT_P (x) && reg_class_subset_p (regclass, FP_REGS)
+ && !aarch64_simd_imm_scalar_p (x, GET_MODE (x)))
+ return NO_REGS;
+
+  /* Register elimination can result in a request for
+     SP+constant->FP_REGS.  We cannot support such operations, which
+     use SP as source and an FP_REG as destination, so reject them
+     outright.  */
+ if (! reg_class_subset_p (regclass, GENERAL_REGS) && GET_CODE (x) == PLUS)
+ {
+ rtx lhs = XEXP (x, 0);
+
+ /* Look through a possible SUBREG introduced by ILP32. */
+ if (GET_CODE (lhs) == SUBREG)
+ lhs = SUBREG_REG (lhs);
+
+ gcc_assert (REG_P (lhs));
+ gcc_assert (reg_class_subset_p (REGNO_REG_CLASS (REGNO (lhs)),
+ POINTER_REGS));
+ return NO_REGS;
+ }
+
+ return regclass;
+}
+
+void
+aarch64_asm_output_labelref (FILE* f, const char *name)
+{
+ asm_fprintf (f, "%U%s", name);
+}
+
+static void
+aarch64_elf_asm_constructor (rtx symbol, int priority)
+{
+ if (priority == DEFAULT_INIT_PRIORITY)
+ default_ctor_section_asm_out_constructor (symbol, priority);
+ else
+ {
+ section *s;
+ char buf[18];
+ snprintf (buf, sizeof (buf), ".init_array.%.5u", priority);
+ s = get_section (buf, SECTION_WRITE, NULL);
+ switch_to_section (s);
+ assemble_align (POINTER_SIZE);
+ assemble_aligned_integer (POINTER_BYTES, symbol);
+ }
+}
+
+static void
+aarch64_elf_asm_destructor (rtx symbol, int priority)
+{
+ if (priority == DEFAULT_INIT_PRIORITY)
+ default_dtor_section_asm_out_destructor (symbol, priority);
+ else
+ {
+ section *s;
+ char buf[18];
+ snprintf (buf, sizeof (buf), ".fini_array.%.5u", priority);
+ s = get_section (buf, SECTION_WRITE, NULL);
+ switch_to_section (s);
+ assemble_align (POINTER_SIZE);
+ assemble_aligned_integer (POINTER_BYTES, symbol);
+ }
+}
+
+const char*
+aarch64_output_casesi (rtx *operands)
+{
+ char buf[100];
+ char label[100];
+ rtx diff_vec = PATTERN (NEXT_INSN (operands[2]));
+ int index;
+ static const char *const patterns[4][2] =
+ {
+ {
+ "ldrb\t%w3, [%0,%w1,uxtw]",
+ "add\t%3, %4, %w3, sxtb #2"
+ },
+ {
+ "ldrh\t%w3, [%0,%w1,uxtw #1]",
+ "add\t%3, %4, %w3, sxth #2"
+ },
+ {
+ "ldr\t%w3, [%0,%w1,uxtw #2]",
+ "add\t%3, %4, %w3, sxtw #2"
+ },
+ /* We assume that DImode is only generated when not optimizing and
+ that we don't really need 64-bit address offsets. That would
+ imply an object file with 8GB of code in a single function! */
+ {
+ "ldr\t%w3, [%0,%w1,uxtw #2]",
+ "add\t%3, %4, %w3, sxtw #2"
+ }
+ };
+
+ gcc_assert (GET_CODE (diff_vec) == ADDR_DIFF_VEC);
+
+ index = exact_log2 (GET_MODE_SIZE (GET_MODE (diff_vec)));
+
+ gcc_assert (index >= 0 && index <= 3);
+
+  /* Need to implement table size reduction by changing the code below.  */
+ output_asm_insn (patterns[index][0], operands);
+ ASM_GENERATE_INTERNAL_LABEL (label, "Lrtx", CODE_LABEL_NUMBER (operands[2]));
+ snprintf (buf, sizeof (buf),
+ "adr\t%%4, %s", targetm.strip_name_encoding (label));
+ output_asm_insn (buf, operands);
+ output_asm_insn (patterns[index][1], operands);
+ output_asm_insn ("br\t%3", operands);
+ assemble_label (asm_out_file, label);
+ return "";
+}
+
+
+/* Return size in bits of an arithmetic operand which is shifted/scaled and
+ masked such that it is suitable for a UXTB, UXTH, or UXTW extend
+ operator. */
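+/* For example, SHIFT == 1 with MASK == 0x1fe yields 8, matching a UXTB
+   operand shifted left by one.  */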
+
+int
+aarch64_uxt_size (int shift, HOST_WIDE_INT mask)
+{
+ if (shift >= 0 && shift <= 3)
+ {
+ int size;
+ for (size = 8; size <= 32; size *= 2)
+ {
+ HOST_WIDE_INT bits = ((HOST_WIDE_INT)1U << size) - 1;
+ if (mask == bits << shift)
+ return size;
+ }
+ }
+ return 0;
+}
+
+static bool
+aarch64_use_blocks_for_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED,
+ const_rtx x ATTRIBUTE_UNUSED)
+{
+ /* We can't use blocks for constants when we're using a per-function
+ constant pool. */
+ return false;
+}
+
+static section *
+aarch64_select_rtx_section (enum machine_mode mode ATTRIBUTE_UNUSED,
+ rtx x ATTRIBUTE_UNUSED,
+ unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
+{
+ /* Force all constant pool entries into the current function section. */
+ return function_section (current_function_decl);
+}
+
+
+/* Costs. */
+
+/* Helper function for rtx cost calculation. Strip a shift expression
+ from X. Returns the inner operand if successful, or the original
+ expression on failure. */
+static rtx
+aarch64_strip_shift (rtx x)
+{
+ rtx op = x;
+
+ if ((GET_CODE (op) == ASHIFT
+ || GET_CODE (op) == ASHIFTRT
+ || GET_CODE (op) == LSHIFTRT)
+ && CONST_INT_P (XEXP (op, 1)))
+ return XEXP (op, 0);
+
+ if (GET_CODE (op) == MULT
+ && CONST_INT_P (XEXP (op, 1))
+ && ((unsigned) exact_log2 (INTVAL (XEXP (op, 1)))) < 64)
+ return XEXP (op, 0);
+
+ return x;
+}
+
+/* Helper function for rtx cost calculation. Strip a shift or extend
+ expression from X. Returns the inner operand if successful, or the
+ original expression on failure. We deal with a number of possible
+ canonicalization variations here. */
+static rtx
+aarch64_strip_shift_or_extend (rtx x)
+{
+ rtx op = x;
+
+ /* Zero and sign extraction of a widened value. */
+ if ((GET_CODE (op) == ZERO_EXTRACT || GET_CODE (op) == SIGN_EXTRACT)
+ && XEXP (op, 2) == const0_rtx
+ && aarch64_is_extend_from_extract (GET_MODE (op), XEXP (XEXP (op, 0), 1),
+ XEXP (op, 1)))
+ return XEXP (XEXP (op, 0), 0);
+
+ /* It can also be represented (for zero-extend) as an AND with an
+ immediate. */
+ if (GET_CODE (op) == AND
+ && GET_CODE (XEXP (op, 0)) == MULT
+ && CONST_INT_P (XEXP (XEXP (op, 0), 1))
+ && CONST_INT_P (XEXP (op, 1))
+ && aarch64_uxt_size (exact_log2 (INTVAL (XEXP (XEXP (op, 0), 1))),
+ INTVAL (XEXP (op, 1))) != 0)
+ return XEXP (XEXP (op, 0), 0);
+
+ /* Now handle extended register, as this may also have an optional
+ left shift by 1..4. */
+ if (GET_CODE (op) == ASHIFT
+ && CONST_INT_P (XEXP (op, 1))
+ && ((unsigned HOST_WIDE_INT) INTVAL (XEXP (op, 1))) <= 4)
+ op = XEXP (op, 0);
+
+ if (GET_CODE (op) == ZERO_EXTEND
+ || GET_CODE (op) == SIGN_EXTEND)
+ op = XEXP (op, 0);
+
+ if (op != x)
+ return op;
+
+ return aarch64_strip_shift (x);
+}
+
+/* Calculate the cost of calculating X, storing it in *COST. Result
+ is true if the total cost of the operation has now been calculated. */
+static bool
+aarch64_rtx_costs (rtx x, int code, int outer ATTRIBUTE_UNUSED,
+ int param ATTRIBUTE_UNUSED, int *cost, bool speed)
+{
+ rtx op0, op1;
+ const struct cpu_cost_table *extra_cost
+ = aarch64_tune_params->insn_extra_cost;
+
+ switch (code)
+ {
+ case SET:
+ op0 = SET_DEST (x);
+ op1 = SET_SRC (x);
+
+ switch (GET_CODE (op0))
+ {
+ case MEM:
+ if (speed)
+ *cost += extra_cost->ldst.store;
+
+ if (op1 != const0_rtx)
+ *cost += rtx_cost (op1, SET, 1, speed);
+ return true;
+
+ case SUBREG:
+ if (! REG_P (SUBREG_REG (op0)))
+ *cost += rtx_cost (SUBREG_REG (op0), SET, 0, speed);
+ /* Fall through. */
+ case REG:
+ /* Cost is just the cost of the RHS of the set. */
+ *cost += rtx_cost (op1, SET, 1, true);
+ return true;
+
+ case ZERO_EXTRACT: /* Bit-field insertion. */
+ case SIGN_EXTRACT:
+ /* Strip any redundant widening of the RHS to meet the width of
+ the target. */
+ if (GET_CODE (op1) == SUBREG)
+ op1 = SUBREG_REG (op1);
+ if ((GET_CODE (op1) == ZERO_EXTEND
+ || GET_CODE (op1) == SIGN_EXTEND)
+ && GET_CODE (XEXP (op0, 1)) == CONST_INT
+ && (GET_MODE_BITSIZE (GET_MODE (XEXP (op1, 0)))
+ >= INTVAL (XEXP (op0, 1))))
+ op1 = XEXP (op1, 0);
+ *cost += rtx_cost (op1, SET, 1, speed);
+ return true;
+
+ default:
+ break;
+ }
+ return false;
+
+ case MEM:
+ if (speed)
+ *cost += extra_cost->ldst.load;
+
+ return true;
+
+ case NEG:
+ op0 = CONST0_RTX (GET_MODE (x));
+ op1 = XEXP (x, 0);
+ goto cost_minus;
+
+ case COMPARE:
+ op0 = XEXP (x, 0);
+ op1 = XEXP (x, 1);
+
+ if (op1 == const0_rtx
+ && GET_CODE (op0) == AND)
+ {
+ x = op0;
+ goto cost_logic;
+ }
+
+ /* Comparisons can work if the order is swapped.
+ Canonicalization puts the more complex operation first, but
+ we want it in op1. */
+ if (! (REG_P (op0)
+ || (GET_CODE (op0) == SUBREG && REG_P (SUBREG_REG (op0)))))
+ {
+ op0 = XEXP (x, 1);
+ op1 = XEXP (x, 0);
+ }
+ goto cost_minus;
+
+ case MINUS:
+ op0 = XEXP (x, 0);
+ op1 = XEXP (x, 1);
+
+ cost_minus:
+ if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
+ || (GET_MODE_CLASS (GET_MODE (x)) == MODE_CC
+ && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT))
+ {
+ if (op0 != const0_rtx)
+ *cost += rtx_cost (op0, MINUS, 0, speed);
+
+ if (CONST_INT_P (op1))
+ {
+ if (!aarch64_uimm12_shift (INTVAL (op1)))
+ *cost += rtx_cost (op1, MINUS, 1, speed);
+ }
+ else
+ {
+ op1 = aarch64_strip_shift_or_extend (op1);
+ *cost += rtx_cost (op1, MINUS, 1, speed);
+ }
+ return true;
+ }
+
+ return false;
+
+ case PLUS:
+ op0 = XEXP (x, 0);
+ op1 = XEXP (x, 1);
+
+ if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
+ {
+ if (CONST_INT_P (op1) && aarch64_uimm12_shift (INTVAL (op1)))
+ {
+ *cost += rtx_cost (op0, PLUS, 0, speed);
+ }
+ else
+ {
+ rtx new_op0 = aarch64_strip_shift_or_extend (op0);
+
+ if (new_op0 == op0
+ && GET_CODE (op0) == MULT)
+ {
+ if ((GET_CODE (XEXP (op0, 0)) == ZERO_EXTEND
+ && GET_CODE (XEXP (op0, 1)) == ZERO_EXTEND)
+ || (GET_CODE (XEXP (op0, 0)) == SIGN_EXTEND
+ && GET_CODE (XEXP (op0, 1)) == SIGN_EXTEND))
+ {
+ *cost += (rtx_cost (XEXP (XEXP (op0, 0), 0), MULT, 0,
+ speed)
+ + rtx_cost (XEXP (XEXP (op0, 1), 0), MULT, 1,
+ speed)
+ + rtx_cost (op1, PLUS, 1, speed));
+ if (speed)
+ *cost +=
+ extra_cost->mult[GET_MODE (x) == DImode].extend_add;
+ return true;
+ }
+
+ *cost += (rtx_cost (XEXP (op0, 0), MULT, 0, speed)
+ + rtx_cost (XEXP (op0, 1), MULT, 1, speed)
+ + rtx_cost (op1, PLUS, 1, speed));
+
+ if (speed)
+ *cost += extra_cost->mult[GET_MODE (x) == DImode].add;
+
+ return true;
+ }
+
+ *cost += (rtx_cost (new_op0, PLUS, 0, speed)
+ + rtx_cost (op1, PLUS, 1, speed));
+ }
+ return true;
+ }
+
+ return false;
+
+ case IOR:
+ case XOR:
+ case AND:
+ cost_logic:
+ op0 = XEXP (x, 0);
+ op1 = XEXP (x, 1);
+
+ if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
+ {
+ if (CONST_INT_P (op1)
+ && aarch64_bitmask_imm (INTVAL (op1), GET_MODE (x)))
+ {
+ *cost += rtx_cost (op0, AND, 0, speed);
+ }
+ else
+ {
+ if (GET_CODE (op0) == NOT)
+ op0 = XEXP (op0, 0);
+ op0 = aarch64_strip_shift (op0);
+ *cost += (rtx_cost (op0, AND, 0, speed)
+ + rtx_cost (op1, AND, 1, speed));
+ }
+ return true;
+ }
+ return false;
+
+ case ZERO_EXTEND:
+ if ((GET_MODE (x) == DImode
+ && GET_MODE (XEXP (x, 0)) == SImode)
+ || GET_CODE (XEXP (x, 0)) == MEM)
+ {
+ *cost += rtx_cost (XEXP (x, 0), ZERO_EXTEND, 0, speed);
+ return true;
+ }
+ return false;
+
+ case SIGN_EXTEND:
+ if (GET_CODE (XEXP (x, 0)) == MEM)
+ {
+ *cost += rtx_cost (XEXP (x, 0), SIGN_EXTEND, 0, speed);
+ return true;
+ }
+ return false;
+
+ case ROTATE:
+ if (!CONST_INT_P (XEXP (x, 1)))
+ *cost += COSTS_N_INSNS (2);
+ /* Fall through. */
+ case ROTATERT:
+ case LSHIFTRT:
+ case ASHIFT:
+ case ASHIFTRT:
+
+ /* Shifting by a register often takes an extra cycle. */
+ if (speed && !CONST_INT_P (XEXP (x, 1)))
+ *cost += extra_cost->alu.arith_shift_reg;
+
+ *cost += rtx_cost (XEXP (x, 0), ASHIFT, 0, speed);
+ return true;
+
+ case HIGH:
+ if (!CONSTANT_P (XEXP (x, 0)))
+ *cost += rtx_cost (XEXP (x, 0), HIGH, 0, speed);
+ return true;
+
+ case LO_SUM:
+ if (!CONSTANT_P (XEXP (x, 1)))
+ *cost += rtx_cost (XEXP (x, 1), LO_SUM, 1, speed);
+ *cost += rtx_cost (XEXP (x, 0), LO_SUM, 0, speed);
+ return true;
+
+ case ZERO_EXTRACT:
+ case SIGN_EXTRACT:
+ *cost += rtx_cost (XEXP (x, 0), ZERO_EXTRACT, 0, speed);
+ return true;
+
+ case MULT:
+ op0 = XEXP (x, 0);
+ op1 = XEXP (x, 1);
+
+ *cost = COSTS_N_INSNS (1);
+ if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
+ {
+ if (CONST_INT_P (op1)
+ && exact_log2 (INTVAL (op1)) > 0)
+ {
+ *cost += rtx_cost (op0, ASHIFT, 0, speed);
+ return true;
+ }
+
+ if ((GET_CODE (op0) == ZERO_EXTEND
+ && GET_CODE (op1) == ZERO_EXTEND)
+ || (GET_CODE (op0) == SIGN_EXTEND
+ && GET_CODE (op1) == SIGN_EXTEND))
+ {
+ *cost += (rtx_cost (XEXP (op0, 0), MULT, 0, speed)
+ + rtx_cost (XEXP (op1, 0), MULT, 1, speed));
+ if (speed)
+ *cost += extra_cost->mult[GET_MODE (x) == DImode].extend;
+ return true;
+ }
+
+ if (speed)
+ *cost += extra_cost->mult[GET_MODE (x) == DImode].simple;
+ }
+ else if (speed)
+ {
+ if (GET_MODE (x) == DFmode)
+ *cost += extra_cost->fp[1].mult;
+ else if (GET_MODE (x) == SFmode)
+ *cost += extra_cost->fp[0].mult;
+ }
+
+ return false; /* All arguments need to be in registers. */
+
+ case MOD:
+ case UMOD:
+ *cost = COSTS_N_INSNS (2);
+ if (speed)
+ {
+ if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
+ *cost += (extra_cost->mult[GET_MODE (x) == DImode].add
+ + extra_cost->mult[GET_MODE (x) == DImode].idiv);
+ else if (GET_MODE (x) == DFmode)
+ *cost += (extra_cost->fp[1].mult
+ + extra_cost->fp[1].div);
+ else if (GET_MODE (x) == SFmode)
+ *cost += (extra_cost->fp[0].mult
+ + extra_cost->fp[0].div);
+ }
+ return false; /* All arguments need to be in registers. */
+
+ case DIV:
+ case UDIV:
+ *cost = COSTS_N_INSNS (1);
+ if (speed)
+ {
+ if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
+ *cost += extra_cost->mult[GET_MODE (x) == DImode].idiv;
+ else if (GET_MODE (x) == DFmode)
+ *cost += extra_cost->fp[1].div;
+ else if (GET_MODE (x) == SFmode)
+ *cost += extra_cost->fp[0].div;
+ }
+ return false; /* All arguments need to be in registers. */
+
+ default:
+ break;
+ }
+ return false;
+}
+
+static int
+aarch64_address_cost (rtx x ATTRIBUTE_UNUSED,
+ enum machine_mode mode ATTRIBUTE_UNUSED,
+ addr_space_t as ATTRIBUTE_UNUSED, bool speed ATTRIBUTE_UNUSED)
+{
+ enum rtx_code c = GET_CODE (x);
+ const struct cpu_addrcost_table *addr_cost = aarch64_tune_params->addr_cost;
+
+ if (c == PRE_INC || c == PRE_DEC || c == PRE_MODIFY)
+ return addr_cost->pre_modify;
+
+ if (c == POST_INC || c == POST_DEC || c == POST_MODIFY)
+ return addr_cost->post_modify;
+
+ if (c == PLUS)
+ {
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT)
+ return addr_cost->imm_offset;
+ else if (GET_CODE (XEXP (x, 0)) == MULT
+ || GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
+ || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
+ return addr_cost->register_extend;
+
+ return addr_cost->register_offset;
+ }
+ else if (c == MEM || c == LABEL_REF || c == SYMBOL_REF)
+ return addr_cost->imm_offset;
+
+ return 0;
+}
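+
+/* Illustrative sketch (register numbers are arbitrary examples) of how the
+ classification above maps common address forms onto the tuning table:
+
+ (plus (reg x0) (const_int 16)) -> imm_offset, i.e. [x0, #16]
+ (plus (mult (reg x1) (const_int 4)) (reg x0)) -> register_extend
+ (plus (reg x1) (reg x0)) -> register_offset
+ (post_inc (reg x0)) -> post_modify
+ (symbol_ref "foo") -> imm_offset. */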
+
+static int
+aarch64_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
+ reg_class_t from, reg_class_t to)
+{
+ const struct cpu_regmove_cost *regmove_cost
+ = aarch64_tune_params->regmove_cost;
+
+ /* Moving between GPR and stack cost is the same as GP2GP. */
+ if ((from == GENERAL_REGS && to == STACK_REG)
+ || (to == GENERAL_REGS && from == STACK_REG))
+ return regmove_cost->GP2GP;
+
+ /* To/From the stack register, we move via the gprs. */
+ if (to == STACK_REG || from == STACK_REG)
+ return aarch64_register_move_cost (mode, from, GENERAL_REGS)
+ + aarch64_register_move_cost (mode, GENERAL_REGS, to);
+
+ if (from == GENERAL_REGS && to == GENERAL_REGS)
+ return regmove_cost->GP2GP;
+ else if (from == GENERAL_REGS)
+ return regmove_cost->GP2FP;
+ else if (to == GENERAL_REGS)
+ return regmove_cost->FP2GP;
+
+ /* When AdvSIMD instructions are disabled it is not possible to move
+ a 128-bit value directly between Q registers. This is handled in
+ secondary reload. A general register is used as a scratch to move
+ the upper DI value and the lower DI value is moved directly,
+ hence the cost is the sum of three moves. */
+
+ if (! TARGET_SIMD && GET_MODE_SIZE (from) == 128 && GET_MODE_SIZE (to) == 128)
+ return regmove_cost->GP2FP + regmove_cost->FP2GP + regmove_cost->FP2FP;
+
+ return regmove_cost->FP2FP;
+}
+
+static int
+aarch64_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
+ reg_class_t rclass ATTRIBUTE_UNUSED,
+ bool in ATTRIBUTE_UNUSED)
+{
+ return aarch64_tune_params->memmov_cost;
+}
+
+/* Return the number of instructions that can be issued per cycle. */
+static int
+aarch64_sched_issue_rate (void)
+{
+ return aarch64_tune_params->issue_rate;
+}
+
+/* Vectorizer cost model target hooks. */
+
+/* Implement targetm.vectorize.builtin_vectorization_cost. */
+static int
+aarch64_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
+ tree vectype,
+ int misalign ATTRIBUTE_UNUSED)
+{
+ unsigned elements;
+
+ switch (type_of_cost)
+ {
+ case scalar_stmt:
+ return aarch64_tune_params->vec_costs->scalar_stmt_cost;
+
+ case scalar_load:
+ return aarch64_tune_params->vec_costs->scalar_load_cost;
+
+ case scalar_store:
+ return aarch64_tune_params->vec_costs->scalar_store_cost;
+
+ case vector_stmt:
+ return aarch64_tune_params->vec_costs->vec_stmt_cost;
+
+ case vector_load:
+ return aarch64_tune_params->vec_costs->vec_align_load_cost;
+
+ case vector_store:
+ return aarch64_tune_params->vec_costs->vec_store_cost;
+
+ case vec_to_scalar:
+ return aarch64_tune_params->vec_costs->vec_to_scalar_cost;
+
+ case scalar_to_vec:
+ return aarch64_tune_params->vec_costs->scalar_to_vec_cost;
+
+ case unaligned_load:
+ return aarch64_tune_params->vec_costs->vec_unalign_load_cost;
+
+ case unaligned_store:
+ return aarch64_tune_params->vec_costs->vec_unalign_store_cost;
+
+ case cond_branch_taken:
+ return aarch64_tune_params->vec_costs->cond_taken_branch_cost;
+
+ case cond_branch_not_taken:
+ return aarch64_tune_params->vec_costs->cond_not_taken_branch_cost;
+
+ case vec_perm:
+ case vec_promote_demote:
+ return aarch64_tune_params->vec_costs->vec_stmt_cost;
+
+ case vec_construct:
+ elements = TYPE_VECTOR_SUBPARTS (vectype);
+ return elements / 2 + 1;
+
+ default:
+ gcc_unreachable ();
+ }
+}
+
+/* Implement targetm.vectorize.add_stmt_cost. */
+static unsigned
+aarch64_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
+ struct _stmt_vec_info *stmt_info, int misalign,
+ enum vect_cost_model_location where)
+{
+ unsigned *cost = (unsigned *) data;
+ unsigned retval = 0;
+
+ if (flag_vect_cost_model)
+ {
+ tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
+ int stmt_cost =
+ aarch64_builtin_vectorization_cost (kind, vectype, misalign);
+
+ /* Statements in an inner loop relative to the loop being
+ vectorized are weighted more heavily. The value here is
+ a function (linear for now) of the loop nest level. */
+ if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
+ {
+ loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
+ struct loop *loop = LOOP_VINFO_LOOP (loop_info);
+ unsigned nest_level = loop_depth (loop);
+
+ count *= nest_level;
+ }
+
+ retval = (unsigned) (count * stmt_cost);
+ cost[where] += retval;
+ }
+
+ return retval;
+}
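+
+/* For example (illustrative only): a vector_load statement in the body of a
+ loop nested two deep, added with COUNT == 1, contributes
+ 2 * vec_align_load_cost to cost[vect_body], since COUNT is scaled by the
+ loop_depth of the containing loop before being multiplied by the
+ per-statement cost from aarch64_builtin_vectorization_cost. */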
+
+static void initialize_aarch64_code_model (void);
+
+/* Parse the architecture extension string. */
+
+static void
+aarch64_parse_extension (char *str)
+{
+ /* The extension string is parsed left to right. */
+ const struct aarch64_option_extension *opt = NULL;
+
+ /* Flag to say whether we are adding or removing an extension. */
+ int adding_ext = -1;
+
+ while (str != NULL && *str != 0)
+ {
+ char *ext;
+ size_t len;
+
+ str++;
+ ext = strchr (str, '+');
+
+ if (ext != NULL)
+ len = ext - str;
+ else
+ len = strlen (str);
+
+ if (len >= 2 && strncmp (str, "no", 2) == 0)
+ {
+ adding_ext = 0;
+ len -= 2;
+ str += 2;
+ }
+ else if (len > 0)
+ adding_ext = 1;
+
+ if (len == 0)
+ {
+ error ("missing feature modifier after %qs", "+no");
+ return;
+ }
+
+ /* Scan over the extensions table trying to find an exact match. */
+ for (opt = all_extensions; opt->name != NULL; opt++)
+ {
+ if (strlen (opt->name) == len && strncmp (opt->name, str, len) == 0)
+ {
+ /* Add or remove the extension. */
+ if (adding_ext)
+ aarch64_isa_flags |= opt->flags_on;
+ else
+ aarch64_isa_flags &= ~(opt->flags_off);
+ break;
+ }
+ }
+
+ if (opt->name == NULL)
+ {
+ /* Extension not found in list. */
+ error ("unknown feature modifier %qs", str);
+ return;
+ }
+
+ str = ext;
+ };
+
+ return;
+}
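+
+/* As an illustration of the loop above: for an option such as
+ -march=armv8-a+crc+nocrypto, the extension string "+crc+nocrypto" is walked
+ left to right; "crc" ORs that entry's flags_on into aarch64_isa_flags and
+ "nocrypto" clears the crypto entry's flags_off. The available extension
+ names come from aarch64-option-extensions.def. */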
+
+/* Parse the ARCH string. */
+
+static void
+aarch64_parse_arch (void)
+{
+ char *ext;
+ const struct processor *arch;
+ char *str = (char *) alloca (strlen (aarch64_arch_string) + 1);
+ size_t len;
+
+ strcpy (str, aarch64_arch_string);
+
+ ext = strchr (str, '+');
+
+ if (ext != NULL)
+ len = ext - str;
+ else
+ len = strlen (str);
+
+ if (len == 0)
+ {
+ error ("missing arch name in -march=%qs", str);
+ return;
+ }
+
+ /* Loop through the list of supported ARCHs to find a match. */
+ for (arch = all_architectures; arch->name != NULL; arch++)
+ {
+ if (strlen (arch->name) == len && strncmp (arch->name, str, len) == 0)
+ {
+ selected_arch = arch;
+ aarch64_isa_flags = selected_arch->flags;
+
+ if (!selected_cpu)
+ selected_cpu = &all_cores[selected_arch->core];
+
+ if (ext != NULL)
+ {
+ /* ARCH string contains at least one extension. */
+ aarch64_parse_extension (ext);
+ }
+
+ if (strcmp (selected_arch->arch, selected_cpu->arch))
+ {
+ warning (0, "switch -mcpu=%s conflicts with -march=%s switch",
+ selected_cpu->name, selected_arch->name);
+ }
+
+ return;
+ }
+ }
+
+ /* ARCH name not found in list. */
+ error ("unknown value %qs for -march", str);
+ return;
+}
+
+/* Parse the CPU string. */
+
+static void
+aarch64_parse_cpu (void)
+{
+ char *ext;
+ const struct processor *cpu;
+ char *str = (char *) alloca (strlen (aarch64_cpu_string) + 1);
+ size_t len;
+
+ strcpy (str, aarch64_cpu_string);
+
+ ext = strchr (str, '+');
+
+ if (ext != NULL)
+ len = ext - str;
+ else
+ len = strlen (str);
+
+ if (len == 0)
+ {
+ error ("missing cpu name in -mcpu=%qs", str);
+ return;
+ }
+
+ /* Loop through the list of supported CPUs to find a match. */
+ for (cpu = all_cores; cpu->name != NULL; cpu++)
+ {
+ if (strlen (cpu->name) == len && strncmp (cpu->name, str, len) == 0)
+ {
+ selected_cpu = cpu;
+ selected_tune = cpu;
+ aarch64_isa_flags = selected_cpu->flags;
+
+ if (ext != NULL)
+ {
+ /* CPU string contains at least one extension. */
+ aarch64_parse_extension (ext);
+ }
+
+ return;
+ }
+ }
+
+ /* CPU name not found in list. */
+ error ("unknown value %qs for -mcpu", str);
+ return;
+}
+
+/* Parse the TUNE string. */
+
+static void
+aarch64_parse_tune (void)
+{
+ const struct processor *cpu;
+ char *str = (char *) alloca (strlen (aarch64_tune_string) + 1);
+ strcpy (str, aarch64_tune_string);
+
+ /* Loop through the list of supported CPUs to find a match. */
+ for (cpu = all_cores; cpu->name != NULL; cpu++)
+ {
+ if (strcmp (cpu->name, str) == 0)
+ {
+ selected_tune = cpu;
+ return;
+ }
+ }
+
+ /* CPU name not found in list. */
+ error ("unknown value %qs for -mtune", str);
+ return;
+}
+
+
+/* Implement TARGET_OPTION_OVERRIDE. */
+
+static void
+aarch64_override_options (void)
+{
+ /* -mcpu=CPU is shorthand for -march=ARCH_FOR_CPU, -mtune=CPU.
+ If either of -march or -mtune is given, they override their
+ respective component of -mcpu.
+
+ So, first parse AARCH64_CPU_STRING, then the others. Be careful
+ with -march: if -mcpu is not present on the command line, -march
+ must set a sensible default CPU. */
+ if (aarch64_cpu_string)
+ {
+ aarch64_parse_cpu ();
+ }
+
+ if (aarch64_arch_string)
+ {
+ aarch64_parse_arch ();
+ }
+
+ if (aarch64_tune_string)
+ {
+ aarch64_parse_tune ();
+ }
+
+#ifndef HAVE_AS_MABI_OPTION
+ /* The compiler may have been configured with 2.23.* binutils, which does
+ not have support for ILP32. */
+ if (TARGET_ILP32)
+ error ("Assembler does not support -mabi=ilp32");
+#endif
+
+ initialize_aarch64_code_model ();
+
+ aarch64_build_bitmask_table ();
+
+ /* This target defaults to strict volatile bitfields. */
+ if (flag_strict_volatile_bitfields < 0 && abi_version_at_least (2))
+ flag_strict_volatile_bitfields = 1;
+
+ /* If the user did not specify a processor, choose the default
+ one for them. This will be the CPU set during configuration using
+ --with-cpu, otherwise it is "generic". */
+ if (!selected_cpu)
+ {
+ selected_cpu = &all_cores[TARGET_CPU_DEFAULT & 0x3f];
+ aarch64_isa_flags = TARGET_CPU_DEFAULT >> 6;
+ }
+
+ gcc_assert (selected_cpu);
+
+ /* The selected cpu may be an architecture, so lookup tuning by core ID. */
+ if (!selected_tune)
+ selected_tune = &all_cores[selected_cpu->core];
+
+ aarch64_tune_flags = selected_tune->flags;
+ aarch64_tune = selected_tune->core;
+ aarch64_tune_params = selected_tune->tune;
+
+ aarch64_override_options_after_change ();
+}
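+
+/* To illustrate the precedence described above (a sketch using core names
+ from aarch64-cores.def): with -mcpu=cortex-a53 -mtune=cortex-a57, the ISA
+ flags and architecture are taken from the cortex-a53 entry while
+ aarch64_tune_params comes from cortex-a57; with neither -mcpu nor -march
+ given, the configured default (or "generic") is used. */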
+
+/* Implement targetm.override_options_after_change. */
+
+static void
+aarch64_override_options_after_change (void)
+{
+ faked_omit_frame_pointer = false;
+
+ /* To omit leaf frame pointers, we need to turn flag_omit_frame_pointer on so
+ that aarch64_frame_pointer_required will be called. We need to remember
+ whether flag_omit_frame_pointer was turned on normally or just faked. */
+
+ if (flag_omit_leaf_frame_pointer && !flag_omit_frame_pointer)
+ {
+ flag_omit_frame_pointer = true;
+ faked_omit_frame_pointer = true;
+ }
+}
+
+static struct machine_function *
+aarch64_init_machine_status (void)
+{
+ struct machine_function *machine;
+ machine = ggc_alloc_cleared_machine_function ();
+ return machine;
+}
+
+void
+aarch64_init_expanders (void)
+{
+ init_machine_status = aarch64_init_machine_status;
+}
+
+/* Select the code model: map the requested model to its PIC variant when
+ -fpic/-fPIC is in effect, and reject combinations that are not supported. */
+static void
+initialize_aarch64_code_model (void)
+{
+ if (flag_pic)
+ {
+ switch (aarch64_cmodel_var)
+ {
+ case AARCH64_CMODEL_TINY:
+ aarch64_cmodel = AARCH64_CMODEL_TINY_PIC;
+ break;
+ case AARCH64_CMODEL_SMALL:
+ aarch64_cmodel = AARCH64_CMODEL_SMALL_PIC;
+ break;
+ case AARCH64_CMODEL_LARGE:
+ sorry ("code model %qs with -f%s", "large",
+ flag_pic > 1 ? "PIC" : "pic");
+ default:
+ gcc_unreachable ();
+ }
+ }
+ else
+ aarch64_cmodel = aarch64_cmodel_var;
+}
+
+/* Return true if SYMBOL_REF X binds locally. */
+
+static bool
+aarch64_symbol_binds_local_p (const_rtx x)
+{
+ return (SYMBOL_REF_DECL (x)
+ ? targetm.binds_local_p (SYMBOL_REF_DECL (x))
+ : SYMBOL_REF_LOCAL_P (x));
+}
+
+/* Return true if SYMBOL_REF X is thread local */
+static bool
+aarch64_tls_symbol_p (rtx x)
+{
+ if (! TARGET_HAVE_TLS)
+ return false;
+
+ if (GET_CODE (x) != SYMBOL_REF)
+ return false;
+
+ return SYMBOL_REF_TLS_MODEL (x) != 0;
+}
+
+/* Classify a TLS symbol into one of the TLS kinds. */
+enum aarch64_symbol_type
+aarch64_classify_tls_symbol (rtx x)
+{
+ enum tls_model tls_kind = tls_symbolic_operand_type (x);
+
+ switch (tls_kind)
+ {
+ case TLS_MODEL_GLOBAL_DYNAMIC:
+ case TLS_MODEL_LOCAL_DYNAMIC:
+ return TARGET_TLS_DESC ? SYMBOL_SMALL_TLSDESC : SYMBOL_SMALL_TLSGD;
+
+ case TLS_MODEL_INITIAL_EXEC:
+ return SYMBOL_SMALL_GOTTPREL;
+
+ case TLS_MODEL_LOCAL_EXEC:
+ return SYMBOL_SMALL_TPREL;
+
+ case TLS_MODEL_EMULATED:
+ case TLS_MODEL_NONE:
+ return SYMBOL_FORCE_TO_MEM;
+
+ default:
+ gcc_unreachable ();
+ }
+}
+
+/* Return the method that should be used to access SYMBOL_REF or
+ LABEL_REF X in context CONTEXT. */
+
+enum aarch64_symbol_type
+aarch64_classify_symbol (rtx x,
+ enum aarch64_symbol_context context ATTRIBUTE_UNUSED)
+{
+ if (GET_CODE (x) == LABEL_REF)
+ {
+ switch (aarch64_cmodel)
+ {
+ case AARCH64_CMODEL_LARGE:
+ return SYMBOL_FORCE_TO_MEM;
+
+ case AARCH64_CMODEL_TINY_PIC:
+ case AARCH64_CMODEL_TINY:
+ return SYMBOL_TINY_ABSOLUTE;
+
+ case AARCH64_CMODEL_SMALL_PIC:
+ case AARCH64_CMODEL_SMALL:
+ return SYMBOL_SMALL_ABSOLUTE;
+
+ default:
+ gcc_unreachable ();
+ }
+ }
+
+ if (GET_CODE (x) == SYMBOL_REF)
+ {
+ if (aarch64_cmodel == AARCH64_CMODEL_LARGE)
+ return SYMBOL_FORCE_TO_MEM;
+
+ if (aarch64_tls_symbol_p (x))
+ return aarch64_classify_tls_symbol (x);
+
+ switch (aarch64_cmodel)
+ {
+ case AARCH64_CMODEL_TINY:
+ if (SYMBOL_REF_WEAK (x))
+ return SYMBOL_FORCE_TO_MEM;
+ return SYMBOL_TINY_ABSOLUTE;
+
+ case AARCH64_CMODEL_SMALL:
+ if (SYMBOL_REF_WEAK (x))
+ return SYMBOL_FORCE_TO_MEM;
+ return SYMBOL_SMALL_ABSOLUTE;
+
+ case AARCH64_CMODEL_TINY_PIC:
+ if (!aarch64_symbol_binds_local_p (x))
+ return SYMBOL_TINY_GOT;
+ return SYMBOL_TINY_ABSOLUTE;
+
+ case AARCH64_CMODEL_SMALL_PIC:
+ if (!aarch64_symbol_binds_local_p (x))
+ return SYMBOL_SMALL_GOT;
+ return SYMBOL_SMALL_ABSOLUTE;
+
+ default:
+ gcc_unreachable ();
+ }
+ }
+
+ /* By default push everything into the constant pool. */
+ return SYMBOL_FORCE_TO_MEM;
+}
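+
+/* A rough sketch of what the classification above means for code generation
+ in the small code model: SYMBOL_SMALL_ABSOLUTE symbols are addressed with
+ an adrp/add pair, SYMBOL_SMALL_GOT symbols (those that do not bind locally
+ under -fpic) are loaded from the GOT, and SYMBOL_FORCE_TO_MEM symbols
+ (e.g. weak symbols in the non-PIC models, or anything in the large model)
+ have their addresses materialized from the constant pool. */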
+
+bool
+aarch64_constant_address_p (rtx x)
+{
+ return (CONSTANT_P (x) && memory_address_p (DImode, x));
+}
+
+bool
+aarch64_legitimate_pic_operand_p (rtx x)
+{
+ if (GET_CODE (x) == SYMBOL_REF
+ || (GET_CODE (x) == CONST
+ && GET_CODE (XEXP (x, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF))
+ return false;
+
+ return true;
+}
+
+/* Return true if X holds either a quarter-precision floating-point
+ constant or the floating-point constant +0.0. */
+static bool
+aarch64_valid_floating_const (enum machine_mode mode, rtx x)
+{
+ if (!CONST_DOUBLE_P (x))
+ return false;
+
+ /* TODO: We could handle moving 0.0 to a TFmode register,
+ but first we would like to refactor the movtf_aarch64
+ to be more amenable to splitting moves properly and
+ correctly gate on TARGET_SIMD. For now, reject all
+ constants that are not destined for SFmode or DFmode registers. */
+ if (!(mode == SFmode || mode == DFmode))
+ return false;
+
+ if (aarch64_float_const_zero_rtx_p (x))
+ return true;
+ return aarch64_float_const_representable_p (x);
+}
+
+static bool
+aarch64_legitimate_constant_p (enum machine_mode mode, rtx x)
+{
+ /* Do not allow vector struct mode constants. We could support
+ 0 and -1 easily, but they need support in aarch64-simd.md. */
+ if (TARGET_SIMD && aarch64_vect_struct_mode_p (mode))
+ return false;
+
+ /* This could probably go away because
+ we now decompose CONST_INTs according to expand_mov_immediate. */
+ if ((GET_CODE (x) == CONST_VECTOR
+ && aarch64_simd_valid_immediate (x, mode, false, NULL))
+ || CONST_INT_P (x) || aarch64_valid_floating_const (mode, x))
+ return !targetm.cannot_force_const_mem (mode, x);
+
+ if (GET_CODE (x) == HIGH
+ && aarch64_valid_symref (XEXP (x, 0), GET_MODE (XEXP (x, 0))))
+ return true;
+
+ return aarch64_constant_address_p (x);
+}
+
+rtx
+aarch64_load_tp (rtx target)
+{
+ if (!target
+ || GET_MODE (target) != Pmode
+ || !register_operand (target, Pmode))
+ target = gen_reg_rtx (Pmode);
+
+ /* Can return in any reg. */
+ emit_insn (gen_aarch64_load_tp_hard (target));
+ return target;
+}
+
+/* On AAPCS systems, this is the "struct __va_list". */
+static GTY(()) tree va_list_type;
+
+/* Implement TARGET_BUILD_BUILTIN_VA_LIST.
+ Return the type to use as __builtin_va_list.
+
+ AAPCS64 \S 7.1.4 requires that va_list be a typedef for a type defined as:
+
+ struct __va_list
+ {
+ void *__stack;
+ void *__gr_top;
+ void *__vr_top;
+ int __gr_offs;
+ int __vr_offs;
+ }; */
+
+static tree
+aarch64_build_builtin_va_list (void)
+{
+ tree va_list_name;
+ tree f_stack, f_grtop, f_vrtop, f_groff, f_vroff;
+
+ /* Create the type. */
+ va_list_type = lang_hooks.types.make_type (RECORD_TYPE);
+ /* Give it the required name. */
+ va_list_name = build_decl (BUILTINS_LOCATION,
+ TYPE_DECL,
+ get_identifier ("__va_list"),
+ va_list_type);
+ DECL_ARTIFICIAL (va_list_name) = 1;
+ TYPE_NAME (va_list_type) = va_list_name;
+ TYPE_STUB_DECL (va_list_type) = va_list_name;
+
+ /* Create the fields. */
+ f_stack = build_decl (BUILTINS_LOCATION,
+ FIELD_DECL, get_identifier ("__stack"),
+ ptr_type_node);
+ f_grtop = build_decl (BUILTINS_LOCATION,
+ FIELD_DECL, get_identifier ("__gr_top"),
+ ptr_type_node);
+ f_vrtop = build_decl (BUILTINS_LOCATION,
+ FIELD_DECL, get_identifier ("__vr_top"),
+ ptr_type_node);
+ f_groff = build_decl (BUILTINS_LOCATION,
+ FIELD_DECL, get_identifier ("__gr_offs"),
+ integer_type_node);
+ f_vroff = build_decl (BUILTINS_LOCATION,
+ FIELD_DECL, get_identifier ("__vr_offs"),
+ integer_type_node);
+
+ DECL_ARTIFICIAL (f_stack) = 1;
+ DECL_ARTIFICIAL (f_grtop) = 1;
+ DECL_ARTIFICIAL (f_vrtop) = 1;
+ DECL_ARTIFICIAL (f_groff) = 1;
+ DECL_ARTIFICIAL (f_vroff) = 1;
+
+ DECL_FIELD_CONTEXT (f_stack) = va_list_type;
+ DECL_FIELD_CONTEXT (f_grtop) = va_list_type;
+ DECL_FIELD_CONTEXT (f_vrtop) = va_list_type;
+ DECL_FIELD_CONTEXT (f_groff) = va_list_type;
+ DECL_FIELD_CONTEXT (f_vroff) = va_list_type;
+
+ TYPE_FIELDS (va_list_type) = f_stack;
+ DECL_CHAIN (f_stack) = f_grtop;
+ DECL_CHAIN (f_grtop) = f_vrtop;
+ DECL_CHAIN (f_vrtop) = f_groff;
+ DECL_CHAIN (f_groff) = f_vroff;
+
+ /* Compute its layout. */
+ layout_type (va_list_type);
+
+ return va_list_type;
+}
+
+/* Implement TARGET_EXPAND_BUILTIN_VA_START. */
+static void
+aarch64_expand_builtin_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
+{
+ const CUMULATIVE_ARGS *cum;
+ tree f_stack, f_grtop, f_vrtop, f_groff, f_vroff;
+ tree stack, grtop, vrtop, groff, vroff;
+ tree t;
+ int gr_save_area_size;
+ int vr_save_area_size;
+ int vr_offset;
+
+ cum = &crtl->args.info;
+ gr_save_area_size
+ = (NUM_ARG_REGS - cum->aapcs_ncrn) * UNITS_PER_WORD;
+ vr_save_area_size
+ = (NUM_FP_ARG_REGS - cum->aapcs_nvrn) * UNITS_PER_VREG;
+
+ if (TARGET_GENERAL_REGS_ONLY)
+ {
+ if (cum->aapcs_nvrn > 0)
+ sorry ("%qs and floating point or vector arguments",
+ "-mgeneral-regs-only");
+ vr_save_area_size = 0;
+ }
+
+ f_stack = TYPE_FIELDS (va_list_type_node);
+ f_grtop = DECL_CHAIN (f_stack);
+ f_vrtop = DECL_CHAIN (f_grtop);
+ f_groff = DECL_CHAIN (f_vrtop);
+ f_vroff = DECL_CHAIN (f_groff);
+
+ stack = build3 (COMPONENT_REF, TREE_TYPE (f_stack), valist, f_stack,
+ NULL_TREE);
+ grtop = build3 (COMPONENT_REF, TREE_TYPE (f_grtop), valist, f_grtop,
+ NULL_TREE);
+ vrtop = build3 (COMPONENT_REF, TREE_TYPE (f_vrtop), valist, f_vrtop,
+ NULL_TREE);
+ groff = build3 (COMPONENT_REF, TREE_TYPE (f_groff), valist, f_groff,
+ NULL_TREE);
+ vroff = build3 (COMPONENT_REF, TREE_TYPE (f_vroff), valist, f_vroff,
+ NULL_TREE);
+
+ /* Emit code to initialize STACK, which points to the next varargs stack
+ argument. CUM->AAPCS_STACK_SIZE gives the number of stack words used
+ by named arguments. STACK is 8-byte aligned. */
+ t = make_tree (TREE_TYPE (stack), virtual_incoming_args_rtx);
+ if (cum->aapcs_stack_size > 0)
+ t = fold_build_pointer_plus_hwi (t, cum->aapcs_stack_size * UNITS_PER_WORD);
+ t = build2 (MODIFY_EXPR, TREE_TYPE (stack), stack, t);
+ expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
+
+ /* Emit code to initialize GRTOP, the top of the GR save area.
+ virtual_incoming_args_rtx should have been 16 byte aligned. */
+ t = make_tree (TREE_TYPE (grtop), virtual_incoming_args_rtx);
+ t = build2 (MODIFY_EXPR, TREE_TYPE (grtop), grtop, t);
+ expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
+
+ /* Emit code to initialize VRTOP, the top of the VR save area.
+ This address is gr_save_area_bytes below GRTOP, rounded
+ down to the next 16-byte boundary. */
+ t = make_tree (TREE_TYPE (vrtop), virtual_incoming_args_rtx);
+ vr_offset = AARCH64_ROUND_UP (gr_save_area_size,
+ STACK_BOUNDARY / BITS_PER_UNIT);
+
+ if (vr_offset)
+ t = fold_build_pointer_plus_hwi (t, -vr_offset);
+ t = build2 (MODIFY_EXPR, TREE_TYPE (vrtop), vrtop, t);
+ expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
+
+ /* Emit code to initialize GROFF, the offset from GRTOP of the
+ next GPR argument. */
+ t = build2 (MODIFY_EXPR, TREE_TYPE (groff), groff,
+ build_int_cst (TREE_TYPE (groff), -gr_save_area_size));
+ expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
+
+ /* Likewise emit code to initialize VROFF, the offset from FTOP
+ of the next VR argument. */
+ t = build2 (MODIFY_EXPR, TREE_TYPE (vroff), vroff,
+ build_int_cst (TREE_TYPE (vroff), -vr_save_area_size));
+ expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
+}
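+
+/* Worked example (illustrative): for a variadic function whose named
+ arguments consume two general registers and one vector register,
+ aapcs_ncrn == 2 and aapcs_nvrn == 1, so the code above sets
+
+ __gr_offs = -(8 - 2) * 8 = -48
+ __vr_offs = -(8 - 1) * 16 = -112
+ __gr_top = incoming argument pointer
+ __vr_top = __gr_top - 48 (GR save area size, rounded up to 16 bytes)
+ __stack = incoming argument pointer + 8 * aapcs_stack_size. */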
+
+/* Implement TARGET_GIMPLIFY_VA_ARG_EXPR. */
+
+static tree
+aarch64_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
+ gimple_seq *post_p ATTRIBUTE_UNUSED)
+{
+ tree addr;
+ bool indirect_p;
+ bool is_ha; /* is HFA or HVA. */
+ bool dw_align; /* double-word align. */
+ enum machine_mode ag_mode = VOIDmode;
+ int nregs;
+ enum machine_mode mode;
+
+ tree f_stack, f_grtop, f_vrtop, f_groff, f_vroff;
+ tree stack, f_top, f_off, off, arg, roundup, on_stack;
+ HOST_WIDE_INT size, rsize, adjust, align;
+ tree t, u, cond1, cond2;
+
+ indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, false);
+ if (indirect_p)
+ type = build_pointer_type (type);
+
+ mode = TYPE_MODE (type);
+
+ f_stack = TYPE_FIELDS (va_list_type_node);
+ f_grtop = DECL_CHAIN (f_stack);
+ f_vrtop = DECL_CHAIN (f_grtop);
+ f_groff = DECL_CHAIN (f_vrtop);
+ f_vroff = DECL_CHAIN (f_groff);
+
+ stack = build3 (COMPONENT_REF, TREE_TYPE (f_stack), unshare_expr (valist),
+ f_stack, NULL_TREE);
+ size = int_size_in_bytes (type);
+ align = aarch64_function_arg_alignment (mode, type) / BITS_PER_UNIT;
+
+ dw_align = false;
+ adjust = 0;
+ if (aarch64_vfp_is_call_or_return_candidate (mode,
+ type,
+ &ag_mode,
+ &nregs,
+ &is_ha))
+ {
+ /* TYPE passed in fp/simd registers. */
+ if (TARGET_GENERAL_REGS_ONLY)
+ sorry ("%qs and floating point or vector arguments",
+ "-mgeneral-regs-only");
+
+ f_top = build3 (COMPONENT_REF, TREE_TYPE (f_vrtop),
+ unshare_expr (valist), f_vrtop, NULL_TREE);
+ f_off = build3 (COMPONENT_REF, TREE_TYPE (f_vroff),
+ unshare_expr (valist), f_vroff, NULL_TREE);
+
+ rsize = nregs * UNITS_PER_VREG;
+
+ if (is_ha)
+ {
+ if (BYTES_BIG_ENDIAN && GET_MODE_SIZE (ag_mode) < UNITS_PER_VREG)
+ adjust = UNITS_PER_VREG - GET_MODE_SIZE (ag_mode);
+ }
+ else if (BLOCK_REG_PADDING (mode, type, 1) == downward
+ && size < UNITS_PER_VREG)
+ {
+ adjust = UNITS_PER_VREG - size;
+ }
+ }
+ else
+ {
+ /* TYPE passed in general registers. */
+ f_top = build3 (COMPONENT_REF, TREE_TYPE (f_grtop),
+ unshare_expr (valist), f_grtop, NULL_TREE);
+ f_off = build3 (COMPONENT_REF, TREE_TYPE (f_groff),
+ unshare_expr (valist), f_groff, NULL_TREE);
+ rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
+ nregs = rsize / UNITS_PER_WORD;
+
+ if (align > 8)
+ dw_align = true;
+
+ if (BLOCK_REG_PADDING (mode, type, 1) == downward
+ && size < UNITS_PER_WORD)
+ {
+ adjust = UNITS_PER_WORD - size;
+ }
+ }
+
+ /* Get a local temporary for the field value. */
+ off = get_initialized_tmp_var (f_off, pre_p, NULL);
+
+ /* Emit code to branch if off >= 0. */
+ t = build2 (GE_EXPR, boolean_type_node, off,
+ build_int_cst (TREE_TYPE (off), 0));
+ cond1 = build3 (COND_EXPR, ptr_type_node, t, NULL_TREE, NULL_TREE);
+
+ if (dw_align)
+ {
+ /* Emit: offs = (offs + 15) & -16. */
+ t = build2 (PLUS_EXPR, TREE_TYPE (off), off,
+ build_int_cst (TREE_TYPE (off), 15));
+ t = build2 (BIT_AND_EXPR, TREE_TYPE (off), t,
+ build_int_cst (TREE_TYPE (off), -16));
+ roundup = build2 (MODIFY_EXPR, TREE_TYPE (off), off, t);
+ }
+ else
+ roundup = NULL;
+
+ /* Update ap.__[g|v]r_offs */
+ t = build2 (PLUS_EXPR, TREE_TYPE (off), off,
+ build_int_cst (TREE_TYPE (off), rsize));
+ t = build2 (MODIFY_EXPR, TREE_TYPE (f_off), unshare_expr (f_off), t);
+
+ /* String up. */
+ if (roundup)
+ t = build2 (COMPOUND_EXPR, TREE_TYPE (t), roundup, t);
+
+ /* [cond2] if (ap.__[g|v]r_offs > 0) */
+ u = build2 (GT_EXPR, boolean_type_node, unshare_expr (f_off),
+ build_int_cst (TREE_TYPE (f_off), 0));
+ cond2 = build3 (COND_EXPR, ptr_type_node, u, NULL_TREE, NULL_TREE);
+
+ /* String up: make sure the assignment happens before the use. */
+ t = build2 (COMPOUND_EXPR, TREE_TYPE (cond2), t, cond2);
+ COND_EXPR_ELSE (cond1) = t;
+
+ /* Prepare the trees handling the argument that is passed on the stack;
+ the top-level node will be stored in ON_STACK. */
+ arg = get_initialized_tmp_var (stack, pre_p, NULL);
+ if (align > 8)
+ {
+ /* if (alignof(type) > 8) (arg = arg + 15) & -16; */
+ t = fold_convert (intDI_type_node, arg);
+ t = build2 (PLUS_EXPR, TREE_TYPE (t), t,
+ build_int_cst (TREE_TYPE (t), 15));
+ t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
+ build_int_cst (TREE_TYPE (t), -16));
+ t = fold_convert (TREE_TYPE (arg), t);
+ roundup = build2 (MODIFY_EXPR, TREE_TYPE (arg), arg, t);
+ }
+ else
+ roundup = NULL;
+ /* Advance ap.__stack */
+ t = fold_convert (intDI_type_node, arg);
+ t = build2 (PLUS_EXPR, TREE_TYPE (t), t,
+ build_int_cst (TREE_TYPE (t), size + 7));
+ t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
+ build_int_cst (TREE_TYPE (t), -8));
+ t = fold_convert (TREE_TYPE (arg), t);
+ t = build2 (MODIFY_EXPR, TREE_TYPE (stack), unshare_expr (stack), t);
+ /* String up roundup and advance. */
+ if (roundup)
+ t = build2 (COMPOUND_EXPR, TREE_TYPE (t), roundup, t);
+ /* String up with arg */
+ on_stack = build2 (COMPOUND_EXPR, TREE_TYPE (arg), t, arg);
+ /* Big-endianness related address adjustment. */
+ if (BLOCK_REG_PADDING (mode, type, 1) == downward
+ && size < UNITS_PER_WORD)
+ {
+ t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (arg), arg,
+ size_int (UNITS_PER_WORD - size));
+ on_stack = build2 (COMPOUND_EXPR, TREE_TYPE (arg), on_stack, t);
+ }
+
+ COND_EXPR_THEN (cond1) = unshare_expr (on_stack);
+ COND_EXPR_THEN (cond2) = unshare_expr (on_stack);
+
+ /* Adjustment to OFFSET in the case of BIG_ENDIAN. */
+ t = off;
+ if (adjust)
+ t = build2 (PREINCREMENT_EXPR, TREE_TYPE (off), off,
+ build_int_cst (TREE_TYPE (off), adjust));
+
+ t = fold_convert (sizetype, t);
+ t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (f_top), f_top, t);
+
+ if (is_ha)
+ {
+ /* type ha; // treat as "struct {ftype field[n];}"
+ ... [computing offs]
+ for (i = 0; i < nregs; ++i, offs += 16)
+ ha.field[i] = *((ftype *)(ap.__vr_top + offs));
+ return ha; */
+ int i;
+ tree tmp_ha, field_t, field_ptr_t;
+
+ /* Declare a local variable. */
+ tmp_ha = create_tmp_var_raw (type, "ha");
+ gimple_add_tmp_var (tmp_ha);
+
+ /* Establish the base type. */
+ switch (ag_mode)
+ {
+ case SFmode:
+ field_t = float_type_node;
+ field_ptr_t = float_ptr_type_node;
+ break;
+ case DFmode:
+ field_t = double_type_node;
+ field_ptr_t = double_ptr_type_node;
+ break;
+ case TFmode:
+ field_t = long_double_type_node;
+ field_ptr_t = long_double_ptr_type_node;
+ break;
+/* The half precision and quad precision are not fully supported yet. Enable
+ the following code after the support is complete. Need to find the correct
+ type node for __fp16 *. */
+#if 0
+ case HFmode:
+ field_t = float_type_node;
+ field_ptr_t = float_ptr_type_node;
+ break;
+#endif
+ case V2SImode:
+ case V4SImode:
+ {
+ tree innertype = make_signed_type (GET_MODE_PRECISION (SImode));
+ field_t = build_vector_type_for_mode (innertype, ag_mode);
+ field_ptr_t = build_pointer_type (field_t);
+ }
+ break;
+ default:
+ gcc_assert (0);
+ }
+
+ /* *(field_ptr_t)&ha = *((field_ptr_t)vr_saved_area). */
+ tmp_ha = build1 (ADDR_EXPR, field_ptr_t, tmp_ha);
+ addr = t;
+ t = fold_convert (field_ptr_t, addr);
+ t = build2 (MODIFY_EXPR, field_t,
+ build1 (INDIRECT_REF, field_t, tmp_ha),
+ build1 (INDIRECT_REF, field_t, t));
+
+ /* ha.field[i] = *((field_ptr_t)vr_saved_area + i) */
+ for (i = 1; i < nregs; ++i)
+ {
+ addr = fold_build_pointer_plus_hwi (addr, UNITS_PER_VREG);
+ u = fold_convert (field_ptr_t, addr);
+ u = build2 (MODIFY_EXPR, field_t,
+ build2 (MEM_REF, field_t, tmp_ha,
+ build_int_cst (field_ptr_t,
+ (i *
+ int_size_in_bytes (field_t)))),
+ build1 (INDIRECT_REF, field_t, u));
+ t = build2 (COMPOUND_EXPR, TREE_TYPE (t), t, u);
+ }
+
+ u = fold_convert (TREE_TYPE (f_top), tmp_ha);
+ t = build2 (COMPOUND_EXPR, TREE_TYPE (f_top), t, u);
+ }
+
+ COND_EXPR_ELSE (cond2) = t;
+ addr = fold_convert (build_pointer_type (type), cond1);
+ addr = build_va_arg_indirect_ref (addr);
+
+ if (indirect_p)
+ addr = build_va_arg_indirect_ref (addr);
+
+ return addr;
+}
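+
+/* Roughly, the tree built above corresponds to the following pseudo-code for
+ a non-composite integer argument (a sketch only, ignoring alignment and
+ big-endian adjustments):
+
+ off = ap.__gr_offs;
+ if (off >= 0)
+ goto on_stack;
+ ap.__gr_offs = off + 8;
+ if (ap.__gr_offs > 0)
+ goto on_stack;
+ return *(TYPE *) (ap.__gr_top + off);
+ on_stack:
+ arg = ap.__stack;
+ ap.__stack = (arg + size + 7) & -8;
+ return *(TYPE *) arg; */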
+
+/* Implement TARGET_SETUP_INCOMING_VARARGS. */
+
+static void
+aarch64_setup_incoming_varargs (cumulative_args_t cum_v, enum machine_mode mode,
+ tree type, int *pretend_size ATTRIBUTE_UNUSED,
+ int no_rtl)
+{
+ CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
+ CUMULATIVE_ARGS local_cum;
+ int gr_saved, vr_saved;
+
+ /* The caller has advanced CUM up to, but not beyond, the last named
+ argument. Advance a local copy of CUM past the last "real" named
+ argument, to find out how many registers are left over. */
+ local_cum = *cum;
+ aarch64_function_arg_advance (pack_cumulative_args(&local_cum), mode, type, true);
+
+ /* Find out how many registers we need to save. */
+ gr_saved = NUM_ARG_REGS - local_cum.aapcs_ncrn;
+ vr_saved = NUM_FP_ARG_REGS - local_cum.aapcs_nvrn;
+
+ if (TARGET_GENERAL_REGS_ONLY)
+ {
+ if (local_cum.aapcs_nvrn > 0)
+ sorry ("%qs and floating point or vector arguments",
+ "-mgeneral-regs-only");
+ vr_saved = 0;
+ }
+
+ if (!no_rtl)
+ {
+ if (gr_saved > 0)
+ {
+ rtx ptr, mem;
+
+ /* virtual_incoming_args_rtx should have been 16-byte aligned. */
+ ptr = plus_constant (Pmode, virtual_incoming_args_rtx,
+ - gr_saved * UNITS_PER_WORD);
+ mem = gen_frame_mem (BLKmode, ptr);
+ set_mem_alias_set (mem, get_varargs_alias_set ());
+
+ move_block_from_reg (local_cum.aapcs_ncrn + R0_REGNUM,
+ mem, gr_saved);
+ }
+ if (vr_saved > 0)
+ {
+ /* We can't use move_block_from_reg, because it will use
+ the wrong mode, storing D regs only. */
+ enum machine_mode mode = TImode;
+ int off, i;
+
+ /* Set OFF to the offset from virtual_incoming_args_rtx of
+ the first vector register. The VR save area lies below
+ the GR one, and is aligned to 16 bytes. */
+ off = -AARCH64_ROUND_UP (gr_saved * UNITS_PER_WORD,
+ STACK_BOUNDARY / BITS_PER_UNIT);
+ off -= vr_saved * UNITS_PER_VREG;
+
+ for (i = local_cum.aapcs_nvrn; i < NUM_FP_ARG_REGS; ++i)
+ {
+ rtx ptr, mem;
+
+ ptr = plus_constant (Pmode, virtual_incoming_args_rtx, off);
+ mem = gen_frame_mem (mode, ptr);
+ set_mem_alias_set (mem, get_varargs_alias_set ());
+ aarch64_emit_move (mem, gen_rtx_REG (mode, V0_REGNUM + i));
+ off += UNITS_PER_VREG;
+ }
+ }
+ }
+
+ /* We don't save the size into *PRETEND_SIZE because we want to avoid
+ any complication of having crtl->args.pretend_args_size changed. */
+ cfun->machine->saved_varargs_size
+ = (AARCH64_ROUND_UP (gr_saved * UNITS_PER_WORD,
+ STACK_BOUNDARY / BITS_PER_UNIT)
+ + vr_saved * UNITS_PER_VREG);
+}
+
+static void
+aarch64_conditional_register_usage (void)
+{
+ int i;
+ if (!TARGET_FLOAT)
+ {
+ for (i = V0_REGNUM; i <= V31_REGNUM; i++)
+ {
+ fixed_regs[i] = 1;
+ call_used_regs[i] = 1;
+ }
+ }
+}
+
+/* Walk down the type tree of TYPE counting consecutive base elements.
+ If *MODEP is VOIDmode, then set it to the first valid floating point
+ type. If a non-floating point type is found, or if a floating point
+ type that doesn't match a non-VOIDmode *MODEP is found, then return -1,
+ otherwise return the count in the sub-tree. */
+static int
+aapcs_vfp_sub_candidate (const_tree type, enum machine_mode *modep)
+{
+ enum machine_mode mode;
+ HOST_WIDE_INT size;
+
+ switch (TREE_CODE (type))
+ {
+ case REAL_TYPE:
+ mode = TYPE_MODE (type);
+ if (mode != DFmode && mode != SFmode && mode != TFmode)
+ return -1;
+
+ if (*modep == VOIDmode)
+ *modep = mode;
+
+ if (*modep == mode)
+ return 1;
+
+ break;
+
+ case COMPLEX_TYPE:
+ mode = TYPE_MODE (TREE_TYPE (type));
+ if (mode != DFmode && mode != SFmode && mode != TFmode)
+ return -1;
+
+ if (*modep == VOIDmode)
+ *modep = mode;
+
+ if (*modep == mode)
+ return 2;
+
+ break;
+
+ case VECTOR_TYPE:
+ /* Use V2SImode and V4SImode as representatives of all 64-bit
+ and 128-bit vector types. */
+ size = int_size_in_bytes (type);
+ switch (size)
+ {
+ case 8:
+ mode = V2SImode;
+ break;
+ case 16:
+ mode = V4SImode;
+ break;
+ default:
+ return -1;
+ }
+
+ if (*modep == VOIDmode)
+ *modep = mode;
+
+ /* Vector modes are considered to be opaque: two vectors are
+ equivalent for the purposes of being homogeneous aggregates
+ if they are the same size. */
+ if (*modep == mode)
+ return 1;
+
+ break;
+
+ case ARRAY_TYPE:
+ {
+ int count;
+ tree index = TYPE_DOMAIN (type);
+
+ /* Can't handle incomplete types. */
+ if (!COMPLETE_TYPE_P (type))
+ return -1;
+
+ count = aapcs_vfp_sub_candidate (TREE_TYPE (type), modep);
+ if (count == -1
+ || !index
+ || !TYPE_MAX_VALUE (index)
+ || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index))
+ || !TYPE_MIN_VALUE (index)
+ || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index))
+ || count < 0)
+ return -1;
+
+ count *= (1 + tree_to_uhwi (TYPE_MAX_VALUE (index))
+ - tree_to_uhwi (TYPE_MIN_VALUE (index)));
+
+ /* There must be no padding. */
+ if (!tree_fits_uhwi_p (TYPE_SIZE (type))
+ || ((HOST_WIDE_INT) tree_to_uhwi (TYPE_SIZE (type))
+ != count * GET_MODE_BITSIZE (*modep)))
+ return -1;
+
+ return count;
+ }
+
+ case RECORD_TYPE:
+ {
+ int count = 0;
+ int sub_count;
+ tree field;
+
+ /* Can't handle incomplete types. */
+ if (!COMPLETE_TYPE_P (type))
+ return -1;
+
+ for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
+ {
+ if (TREE_CODE (field) != FIELD_DECL)
+ continue;
+
+ sub_count = aapcs_vfp_sub_candidate (TREE_TYPE (field), modep);
+ if (sub_count < 0)
+ return -1;
+ count += sub_count;
+ }
+
+ /* There must be no padding. */
+ if (!tree_fits_uhwi_p (TYPE_SIZE (type))
+ || ((HOST_WIDE_INT) tree_to_uhwi (TYPE_SIZE (type))
+ != count * GET_MODE_BITSIZE (*modep)))
+ return -1;
+
+ return count;
+ }
+
+ case UNION_TYPE:
+ case QUAL_UNION_TYPE:
+ {
+ /* These aren't very interesting except in a degenerate case. */
+ int count = 0;
+ int sub_count;
+ tree field;
+
+ /* Can't handle incomplete types. */
+ if (!COMPLETE_TYPE_P (type))
+ return -1;
+
+ for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
+ {
+ if (TREE_CODE (field) != FIELD_DECL)
+ continue;
+
+ sub_count = aapcs_vfp_sub_candidate (TREE_TYPE (field), modep);
+ if (sub_count < 0)
+ return -1;
+ count = count > sub_count ? count : sub_count;
+ }
+
+ /* There must be no padding. */
+ if (!tree_fits_uhwi_p (TYPE_SIZE (type))
+ || ((HOST_WIDE_INT) tree_to_uhwi (TYPE_SIZE (type))
+ != count * GET_MODE_BITSIZE (*modep)))
+ return -1;
+
+ return count;
+ }
+
+ default:
+ break;
+ }
+
+ return -1;
+}
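+
+/* For example (illustrative):
+
+ struct { double x, y, z; } -> count 3, *MODEP == DFmode
+ struct { float a; float b[2]; } -> count 3, *MODEP == SFmode
+ struct { float a; double b; } -> -1 (mixed base types)
+ struct { double d; int i; } -> -1 (non-FP field)
+
+ A positive result no larger than HA_MAX_NUM_FLDS makes the type a
+ homogeneous aggregate candidate; see
+ aarch64_vfp_is_call_or_return_candidate below. */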
+
+/* Return true if we use LRA instead of reload pass. */
+static bool
+aarch64_lra_p (void)
+{
+ return aarch64_lra_flag;
+}
+
+/* Return TRUE if the type, as described by TYPE and MODE, is a composite
+ type as described in AAPCS64 \S 4.3. This includes aggregate, union and
+ array types. The C99 floating-point complex types are also considered
+ as composite types, according to AAPCS64 \S 7.1.1. The complex integer
+ types, which are GCC extensions and out of the scope of AAPCS64, are
+ treated as composite types here as well.
+
+ Note that MODE itself is not sufficient in determining whether a type
+ is such a composite type or not. This is because
+ stor-layout.c:compute_record_mode may have already changed the MODE
+ (BLKmode) of a RECORD_TYPE TYPE to some other mode. For example, a
+ structure with only one field may have its MODE set to the mode of the
+ field. Also an integer mode whose size matches the size of the
+ RECORD_TYPE type may be used to substitute the original mode
+ (i.e. BLKmode) in certain circumstances. In other words, MODE cannot be
+ solely relied on. */
+
+static bool
+aarch64_composite_type_p (const_tree type,
+ enum machine_mode mode)
+{
+ if (type && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == COMPLEX_TYPE))
+ return true;
+
+ if (mode == BLKmode
+ || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
+ || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
+ return true;
+
+ return false;
+}
+
+/* Return TRUE if the type, as described by TYPE and MODE, is a short vector
+ type as described in AAPCS64 \S 4.1.2.
+
+ See the comment above aarch64_composite_type_p for the notes on MODE. */
+
+static bool
+aarch64_short_vector_p (const_tree type,
+ enum machine_mode mode)
+{
+ HOST_WIDE_INT size = -1;
+
+ if (type && TREE_CODE (type) == VECTOR_TYPE)
+ size = int_size_in_bytes (type);
+ else if (!aarch64_composite_type_p (type, mode)
+ && (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
+ || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT))
+ size = GET_MODE_SIZE (mode);
+
+ return size == 8 || size == 16;
+}
+
+/* Return TRUE if an argument, whose type is described by TYPE and MODE,
+ shall be passed or returned in simd/fp register(s) (providing these
+ parameter passing registers are available).
+
+ Upon successful return, *COUNT returns the number of needed registers,
+ *BASE_MODE returns the mode of the individual register and, when IS_HA
+ is not NULL, *IS_HA indicates whether or not the argument is a homogeneous
+ floating-point aggregate or a homogeneous short-vector aggregate. */
+
+static bool
+aarch64_vfp_is_call_or_return_candidate (enum machine_mode mode,
+ const_tree type,
+ enum machine_mode *base_mode,
+ int *count,
+ bool *is_ha)
+{
+ enum machine_mode new_mode = VOIDmode;
+ bool composite_p = aarch64_composite_type_p (type, mode);
+
+ if (is_ha != NULL) *is_ha = false;
+
+ if ((!composite_p && GET_MODE_CLASS (mode) == MODE_FLOAT)
+ || aarch64_short_vector_p (type, mode))
+ {
+ *count = 1;
+ new_mode = mode;
+ }
+ else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
+ {
+ if (is_ha != NULL) *is_ha = true;
+ *count = 2;
+ new_mode = GET_MODE_INNER (mode);
+ }
+ else if (type && composite_p)
+ {
+ int ag_count = aapcs_vfp_sub_candidate (type, &new_mode);
+
+ if (ag_count > 0 && ag_count <= HA_MAX_NUM_FLDS)
+ {
+ if (is_ha != NULL) *is_ha = true;
+ *count = ag_count;
+ }
+ else
+ return false;
+ }
+ else
+ return false;
+
+ *base_mode = new_mode;
+ return true;
+}
+
+/* Implement TARGET_STRUCT_VALUE_RTX. */
+
+static rtx
+aarch64_struct_value_rtx (tree fndecl ATTRIBUTE_UNUSED,
+ int incoming ATTRIBUTE_UNUSED)
+{
+ return gen_rtx_REG (Pmode, AARCH64_STRUCT_VALUE_REGNUM);
+}
+
+/* Implements target hook vector_mode_supported_p. */
+static bool
+aarch64_vector_mode_supported_p (enum machine_mode mode)
+{
+ if (TARGET_SIMD
+ && (mode == V4SImode || mode == V8HImode
+ || mode == V16QImode || mode == V2DImode
+ || mode == V2SImode || mode == V4HImode
+ || mode == V8QImode || mode == V2SFmode
+ || mode == V4SFmode || mode == V2DFmode))
+ return true;
+
+ return false;
+}
+
+/* Return appropriate SIMD container
+ for MODE within a vector of WIDTH bits. */
+static enum machine_mode
+aarch64_simd_container_mode (enum machine_mode mode, unsigned width)
+{
+ gcc_assert (width == 64 || width == 128);
+ if (TARGET_SIMD)
+ {
+ if (width == 128)
+ switch (mode)
+ {
+ case DFmode:
+ return V2DFmode;
+ case SFmode:
+ return V4SFmode;
+ case SImode:
+ return V4SImode;
+ case HImode:
+ return V8HImode;
+ case QImode:
+ return V16QImode;
+ case DImode:
+ return V2DImode;
+ default:
+ break;
+ }
+ else
+ switch (mode)
+ {
+ case SFmode:
+ return V2SFmode;
+ case SImode:
+ return V2SImode;
+ case HImode:
+ return V4HImode;
+ case QImode:
+ return V8QImode;
+ default:
+ break;
+ }
+ }
+ return word_mode;
+}
+
+/* Return 128-bit container as the preferred SIMD mode for MODE. */
+static enum machine_mode
+aarch64_preferred_simd_mode (enum machine_mode mode)
+{
+ return aarch64_simd_container_mode (mode, 128);
+}
+
+/* Return the bitmask of possible vector sizes for the vectorizer
+ to iterate over. */
+static unsigned int
+aarch64_autovectorize_vector_sizes (void)
+{
+ return (16 | 8);
+}
+
+/* A table to help perform AArch64-specific name mangling for AdvSIMD
+ vector types in order to conform to the AAPCS64 (see "Procedure
+ Call Standard for the ARM 64-bit Architecture", Appendix A). To
+ qualify for emission with the mangled names defined in that document,
+ a vector type must not only be of the correct mode but also be
+ composed of AdvSIMD vector element types (e.g.
+ __builtin_aarch64_simd_qi); these types are registered by
+ aarch64_init_simd_builtins (). In other words, vector types defined
+ in other ways, e.g. via the vector_size attribute, will get default
+ mangled names. */
+typedef struct
+{
+ enum machine_mode mode;
+ const char *element_type_name;
+ const char *mangled_name;
+} aarch64_simd_mangle_map_entry;
+
+static aarch64_simd_mangle_map_entry aarch64_simd_mangle_map[] = {
+ /* 64-bit containerized types. */
+ { V8QImode, "__builtin_aarch64_simd_qi", "10__Int8x8_t" },
+ { V8QImode, "__builtin_aarch64_simd_uqi", "11__Uint8x8_t" },
+ { V4HImode, "__builtin_aarch64_simd_hi", "11__Int16x4_t" },
+ { V4HImode, "__builtin_aarch64_simd_uhi", "12__Uint16x4_t" },
+ { V2SImode, "__builtin_aarch64_simd_si", "11__Int32x2_t" },
+ { V2SImode, "__builtin_aarch64_simd_usi", "12__Uint32x2_t" },
+ { V2SFmode, "__builtin_aarch64_simd_sf", "13__Float32x2_t" },
+ { V8QImode, "__builtin_aarch64_simd_poly8", "11__Poly8x8_t" },
+ { V4HImode, "__builtin_aarch64_simd_poly16", "12__Poly16x4_t" },
+ /* 128-bit containerized types. */
+ { V16QImode, "__builtin_aarch64_simd_qi", "11__Int8x16_t" },
+ { V16QImode, "__builtin_aarch64_simd_uqi", "12__Uint8x16_t" },
+ { V8HImode, "__builtin_aarch64_simd_hi", "11__Int16x8_t" },
+ { V8HImode, "__builtin_aarch64_simd_uhi", "12__Uint16x8_t" },
+ { V4SImode, "__builtin_aarch64_simd_si", "11__Int32x4_t" },
+ { V4SImode, "__builtin_aarch64_simd_usi", "12__Uint32x4_t" },
+ { V2DImode, "__builtin_aarch64_simd_di", "11__Int64x2_t" },
+ { V2DImode, "__builtin_aarch64_simd_udi", "12__Uint64x2_t" },
+ { V4SFmode, "__builtin_aarch64_simd_sf", "13__Float32x4_t" },
+ { V2DFmode, "__builtin_aarch64_simd_df", "13__Float64x2_t" },
+ { V16QImode, "__builtin_aarch64_simd_poly8", "12__Poly8x16_t" },
+ { V8HImode, "__builtin_aarch64_simd_poly16", "12__Poly16x8_t" },
+ { V2DImode, "__builtin_aarch64_simd_poly64", "12__Poly64x2_t" },
+ { VOIDmode, NULL, NULL }
+};
+
+/* Implement TARGET_MANGLE_TYPE. */
+
+static const char *
+aarch64_mangle_type (const_tree type)
+{
+ /* The AArch64 ABI documents say that "__va_list" has to be
+ mangled as if it is in the "std" namespace. */
+ if (lang_hooks.types_compatible_p (CONST_CAST_TREE (type), va_list_type))
+ return "St9__va_list";
+
+ /* Check the mode of the vector type, and the name of the vector
+ element type, against the table. */
+ if (TREE_CODE (type) == VECTOR_TYPE)
+ {
+ aarch64_simd_mangle_map_entry *pos = aarch64_simd_mangle_map;
+
+ while (pos->mode != VOIDmode)
+ {
+ tree elt_type = TREE_TYPE (type);
+
+ if (pos->mode == TYPE_MODE (type)
+ && TREE_CODE (TYPE_NAME (elt_type)) == TYPE_DECL
+ && !strcmp (IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (elt_type))),
+ pos->element_type_name))
+ return pos->mangled_name;
+
+ pos++;
+ }
+ }
+
+ /* Use the default mangling. */
+ return NULL;
+}
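+
+/* For instance (a sketch of the intended effect): int32x4_t from arm_neon.h
+ has mode V4SImode and element type __builtin_aarch64_simd_si, so the table
+ above maps it to "11__Int32x4_t" and a C++ function void f (int32x4_t)
+ mangles as _Z1f11__Int32x4_t, whereas a user vector type declared with
+ __attribute__ ((vector_size (16))) keeps the default mangling. */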
+
+/* Return the equivalent letter for size. */
+static char
+sizetochar (int size)
+{
+ switch (size)
+ {
+ case 64: return 'd';
+ case 32: return 's';
+ case 16: return 'h';
+ case 8 : return 'b';
+ default: gcc_unreachable ();
+ }
+}
+
+/* Return true iff x is a uniform vector of floating-point
+ constants, and the constant can be represented in
+ quarter-precision form. Note that, as aarch64_float_const_representable_p
+ rejects both +0.0 and -0.0, this function also rejects them. */
+static bool
+aarch64_vect_float_const_representable_p (rtx x)
+{
+ int i = 0;
+ REAL_VALUE_TYPE r0, ri;
+ rtx x0, xi;
+
+ if (GET_MODE_CLASS (GET_MODE (x)) != MODE_VECTOR_FLOAT)
+ return false;
+
+ x0 = CONST_VECTOR_ELT (x, 0);
+ if (!CONST_DOUBLE_P (x0))
+ return false;
+
+ REAL_VALUE_FROM_CONST_DOUBLE (r0, x0);
+
+ for (i = 1; i < CONST_VECTOR_NUNITS (x); i++)
+ {
+ xi = CONST_VECTOR_ELT (x, i);
+ if (!CONST_DOUBLE_P (xi))
+ return false;
+
+ REAL_VALUE_FROM_CONST_DOUBLE (ri, xi);
+ if (!REAL_VALUES_EQUAL (r0, ri))
+ return false;
+ }
+
+ return aarch64_float_const_representable_p (x0);
+}
+
+/* Return true if OP is a valid AdvSIMD immediate for MODE, false otherwise.
+ If INFO is non-null, fill in *INFO with details of the immediate. */
+bool
+aarch64_simd_valid_immediate (rtx op, enum machine_mode mode, bool inverse,
+ struct simd_immediate_info *info)
+{
+#define CHECK(STRIDE, ELSIZE, CLASS, TEST, SHIFT, NEG) \
+ matches = 1; \
+ for (i = 0; i < idx; i += (STRIDE)) \
+ if (!(TEST)) \
+ matches = 0; \
+ if (matches) \
+ { \
+ immtype = (CLASS); \
+ elsize = (ELSIZE); \
+ eshift = (SHIFT); \
+ emvn = (NEG); \
+ break; \
+ }
+
+ unsigned int i, elsize = 0, idx = 0, n_elts = CONST_VECTOR_NUNITS (op);
+ unsigned int innersize = GET_MODE_SIZE (GET_MODE_INNER (mode));
+ unsigned char bytes[16];
+ int immtype = -1, matches;
+ unsigned int invmask = inverse ? 0xff : 0;
+ int eshift, emvn;
+
+ if (GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
+ {
+ if (! (aarch64_simd_imm_zero_p (op, mode)
+ || aarch64_vect_float_const_representable_p (op)))
+ return false;
+
+ if (info)
+ {
+ info->value = CONST_VECTOR_ELT (op, 0);
+ info->element_width = GET_MODE_BITSIZE (GET_MODE (info->value));
+ info->mvn = false;
+ info->shift = 0;
+ }
+
+ return true;
+ }
+
+ /* Splat vector constant out into a byte vector. */
+ for (i = 0; i < n_elts; i++)
+ {
+ rtx el = CONST_VECTOR_ELT (op, i);
+ unsigned HOST_WIDE_INT elpart;
+ unsigned int part, parts;
+
+ if (GET_CODE (el) == CONST_INT)
+ {
+ elpart = INTVAL (el);
+ parts = 1;
+ }
+ else if (GET_CODE (el) == CONST_DOUBLE)
+ {
+ elpart = CONST_DOUBLE_LOW (el);
+ parts = 2;
+ }
+ else
+ gcc_unreachable ();
+
+ for (part = 0; part < parts; part++)
+ {
+ unsigned int byte;
+ for (byte = 0; byte < innersize; byte++)
+ {
+ bytes[idx++] = (elpart & 0xff) ^ invmask;
+ elpart >>= BITS_PER_UNIT;
+ }
+ if (GET_CODE (el) == CONST_DOUBLE)
+ elpart = CONST_DOUBLE_HIGH (el);
+ }
+ }
+
+ /* Sanity check. */
+ gcc_assert (idx == GET_MODE_SIZE (mode));
+
+ do
+ {
+ CHECK (4, 32, 0, bytes[i] == bytes[0] && bytes[i + 1] == 0
+ && bytes[i + 2] == 0 && bytes[i + 3] == 0, 0, 0);
+
+ CHECK (4, 32, 1, bytes[i] == 0 && bytes[i + 1] == bytes[1]
+ && bytes[i + 2] == 0 && bytes[i + 3] == 0, 8, 0);
+
+ CHECK (4, 32, 2, bytes[i] == 0 && bytes[i + 1] == 0
+ && bytes[i + 2] == bytes[2] && bytes[i + 3] == 0, 16, 0);
+
+ CHECK (4, 32, 3, bytes[i] == 0 && bytes[i + 1] == 0
+ && bytes[i + 2] == 0 && bytes[i + 3] == bytes[3], 24, 0);
+
+ CHECK (2, 16, 4, bytes[i] == bytes[0] && bytes[i + 1] == 0, 0, 0);
+
+ CHECK (2, 16, 5, bytes[i] == 0 && bytes[i + 1] == bytes[1], 8, 0);
+
+ CHECK (4, 32, 6, bytes[i] == bytes[0] && bytes[i + 1] == 0xff
+ && bytes[i + 2] == 0xff && bytes[i + 3] == 0xff, 0, 1);
+
+ CHECK (4, 32, 7, bytes[i] == 0xff && bytes[i + 1] == bytes[1]
+ && bytes[i + 2] == 0xff && bytes[i + 3] == 0xff, 8, 1);
+
+ CHECK (4, 32, 8, bytes[i] == 0xff && bytes[i + 1] == 0xff
+ && bytes[i + 2] == bytes[2] && bytes[i + 3] == 0xff, 16, 1);
+
+ CHECK (4, 32, 9, bytes[i] == 0xff && bytes[i + 1] == 0xff
+ && bytes[i + 2] == 0xff && bytes[i + 3] == bytes[3], 24, 1);
+
+ CHECK (2, 16, 10, bytes[i] == bytes[0] && bytes[i + 1] == 0xff, 0, 1);
+
+ CHECK (2, 16, 11, bytes[i] == 0xff && bytes[i + 1] == bytes[1], 8, 1);
+
+ CHECK (4, 32, 12, bytes[i] == 0xff && bytes[i + 1] == bytes[1]
+ && bytes[i + 2] == 0 && bytes[i + 3] == 0, 8, 0);
+
+ CHECK (4, 32, 13, bytes[i] == 0 && bytes[i + 1] == bytes[1]
+ && bytes[i + 2] == 0xff && bytes[i + 3] == 0xff, 8, 1);
+
+ CHECK (4, 32, 14, bytes[i] == 0xff && bytes[i + 1] == 0xff
+ && bytes[i + 2] == bytes[2] && bytes[i + 3] == 0, 16, 0);
+
+ CHECK (4, 32, 15, bytes[i] == 0 && bytes[i + 1] == 0
+ && bytes[i + 2] == bytes[2] && bytes[i + 3] == 0xff, 16, 1);
+
+ CHECK (1, 8, 16, bytes[i] == bytes[0], 0, 0);
+
+ CHECK (1, 64, 17, (bytes[i] == 0 || bytes[i] == 0xff)
+ && bytes[i] == bytes[(i + 8) % idx], 0, 0);
+ }
+ while (0);
+
+ if (immtype == -1)
+ return false;
+
+ if (info)
+ {
+ info->element_width = elsize;
+ info->mvn = emvn != 0;
+ info->shift = eshift;
+
+ unsigned HOST_WIDE_INT imm = 0;
+
+ if (immtype >= 12 && immtype <= 15)
+ info->msl = true;
+
+ /* Un-invert bytes of recognized vector, if necessary. */
+ if (invmask != 0)
+ for (i = 0; i < idx; i++)
+ bytes[i] ^= invmask;
+
+ if (immtype == 17)
+ {
+ /* FIXME: Broken on 32-bit H_W_I hosts. */
+ gcc_assert (sizeof (HOST_WIDE_INT) == 8);
+
+ for (i = 0; i < 8; i++)
+ imm |= (unsigned HOST_WIDE_INT) (bytes[i] ? 0xff : 0)
+ << (i * BITS_PER_UNIT);
+
+
+ info->value = GEN_INT (imm);
+ }
+ else
+ {
+ for (i = 0; i < elsize / BITS_PER_UNIT; i++)
+ imm |= (unsigned HOST_WIDE_INT) bytes[i] << (i * BITS_PER_UNIT);
+
+ /* Construct 'abcdefgh' because the assembler cannot handle
+ generic constants. */
+ if (info->mvn)
+ imm = ~imm;
+ imm = (imm >> info->shift) & 0xff;
+ info->value = GEN_INT (imm);
+ }
+ }
+
+ return true;
+#undef CHECK
+}
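+
+/* Worked example (illustrative): a V4SImode vector with every element equal
+ to 0x00ff0000 splats to the little-endian byte pattern 00 00 ff 00 repeated
+ four times, which matches the CHECK (4, 32, 2, ...) case above: immtype 2,
+ element width 32, shift 16, no inversion. INFO->value then becomes 0xff,
+ i.e. the constant can be generated with something along the lines of
+ "movi v0.4s, #0xff, lsl #16". */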
+
+static bool
+aarch64_const_vec_all_same_int_p (rtx x,
+ HOST_WIDE_INT minval,
+ HOST_WIDE_INT maxval)
+{
+ HOST_WIDE_INT firstval;
+ int count, i;
+
+ if (GET_CODE (x) != CONST_VECTOR
+ || GET_MODE_CLASS (GET_MODE (x)) != MODE_VECTOR_INT)
+ return false;
+
+ firstval = INTVAL (CONST_VECTOR_ELT (x, 0));
+ if (firstval < minval || firstval > maxval)
+ return false;
+
+ count = CONST_VECTOR_NUNITS (x);
+ for (i = 1; i < count; i++)
+ if (INTVAL (CONST_VECTOR_ELT (x, i)) != firstval)
+ return false;
+
+ return true;
+}
+
+/* Check whether immediate shift constants are within range. */
+bool
+aarch64_simd_shift_imm_p (rtx x, enum machine_mode mode, bool left)
+{
+ int bit_width = GET_MODE_UNIT_SIZE (mode) * BITS_PER_UNIT;
+ if (left)
+ return aarch64_const_vec_all_same_int_p (x, 0, bit_width - 1);
+ else
+ return aarch64_const_vec_all_same_int_p (x, 1, bit_width);
+}
+
+/* Return true if X is a uniform vector where all elements
+ are either the floating-point constant 0.0 or the
+ integer constant 0. */
+bool
+aarch64_simd_imm_zero_p (rtx x, enum machine_mode mode)
+{
+ return x == CONST0_RTX (mode);
+}
+
+bool
+aarch64_simd_imm_scalar_p (rtx x, enum machine_mode mode ATTRIBUTE_UNUSED)
+{
+ HOST_WIDE_INT imm = INTVAL (x);
+ int i;
+
+ for (i = 0; i < 8; i++)
+ {
+ unsigned int byte = imm & 0xff;
+ if (byte != 0xff && byte != 0)
+ return false;
+ imm >>= 8;
+ }
+
+ return true;
+}
+
+bool
+aarch64_mov_operand_p (rtx x,
+ enum aarch64_symbol_context context,
+ enum machine_mode mode)
+{
+ if (GET_CODE (x) == HIGH
+ && aarch64_valid_symref (XEXP (x, 0), GET_MODE (XEXP (x, 0))))
+ return true;
+
+ if (CONST_INT_P (x) && aarch64_move_imm (INTVAL (x), mode))
+ return true;
+
+ if (GET_CODE (x) == SYMBOL_REF && mode == DImode && CONSTANT_ADDRESS_P (x))
+ return true;
+
+ return aarch64_classify_symbolic_expression (x, context)
+ == SYMBOL_TINY_ABSOLUTE;
+}
+
+/* Return a const_int vector of VAL. */
+rtx
+aarch64_simd_gen_const_vector_dup (enum machine_mode mode, int val)
+{
+ int nunits = GET_MODE_NUNITS (mode);
+ rtvec v = rtvec_alloc (nunits);
+ int i;
+
+ for (i=0; i < nunits; i++)
+ RTVEC_ELT (v, i) = GEN_INT (val);
+
+ return gen_rtx_CONST_VECTOR (mode, v);
+}
+
+/* Check OP is a legal scalar immediate for the MOVI instruction. */
+
+bool
+aarch64_simd_scalar_immediate_valid_for_move (rtx op, enum machine_mode mode)
+{
+ enum machine_mode vmode;
+
+ gcc_assert (!VECTOR_MODE_P (mode));
+ vmode = aarch64_preferred_simd_mode (mode);
+ rtx op_v = aarch64_simd_gen_const_vector_dup (vmode, INTVAL (op));
+ return aarch64_simd_valid_immediate (op_v, vmode, false, NULL);
+}
+
+/* Construct and return a PARALLEL RTX vector. */
+rtx
+aarch64_simd_vect_par_cnst_half (enum machine_mode mode, bool high)
+{
+ int nunits = GET_MODE_NUNITS (mode);
+ rtvec v = rtvec_alloc (nunits / 2);
+ int base = high ? nunits / 2 : 0;
+ rtx t1;
+ int i;
+
+ for (i=0; i < nunits / 2; i++)
+ RTVEC_ELT (v, i) = GEN_INT (base + i);
+
+ t1 = gen_rtx_PARALLEL (mode, v);
+ return t1;
+}
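+
+/* For example (illustrative): for V4SImode this returns the PARALLEL
+ [(const_int 0) (const_int 1)] when HIGH is false and
+ [(const_int 2) (const_int 3)] when HIGH is true, i.e. the lane indices that
+ select the low or the high half of the vector. */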
+
+/* Bounds-check lanes. Ensure OPERAND lies between LOW (inclusive) and
+ HIGH (exclusive). */
+void
+aarch64_simd_lane_bounds (rtx operand, HOST_WIDE_INT low, HOST_WIDE_INT high)
+{
+ HOST_WIDE_INT lane;
+ gcc_assert (GET_CODE (operand) == CONST_INT);
+ lane = INTVAL (operand);
+
+ if (lane < low || lane >= high)
+ error ("lane out of range");
+}
+
+void
+aarch64_simd_const_bounds (rtx operand, HOST_WIDE_INT low, HOST_WIDE_INT high)
+{
+ gcc_assert (GET_CODE (operand) == CONST_INT);
+ HOST_WIDE_INT lane = INTVAL (operand);
+
+ if (lane < low || lane >= high)
+ error ("constant out of range");
+}
+
+/* Emit code to reinterpret one AdvSIMD type as another,
+ without altering bits. */
+void
+aarch64_simd_reinterpret (rtx dest, rtx src)
+{
+ emit_move_insn (dest, gen_lowpart (GET_MODE (dest), src));
+}
+
+/* Emit code to place an AdvSIMD pair result in memory locations (with equal
+ registers). */
+void
+aarch64_simd_emit_pair_result_insn (enum machine_mode mode,
+ rtx (*intfn) (rtx, rtx, rtx), rtx destaddr,
+ rtx op1)
+{
+ rtx mem = gen_rtx_MEM (mode, destaddr);
+ rtx tmp1 = gen_reg_rtx (mode);
+ rtx tmp2 = gen_reg_rtx (mode);
+
+ emit_insn (intfn (tmp1, op1, tmp2));
+
+ emit_move_insn (mem, tmp1);
+ mem = adjust_address (mem, mode, GET_MODE_SIZE (mode));
+ emit_move_insn (mem, tmp2);
+}
+
+/* Return TRUE if OP is a valid vector addressing mode. */
+bool
+aarch64_simd_mem_operand_p (rtx op)
+{
+ return MEM_P (op) && (GET_CODE (XEXP (op, 0)) == POST_INC
+ || GET_CODE (XEXP (op, 0)) == REG);
+}
+
+/* Set up OPERANDS for a register copy from SRC to DEST, taking care
+ not to early-clobber SRC registers in the process.
+
+ We assume that the operands described by SRC and DEST represent a
+ decomposed copy of OPERANDS[1] into OPERANDS[0]. COUNT is the
+ number of components into which the copy has been decomposed. */
+void
+aarch64_simd_disambiguate_copy (rtx *operands, rtx *dest,
+ rtx *src, unsigned int count)
+{
+ unsigned int i;
+
+ if (!reg_overlap_mentioned_p (operands[0], operands[1])
+ || REGNO (operands[0]) < REGNO (operands[1]))
+ {
+ for (i = 0; i < count; i++)
+ {
+ operands[2 * i] = dest[i];
+ operands[2 * i + 1] = src[i];
+ }
+ }
+ else
+ {
+ for (i = 0; i < count; i++)
+ {
+ operands[2 * i] = dest[count - i - 1];
+ operands[2 * i + 1] = src[count - i - 1];
+ }
+ }
+}
+
+/* Compute and return the length of aarch64_simd_mov<mode>, where <mode> is
+ one of VSTRUCT modes: OI, CI or XI. */
+int
+aarch64_simd_attr_length_move (rtx insn)
+{
+ enum machine_mode mode;
+
+ extract_insn_cached (insn);
+
+ if (REG_P (recog_data.operand[0]) && REG_P (recog_data.operand[1]))
+ {
+ mode = GET_MODE (recog_data.operand[0]);
+ switch (mode)
+ {
+ case OImode:
+ return 8;
+ case CImode:
+ return 12;
+ case XImode:
+ return 16;
+ default:
+ gcc_unreachable ();
+ }
+ }
+ return 4;
+}
+
+/* Implement target hook TARGET_VECTOR_ALIGNMENT. The AAPCS64 sets the maximum
+ alignment of a vector to 128 bits. */
+static HOST_WIDE_INT
+aarch64_simd_vector_alignment (const_tree type)
+{
+ HOST_WIDE_INT align = tree_to_shwi (TYPE_SIZE (type));
+ return MIN (align, 128);
+}
+
+/* Implement target hook TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE. */
+static bool
+aarch64_simd_vector_alignment_reachable (const_tree type, bool is_packed)
+{
+ if (is_packed)
+ return false;
+
+ /* We guarantee alignment for vectors up to 128-bits. */
+ if (tree_int_cst_compare (TYPE_SIZE (type),
+ bitsize_int (BIGGEST_ALIGNMENT)) > 0)
+ return false;
+
+ /* Vectors whose size is <= BIGGEST_ALIGNMENT are naturally aligned. */
+ return true;
+}
+
+/* If VALS is a vector constant that can be loaded into a register
+ using DUP, generate instructions to do so and return an RTX to
+ assign to the register. Otherwise return NULL_RTX. */
+static rtx
+aarch64_simd_dup_constant (rtx vals)
+{
+ enum machine_mode mode = GET_MODE (vals);
+ enum machine_mode inner_mode = GET_MODE_INNER (mode);
+ int n_elts = GET_MODE_NUNITS (mode);
+ bool all_same = true;
+ rtx x;
+ int i;
+
+ if (GET_CODE (vals) != CONST_VECTOR)
+ return NULL_RTX;
+
+ for (i = 1; i < n_elts; ++i)
+ {
+ x = CONST_VECTOR_ELT (vals, i);
+ if (!rtx_equal_p (x, CONST_VECTOR_ELT (vals, 0)))
+ all_same = false;
+ }
+
+ if (!all_same)
+ return NULL_RTX;
+
+ /* We can load this constant by using DUP and a constant in a
+ single general-purpose register. This will be cheaper than a vector
+ load. */
+ x = copy_to_mode_reg (inner_mode, CONST_VECTOR_ELT (vals, 0));
+ return gen_rtx_VEC_DUPLICATE (mode, x);
+}
+
+
+/* Generate code to load VALS, which is a PARALLEL containing only
+ constants (for vec_init) or CONST_VECTOR, efficiently into a
+ register. Returns an RTX to copy into the register, or NULL_RTX
+ for a PARALLEL that can not be converted into a CONST_VECTOR. */
+static rtx
+aarch64_simd_make_constant (rtx vals)
+{
+ enum machine_mode mode = GET_MODE (vals);
+ rtx const_dup;
+ rtx const_vec = NULL_RTX;
+ int n_elts = GET_MODE_NUNITS (mode);
+ int n_const = 0;
+ int i;
+
+ if (GET_CODE (vals) == CONST_VECTOR)
+ const_vec = vals;
+ else if (GET_CODE (vals) == PARALLEL)
+ {
+ /* A CONST_VECTOR must contain only CONST_INTs and
+ CONST_DOUBLEs, but CONSTANT_P allows more (e.g. SYMBOL_REF).
+ Only store valid constants in a CONST_VECTOR. */
+ for (i = 0; i < n_elts; ++i)
+ {
+ rtx x = XVECEXP (vals, 0, i);
+ if (CONST_INT_P (x) || CONST_DOUBLE_P (x))
+ n_const++;
+ }
+ if (n_const == n_elts)
+ const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
+ }
+ else
+ gcc_unreachable ();
+
+ if (const_vec != NULL_RTX
+ && aarch64_simd_valid_immediate (const_vec, mode, false, NULL))
+ /* Load using MOVI/MVNI. */
+ return const_vec;
+ else if ((const_dup = aarch64_simd_dup_constant (vals)) != NULL_RTX)
+ /* Loaded using DUP. */
+ return const_dup;
+ else if (const_vec != NULL_RTX)
+ /* Load from constant pool. We can not take advantage of single-cycle
+ LD1 because we need a PC-relative addressing mode. */
+ return const_vec;
+ else
+ /* A PARALLEL containing something not valid inside CONST_VECTOR.
+ We can not construct an initializer. */
+ return NULL_RTX;
+}
+
+void
+aarch64_expand_vector_init (rtx target, rtx vals)
+{
+ enum machine_mode mode = GET_MODE (target);
+ enum machine_mode inner_mode = GET_MODE_INNER (mode);
+ int n_elts = GET_MODE_NUNITS (mode);
+ int n_var = 0, one_var = -1;
+ bool all_same = true;
+ rtx x, mem;
+ int i;
+
+ x = XVECEXP (vals, 0, 0);
+ if (!CONST_INT_P (x) && !CONST_DOUBLE_P (x))
+ n_var = 1, one_var = 0;
+
+ for (i = 1; i < n_elts; ++i)
+ {
+ x = XVECEXP (vals, 0, i);
+ if (!CONST_INT_P (x) && !CONST_DOUBLE_P (x))
+ ++n_var, one_var = i;
+
+ if (!rtx_equal_p (x, XVECEXP (vals, 0, 0)))
+ all_same = false;
+ }
+
+ if (n_var == 0)
+ {
+ rtx constant = aarch64_simd_make_constant (vals);
+ if (constant != NULL_RTX)
+ {
+ emit_move_insn (target, constant);
+ return;
+ }
+ }
+
+ /* Splat a single non-constant element if we can. */
+ if (all_same)
+ {
+ x = copy_to_mode_reg (inner_mode, XVECEXP (vals, 0, 0));
+ aarch64_emit_move (target, gen_rtx_VEC_DUPLICATE (mode, x));
+ return;
+ }
+
+ /* One field is non-constant. Load constant then overwrite varying
+ field. This is more efficient than using the stack. */
+ if (n_var == 1)
+ {
+ rtx copy = copy_rtx (vals);
+ rtx index = GEN_INT (one_var);
+ enum insn_code icode;
+
+ /* Load constant part of vector, substitute neighboring value for
+ varying element. */
+ XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, one_var ^ 1);
+ aarch64_expand_vector_init (target, copy);
+
+ /* Insert variable. */
+ x = copy_to_mode_reg (inner_mode, XVECEXP (vals, 0, one_var));
+ icode = optab_handler (vec_set_optab, mode);
+ gcc_assert (icode != CODE_FOR_nothing);
+ emit_insn (GEN_FCN (icode) (target, x, index));
+ return;
+ }
+
+ /* Construct the vector in memory one field at a time
+ and load the whole vector. */
+ mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
+ for (i = 0; i < n_elts; i++)
+ emit_move_insn (adjust_address_nv (mem, inner_mode,
+ i * GET_MODE_SIZE (inner_mode)),
+ XVECEXP (vals, 0, i));
+ emit_move_insn (target, mem);
+
+}
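Editorial note, not part of the patch: the strategies above (MOVI/MVNI or a literal-pool load for an all-constant vector, DUP for a splat, constant-plus-insert when a single element varies, and a stack temporary otherwise) can be reached from C through GCC's generic vector extension. A hedged sketch; function names are hypothetical and the comments describe the expected, not guaranteed, code selection:

    typedef int v4si __attribute__ ((vector_size (16)));

    v4si all_constant (void) { return (v4si) { 1, 1, 1, 1 }; } /* likely MOVI           */
    v4si splat (int x)       { return (v4si) { x, x, x, x }; } /* likely DUP            */
    v4si one_varies (int x)  { return (v4si) { 1, 2, x, 4 }; } /* constant, then insert */
    v4si all_vary (int a, int b, int c, int d)
    {
      return (v4si) { a, b, c, d };                            /* stack temporary path  */
    }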
+
+static unsigned HOST_WIDE_INT
+aarch64_shift_truncation_mask (enum machine_mode mode)
+{
+ return
+ (aarch64_vector_mode_supported_p (mode)
+ || aarch64_vect_struct_mode_p (mode)) ? 0 : (GET_MODE_BITSIZE (mode) - 1);
+}
+
+#ifndef TLS_SECTION_ASM_FLAG
+#define TLS_SECTION_ASM_FLAG 'T'
+#endif
+
+void
+aarch64_elf_asm_named_section (const char *name, unsigned int flags,
+ tree decl ATTRIBUTE_UNUSED)
+{
+ char flagchars[10], *f = flagchars;
+
+ /* If we have already declared this section, we can use an
+ abbreviated form to switch back to it -- unless this section is
+ part of a COMDAT group, in which case GAS requires the full
+ declaration every time. */
+ if (!(HAVE_COMDAT_GROUP && (flags & SECTION_LINKONCE))
+ && (flags & SECTION_DECLARED))
+ {
+ fprintf (asm_out_file, "\t.section\t%s\n", name);
+ return;
+ }
+
+ if (!(flags & SECTION_DEBUG))
+ *f++ = 'a';
+ if (flags & SECTION_WRITE)
+ *f++ = 'w';
+ if (flags & SECTION_CODE)
+ *f++ = 'x';
+ if (flags & SECTION_SMALL)
+ *f++ = 's';
+ if (flags & SECTION_MERGE)
+ *f++ = 'M';
+ if (flags & SECTION_STRINGS)
+ *f++ = 'S';
+ if (flags & SECTION_TLS)
+ *f++ = TLS_SECTION_ASM_FLAG;
+ if (HAVE_COMDAT_GROUP && (flags & SECTION_LINKONCE))
+ *f++ = 'G';
+ *f = '\0';
+
+ fprintf (asm_out_file, "\t.section\t%s,\"%s\"", name, flagchars);
+
+ if (!(flags & SECTION_NOTYPE))
+ {
+ const char *type;
+ const char *format;
+
+ if (flags & SECTION_BSS)
+ type = "nobits";
+ else
+ type = "progbits";
+
+#ifdef TYPE_OPERAND_FMT
+ format = "," TYPE_OPERAND_FMT;
+#else
+ format = ",@%s";
+#endif
+
+ fprintf (asm_out_file, format, type);
+
+ if (flags & SECTION_ENTSIZE)
+ fprintf (asm_out_file, ",%d", flags & SECTION_ENTSIZE);
+ if (HAVE_COMDAT_GROUP && (flags & SECTION_LINKONCE))
+ {
+ if (TREE_CODE (decl) == IDENTIFIER_NODE)
+ fprintf (asm_out_file, ",%s,comdat", IDENTIFIER_POINTER (decl));
+ else
+ fprintf (asm_out_file, ",%s,comdat",
+ IDENTIFIER_POINTER (DECL_COMDAT_GROUP (decl)));
+ }
+ }
+
+ putc ('\n', asm_out_file);
+}
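Editorial note, not part of the patch: a writable named section requested from C exercises this hook; the section name below is hypothetical, and the directive in the comment is what the flag handling above is expected to produce.

    /* With the hook above, the assembler output should resemble
         .section  .mydata,"aw",@progbits  */
    int counter __attribute__ ((section (".mydata"))) = 1;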
+
+/* Select a format to encode pointers in exception handling data. */
+int
+aarch64_asm_preferred_eh_data_format (int code ATTRIBUTE_UNUSED, int global)
+{
+ int type;
+ switch (aarch64_cmodel)
+ {
+ case AARCH64_CMODEL_TINY:
+ case AARCH64_CMODEL_TINY_PIC:
+ case AARCH64_CMODEL_SMALL:
+ case AARCH64_CMODEL_SMALL_PIC:
+ /* text+got+data < 4Gb. 4-byte signed relocs are sufficient
+ for everything. */
+ type = DW_EH_PE_sdata4;
+ break;
+ default:
+ /* No assumptions here. 8-byte relocs required. */
+ type = DW_EH_PE_sdata8;
+ break;
+ }
+ return (global ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | type;
+}
+
+/* Emit load exclusive. */
+
+static void
+aarch64_emit_load_exclusive (enum machine_mode mode, rtx rval,
+ rtx mem, rtx model_rtx)
+{
+ rtx (*gen) (rtx, rtx, rtx);
+
+ switch (mode)
+ {
+ case QImode: gen = gen_aarch64_load_exclusiveqi; break;
+ case HImode: gen = gen_aarch64_load_exclusivehi; break;
+ case SImode: gen = gen_aarch64_load_exclusivesi; break;
+ case DImode: gen = gen_aarch64_load_exclusivedi; break;
+ default:
+ gcc_unreachable ();
+ }
+
+ emit_insn (gen (rval, mem, model_rtx));
+}
+
+/* Emit store exclusive. */
+
+static void
+aarch64_emit_store_exclusive (enum machine_mode mode, rtx bval,
+ rtx rval, rtx mem, rtx model_rtx)
+{
+ rtx (*gen) (rtx, rtx, rtx, rtx);
+
+ switch (mode)
+ {
+ case QImode: gen = gen_aarch64_store_exclusiveqi; break;
+ case HImode: gen = gen_aarch64_store_exclusivehi; break;
+ case SImode: gen = gen_aarch64_store_exclusivesi; break;
+ case DImode: gen = gen_aarch64_store_exclusivedi; break;
+ default:
+ gcc_unreachable ();
+ }
+
+ emit_insn (gen (bval, rval, mem, model_rtx));
+}
+
+/* Emit the jump instruction described by INSN and mark it as unlikely
+ to be taken. */
+
+static void
+aarch64_emit_unlikely_jump (rtx insn)
+{
+ int very_unlikely = REG_BR_PROB_BASE / 100 - 1;
+
+ insn = emit_jump_insn (insn);
+ add_int_reg_note (insn, REG_BR_PROB, very_unlikely);
+}
+
+/* Expand a compare and swap pattern. */
+
+void
+aarch64_expand_compare_and_swap (rtx operands[])
+{
+ rtx bval, rval, mem, oldval, newval, is_weak, mod_s, mod_f, x;
+ enum machine_mode mode, cmp_mode;
+ rtx (*gen) (rtx, rtx, rtx, rtx, rtx, rtx, rtx);
+
+ bval = operands[0];
+ rval = operands[1];
+ mem = operands[2];
+ oldval = operands[3];
+ newval = operands[4];
+ is_weak = operands[5];
+ mod_s = operands[6];
+ mod_f = operands[7];
+ mode = GET_MODE (mem);
+ cmp_mode = mode;
+
+ /* Normally the succ memory model must be stronger than fail, but in the
+ unlikely event of fail being ACQUIRE and succ being RELEASE we need to
+ promote succ to ACQ_REL so that we don't lose the acquire semantics. */
+
+ if (INTVAL (mod_f) == MEMMODEL_ACQUIRE
+ && INTVAL (mod_s) == MEMMODEL_RELEASE)
+ mod_s = GEN_INT (MEMMODEL_ACQ_REL);
+
+ switch (mode)
+ {
+ case QImode:
+ case HImode:
+ /* For short modes, we're going to perform the comparison in SImode,
+ so do the zero-extension now. */
+ cmp_mode = SImode;
+ rval = gen_reg_rtx (SImode);
+ oldval = convert_modes (SImode, mode, oldval, true);
+ /* Fall through. */
+
+ case SImode:
+ case DImode:
+ /* Force the value into a register if needed. */
+ if (!aarch64_plus_operand (oldval, mode))
+ oldval = force_reg (cmp_mode, oldval);
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ switch (mode)
+ {
+ case QImode: gen = gen_atomic_compare_and_swapqi_1; break;
+ case HImode: gen = gen_atomic_compare_and_swaphi_1; break;
+ case SImode: gen = gen_atomic_compare_and_swapsi_1; break;
+ case DImode: gen = gen_atomic_compare_and_swapdi_1; break;
+ default:
+ gcc_unreachable ();
+ }
+
+ emit_insn (gen (rval, mem, oldval, newval, is_weak, mod_s, mod_f));
+
+ if (mode == QImode || mode == HImode)
+ emit_move_insn (operands[1], gen_lowpart (mode, rval));
+
+ x = gen_rtx_REG (CCmode, CC_REGNUM);
+ x = gen_rtx_EQ (SImode, x, const0_rtx);
+ emit_insn (gen_rtx_SET (VOIDmode, bval, x));
+}
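Editorial note, not part of the patch: this expander services the __atomic compare-exchange built-ins. A minimal sketch (function name hypothetical):

    #include <stdbool.h>

    /* Expands via atomic_compare_and_swapsi into a load-exclusive /
       compare / store-exclusive sequence on this port.  */
    bool
    cas_int (int *p, int expected, int desired)
    {
      return __atomic_compare_exchange_n (p, &expected, desired,
                                          /*weak=*/false,
                                          __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
    }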
+
+/* Split a compare and swap pattern. */
+
+void
+aarch64_split_compare_and_swap (rtx operands[])
+{
+ rtx rval, mem, oldval, newval, scratch;
+ enum machine_mode mode;
+ bool is_weak;
+ rtx label1, label2, x, cond;
+
+ rval = operands[0];
+ mem = operands[1];
+ oldval = operands[2];
+ newval = operands[3];
+ is_weak = (operands[4] != const0_rtx);
+ scratch = operands[7];
+ mode = GET_MODE (mem);
+
+ label1 = NULL_RTX;
+ if (!is_weak)
+ {
+ label1 = gen_label_rtx ();
+ emit_label (label1);
+ }
+ label2 = gen_label_rtx ();
+
+ aarch64_emit_load_exclusive (mode, rval, mem, operands[5]);
+
+ cond = aarch64_gen_compare_reg (NE, rval, oldval);
+ x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
+ x = gen_rtx_IF_THEN_ELSE (VOIDmode, x,
+ gen_rtx_LABEL_REF (Pmode, label2), pc_rtx);
+ aarch64_emit_unlikely_jump (gen_rtx_SET (VOIDmode, pc_rtx, x));
+
+ aarch64_emit_store_exclusive (mode, scratch, mem, newval, operands[5]);
+
+ if (!is_weak)
+ {
+ x = gen_rtx_NE (VOIDmode, scratch, const0_rtx);
+ x = gen_rtx_IF_THEN_ELSE (VOIDmode, x,
+ gen_rtx_LABEL_REF (Pmode, label1), pc_rtx);
+ aarch64_emit_unlikely_jump (gen_rtx_SET (VOIDmode, pc_rtx, x));
+ }
+ else
+ {
+ cond = gen_rtx_REG (CCmode, CC_REGNUM);
+ x = gen_rtx_COMPARE (CCmode, scratch, const0_rtx);
+ emit_insn (gen_rtx_SET (VOIDmode, cond, x));
+ }
+
+ emit_label (label2);
+}
+
+/* Split an atomic operation. */
+
+void
+aarch64_split_atomic_op (enum rtx_code code, rtx old_out, rtx new_out, rtx mem,
+ rtx value, rtx model_rtx, rtx cond)
+{
+ enum machine_mode mode = GET_MODE (mem);
+ enum machine_mode wmode = (mode == DImode ? DImode : SImode);
+ rtx label, x;
+
+ label = gen_label_rtx ();
+ emit_label (label);
+
+ if (new_out)
+ new_out = gen_lowpart (wmode, new_out);
+ if (old_out)
+ old_out = gen_lowpart (wmode, old_out);
+ else
+ old_out = new_out;
+ value = simplify_gen_subreg (wmode, value, mode, 0);
+
+ aarch64_emit_load_exclusive (mode, old_out, mem, model_rtx);
+
+ switch (code)
+ {
+ case SET:
+ new_out = value;
+ break;
+
+ case NOT:
+ x = gen_rtx_AND (wmode, old_out, value);
+ emit_insn (gen_rtx_SET (VOIDmode, new_out, x));
+ x = gen_rtx_NOT (wmode, new_out);
+ emit_insn (gen_rtx_SET (VOIDmode, new_out, x));
+ break;
+
+ case MINUS:
+ if (CONST_INT_P (value))
+ {
+ value = GEN_INT (-INTVAL (value));
+ code = PLUS;
+ }
+ /* Fall through. */
+
+ default:
+ x = gen_rtx_fmt_ee (code, wmode, old_out, value);
+ emit_insn (gen_rtx_SET (VOIDmode, new_out, x));
+ break;
+ }
+
+ aarch64_emit_store_exclusive (mode, cond, mem,
+ gen_lowpart (mode, new_out), model_rtx);
+
+ x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
+ x = gen_rtx_IF_THEN_ELSE (VOIDmode, x,
+ gen_rtx_LABEL_REF (Pmode, label), pc_rtx);
+ aarch64_emit_unlikely_jump (gen_rtx_SET (VOIDmode, pc_rtx, x));
+}
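Editorial note, not part of the patch: the splitter above is what turns the __atomic fetch-and-op built-ins into a load-exclusive / operate / store-exclusive retry loop. A minimal sketch (function name hypothetical):

    /* Expected to expand to roughly:
         1: ldxr x2, [x0] ; add x2, x2, x1 ; stxr w3, x2, [x0] ; cbnz w3, 1b  */
    long
    fetch_add (long *p, long v)
    {
      return __atomic_fetch_add (p, v, __ATOMIC_RELAXED);
    }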
+
+static void
+aarch64_print_extension (void)
+{
+ const struct aarch64_option_extension *opt = NULL;
+
+ for (opt = all_extensions; opt->name != NULL; opt++)
+ if ((aarch64_isa_flags & opt->flags_on) == opt->flags_on)
+ asm_fprintf (asm_out_file, "+%s", opt->name);
+
+ asm_fprintf (asm_out_file, "\n");
+}
+
+static void
+aarch64_start_file (void)
+{
+ if (selected_arch)
+ {
+ asm_fprintf (asm_out_file, "\t.arch %s", selected_arch->name);
+ aarch64_print_extension ();
+ }
+ else if (selected_cpu)
+ {
+ const char *truncated_name
+ = aarch64_rewrite_selected_cpu (selected_cpu->name);
+ asm_fprintf (asm_out_file, "\t.cpu %s", truncated_name);
+ aarch64_print_extension ();
+ }
+ default_file_start();
+}
+
+/* Target hook for c_mode_for_suffix. */
+static enum machine_mode
+aarch64_c_mode_for_suffix (char suffix)
+{
+ if (suffix == 'q')
+ return TFmode;
+
+ return VOIDmode;
+}
+
+/* We can only represent floating point constants which will fit in
+ "quarter-precision" values. These values are characterised by
+ a sign bit, a 4-bit mantissa and a 3-bit exponent, and are given
+ by:
+
+ (-1)^s * (n/16) * 2^r
+
+ Where:
+ 's' is the sign bit.
+ 'n' is an integer in the range 16 <= n <= 31.
+ 'r' is an integer in the range -3 <= r <= 4. */
+
+/* Return true iff X can be represented as a quarter-precision
+ floating point immediate operand. Note, we cannot represent 0.0. */
+bool
+aarch64_float_const_representable_p (rtx x)
+{
+ /* This represents our current view of how many bits
+ make up the mantissa. */
+ int point_pos = 2 * HOST_BITS_PER_WIDE_INT - 1;
+ int exponent;
+ unsigned HOST_WIDE_INT mantissa, mask;
+ HOST_WIDE_INT m1, m2;
+ REAL_VALUE_TYPE r, m;
+
+ if (!CONST_DOUBLE_P (x))
+ return false;
+
+ REAL_VALUE_FROM_CONST_DOUBLE (r, x);
+
+ /* We cannot represent infinities, NaNs or +/-zero. We won't
+ know if we have +zero until we analyse the mantissa, but we
+ can reject the other invalid values. */
+ if (REAL_VALUE_ISINF (r) || REAL_VALUE_ISNAN (r)
+ || REAL_VALUE_MINUS_ZERO (r))
+ return false;
+
+ /* Extract exponent. */
+ r = real_value_abs (&r);
+ exponent = REAL_EXP (&r);
+
+ /* For the mantissa, we expand into two HOST_WIDE_INTS, apart from the
+ highest (sign) bit, with a fixed binary point at bit point_pos.
+ m1 holds the low part of the mantissa, m2 the high part.
+ WARNING: If we ever have a representation using more than 2 * H_W_I - 1
+ bits for the mantissa, this can fail (low bits will be lost). */
+ real_ldexp (&m, &r, point_pos - exponent);
+ REAL_VALUE_TO_INT (&m1, &m2, m);
+
+ /* If the low part of the mantissa has bits set we cannot represent
+ the value. */
+ if (m1 != 0)
+ return false;
+ /* We have rejected the lower HOST_WIDE_INT, so update our
+ understanding of how many bits lie in the mantissa and
+ look only at the high HOST_WIDE_INT. */
+ mantissa = m2;
+ point_pos -= HOST_BITS_PER_WIDE_INT;
+
+ /* We can only represent values with a mantissa of the form 1.xxxx. */
+ mask = ((unsigned HOST_WIDE_INT)1 << (point_pos - 5)) - 1;
+ if ((mantissa & mask) != 0)
+ return false;
+
+ /* Having filtered unrepresentable values, we may now remove all
+ but the highest 5 bits. */
+ mantissa >>= point_pos - 5;
+
+ /* We cannot represent the value 0.0, so reject it. This is handled
+ elsewhere. */
+ if (mantissa == 0)
+ return false;
+
+ /* Then, as bit 4 is always set, we can mask it off, leaving
+ the mantissa in the range [0, 15]. */
+ mantissa &= ~(1 << 4);
+ gcc_assert (mantissa <= 15);
+
+ /* GCC internally does not use IEEE754-like encoding (where normalized
+ significands are in the range [1, 2). GCC uses [0.5, 1) (see real.c).
+ Our mantissa values are shifted 4 places to the left relative to
+ normalized IEEE754 so we must modify the exponent returned by REAL_EXP
+ by 5 places to correct for GCC's representation. */
+ exponent = 5 - exponent;
+
+ return (exponent >= 0 && exponent <= 7);
+}
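Editorial note, not part of the patch: a few worked instances of the (-1)^s * (n/16) * 2^r encoding checked above, with 16 <= n <= 31 and -3 <= r <= 4:

    /* 1.0   = (16/16) * 2^0   -> representable (e.g. usable as an FMOV immediate)
       0.5   = (16/16) * 2^-1  -> representable
       31.0  = (31/16) * 2^4   -> the largest representable value
       0.125 = (16/16) * 2^-3  -> the smallest positive representable value
       0.1   has no such n, r  -> not representable; loaded some other way        */
    const double fmov_candidates[] = { 1.0, 0.5, 31.0, 0.125, 0.1 };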
+
+char*
+aarch64_output_simd_mov_immediate (rtx const_vector,
+ enum machine_mode mode,
+ unsigned width)
+{
+ bool is_valid;
+ static char templ[40];
+ const char *mnemonic;
+ const char *shift_op;
+ unsigned int lane_count = 0;
+ char element_char;
+
+ struct simd_immediate_info info = { NULL_RTX, 0, 0, false, false };
+
+ /* This will return true if CONST_VECTOR is legal for use as an immediate
+ for an AdvSIMD MOVI instruction (or, implicitly, MVNI). It will
+ also update INFO to show how the immediate should be generated. */
+ is_valid = aarch64_simd_valid_immediate (const_vector, mode, false, &info);
+ gcc_assert (is_valid);
+
+ element_char = sizetochar (info.element_width);
+ lane_count = width / info.element_width;
+
+ mode = GET_MODE_INNER (mode);
+ if (mode == SFmode || mode == DFmode)
+ {
+ gcc_assert (info.shift == 0 && ! info.mvn);
+ if (aarch64_float_const_zero_rtx_p (info.value))
+ info.value = GEN_INT (0);
+ else
+ {
+#define buf_size 20
+ REAL_VALUE_TYPE r;
+ REAL_VALUE_FROM_CONST_DOUBLE (r, info.value);
+ char float_buf[buf_size] = {'\0'};
+ real_to_decimal_for_mode (float_buf, &r, buf_size, buf_size, 1, mode);
+#undef buf_size
+
+ if (lane_count == 1)
+ snprintf (templ, sizeof (templ), "fmov\t%%d0, %s", float_buf);
+ else
+ snprintf (templ, sizeof (templ), "fmov\t%%0.%d%c, %s",
+ lane_count, element_char, float_buf);
+ return templ;
+ }
+ }
+
+ mnemonic = info.mvn ? "mvni" : "movi";
+ shift_op = info.msl ? "msl" : "lsl";
+
+ if (lane_count == 1)
+ snprintf (templ, sizeof (templ), "%s\t%%d0, " HOST_WIDE_INT_PRINT_HEX,
+ mnemonic, UINTVAL (info.value));
+ else if (info.shift)
+ snprintf (templ, sizeof (templ), "%s\t%%0.%d%c, " HOST_WIDE_INT_PRINT_HEX
+ ", %s %d", mnemonic, lane_count, element_char,
+ UINTVAL (info.value), shift_op, info.shift);
+ else
+ snprintf (templ, sizeof (templ), "%s\t%%0.%d%c, " HOST_WIDE_INT_PRINT_HEX,
+ mnemonic, lane_count, element_char, UINTVAL (info.value));
+ return templ;
+}
+
+char*
+aarch64_output_scalar_simd_mov_immediate (rtx immediate,
+ enum machine_mode mode)
+{
+ enum machine_mode vmode;
+
+ gcc_assert (!VECTOR_MODE_P (mode));
+ vmode = aarch64_simd_container_mode (mode, 64);
+ rtx v_op = aarch64_simd_gen_const_vector_dup (vmode, INTVAL (immediate));
+ return aarch64_output_simd_mov_immediate (v_op, vmode, 64);
+}
+
+/* Split operands into moves from op[1] + op[2] into op[0]. */
+
+void
+aarch64_split_combinev16qi (rtx operands[3])
+{
+ unsigned int dest = REGNO (operands[0]);
+ unsigned int src1 = REGNO (operands[1]);
+ unsigned int src2 = REGNO (operands[2]);
+ enum machine_mode halfmode = GET_MODE (operands[1]);
+ unsigned int halfregs = HARD_REGNO_NREGS (src1, halfmode);
+ rtx destlo, desthi;
+
+ gcc_assert (halfmode == V16QImode);
+
+ if (src1 == dest && src2 == dest + halfregs)
+ {
+ /* No-op move. Can't split to nothing; emit something. */
+ emit_note (NOTE_INSN_DELETED);
+ return;
+ }
+
+ /* Preserve register attributes for variable tracking. */
+ destlo = gen_rtx_REG_offset (operands[0], halfmode, dest, 0);
+ desthi = gen_rtx_REG_offset (operands[0], halfmode, dest + halfregs,
+ GET_MODE_SIZE (halfmode));
+
+ /* Special case of reversed high/low parts. */
+ if (reg_overlap_mentioned_p (operands[2], destlo)
+ && reg_overlap_mentioned_p (operands[1], desthi))
+ {
+ emit_insn (gen_xorv16qi3 (operands[1], operands[1], operands[2]));
+ emit_insn (gen_xorv16qi3 (operands[2], operands[1], operands[2]));
+ emit_insn (gen_xorv16qi3 (operands[1], operands[1], operands[2]));
+ }
+ else if (!reg_overlap_mentioned_p (operands[2], destlo))
+ {
+ /* Try to avoid unnecessary moves if part of the result
+ is in the right place already. */
+ if (src1 != dest)
+ emit_move_insn (destlo, operands[1]);
+ if (src2 != dest + halfregs)
+ emit_move_insn (desthi, operands[2]);
+ }
+ else
+ {
+ if (src2 != dest + halfregs)
+ emit_move_insn (desthi, operands[2]);
+ if (src1 != dest)
+ emit_move_insn (destlo, operands[1]);
+ }
+}
+
+/* vec_perm support. */
+
+#define MAX_VECT_LEN 16
+
+struct expand_vec_perm_d
+{
+ rtx target, op0, op1;
+ unsigned char perm[MAX_VECT_LEN];
+ enum machine_mode vmode;
+ unsigned char nelt;
+ bool one_vector_p;
+ bool testing_p;
+};
+
+/* Generate a variable permutation. */
+
+static void
+aarch64_expand_vec_perm_1 (rtx target, rtx op0, rtx op1, rtx sel)
+{
+ enum machine_mode vmode = GET_MODE (target);
+ bool one_vector_p = rtx_equal_p (op0, op1);
+
+ gcc_checking_assert (vmode == V8QImode || vmode == V16QImode);
+ gcc_checking_assert (GET_MODE (op0) == vmode);
+ gcc_checking_assert (GET_MODE (op1) == vmode);
+ gcc_checking_assert (GET_MODE (sel) == vmode);
+ gcc_checking_assert (TARGET_SIMD);
+
+ if (one_vector_p)
+ {
+ if (vmode == V8QImode)
+ {
+ /* Expand the argument to a V16QI mode by duplicating it. */
+ rtx pair = gen_reg_rtx (V16QImode);
+ emit_insn (gen_aarch64_combinev8qi (pair, op0, op0));
+ emit_insn (gen_aarch64_tbl1v8qi (target, pair, sel));
+ }
+ else
+ {
+ emit_insn (gen_aarch64_tbl1v16qi (target, op0, sel));
+ }
+ }
+ else
+ {
+ rtx pair;
+
+ if (vmode == V8QImode)
+ {
+ pair = gen_reg_rtx (V16QImode);
+ emit_insn (gen_aarch64_combinev8qi (pair, op0, op1));
+ emit_insn (gen_aarch64_tbl1v8qi (target, pair, sel));
+ }
+ else
+ {
+ pair = gen_reg_rtx (OImode);
+ emit_insn (gen_aarch64_combinev16qi (pair, op0, op1));
+ emit_insn (gen_aarch64_tbl2v16qi (target, pair, sel));
+ }
+ }
+}
+
+void
+aarch64_expand_vec_perm (rtx target, rtx op0, rtx op1, rtx sel)
+{
+ enum machine_mode vmode = GET_MODE (target);
+ unsigned int i, nelt = GET_MODE_NUNITS (vmode);
+ bool one_vector_p = rtx_equal_p (op0, op1);
+ rtx rmask[MAX_VECT_LEN], mask;
+
+ gcc_checking_assert (!BYTES_BIG_ENDIAN);
+
+ /* The TBL instruction does not use a modulo index, so we must take care
+ of that ourselves. */
+ mask = GEN_INT (one_vector_p ? nelt - 1 : 2 * nelt - 1);
+ for (i = 0; i < nelt; ++i)
+ rmask[i] = mask;
+ mask = gen_rtx_CONST_VECTOR (vmode, gen_rtvec_v (nelt, rmask));
+ sel = expand_simple_binop (vmode, AND, sel, mask, NULL, 0, OPTAB_LIB_WIDEN);
+
+ aarch64_expand_vec_perm_1 (target, op0, op1, sel);
+}
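Editorial note, not part of the patch: a variable permutation written with __builtin_shuffle reaches this expander, which masks the indices and emits TBL. A minimal sketch (function name hypothetical):

    typedef unsigned char v16qi __attribute__ ((vector_size (16)));

    /* Non-constant selector: the indices are ANDed with 15 here, then TBL is used.  */
    v16qi
    permute (v16qi x, v16qi sel)
    {
      return __builtin_shuffle (x, sel);
    }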
+
+/* Recognize patterns suitable for the TRN instructions. */
+static bool
+aarch64_evpc_trn (struct expand_vec_perm_d *d)
+{
+ unsigned int i, odd, mask, nelt = d->nelt;
+ rtx out, in0, in1, x;
+ rtx (*gen) (rtx, rtx, rtx);
+ enum machine_mode vmode = d->vmode;
+
+ if (GET_MODE_UNIT_SIZE (vmode) > 8)
+ return false;
+
+ /* Note that these are little-endian tests.
+ We correct for big-endian later. */
+ if (d->perm[0] == 0)
+ odd = 0;
+ else if (d->perm[0] == 1)
+ odd = 1;
+ else
+ return false;
+ mask = (d->one_vector_p ? nelt - 1 : 2 * nelt - 1);
+
+ for (i = 0; i < nelt; i += 2)
+ {
+ if (d->perm[i] != i + odd)
+ return false;
+ if (d->perm[i + 1] != ((i + nelt + odd) & mask))
+ return false;
+ }
+
+ /* Success! */
+ if (d->testing_p)
+ return true;
+
+ in0 = d->op0;
+ in1 = d->op1;
+ if (BYTES_BIG_ENDIAN)
+ {
+ x = in0, in0 = in1, in1 = x;
+ odd = !odd;
+ }
+ out = d->target;
+
+ if (odd)
+ {
+ switch (vmode)
+ {
+ case V16QImode: gen = gen_aarch64_trn2v16qi; break;
+ case V8QImode: gen = gen_aarch64_trn2v8qi; break;
+ case V8HImode: gen = gen_aarch64_trn2v8hi; break;
+ case V4HImode: gen = gen_aarch64_trn2v4hi; break;
+ case V4SImode: gen = gen_aarch64_trn2v4si; break;
+ case V2SImode: gen = gen_aarch64_trn2v2si; break;
+ case V2DImode: gen = gen_aarch64_trn2v2di; break;
+ case V4SFmode: gen = gen_aarch64_trn2v4sf; break;
+ case V2SFmode: gen = gen_aarch64_trn2v2sf; break;
+ case V2DFmode: gen = gen_aarch64_trn2v2df; break;
+ default:
+ return false;
+ }
+ }
+ else
+ {
+ switch (vmode)
+ {
+ case V16QImode: gen = gen_aarch64_trn1v16qi; break;
+ case V8QImode: gen = gen_aarch64_trn1v8qi; break;
+ case V8HImode: gen = gen_aarch64_trn1v8hi; break;
+ case V4HImode: gen = gen_aarch64_trn1v4hi; break;
+ case V4SImode: gen = gen_aarch64_trn1v4si; break;
+ case V2SImode: gen = gen_aarch64_trn1v2si; break;
+ case V2DImode: gen = gen_aarch64_trn1v2di; break;
+ case V4SFmode: gen = gen_aarch64_trn1v4sf; break;
+ case V2SFmode: gen = gen_aarch64_trn1v2sf; break;
+ case V2DFmode: gen = gen_aarch64_trn1v2df; break;
+ default:
+ return false;
+ }
+ }
+
+ emit_insn (gen (out, in0, in1));
+ return true;
+}
+
+/* Recognize patterns suitable for the UZP instructions. */
+static bool
+aarch64_evpc_uzp (struct expand_vec_perm_d *d)
+{
+ unsigned int i, odd, mask, nelt = d->nelt;
+ rtx out, in0, in1, x;
+ rtx (*gen) (rtx, rtx, rtx);
+ enum machine_mode vmode = d->vmode;
+
+ if (GET_MODE_UNIT_SIZE (vmode) > 8)
+ return false;
+
+ /* Note that these are little-endian tests.
+ We correct for big-endian later. */
+ if (d->perm[0] == 0)
+ odd = 0;
+ else if (d->perm[0] == 1)
+ odd = 1;
+ else
+ return false;
+ mask = (d->one_vector_p ? nelt - 1 : 2 * nelt - 1);
+
+ for (i = 0; i < nelt; i++)
+ {
+ unsigned elt = (i * 2 + odd) & mask;
+ if (d->perm[i] != elt)
+ return false;
+ }
+
+ /* Success! */
+ if (d->testing_p)
+ return true;
+
+ in0 = d->op0;
+ in1 = d->op1;
+ if (BYTES_BIG_ENDIAN)
+ {
+ x = in0, in0 = in1, in1 = x;
+ odd = !odd;
+ }
+ out = d->target;
+
+ if (odd)
+ {
+ switch (vmode)
+ {
+ case V16QImode: gen = gen_aarch64_uzp2v16qi; break;
+ case V8QImode: gen = gen_aarch64_uzp2v8qi; break;
+ case V8HImode: gen = gen_aarch64_uzp2v8hi; break;
+ case V4HImode: gen = gen_aarch64_uzp2v4hi; break;
+ case V4SImode: gen = gen_aarch64_uzp2v4si; break;
+ case V2SImode: gen = gen_aarch64_uzp2v2si; break;
+ case V2DImode: gen = gen_aarch64_uzp2v2di; break;
+ case V4SFmode: gen = gen_aarch64_uzp2v4sf; break;
+ case V2SFmode: gen = gen_aarch64_uzp2v2sf; break;
+ case V2DFmode: gen = gen_aarch64_uzp2v2df; break;
+ default:
+ return false;
+ }
+ }
+ else
+ {
+ switch (vmode)
+ {
+ case V16QImode: gen = gen_aarch64_uzp1v16qi; break;
+ case V8QImode: gen = gen_aarch64_uzp1v8qi; break;
+ case V8HImode: gen = gen_aarch64_uzp1v8hi; break;
+ case V4HImode: gen = gen_aarch64_uzp1v4hi; break;
+ case V4SImode: gen = gen_aarch64_uzp1v4si; break;
+ case V2SImode: gen = gen_aarch64_uzp1v2si; break;
+ case V2DImode: gen = gen_aarch64_uzp1v2di; break;
+ case V4SFmode: gen = gen_aarch64_uzp1v4sf; break;
+ case V2SFmode: gen = gen_aarch64_uzp1v2sf; break;
+ case V2DFmode: gen = gen_aarch64_uzp1v2df; break;
+ default:
+ return false;
+ }
+ }
+
+ emit_insn (gen (out, in0, in1));
+ return true;
+}
+
+/* Recognize patterns suitable for the ZIP instructions. */
+static bool
+aarch64_evpc_zip (struct expand_vec_perm_d *d)
+{
+ unsigned int i, high, mask, nelt = d->nelt;
+ rtx out, in0, in1, x;
+ rtx (*gen) (rtx, rtx, rtx);
+ enum machine_mode vmode = d->vmode;
+
+ if (GET_MODE_UNIT_SIZE (vmode) > 8)
+ return false;
+
+ /* Note that these are little-endian tests.
+ We correct for big-endian later. */
+ high = nelt / 2;
+ if (d->perm[0] == high)
+ /* Do Nothing. */
+ ;
+ else if (d->perm[0] == 0)
+ high = 0;
+ else
+ return false;
+ mask = (d->one_vector_p ? nelt - 1 : 2 * nelt - 1);
+
+ for (i = 0; i < nelt / 2; i++)
+ {
+ unsigned elt = (i + high) & mask;
+ if (d->perm[i * 2] != elt)
+ return false;
+ elt = (elt + nelt) & mask;
+ if (d->perm[i * 2 + 1] != elt)
+ return false;
+ }
+
+ /* Success! */
+ if (d->testing_p)
+ return true;
+
+ in0 = d->op0;
+ in1 = d->op1;
+ if (BYTES_BIG_ENDIAN)
+ {
+ x = in0, in0 = in1, in1 = x;
+ high = !high;
+ }
+ out = d->target;
+
+ if (high)
+ {
+ switch (vmode)
+ {
+ case V16QImode: gen = gen_aarch64_zip2v16qi; break;
+ case V8QImode: gen = gen_aarch64_zip2v8qi; break;
+ case V8HImode: gen = gen_aarch64_zip2v8hi; break;
+ case V4HImode: gen = gen_aarch64_zip2v4hi; break;
+ case V4SImode: gen = gen_aarch64_zip2v4si; break;
+ case V2SImode: gen = gen_aarch64_zip2v2si; break;
+ case V2DImode: gen = gen_aarch64_zip2v2di; break;
+ case V4SFmode: gen = gen_aarch64_zip2v4sf; break;
+ case V2SFmode: gen = gen_aarch64_zip2v2sf; break;
+ case V2DFmode: gen = gen_aarch64_zip2v2df; break;
+ default:
+ return false;
+ }
+ }
+ else
+ {
+ switch (vmode)
+ {
+ case V16QImode: gen = gen_aarch64_zip1v16qi; break;
+ case V8QImode: gen = gen_aarch64_zip1v8qi; break;
+ case V8HImode: gen = gen_aarch64_zip1v8hi; break;
+ case V4HImode: gen = gen_aarch64_zip1v4hi; break;
+ case V4SImode: gen = gen_aarch64_zip1v4si; break;
+ case V2SImode: gen = gen_aarch64_zip1v2si; break;
+ case V2DImode: gen = gen_aarch64_zip1v2di; break;
+ case V4SFmode: gen = gen_aarch64_zip1v4sf; break;
+ case V2SFmode: gen = gen_aarch64_zip1v2sf; break;
+ case V2DFmode: gen = gen_aarch64_zip1v2df; break;
+ default:
+ return false;
+ }
+ }
+
+ emit_insn (gen (out, in0, in1));
+ return true;
+}
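Editorial note, not part of the patch: for two V4SImode operands a = {a0,a1,a2,a3} and b = {b0,b1,b2,b3}, the index patterns recognised by the three routines above are ZIP1 = {0,4,1,5}, UZP1 = {0,2,4,6} and TRN1 = {0,4,2,6}, with ZIP2/UZP2/TRN2 as the odd/high counterparts. A constant-selector sketch that matches the ZIP1 recogniser instead of falling back to TBL (function name hypothetical):

    typedef int v4si __attribute__ ((vector_size (16)));

    /* { a0, b0, a1, b1 } -- matched by aarch64_evpc_zip with high == 0.  */
    v4si
    zip1_v4si (v4si a, v4si b)
    {
      return __builtin_shuffle (a, b, (v4si) { 0, 4, 1, 5 });
    }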
+
+static bool
+aarch64_evpc_dup (struct expand_vec_perm_d *d)
+{
+ rtx (*gen) (rtx, rtx, rtx);
+ rtx out = d->target;
+ rtx in0;
+ enum machine_mode vmode = d->vmode;
+ unsigned int i, elt, nelt = d->nelt;
+ rtx lane;
+
+ /* TODO: This may not be big-endian safe. */
+ if (BYTES_BIG_ENDIAN)
+ return false;
+
+ elt = d->perm[0];
+ for (i = 1; i < nelt; i++)
+ {
+ if (elt != d->perm[i])
+ return false;
+ }
+
+ /* The generic preparation in aarch64_expand_vec_perm_const_1
+ swaps the operand order and the permute indices if it finds
+ d->perm[0] to be in the second operand. Thus, we can always
+ use d->op0 and need not do any extra arithmetic to get the
+ correct lane number. */
+ in0 = d->op0;
+ lane = GEN_INT (elt);
+
+ switch (vmode)
+ {
+ case V16QImode: gen = gen_aarch64_dup_lanev16qi; break;
+ case V8QImode: gen = gen_aarch64_dup_lanev8qi; break;
+ case V8HImode: gen = gen_aarch64_dup_lanev8hi; break;
+ case V4HImode: gen = gen_aarch64_dup_lanev4hi; break;
+ case V4SImode: gen = gen_aarch64_dup_lanev4si; break;
+ case V2SImode: gen = gen_aarch64_dup_lanev2si; break;
+ case V2DImode: gen = gen_aarch64_dup_lanev2di; break;
+ case V4SFmode: gen = gen_aarch64_dup_lanev4sf; break;
+ case V2SFmode: gen = gen_aarch64_dup_lanev2sf; break;
+ case V2DFmode: gen = gen_aarch64_dup_lanev2df; break;
+ default:
+ return false;
+ }
+
+ emit_insn (gen (out, in0, lane));
+ return true;
+}
+
+static bool
+aarch64_evpc_tbl (struct expand_vec_perm_d *d)
+{
+ rtx rperm[MAX_VECT_LEN], sel;
+ enum machine_mode vmode = d->vmode;
+ unsigned int i, nelt = d->nelt;
+
+ /* TODO: ARM's TBL indexing is little-endian. In order to handle GCC's
+ numbering of elements for big-endian, we must reverse the order. */
+ if (BYTES_BIG_ENDIAN)
+ return false;
+
+ if (d->testing_p)
+ return true;
+
+ /* Generic code will try constant permutation twice: once with the
+ original mode and again with the elements lowered to QImode,
+ so wait and don't do the selector expansion ourselves. */
+ if (vmode != V8QImode && vmode != V16QImode)
+ return false;
+
+ for (i = 0; i < nelt; ++i)
+ rperm[i] = GEN_INT (d->perm[i]);
+ sel = gen_rtx_CONST_VECTOR (vmode, gen_rtvec_v (nelt, rperm));
+ sel = force_reg (vmode, sel);
+
+ aarch64_expand_vec_perm_1 (d->target, d->op0, d->op1, sel);
+ return true;
+}
+
+static bool
+aarch64_expand_vec_perm_const_1 (struct expand_vec_perm_d *d)
+{
+ /* The pattern matching functions above are written to look for a small
+ number to begin the sequence (0, 1, N/2). If we begin with an index
+ from the second operand, we can swap the operands. */
+ if (d->perm[0] >= d->nelt)
+ {
+ unsigned i, nelt = d->nelt;
+ rtx x;
+
+ for (i = 0; i < nelt; ++i)
+ d->perm[i] = (d->perm[i] + nelt) & (2 * nelt - 1);
+
+ x = d->op0;
+ d->op0 = d->op1;
+ d->op1 = x;
+ }
+
+ if (TARGET_SIMD)
+ {
+ if (aarch64_evpc_zip (d))
+ return true;
+ else if (aarch64_evpc_uzp (d))
+ return true;
+ else if (aarch64_evpc_trn (d))
+ return true;
+ else if (aarch64_evpc_dup (d))
+ return true;
+ return aarch64_evpc_tbl (d);
+ }
+ return false;
+}
+
+/* Expand a vec_perm_const pattern. */
+
+bool
+aarch64_expand_vec_perm_const (rtx target, rtx op0, rtx op1, rtx sel)
+{
+ struct expand_vec_perm_d d;
+ int i, nelt, which;
+
+ d.target = target;
+ d.op0 = op0;
+ d.op1 = op1;
+
+ d.vmode = GET_MODE (target);
+ gcc_assert (VECTOR_MODE_P (d.vmode));
+ d.nelt = nelt = GET_MODE_NUNITS (d.vmode);
+ d.testing_p = false;
+
+ for (i = which = 0; i < nelt; ++i)
+ {
+ rtx e = XVECEXP (sel, 0, i);
+ int ei = INTVAL (e) & (2 * nelt - 1);
+ which |= (ei < nelt ? 1 : 2);
+ d.perm[i] = ei;
+ }
+
+ switch (which)
+ {
+ default:
+ gcc_unreachable ();
+
+ case 3:
+ d.one_vector_p = false;
+ if (!rtx_equal_p (op0, op1))
+ break;
+
+ /* The elements of PERM do not suggest that only the first operand
+ is used, but both operands are identical. Allow easier matching
+ of the permutation by folding the permutation into the single
+ input vector. */
+ /* Fall Through. */
+ case 2:
+ for (i = 0; i < nelt; ++i)
+ d.perm[i] &= nelt - 1;
+ d.op0 = op1;
+ d.one_vector_p = true;
+ break;
+
+ case 1:
+ d.op1 = op0;
+ d.one_vector_p = true;
+ break;
+ }
+
+ return aarch64_expand_vec_perm_const_1 (&d);
+}
+
+static bool
+aarch64_vectorize_vec_perm_const_ok (enum machine_mode vmode,
+ const unsigned char *sel)
+{
+ struct expand_vec_perm_d d;
+ unsigned int i, nelt, which;
+ bool ret;
+
+ d.vmode = vmode;
+ d.nelt = nelt = GET_MODE_NUNITS (d.vmode);
+ d.testing_p = true;
+ memcpy (d.perm, sel, nelt);
+
+ /* Calculate whether all elements are in one vector. */
+ for (i = which = 0; i < nelt; ++i)
+ {
+ unsigned char e = d.perm[i];
+ gcc_assert (e < 2 * nelt);
+ which |= (e < nelt ? 1 : 2);
+ }
+
+ /* If all elements are from the second vector, reindex as if from the
+ first vector. */
+ if (which == 2)
+ for (i = 0; i < nelt; ++i)
+ d.perm[i] -= nelt;
+
+ /* Check whether the mask can be applied to a single vector. */
+ d.one_vector_p = (which != 3);
+
+ d.target = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 1);
+ d.op1 = d.op0 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 2);
+ if (!d.one_vector_p)
+ d.op1 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 3);
+
+ start_sequence ();
+ ret = aarch64_expand_vec_perm_const_1 (&d);
+ end_sequence ();
+
+ return ret;
+}
+
+/* Implement target hook CANNOT_CHANGE_MODE_CLASS. */
+bool
+aarch64_cannot_change_mode_class (enum machine_mode from,
+ enum machine_mode to,
+ enum reg_class rclass)
+{
+ /* Full-reg subregs are allowed on general regs or any class if they are
+ the same size. */
+ if (GET_MODE_SIZE (from) == GET_MODE_SIZE (to)
+ || !reg_classes_intersect_p (FP_REGS, rclass))
+ return false;
+
+ /* Limited combinations of subregs are safe on FPREGs. Particularly,
+ 1. Vector Mode to Scalar mode where 1 unit of the vector is accessed.
+ 2. Scalar to Scalar for integer modes or same size float modes.
+ 3. Vector to Vector modes. */
+ if (GET_MODE_SIZE (from) > GET_MODE_SIZE (to))
+ {
+ if (aarch64_vector_mode_supported_p (from)
+ && GET_MODE_SIZE (GET_MODE_INNER (from)) == GET_MODE_SIZE (to))
+ return false;
+
+ if (GET_MODE_NUNITS (from) == 1
+ && GET_MODE_NUNITS (to) == 1
+ && (GET_MODE_CLASS (from) == MODE_INT
+ || from == to))
+ return false;
+
+ if (aarch64_vector_mode_supported_p (from)
+ && aarch64_vector_mode_supported_p (to))
+ return false;
+ }
+
+ return true;
+}
+
+#undef TARGET_ADDRESS_COST
+#define TARGET_ADDRESS_COST aarch64_address_cost
+
+/* This hook determines whether unnamed bitfields affect the alignment
+ of the containing structure. The hook returns true if the structure
+ should inherit the alignment requirements of an unnamed bitfield's
+ type. */
+#undef TARGET_ALIGN_ANON_BITFIELD
+#define TARGET_ALIGN_ANON_BITFIELD hook_bool_void_true
+
+#undef TARGET_ASM_ALIGNED_DI_OP
+#define TARGET_ASM_ALIGNED_DI_OP "\t.xword\t"
+
+#undef TARGET_ASM_ALIGNED_HI_OP
+#define TARGET_ASM_ALIGNED_HI_OP "\t.hword\t"
+
+#undef TARGET_ASM_ALIGNED_SI_OP
+#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
+
+#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
+#define TARGET_ASM_CAN_OUTPUT_MI_THUNK \
+ hook_bool_const_tree_hwi_hwi_const_tree_true
+
+#undef TARGET_ASM_FILE_START
+#define TARGET_ASM_FILE_START aarch64_start_file
+
+#undef TARGET_ASM_OUTPUT_MI_THUNK
+#define TARGET_ASM_OUTPUT_MI_THUNK aarch64_output_mi_thunk
+
+#undef TARGET_ASM_SELECT_RTX_SECTION
+#define TARGET_ASM_SELECT_RTX_SECTION aarch64_select_rtx_section
+
+#undef TARGET_ASM_TRAMPOLINE_TEMPLATE
+#define TARGET_ASM_TRAMPOLINE_TEMPLATE aarch64_asm_trampoline_template
+
+#undef TARGET_BUILD_BUILTIN_VA_LIST
+#define TARGET_BUILD_BUILTIN_VA_LIST aarch64_build_builtin_va_list
+
+#undef TARGET_CALLEE_COPIES
+#define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_false
+
+#undef TARGET_CAN_ELIMINATE
+#define TARGET_CAN_ELIMINATE aarch64_can_eliminate
+
+#undef TARGET_CANNOT_FORCE_CONST_MEM
+#define TARGET_CANNOT_FORCE_CONST_MEM aarch64_cannot_force_const_mem
+
+#undef TARGET_CONDITIONAL_REGISTER_USAGE
+#define TARGET_CONDITIONAL_REGISTER_USAGE aarch64_conditional_register_usage
+
+/* Only the least significant bit is used for initialization guard
+ variables. */
+#undef TARGET_CXX_GUARD_MASK_BIT
+#define TARGET_CXX_GUARD_MASK_BIT hook_bool_void_true
+
+#undef TARGET_C_MODE_FOR_SUFFIX
+#define TARGET_C_MODE_FOR_SUFFIX aarch64_c_mode_for_suffix
+
+#ifdef TARGET_BIG_ENDIAN_DEFAULT
+#undef TARGET_DEFAULT_TARGET_FLAGS
+#define TARGET_DEFAULT_TARGET_FLAGS (MASK_BIG_END)
+#endif
+
+#undef TARGET_CLASS_MAX_NREGS
+#define TARGET_CLASS_MAX_NREGS aarch64_class_max_nregs
+
+#undef TARGET_BUILTIN_DECL
+#define TARGET_BUILTIN_DECL aarch64_builtin_decl
+
+#undef TARGET_EXPAND_BUILTIN
+#define TARGET_EXPAND_BUILTIN aarch64_expand_builtin
+
+#undef TARGET_EXPAND_BUILTIN_VA_START
+#define TARGET_EXPAND_BUILTIN_VA_START aarch64_expand_builtin_va_start
+
+#undef TARGET_FOLD_BUILTIN
+#define TARGET_FOLD_BUILTIN aarch64_fold_builtin
+
+#undef TARGET_FUNCTION_ARG
+#define TARGET_FUNCTION_ARG aarch64_function_arg
+
+#undef TARGET_FUNCTION_ARG_ADVANCE
+#define TARGET_FUNCTION_ARG_ADVANCE aarch64_function_arg_advance
+
+#undef TARGET_FUNCTION_ARG_BOUNDARY
+#define TARGET_FUNCTION_ARG_BOUNDARY aarch64_function_arg_boundary
+
+#undef TARGET_FUNCTION_OK_FOR_SIBCALL
+#define TARGET_FUNCTION_OK_FOR_SIBCALL aarch64_function_ok_for_sibcall
+
+#undef TARGET_FUNCTION_VALUE
+#define TARGET_FUNCTION_VALUE aarch64_function_value
+
+#undef TARGET_FUNCTION_VALUE_REGNO_P
+#define TARGET_FUNCTION_VALUE_REGNO_P aarch64_function_value_regno_p
+
+#undef TARGET_FRAME_POINTER_REQUIRED
+#define TARGET_FRAME_POINTER_REQUIRED aarch64_frame_pointer_required
+
+#undef TARGET_GIMPLE_FOLD_BUILTIN
+#define TARGET_GIMPLE_FOLD_BUILTIN aarch64_gimple_fold_builtin
+
+#undef TARGET_GIMPLIFY_VA_ARG_EXPR
+#define TARGET_GIMPLIFY_VA_ARG_EXPR aarch64_gimplify_va_arg_expr
+
+#undef TARGET_INIT_BUILTINS
+#define TARGET_INIT_BUILTINS aarch64_init_builtins
+
+#undef TARGET_LEGITIMATE_ADDRESS_P
+#define TARGET_LEGITIMATE_ADDRESS_P aarch64_legitimate_address_hook_p
+
+#undef TARGET_LEGITIMATE_CONSTANT_P
+#define TARGET_LEGITIMATE_CONSTANT_P aarch64_legitimate_constant_p
+
+#undef TARGET_LIBGCC_CMP_RETURN_MODE
+#define TARGET_LIBGCC_CMP_RETURN_MODE aarch64_libgcc_cmp_return_mode
+
+#undef TARGET_LRA_P
+#define TARGET_LRA_P aarch64_lra_p
+
+#undef TARGET_MANGLE_TYPE
+#define TARGET_MANGLE_TYPE aarch64_mangle_type
+
+#undef TARGET_MEMORY_MOVE_COST
+#define TARGET_MEMORY_MOVE_COST aarch64_memory_move_cost
+
+#undef TARGET_MUST_PASS_IN_STACK
+#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
+
+/* This target hook should return true if accesses to volatile bitfields
+ should use the narrowest mode possible. It should return false if these
+ accesses should use the bitfield container type. */
+#undef TARGET_NARROW_VOLATILE_BITFIELD
+#define TARGET_NARROW_VOLATILE_BITFIELD hook_bool_void_false
+
+#undef TARGET_OPTION_OVERRIDE
+#define TARGET_OPTION_OVERRIDE aarch64_override_options
+
+#undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
+#define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE \
+ aarch64_override_options_after_change
+
+#undef TARGET_PASS_BY_REFERENCE
+#define TARGET_PASS_BY_REFERENCE aarch64_pass_by_reference
+
+#undef TARGET_PREFERRED_RELOAD_CLASS
+#define TARGET_PREFERRED_RELOAD_CLASS aarch64_preferred_reload_class
+
+#undef TARGET_SECONDARY_RELOAD
+#define TARGET_SECONDARY_RELOAD aarch64_secondary_reload
+
+#undef TARGET_SHIFT_TRUNCATION_MASK
+#define TARGET_SHIFT_TRUNCATION_MASK aarch64_shift_truncation_mask
+
+#undef TARGET_SETUP_INCOMING_VARARGS
+#define TARGET_SETUP_INCOMING_VARARGS aarch64_setup_incoming_varargs
+
+#undef TARGET_STRUCT_VALUE_RTX
+#define TARGET_STRUCT_VALUE_RTX aarch64_struct_value_rtx
+
+#undef TARGET_REGISTER_MOVE_COST
+#define TARGET_REGISTER_MOVE_COST aarch64_register_move_cost
+
+#undef TARGET_RETURN_IN_MEMORY
+#define TARGET_RETURN_IN_MEMORY aarch64_return_in_memory
+
+#undef TARGET_RETURN_IN_MSB
+#define TARGET_RETURN_IN_MSB aarch64_return_in_msb
+
+#undef TARGET_RTX_COSTS
+#define TARGET_RTX_COSTS aarch64_rtx_costs
+
+#undef TARGET_SCHED_ISSUE_RATE
+#define TARGET_SCHED_ISSUE_RATE aarch64_sched_issue_rate
+
+#undef TARGET_TRAMPOLINE_INIT
+#define TARGET_TRAMPOLINE_INIT aarch64_trampoline_init
+
+#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
+#define TARGET_USE_BLOCKS_FOR_CONSTANT_P aarch64_use_blocks_for_constant_p
+
+#undef TARGET_VECTOR_MODE_SUPPORTED_P
+#define TARGET_VECTOR_MODE_SUPPORTED_P aarch64_vector_mode_supported_p
+
+#undef TARGET_ARRAY_MODE_SUPPORTED_P
+#define TARGET_ARRAY_MODE_SUPPORTED_P aarch64_array_mode_supported_p
+
+#undef TARGET_VECTORIZE_ADD_STMT_COST
+#define TARGET_VECTORIZE_ADD_STMT_COST aarch64_add_stmt_cost
+
+#undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
+#define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
+ aarch64_builtin_vectorization_cost
+
+#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
+#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE aarch64_preferred_simd_mode
+
+#undef TARGET_VECTORIZE_BUILTINS
+#define TARGET_VECTORIZE_BUILTINS
+
+#undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
+#define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
+ aarch64_builtin_vectorized_function
+
+#undef TARGET_VECTORIZE_AUTOVECTORIZE_VECTOR_SIZES
+#define TARGET_VECTORIZE_AUTOVECTORIZE_VECTOR_SIZES \
+ aarch64_autovectorize_vector_sizes
+
+/* Section anchor support. */
+
+#undef TARGET_MIN_ANCHOR_OFFSET
+#define TARGET_MIN_ANCHOR_OFFSET -256
+
+/* Limit the maximum anchor offset to 4k-1, since that's the limit for a
+ byte offset; we can do much more for larger data types, but have no way
+ to determine the size of the access. We assume accesses are aligned. */
+#undef TARGET_MAX_ANCHOR_OFFSET
+#define TARGET_MAX_ANCHOR_OFFSET 4095
+
+#undef TARGET_VECTOR_ALIGNMENT
+#define TARGET_VECTOR_ALIGNMENT aarch64_simd_vector_alignment
+
+#undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
+#define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE \
+ aarch64_simd_vector_alignment_reachable
+
+/* vec_perm support. */
+
+#undef TARGET_VECTORIZE_VEC_PERM_CONST_OK
+#define TARGET_VECTORIZE_VEC_PERM_CONST_OK \
+ aarch64_vectorize_vec_perm_const_ok
+
+
+#undef TARGET_FIXED_CONDITION_CODE_REGS
+#define TARGET_FIXED_CONDITION_CODE_REGS aarch64_fixed_condition_code_regs
+
+struct gcc_target targetm = TARGET_INITIALIZER;
+
+#include "gt-aarch64.h"
diff --git a/gcc-4.9/gcc/config/aarch64/aarch64.h b/gcc-4.9/gcc/config/aarch64/aarch64.h
new file mode 100644
index 000000000..7962aa472
--- /dev/null
+++ b/gcc-4.9/gcc/config/aarch64/aarch64.h
@@ -0,0 +1,873 @@
+/* Machine description for AArch64 architecture.
+ Copyright (C) 2009-2014 Free Software Foundation, Inc.
+ Contributed by ARM Ltd.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+
+#ifndef GCC_AARCH64_H
+#define GCC_AARCH64_H
+
+/* Target CPU builtins. */
+#define TARGET_CPU_CPP_BUILTINS() \
+ do \
+ { \
+ builtin_define ("__aarch64__"); \
+ if (TARGET_BIG_END) \
+ builtin_define ("__AARCH64EB__"); \
+ else \
+ builtin_define ("__AARCH64EL__"); \
+ \
+ if (!TARGET_GENERAL_REGS_ONLY) \
+ builtin_define ("__ARM_NEON"); \
+ \
+ switch (aarch64_cmodel) \
+ { \
+ case AARCH64_CMODEL_TINY: \
+ case AARCH64_CMODEL_TINY_PIC: \
+ builtin_define ("__AARCH64_CMODEL_TINY__"); \
+ break; \
+ case AARCH64_CMODEL_SMALL: \
+ case AARCH64_CMODEL_SMALL_PIC: \
+ builtin_define ("__AARCH64_CMODEL_SMALL__");\
+ break; \
+ case AARCH64_CMODEL_LARGE: \
+ builtin_define ("__AARCH64_CMODEL_LARGE__"); \
+ break; \
+ default: \
+ break; \
+ } \
+ \
+ if (TARGET_ILP32) \
+ { \
+ cpp_define (parse_in, "_ILP32"); \
+ cpp_define (parse_in, "__ILP32__"); \
+ } \
+ if (TARGET_CRYPTO) \
+ builtin_define ("__ARM_FEATURE_CRYPTO"); \
+ } while (0)
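Editorial note, not part of the patch: user code can key portability decisions off the macros defined above. A minimal sketch:

    #ifdef __aarch64__
    # ifdef __ARM_NEON              /* AdvSIMD present, i.e. not -mgeneral-regs-only.  */
    #  include <arm_neon.h>
    # endif
    # ifdef __AARCH64EB__
    #  define TARGET_BIG_ENDIAN 1
    # else
    #  define TARGET_BIG_ENDIAN 0
    # endif
    #endif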
+
+
+
+/* Target machine storage layout. */
+
+#define PROMOTE_MODE(MODE, UNSIGNEDP, TYPE) \
+ if (GET_MODE_CLASS (MODE) == MODE_INT \
+ && GET_MODE_SIZE (MODE) < 4) \
+ { \
+ if (MODE == QImode || MODE == HImode) \
+ { \
+ MODE = SImode; \
+ } \
+ }
+
+/* Bits are always numbered from the LSBit. */
+#define BITS_BIG_ENDIAN 0
+
+/* Big/little-endian flavour. */
+#define BYTES_BIG_ENDIAN (TARGET_BIG_END != 0)
+#define WORDS_BIG_ENDIAN (BYTES_BIG_ENDIAN)
+
+/* AdvSIMD is supported in the default configuration, unless disabled by
+ -mgeneral-regs-only. */
+#define TARGET_SIMD !TARGET_GENERAL_REGS_ONLY
+#define TARGET_FLOAT !TARGET_GENERAL_REGS_ONLY
+
+#define UNITS_PER_WORD 8
+
+#define UNITS_PER_VREG 16
+
+#define PARM_BOUNDARY 64
+
+#define STACK_BOUNDARY 128
+
+#define FUNCTION_BOUNDARY 32
+
+#define EMPTY_FIELD_BOUNDARY 32
+
+#define BIGGEST_ALIGNMENT 128
+
+#define SHORT_TYPE_SIZE 16
+
+#define INT_TYPE_SIZE 32
+
+#define LONG_TYPE_SIZE (TARGET_ILP32 ? 32 : 64)
+
+#define POINTER_SIZE (TARGET_ILP32 ? 32 : 64)
+
+#define LONG_LONG_TYPE_SIZE 64
+
+#define FLOAT_TYPE_SIZE 32
+
+#define DOUBLE_TYPE_SIZE 64
+
+#define LONG_DOUBLE_TYPE_SIZE 128
+
+/* The architecture reserves all bits of the address for hardware use,
+ so the vbit must go into the delta field of pointers to member
+ functions. This is the same config as that in the AArch32
+ port. */
+#define TARGET_PTRMEMFUNC_VBIT_LOCATION ptrmemfunc_vbit_in_delta
+
+/* Make strings word-aligned so that strcpy from constants will be
+ faster. */
+#define CONSTANT_ALIGNMENT(EXP, ALIGN) \
+ ((TREE_CODE (EXP) == STRING_CST \
+ && !optimize_size \
+ && (ALIGN) < BITS_PER_WORD) \
+ ? BITS_PER_WORD : ALIGN)
+
+#define DATA_ALIGNMENT(EXP, ALIGN) \
+ ((((ALIGN) < BITS_PER_WORD) \
+ && (TREE_CODE (EXP) == ARRAY_TYPE \
+ || TREE_CODE (EXP) == UNION_TYPE \
+ || TREE_CODE (EXP) == RECORD_TYPE)) \
+ ? BITS_PER_WORD : (ALIGN))
+
+#define LOCAL_ALIGNMENT(EXP, ALIGN) DATA_ALIGNMENT(EXP, ALIGN)
+
+#define STRUCTURE_SIZE_BOUNDARY 8
+
+/* Defined by the ABI */
+#define WCHAR_TYPE "unsigned int"
+#define WCHAR_TYPE_SIZE 32
+
+/* Using long long breaks -ansi and -std=c90, so these will need to be
+ made conditional for an LLP64 ABI. */
+
+#define SIZE_TYPE "long unsigned int"
+
+#define PTRDIFF_TYPE "long int"
+
+#define PCC_BITFIELD_TYPE_MATTERS 1
+
+
+/* Instruction tuning/selection flags. */
+
+/* Bit values used to identify processor capabilities. */
+#define AARCH64_FL_SIMD (1 << 0) /* Has SIMD instructions. */
+#define AARCH64_FL_FP (1 << 1) /* Has FP. */
+#define AARCH64_FL_CRYPTO (1 << 2) /* Has crypto. */
+#define AARCH64_FL_SLOWMUL (1 << 3) /* A slow multiply core. */
+#define AARCH64_FL_CRC (1 << 4) /* Has CRC. */
+
+/* Has FP and SIMD. */
+#define AARCH64_FL_FPSIMD (AARCH64_FL_FP | AARCH64_FL_SIMD)
+
+/* Has FP without SIMD. */
+#define AARCH64_FL_FPQ16 (AARCH64_FL_FP & ~AARCH64_FL_SIMD)
+
+/* Architecture flags that affect instruction selection. */
+#define AARCH64_FL_FOR_ARCH8 (AARCH64_FL_FPSIMD)
+
+/* Macros to test ISA flags. */
+extern unsigned long aarch64_isa_flags;
+#define AARCH64_ISA_CRC (aarch64_isa_flags & AARCH64_FL_CRC)
+#define AARCH64_ISA_CRYPTO (aarch64_isa_flags & AARCH64_FL_CRYPTO)
+#define AARCH64_ISA_FP (aarch64_isa_flags & AARCH64_FL_FP)
+#define AARCH64_ISA_SIMD (aarch64_isa_flags & AARCH64_FL_SIMD)
+
+/* Macros to test tuning flags. */
+extern unsigned long aarch64_tune_flags;
+#define AARCH64_TUNE_SLOWMUL (aarch64_tune_flags & AARCH64_FL_SLOWMUL)
+
+/* Crypto is an optional feature. */
+#define TARGET_CRYPTO AARCH64_ISA_CRYPTO
+
+/* Standard register usage. */
+
+/* 31 64-bit general purpose registers R0-R30:
+ R30 LR (link register)
+ R29 FP (frame pointer)
+ R19-R28 Callee-saved registers
+ R18 The platform register; use as temporary register.
+ R17 IP1 The second intra-procedure-call temporary register
+ (can be used by call veneers and PLT code); otherwise use
+ as a temporary register
+ R16 IP0 The first intra-procedure-call temporary register (can
+ be used by call veneers and PLT code); otherwise use as a
+ temporary register
+ R9-R15 Temporary registers
+ R8 Structure value parameter / temporary register
+ R0-R7 Parameter/result registers
+
+ SP stack pointer, encoded as X/R31 where permitted.
+ ZR zero register, encoded as X/R31 elsewhere
+
+ 32 x 128-bit floating-point/vector registers
+ V16-V31 Caller-saved (temporary) registers
+ V8-V15 Callee-saved registers
+ V0-V7 Parameter/result registers
+
+ The vector register V0 holds scalar B0, H0, S0 and D0 in its least
+ significant bits. Unlike AArch32 S1 is not packed into D0,
+ etc. */
+
+/* Note that we don't mark X30 as a call-clobbered register. The idea is
+ that it's really the call instructions themselves which clobber X30.
+ We don't care what the called function does with it afterwards.
+
+ This approach makes it easier to implement sibcalls. Unlike normal
+ calls, sibcalls don't clobber X30, so the register reaches the
+ called function intact. EPILOGUE_USES says that X30 is useful
+ to the called function. */
+
+#define FIXED_REGISTERS \
+ { \
+ 0, 0, 0, 0, 0, 0, 0, 0, /* R0 - R7 */ \
+ 0, 0, 0, 0, 0, 0, 0, 0, /* R8 - R15 */ \
+ 0, 0, 0, 0, 0, 0, 0, 0, /* R16 - R23 */ \
+ 0, 0, 0, 0, 0, 1, 0, 1, /* R24 - R30, SP */ \
+ 0, 0, 0, 0, 0, 0, 0, 0, /* V0 - V7 */ \
+ 0, 0, 0, 0, 0, 0, 0, 0, /* V8 - V15 */ \
+ 0, 0, 0, 0, 0, 0, 0, 0, /* V16 - V23 */ \
+ 0, 0, 0, 0, 0, 0, 0, 0, /* V24 - V31 */ \
+ 1, 1, 1, /* SFP, AP, CC */ \
+ }
+
+#define CALL_USED_REGISTERS \
+ { \
+ 1, 1, 1, 1, 1, 1, 1, 1, /* R0 - R7 */ \
+ 1, 1, 1, 1, 1, 1, 1, 1, /* R8 - R15 */ \
+ 1, 1, 1, 0, 0, 0, 0, 0, /* R16 - R23 */ \
+ 0, 0, 0, 0, 0, 1, 0, 1, /* R24 - R30, SP */ \
+ 1, 1, 1, 1, 1, 1, 1, 1, /* V0 - V7 */ \
+ 0, 0, 0, 0, 0, 0, 0, 0, /* V8 - V15 */ \
+ 1, 1, 1, 1, 1, 1, 1, 1, /* V16 - V23 */ \
+ 1, 1, 1, 1, 1, 1, 1, 1, /* V24 - V31 */ \
+ 1, 1, 1, /* SFP, AP, CC */ \
+ }
+
+#define REGISTER_NAMES \
+ { \
+ "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", \
+ "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", \
+ "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23", \
+ "x24", "x25", "x26", "x27", "x28", "x29", "x30", "sp", \
+ "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", \
+ "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", \
+ "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", \
+ "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", \
+ "sfp", "ap", "cc", \
+ }
+
+/* Generate the register aliases for core register N. */
+#define R_ALIASES(N) {"r" # N, R0_REGNUM + (N)}, \
+ {"w" # N, R0_REGNUM + (N)}
+
+#define V_ALIASES(N) {"q" # N, V0_REGNUM + (N)}, \
+ {"d" # N, V0_REGNUM + (N)}, \
+ {"s" # N, V0_REGNUM + (N)}, \
+ {"h" # N, V0_REGNUM + (N)}, \
+ {"b" # N, V0_REGNUM + (N)}
+
+/* Provide aliases for all of the ISA defined register name forms.
+ These aliases are convenient for use in the clobber lists of inline
+ asm statements. */
+
+#define ADDITIONAL_REGISTER_NAMES \
+ { R_ALIASES(0), R_ALIASES(1), R_ALIASES(2), R_ALIASES(3), \
+ R_ALIASES(4), R_ALIASES(5), R_ALIASES(6), R_ALIASES(7), \
+ R_ALIASES(8), R_ALIASES(9), R_ALIASES(10), R_ALIASES(11), \
+ R_ALIASES(12), R_ALIASES(13), R_ALIASES(14), R_ALIASES(15), \
+ R_ALIASES(16), R_ALIASES(17), R_ALIASES(18), R_ALIASES(19), \
+ R_ALIASES(20), R_ALIASES(21), R_ALIASES(22), R_ALIASES(23), \
+ R_ALIASES(24), R_ALIASES(25), R_ALIASES(26), R_ALIASES(27), \
+ R_ALIASES(28), R_ALIASES(29), R_ALIASES(30), {"wsp", R0_REGNUM + 31}, \
+ V_ALIASES(0), V_ALIASES(1), V_ALIASES(2), V_ALIASES(3), \
+ V_ALIASES(4), V_ALIASES(5), V_ALIASES(6), V_ALIASES(7), \
+ V_ALIASES(8), V_ALIASES(9), V_ALIASES(10), V_ALIASES(11), \
+ V_ALIASES(12), V_ALIASES(13), V_ALIASES(14), V_ALIASES(15), \
+ V_ALIASES(16), V_ALIASES(17), V_ALIASES(18), V_ALIASES(19), \
+ V_ALIASES(20), V_ALIASES(21), V_ALIASES(22), V_ALIASES(23), \
+ V_ALIASES(24), V_ALIASES(25), V_ALIASES(26), V_ALIASES(27), \
+ V_ALIASES(28), V_ALIASES(29), V_ALIASES(30), V_ALIASES(31) \
+ }
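Editorial note, not part of the patch: the aliases above let inline-asm clobber lists name registers in any of the ISA's spellings. A minimal sketch (the particular registers are arbitrary):

    void
    zero_scratch (void)
    {
      /* "x9" and "v31" resolve through REGISTER_NAMES /
         ADDITIONAL_REGISTER_NAMES to the corresponding hard registers.  */
      __asm__ volatile ("mov x9, xzr\n\tmovi v31.16b, #0" : : : "x9", "v31");
    }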
+
+/* Say that the epilogue uses the return address register. Note that
+ in the case of sibcalls, the values "used by the epilogue" are
+ considered live at the start of the called function. */
+
+#define EPILOGUE_USES(REGNO) \
+ ((REGNO) == LR_REGNUM)
+
+/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function,
+ the stack pointer does not matter. The value is tested only in
+ functions that have frame pointers. */
+#define EXIT_IGNORE_STACK 1
+
+#define STATIC_CHAIN_REGNUM R18_REGNUM
+#define HARD_FRAME_POINTER_REGNUM R29_REGNUM
+#define FRAME_POINTER_REGNUM SFP_REGNUM
+#define STACK_POINTER_REGNUM SP_REGNUM
+#define ARG_POINTER_REGNUM AP_REGNUM
+#define FIRST_PSEUDO_REGISTER 67
+
+/* The number of (integer) argument registers available. */
+#define NUM_ARG_REGS 8
+#define NUM_FP_ARG_REGS 8
+
+/* A Homogeneous Floating-Point or Short-Vector Aggregate may have at most
+ four members. */
+#define HA_MAX_NUM_FLDS 4
+
+/* External dwarf register number scheme. These numbers are used to
+ identify registers in dwarf debug information; the values are
+ defined by the AArch64 ABI. The numbering scheme is independent of
+ GCC's internal register numbering scheme. */
+
+#define AARCH64_DWARF_R0 0
+
+/* The number of R registers; note that it is 31, not 32.  */
+#define AARCH64_DWARF_NUMBER_R 31
+
+#define AARCH64_DWARF_SP 31
+#define AARCH64_DWARF_V0 64
+
+/* The number of V registers. */
+#define AARCH64_DWARF_NUMBER_V 32
+
+/* For signal frames we need to use an alternative return column. This
+ value must not correspond to a hard register and must be out of the
+ range of DWARF_FRAME_REGNUM(). */
+#define DWARF_ALT_FRAME_RETURN_COLUMN \
+ (AARCH64_DWARF_V0 + AARCH64_DWARF_NUMBER_V)
+
+/* We add 1 extra frame register for use as the
+ DWARF_ALT_FRAME_RETURN_COLUMN. */
+#define DWARF_FRAME_REGISTERS (DWARF_ALT_FRAME_RETURN_COLUMN + 1)
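+
+/* With the values above, DWARF_ALT_FRAME_RETURN_COLUMN is 64 + 32 = 96
+   and DWARF_FRAME_REGISTERS is 97.  */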
+
+
+#define DBX_REGISTER_NUMBER(REGNO) aarch64_dbx_register_number (REGNO)
+/* Provide a definition of DWARF_FRAME_REGNUM here so that fallback unwinders
+   can use DWARF_ALT_FRAME_RETURN_COLUMN defined above.  This is just the same
+ as the default definition in dwarf2out.c. */
+#undef DWARF_FRAME_REGNUM
+#define DWARF_FRAME_REGNUM(REGNO) DBX_REGISTER_NUMBER (REGNO)
+
+#define DWARF_FRAME_RETURN_COLUMN DWARF_FRAME_REGNUM (LR_REGNUM)
+
+#define HARD_REGNO_NREGS(REGNO, MODE) aarch64_hard_regno_nregs (REGNO, MODE)
+
+#define HARD_REGNO_MODE_OK(REGNO, MODE) aarch64_hard_regno_mode_ok (REGNO, MODE)
+
+#define MODES_TIEABLE_P(MODE1, MODE2) \
+ (GET_MODE_CLASS (MODE1) == GET_MODE_CLASS (MODE2))
+
+#define DWARF2_UNWIND_INFO 1
+
+/* Use R0 through R3 to pass exception handling information. */
+#define EH_RETURN_DATA_REGNO(N) \
+ ((N) < 4 ? ((unsigned int) R0_REGNUM + (N)) : INVALID_REGNUM)
+
+/* Select a format to encode pointers in exception handling data. */
+#define ASM_PREFERRED_EH_DATA_FORMAT(CODE, GLOBAL) \
+ aarch64_asm_preferred_eh_data_format ((CODE), (GLOBAL))
+
+/* The register used to pass the stack adjustment value to the exception
+   handler; see EH_RETURN_STACKADJ_RTX below.  */
+#define AARCH64_EH_STACKADJ_REGNUM (R0_REGNUM + 4)
+#define EH_RETURN_STACKADJ_RTX gen_rtx_REG (Pmode, AARCH64_EH_STACKADJ_REGNUM)
+
+/* Don't use __builtin_setjmp until we've defined it. */
+#undef DONT_USE_BUILTIN_SETJMP
+#define DONT_USE_BUILTIN_SETJMP 1
+
+/* Register in which the structure value is to be returned. */
+#define AARCH64_STRUCT_VALUE_REGNUM R8_REGNUM
+
+/* Non-zero if REGNO is part of the Core register set.
+
+ The rather unusual way of expressing this check is to avoid
+ warnings when building the compiler when R0_REGNUM is 0 and REGNO
+ is unsigned. */
+#define GP_REGNUM_P(REGNO) \
+ (((unsigned) (REGNO - R0_REGNUM)) <= (R30_REGNUM - R0_REGNUM))
+
+#define FP_REGNUM_P(REGNO) \
+ (((unsigned) (REGNO - V0_REGNUM)) <= (V31_REGNUM - V0_REGNUM))
+
+#define FP_LO_REGNUM_P(REGNO) \
+ (((unsigned) (REGNO - V0_REGNUM)) <= (V15_REGNUM - V0_REGNUM))
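+
+/* For example, FP_REGNUM_P (R5_REGNUM) computes (unsigned) (5 - 32), which
+   wraps to a value far greater than 31, so one unsigned comparison rejects
+   registers below V0 as well as those above V31.  */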
+
+
+/* Register and constant classes. */
+
+enum reg_class
+{
+ NO_REGS,
+ CORE_REGS,
+ GENERAL_REGS,
+ STACK_REG,
+ POINTER_REGS,
+ FP_LO_REGS,
+ FP_REGS,
+ ALL_REGS,
+ LIM_REG_CLASSES /* Last */
+};
+
+#define N_REG_CLASSES ((int) LIM_REG_CLASSES)
+
+#define REG_CLASS_NAMES \
+{ \
+ "NO_REGS", \
+ "CORE_REGS", \
+ "GENERAL_REGS", \
+ "STACK_REG", \
+ "POINTER_REGS", \
+ "FP_LO_REGS", \
+ "FP_REGS", \
+ "ALL_REGS" \
+}
+
+#define REG_CLASS_CONTENTS \
+{ \
+ { 0x00000000, 0x00000000, 0x00000000 }, /* NO_REGS */ \
+ { 0x7fffffff, 0x00000000, 0x00000003 }, /* CORE_REGS */ \
+ { 0x7fffffff, 0x00000000, 0x00000003 }, /* GENERAL_REGS */ \
+ { 0x80000000, 0x00000000, 0x00000000 }, /* STACK_REG */ \
+ { 0xffffffff, 0x00000000, 0x00000003 }, /* POINTER_REGS */ \
+ { 0x00000000, 0x0000ffff, 0x00000000 }, /* FP_LO_REGS */ \
+ { 0x00000000, 0xffffffff, 0x00000000 }, /* FP_REGS */ \
+ { 0xffffffff, 0xffffffff, 0x00000007 } /* ALL_REGS */ \
+}
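+
+/* The masks above cover the 67 hard registers in three 32-bit words:
+   bits 0-30 of the first word are R0-R30 and bit 31 is SP; the second
+   word is V0-V31; bits 0-2 of the third word are SFP, AP and CC.  For
+   example, FP_LO_REGS selects V0-V15, hence 0x0000ffff in the second
+   word.  */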
+
+#define REGNO_REG_CLASS(REGNO) aarch64_regno_regclass (REGNO)
+
+#define INDEX_REG_CLASS CORE_REGS
+#define BASE_REG_CLASS POINTER_REGS
+
+/* Register pairs used to eliminate unneeded registers that point into
+ the stack frame. */
+#define ELIMINABLE_REGS \
+{ \
+ { ARG_POINTER_REGNUM, STACK_POINTER_REGNUM }, \
+ { ARG_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM }, \
+ { FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM }, \
+ { FRAME_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM }, \
+}
+
+#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
+ (OFFSET) = aarch64_initial_elimination_offset (FROM, TO)
+
+/* CPU/ARCH option handling. */
+#include "config/aarch64/aarch64-opts.h"
+
+enum target_cpus
+{
+#define AARCH64_CORE(NAME, INTERNAL_IDENT, IDENT, ARCH, FLAGS, COSTS) \
+ TARGET_CPU_##INTERNAL_IDENT,
+#include "aarch64-cores.def"
+#undef AARCH64_CORE
+ TARGET_CPU_generic
+};
+
+/* If there is no CPU defined at configure, use generic as default. */
+#ifndef TARGET_CPU_DEFAULT
+#define TARGET_CPU_DEFAULT \
+ (TARGET_CPU_generic | (AARCH64_CPU_DEFAULT_FLAGS << 6))
+#endif
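+
+/* The low 6 bits of TARGET_CPU_DEFAULT identify the CPU; the architecture
+   feature flags are packed into the bits above them, hence the shift by 6
+   in the default above.  */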
+
+/* The processor for which instructions should be scheduled. */
+extern enum aarch64_processor aarch64_tune;
+
+/* RTL generation support. */
+#define INIT_EXPANDERS aarch64_init_expanders ()
+
+
+/* Stack layout; function entry, exit and calling. */
+#define STACK_GROWS_DOWNWARD 1
+
+#define FRAME_GROWS_DOWNWARD 1
+
+#define STARTING_FRAME_OFFSET 0
+
+#define ACCUMULATE_OUTGOING_ARGS 1
+
+#define FIRST_PARM_OFFSET(FNDECL) 0
+
+/* Fix for VFP */
+#define LIBCALL_VALUE(MODE) \
+ gen_rtx_REG (MODE, FLOAT_MODE_P (MODE) ? V0_REGNUM : R0_REGNUM)
+
+#define DEFAULT_PCC_STRUCT_RETURN 0
+
+#define AARCH64_ROUND_UP(X, ALIGNMENT) \
+ (((X) + ((ALIGNMENT) - 1)) & ~((ALIGNMENT) - 1))
+
+#define AARCH64_ROUND_DOWN(X, ALIGNMENT) \
+ ((X) & ~((ALIGNMENT) - 1))
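+
+/* ALIGNMENT must be a power of two, e.g. AARCH64_ROUND_UP (13, 8) == 16
+   and AARCH64_ROUND_DOWN (13, 8) == 8.  */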
+
+#ifdef HOST_WIDE_INT
+struct GTY (()) aarch64_frame
+{
+ HOST_WIDE_INT reg_offset[FIRST_PSEUDO_REGISTER];
+ HOST_WIDE_INT saved_regs_size;
+  /* Padding, if needed, after all the callee-saved registers have
+     been saved.  */
+ HOST_WIDE_INT padding0;
+ HOST_WIDE_INT hardfp_offset; /* HARD_FRAME_POINTER_REGNUM */
+ HOST_WIDE_INT fp_lr_offset; /* Space needed for saving fp and/or lr */
+
+ bool laid_out;
+};
+
+typedef struct GTY (()) machine_function
+{
+ struct aarch64_frame frame;
+
+ /* The number of extra stack bytes taken up by register varargs.
+ This area is allocated by the callee at the very top of the frame. */
+ HOST_WIDE_INT saved_varargs_size;
+
+} machine_function;
+#endif
+
+/* Which ABI to use. */
+enum aarch64_abi_type
+{
+ AARCH64_ABI_LP64 = 0,
+ AARCH64_ABI_ILP32 = 1
+};
+
+#ifndef AARCH64_ABI_DEFAULT
+#define AARCH64_ABI_DEFAULT AARCH64_ABI_LP64
+#endif
+
+#define TARGET_ILP32 (aarch64_abi & AARCH64_ABI_ILP32)
+
+enum arm_pcs
+{
+ ARM_PCS_AAPCS64, /* Base standard AAPCS for 64 bit. */
+ ARM_PCS_UNKNOWN
+};
+
+
+extern enum arm_pcs arm_pcs_variant;
+
+#ifndef ARM_DEFAULT_PCS
+#define ARM_DEFAULT_PCS ARM_PCS_AAPCS64
+#endif
+
+/* We can't use enum machine_mode inside a generator file because it
+ hasn't been created yet; we shouldn't be using any code that
+ needs the real definition though, so this ought to be safe. */
+#ifdef GENERATOR_FILE
+#define MACHMODE int
+#else
+#include "insn-modes.h"
+#define MACHMODE enum machine_mode
+#endif
+
+
+/* AAPCS related state tracking. */
+typedef struct
+{
+ enum arm_pcs pcs_variant;
+ int aapcs_arg_processed; /* No need to lay out this argument again. */
+ int aapcs_ncrn; /* Next Core register number. */
+ int aapcs_nextncrn; /* Next next core register number. */
+ int aapcs_nvrn; /* Next Vector register number. */
+ int aapcs_nextnvrn; /* Next Next Vector register number. */
+ rtx aapcs_reg; /* Register assigned to this argument. This
+ is NULL_RTX if this parameter goes on
+ the stack. */
+ MACHMODE aapcs_vfp_rmode;
+ int aapcs_stack_words; /* If the argument is passed on the stack, this
+ is the number of words needed, after rounding
+ up. Only meaningful when
+ aapcs_reg == NULL_RTX. */
+  int aapcs_stack_size;     /* The total size (in 8-byte words) of the
+                               stack arg area so far.  */
+} CUMULATIVE_ARGS;
+
+#define FUNCTION_ARG_PADDING(MODE, TYPE) \
+ (aarch64_pad_arg_upward (MODE, TYPE) ? upward : downward)
+
+#define BLOCK_REG_PADDING(MODE, TYPE, FIRST) \
+ (aarch64_pad_reg_upward (MODE, TYPE, FIRST) ? upward : downward)
+
+#define PAD_VARARGS_DOWN 0
+
+#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, FNDECL, N_NAMED_ARGS) \
+ aarch64_init_cumulative_args (&(CUM), FNTYPE, LIBNAME, FNDECL, N_NAMED_ARGS)
+
+#define FUNCTION_ARG_REGNO_P(REGNO) \
+ aarch64_function_arg_regno_p(REGNO)
+
+
+/* ISA Features. */
+
+/* Addressing modes, etc. */
+#define HAVE_POST_INCREMENT 1
+#define HAVE_PRE_INCREMENT 1
+#define HAVE_POST_DECREMENT 1
+#define HAVE_PRE_DECREMENT 1
+#define HAVE_POST_MODIFY_DISP 1
+#define HAVE_PRE_MODIFY_DISP 1
+
+#define MAX_REGS_PER_ADDRESS 2
+
+#define CONSTANT_ADDRESS_P(X) aarch64_constant_address_p(X)
+
+/* Try a machine-dependent way of reloading an illegitimate address
+ operand. If we find one, push the reload and jump to WIN. This
+ macro is used in only one place: `find_reloads_address' in reload.c. */
+
+#define LEGITIMIZE_RELOAD_ADDRESS(X, MODE, OPNUM, TYPE, IND_L, WIN) \
+do { \
+ rtx new_x = aarch64_legitimize_reload_address (&(X), MODE, OPNUM, TYPE, \
+ IND_L); \
+ if (new_x) \
+ { \
+ X = new_x; \
+ goto WIN; \
+ } \
+} while (0)
+
+#define REGNO_OK_FOR_BASE_P(REGNO) \
+ aarch64_regno_ok_for_base_p (REGNO, true)
+
+#define REGNO_OK_FOR_INDEX_P(REGNO) \
+ aarch64_regno_ok_for_index_p (REGNO, true)
+
+#define LEGITIMATE_PIC_OPERAND_P(X) \
+ aarch64_legitimate_pic_operand_p (X)
+
+#define CASE_VECTOR_MODE Pmode
+
+#define DEFAULT_SIGNED_CHAR 0
+
+/* An integer expression for the size in bits of the largest integer machine
+ mode that should actually be used. We allow pairs of registers. */
+#define MAX_FIXED_MODE_SIZE GET_MODE_BITSIZE (TImode)
+
+/* Maximum bytes moved by a single instruction (load/store pair). */
+#define MOVE_MAX (UNITS_PER_WORD * 2)
+
+/* The base cost overhead of a memcpy call, for MOVE_RATIO and friends. */
+#define AARCH64_CALL_RATIO 8
+
+/* When optimizing for size, give a better estimate of the length of a memcpy
+   call, but use the default otherwise.  move_by_pieces_ninsns() counts
+   memory-to-memory moves, and we'll have to generate a load and a store for
+   each, so halve the value to take that into account.  */
+#define MOVE_RATIO(speed) \
+ (((speed) ? 15 : AARCH64_CALL_RATIO) / 2)
+
+/* For CLEAR_RATIO, when optimizing for size, give a better estimate
+ of the length of a memset call, but use the default otherwise. */
+#define CLEAR_RATIO(speed) \
+ ((speed) ? 15 : AARCH64_CALL_RATIO)
+
+/* SET_RATIO is similar to CLEAR_RATIO, but for a non-zero constant, so when
+ optimizing for size adjust the ratio to account for the overhead of loading
+ the constant. */
+#define SET_RATIO(speed) \
+ ((speed) ? 15 : AARCH64_CALL_RATIO - 2)
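+
+/* With AARCH64_CALL_RATIO at 8, the above give: MOVE_RATIO 7 (speed) or 4
+   (size), CLEAR_RATIO 15 or 8, and SET_RATIO 15 or 6.  */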
+
+/* STORE_BY_PIECES_P can be used when copying a constant string, but
+ in that case each 64-bit chunk takes 5 insns instead of 2 (LDR/STR).
+ For now we always fail this and let the move_by_pieces code copy
+ the string from read-only memory. */
+#define STORE_BY_PIECES_P(SIZE, ALIGN) 0
+
+/* Disable auto-increment in move_by_pieces et al. Use of auto-increment is
+ rarely a good idea in straight-line code since it adds an extra address
+ dependency between each instruction. Better to use incrementing offsets. */
+#define USE_LOAD_POST_INCREMENT(MODE) 0
+#define USE_LOAD_POST_DECREMENT(MODE) 0
+#define USE_LOAD_PRE_INCREMENT(MODE) 0
+#define USE_LOAD_PRE_DECREMENT(MODE) 0
+#define USE_STORE_POST_INCREMENT(MODE) 0
+#define USE_STORE_POST_DECREMENT(MODE) 0
+#define USE_STORE_PRE_INCREMENT(MODE) 0
+#define USE_STORE_PRE_DECREMENT(MODE) 0
+
+/* ?? #define WORD_REGISTER_OPERATIONS */
+
+/* Define if loading from memory in MODE, an integral mode narrower than
+ BITS_PER_WORD will either zero-extend or sign-extend. The value of this
+ macro should be the code that says which one of the two operations is
+ implicitly done, or UNKNOWN if none. */
+#define LOAD_EXTEND_OP(MODE) ZERO_EXTEND
+
+/* Define this macro to be non-zero if instructions will fail to work
+ if given data not on the nominal alignment. */
+#define STRICT_ALIGNMENT TARGET_STRICT_ALIGN
+
+/* Define this macro to be non-zero if accessing less than a word of
+ memory is no faster than accessing a word of memory, i.e., if such
+ accesses require more than one instruction or if there is no
+ difference in cost.
+ Although there's no difference in instruction count or cycles,
+   on AArch64 we don't want to widen a sub-word access to a 64-bit access
+ if we don't have to, for power-saving reasons. */
+#define SLOW_BYTE_ACCESS 0
+
+#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) 1
+
+#define NO_FUNCTION_CSE 1
+
+/* Specify the machine mode that the hardware addresses have.
+ After generation of rtl, the compiler makes no further distinction
+ between pointers and any other objects of this machine mode. */
+#define Pmode DImode
+
+/* A C expression whose value is zero if pointers that need to be extended
+ from being `POINTER_SIZE' bits wide to `Pmode' are sign-extended and
+   greater than zero if they are zero-extended and less than zero if the
+ ptr_extend instruction should be used. */
+#define POINTERS_EXTEND_UNSIGNED 1
+
+/* Mode of a function address in a call instruction (for indexing purposes). */
+#define FUNCTION_MODE Pmode
+
+#define SELECT_CC_MODE(OP, X, Y) aarch64_select_cc_mode (OP, X, Y)
+
+#define REVERSIBLE_CC_MODE(MODE) 1
+
+#define REVERSE_CONDITION(CODE, MODE) \
+ (((MODE) == CCFPmode || (MODE) == CCFPEmode) \
+ ? reverse_condition_maybe_unordered (CODE) \
+ : reverse_condition (CODE))
+
+#define CLZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) \
+ ((VALUE) = GET_MODE_UNIT_BITSIZE (MODE))
+#define CTZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) \
+ ((VALUE) = ((MODE) == SImode ? 32 : 64), 2)
+
+#define INCOMING_RETURN_ADDR_RTX gen_rtx_REG (Pmode, LR_REGNUM)
+
+#define RETURN_ADDR_RTX aarch64_return_addr
+
+/* 3 insns + padding + 2 pointer-sized entries. */
+#define TRAMPOLINE_SIZE (TARGET_ILP32 ? 24 : 32)
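+/* That is (presumably) 3 * 4 bytes of code, 4 bytes of padding and two
+   pointer-sized entries: 12 + 4 + 16 = 32 for LP64, 12 + 4 + 8 = 24 for
+   ILP32.  */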
+
+/* Trampolines contain dwords, so must be dword aligned. */
+#define TRAMPOLINE_ALIGNMENT 64
+
+/* Put trampolines in the text section so that mapping symbols work
+ correctly. */
+#define TRAMPOLINE_SECTION text_section
+
+/* To start with. */
+#define BRANCH_COST(SPEED_P, PREDICTABLE_P) 2
+
+
+/* Assembly output. */
+
+/* For now we'll make all jump tables pc-relative. */
+#define CASE_VECTOR_PC_RELATIVE 1
+
+#define CASE_VECTOR_SHORTEN_MODE(min, max, body) \
+ ((min < -0x1fff0 || max > 0x1fff0) ? SImode \
+ : (min < -0x1f0 || max > 0x1f0) ? HImode \
+ : QImode)
+
+/* Jump table alignment is explicit in ASM_OUTPUT_CASE_LABEL. */
+#define ADDR_VEC_ALIGN(JUMPTABLE) 0
+
+#define PRINT_OPERAND(STREAM, X, CODE) aarch64_print_operand (STREAM, X, CODE)
+
+#define PRINT_OPERAND_ADDRESS(STREAM, X) \
+ aarch64_print_operand_address (STREAM, X)
+
+#define MCOUNT_NAME "_mcount"
+
+#define NO_PROFILE_COUNTERS 1
+
+/* Emit rtl for profiling. Output assembler code to FILE
+ to call "_mcount" for profiling a function entry. */
+#define PROFILE_HOOK(LABEL) \
+ { \
+ rtx fun, lr; \
+ lr = get_hard_reg_initial_val (Pmode, LR_REGNUM); \
+ fun = gen_rtx_SYMBOL_REF (Pmode, MCOUNT_NAME); \
+ emit_library_call (fun, LCT_NORMAL, VOIDmode, 1, lr, Pmode); \
+ }
+
+/* All the work done in PROFILE_HOOK, but still required. */
+#define FUNCTION_PROFILER(STREAM, LABELNO) do { } while (0)
+
+/* For some reason, the Linux headers think they know how to define
+ these macros. They don't!!! */
+#undef ASM_APP_ON
+#undef ASM_APP_OFF
+#define ASM_APP_ON "\t" ASM_COMMENT_START " Start of user assembly\n"
+#define ASM_APP_OFF "\t" ASM_COMMENT_START " End of user assembly\n"
+
+#define CONSTANT_POOL_BEFORE_FUNCTION 0
+
+/* This definition should be relocated to aarch64-elf-raw.h. This macro
+ should be undefined in aarch64-linux.h and a clear_cache pattern
+   implemented to emit either the call to __aarch64_sync_cache_range()
+   directly or, preferably, the appropriate syscall or cache clear
+ instructions inline. */
+#define CLEAR_INSN_CACHE(beg, end) \
+ extern void __aarch64_sync_cache_range (void *, void *); \
+ __aarch64_sync_cache_range (beg, end)
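+
+/* Illustrative use only (buf and len are hypothetical): after writing
+   instructions into a buffer at run time, a caller flushes them with
+     CLEAR_INSN_CACHE (buf, buf + len);
+   which expands to a call to __aarch64_sync_cache_range.  */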
+
+#define CANNOT_CHANGE_MODE_CLASS(FROM, TO, CLASS) \
+ aarch64_cannot_change_mode_class (FROM, TO, CLASS)
+
+#define SHIFT_COUNT_TRUNCATED !TARGET_SIMD
+
+/* Callee only saves lower 64-bits of a 128-bit register. Tell the
+ compiler the callee clobbers the top 64-bits when restoring the
+ bottom 64-bits. */
+#define HARD_REGNO_CALL_PART_CLOBBERED(REGNO, MODE) \
+ (FP_REGNUM_P (REGNO) && GET_MODE_SIZE (MODE) > 8)
+
+/* Check TLS Descriptors mechanism is selected. */
+#define TARGET_TLS_DESC (aarch64_tls_dialect == TLS_DESCRIPTORS)
+
+extern enum aarch64_code_model aarch64_cmodel;
+
+/* When using the tiny addressing model, conditional and unconditional branches
+ can span the whole of the available address space (1MB). */
+#define HAS_LONG_COND_BRANCH \
+ (aarch64_cmodel == AARCH64_CMODEL_TINY \
+ || aarch64_cmodel == AARCH64_CMODEL_TINY_PIC)
+
+#define HAS_LONG_UNCOND_BRANCH \
+ (aarch64_cmodel == AARCH64_CMODEL_TINY \
+ || aarch64_cmodel == AARCH64_CMODEL_TINY_PIC)
+
+/* Modes valid for AdvSIMD Q registers. */
+#define AARCH64_VALID_SIMD_QREG_MODE(MODE) \
+ ((MODE) == V4SImode || (MODE) == V8HImode || (MODE) == V16QImode \
+   || (MODE) == V4SFmode || (MODE) == V2DImode || (MODE) == V2DFmode)
+
+#define ENDIAN_LANE_N(mode, n) \
+ (BYTES_BIG_ENDIAN ? GET_MODE_NUNITS (mode) - 1 - n : n)
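+
+/* For example, ENDIAN_LANE_N (V4SImode, 0) is 3 on a big-endian target
+   and 0 on a little-endian one.  */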
+
+#define BIG_LITTLE_SPEC \
+ " %{mcpu=*:-mcpu=%:rewrite_mcpu(%{mcpu=*:%*})}"
+
+extern const char *aarch64_rewrite_mcpu (int argc, const char **argv);
+#define BIG_LITTLE_CPU_SPEC_FUNCTIONS \
+ { "rewrite_mcpu", aarch64_rewrite_mcpu },
+
+#define ASM_CPU_SPEC \
+ BIG_LITTLE_SPEC
+
+#define EXTRA_SPEC_FUNCTIONS BIG_LITTLE_CPU_SPEC_FUNCTIONS
+
+#define EXTRA_SPECS \
+ { "asm_cpu_spec", ASM_CPU_SPEC }
+
+#endif /* GCC_AARCH64_H */
diff --git a/gcc-4.9/gcc/config/aarch64/aarch64.md b/gcc-4.9/gcc/config/aarch64/aarch64.md
new file mode 100644
index 000000000..c86a29d8e
--- /dev/null
+++ b/gcc-4.9/gcc/config/aarch64/aarch64.md
@@ -0,0 +1,3642 @@
+;; Machine description for AArch64 architecture.
+;; Copyright (C) 2009-2014 Free Software Foundation, Inc.
+;; Contributed by ARM Ltd.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful, but
+;; WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;; General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; Register numbers
+(define_constants
+ [
+ (R0_REGNUM 0)
+ (R1_REGNUM 1)
+ (R2_REGNUM 2)
+ (R3_REGNUM 3)
+ (R4_REGNUM 4)
+ (R5_REGNUM 5)
+ (R6_REGNUM 6)
+ (R7_REGNUM 7)
+ (R8_REGNUM 8)
+ (R9_REGNUM 9)
+ (R10_REGNUM 10)
+ (R11_REGNUM 11)
+ (R12_REGNUM 12)
+ (R13_REGNUM 13)
+ (R14_REGNUM 14)
+ (R15_REGNUM 15)
+ (R16_REGNUM 16)
+ (IP0_REGNUM 16)
+ (R17_REGNUM 17)
+ (IP1_REGNUM 17)
+ (R18_REGNUM 18)
+ (R19_REGNUM 19)
+ (R20_REGNUM 20)
+ (R21_REGNUM 21)
+ (R22_REGNUM 22)
+ (R23_REGNUM 23)
+ (R24_REGNUM 24)
+ (R25_REGNUM 25)
+ (R26_REGNUM 26)
+ (R27_REGNUM 27)
+ (R28_REGNUM 28)
+ (R29_REGNUM 29)
+ (R30_REGNUM 30)
+ (LR_REGNUM 30)
+ (SP_REGNUM 31)
+ (V0_REGNUM 32)
+ (V15_REGNUM 47)
+ (V31_REGNUM 63)
+ (SFP_REGNUM 64)
+ (AP_REGNUM 65)
+ (CC_REGNUM 66)
+ ]
+)
+
+(define_c_enum "unspec" [
+ UNSPEC_CASESI
+ UNSPEC_CLS
+ UNSPEC_FRECPE
+ UNSPEC_FRECPS
+ UNSPEC_FRECPX
+ UNSPEC_FRINTA
+ UNSPEC_FRINTI
+ UNSPEC_FRINTM
+ UNSPEC_FRINTN
+ UNSPEC_FRINTP
+ UNSPEC_FRINTX
+ UNSPEC_FRINTZ
+ UNSPEC_GOTSMALLPIC
+ UNSPEC_GOTSMALLTLS
+ UNSPEC_GOTTINYPIC
+ UNSPEC_LD1
+ UNSPEC_LD2
+ UNSPEC_LD3
+ UNSPEC_LD4
+ UNSPEC_MB
+ UNSPEC_NOP
+ UNSPEC_PRLG_STK
+ UNSPEC_RBIT
+ UNSPEC_SISD_NEG
+ UNSPEC_SISD_SSHL
+ UNSPEC_SISD_USHL
+ UNSPEC_SSHL_2S
+ UNSPEC_SSHR64
+ UNSPEC_ST1
+ UNSPEC_ST2
+ UNSPEC_ST3
+ UNSPEC_ST4
+ UNSPEC_TLS
+ UNSPEC_TLSDESC
+ UNSPEC_USHL_2S
+ UNSPEC_USHR64
+ UNSPEC_VSTRUCTDUMMY
+])
+
+(define_c_enum "unspecv" [
+ UNSPECV_EH_RETURN ; Represent EH_RETURN
+ ]
+)
+
+;; If further include files are added, the definition of MD_INCLUDES
+;; must be updated.
+
+(include "constraints.md")
+(include "predicates.md")
+(include "iterators.md")
+
+;; -------------------------------------------------------------------
+;; Instruction types and attributes
+;; -------------------------------------------------------------------
+
+; The "type" attribute is included here from the AArch32 backend to be able
+; to share pipeline descriptions.
+(include "../arm/types.md")
+
+;; Attribute that specifies whether or not the instruction touches fp
+;; registers.
+(define_attr "fp" "no,yes" (const_string "no"))
+
+;; Attribute that specifies whether or not the instruction touches simd
+;; registers.
+(define_attr "simd" "no,yes" (const_string "no"))
+
+(define_attr "length" ""
+ (const_int 4))
+
+;; Attribute that controls whether an alternative is enabled or not.
+;; Currently it is only used to disable alternatives which touch fp or simd
+;; registers when -mgeneral-regs-only is specified.
+(define_attr "enabled" "no,yes"
+ (cond [(ior
+ (and (eq_attr "fp" "yes")
+ (eq (symbol_ref "TARGET_FLOAT") (const_int 0)))
+ (and (eq_attr "simd" "yes")
+ (eq (symbol_ref "TARGET_SIMD") (const_int 0))))
+ (const_string "no")
+ ] (const_string "yes")))
+
+;; -------------------------------------------------------------------
+;; Pipeline descriptions and scheduling
+;; -------------------------------------------------------------------
+
+;; Processor types.
+(include "aarch64-tune.md")
+
+;; True if the generic scheduling description should be used.
+
+(define_attr "generic_sched" "yes,no"
+ (const (if_then_else
+ (eq_attr "tune" "cortexa53,cortexa15")
+ (const_string "no")
+ (const_string "yes"))))
+
+;; Scheduling
+(include "../arm/cortex-a53.md")
+(include "../arm/cortex-a15.md")
+
+;; -------------------------------------------------------------------
+;; Jumps and other miscellaneous insns
+;; -------------------------------------------------------------------
+
+(define_insn "indirect_jump"
+ [(set (pc) (match_operand:DI 0 "register_operand" "r"))]
+ ""
+ "br\\t%0"
+ [(set_attr "type" "branch")]
+)
+
+(define_insn "jump"
+ [(set (pc) (label_ref (match_operand 0 "" "")))]
+ ""
+ "b\\t%l0"
+ [(set_attr "type" "branch")]
+)
+
+(define_expand "cbranch<mode>4"
+ [(set (pc) (if_then_else (match_operator 0 "aarch64_comparison_operator"
+ [(match_operand:GPI 1 "register_operand" "")
+ (match_operand:GPI 2 "aarch64_plus_operand" "")])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))]
+ ""
+ "
+ operands[1] = aarch64_gen_compare_reg (GET_CODE (operands[0]), operands[1],
+ operands[2]);
+ operands[2] = const0_rtx;
+ "
+)
+
+(define_expand "cbranch<mode>4"
+ [(set (pc) (if_then_else (match_operator 0 "aarch64_comparison_operator"
+ [(match_operand:GPF 1 "register_operand" "")
+ (match_operand:GPF 2 "aarch64_reg_or_zero" "")])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))]
+ ""
+ "
+ operands[1] = aarch64_gen_compare_reg (GET_CODE (operands[0]), operands[1],
+ operands[2]);
+ operands[2] = const0_rtx;
+ "
+)
+
+(define_insn "*condjump"
+ [(set (pc) (if_then_else (match_operator 0 "aarch64_comparison_operator"
+ [(match_operand 1 "cc_register" "") (const_int 0)])
+ (label_ref (match_operand 2 "" ""))
+ (pc)))]
+ ""
+ "b%m0\\t%l2"
+ [(set_attr "type" "branch")]
+)
+
+(define_expand "casesi"
+ [(match_operand:SI 0 "register_operand" "") ; Index
+ (match_operand:SI 1 "const_int_operand" "") ; Lower bound
+ (match_operand:SI 2 "const_int_operand" "") ; Total range
+ (match_operand:DI 3 "" "") ; Table label
+ (match_operand:DI 4 "" "")] ; Out of range label
+ ""
+ {
+ if (operands[1] != const0_rtx)
+ {
+ rtx reg = gen_reg_rtx (SImode);
+
+ /* Canonical RTL says that if you have:
+
+ (minus (X) (CONST))
+
+ then this should be emitted as:
+
+ (plus (X) (-CONST))
+
+ The use of trunc_int_for_mode ensures that the resulting
+	 constant can be represented in SImode; this is important
+ for the corner case where operand[1] is INT_MIN. */
+
+ operands[1] = GEN_INT (trunc_int_for_mode (-INTVAL (operands[1]), SImode));
+
+ if (!(*insn_data[CODE_FOR_addsi3].operand[2].predicate)
+ (operands[1], SImode))
+ operands[1] = force_reg (SImode, operands[1]);
+ emit_insn (gen_addsi3 (reg, operands[0], operands[1]));
+ operands[0] = reg;
+ }
+
+ if (!aarch64_plus_operand (operands[2], SImode))
+ operands[2] = force_reg (SImode, operands[2]);
+ emit_jump_insn (gen_cbranchsi4 (gen_rtx_GTU (SImode, const0_rtx,
+ const0_rtx),
+ operands[0], operands[2], operands[4]));
+
+ operands[2] = force_reg (DImode, gen_rtx_LABEL_REF (VOIDmode, operands[3]));
+ emit_jump_insn (gen_casesi_dispatch (operands[2], operands[0],
+ operands[3]));
+ DONE;
+ }
+)
+
+(define_insn "casesi_dispatch"
+ [(parallel
+ [(set (pc)
+ (mem:DI (unspec [(match_operand:DI 0 "register_operand" "r")
+ (match_operand:SI 1 "register_operand" "r")]
+ UNSPEC_CASESI)))
+ (clobber (reg:CC CC_REGNUM))
+ (clobber (match_scratch:DI 3 "=r"))
+ (clobber (match_scratch:DI 4 "=r"))
+ (use (label_ref (match_operand 2 "" "")))])]
+ ""
+ "*
+ return aarch64_output_casesi (operands);
+ "
+ [(set_attr "length" "16")
+ (set_attr "type" "branch")]
+)
+
+(define_insn "nop"
+  [(unspec [(const_int 0)] UNSPEC_NOP)]
+ ""
+ "nop"
+ [(set_attr "type" "no_insn")]
+)
+
+(define_insn "trap"
+ [(trap_if (const_int 1) (const_int 8))]
+ ""
+ "brk #1000"
+ [(set_attr "type" "trap")])
+
+(define_expand "prologue"
+ [(clobber (const_int 0))]
+ ""
+ "
+ aarch64_expand_prologue ();
+ DONE;
+ "
+)
+
+(define_expand "epilogue"
+ [(clobber (const_int 0))]
+ ""
+ "
+ aarch64_expand_epilogue (false);
+ DONE;
+ "
+)
+
+(define_expand "sibcall_epilogue"
+ [(clobber (const_int 0))]
+ ""
+ "
+ aarch64_expand_epilogue (true);
+ DONE;
+ "
+)
+
+(define_insn "*do_return"
+ [(return)]
+ ""
+ "ret"
+ [(set_attr "type" "branch")]
+)
+
+(define_insn "eh_return"
+ [(unspec_volatile [(match_operand:DI 0 "register_operand" "r")]
+ UNSPECV_EH_RETURN)]
+ ""
+ "#"
+ [(set_attr "type" "branch")]
+
+)
+
+(define_split
+ [(unspec_volatile [(match_operand:DI 0 "register_operand" "")]
+ UNSPECV_EH_RETURN)]
+ "reload_completed"
+ [(set (match_dup 1) (match_dup 0))]
+ {
+ operands[1] = aarch64_final_eh_return_addr ();
+ }
+)
+
+(define_insn "*cb<optab><mode>1"
+ [(set (pc) (if_then_else (EQL (match_operand:GPI 0 "register_operand" "r")
+ (const_int 0))
+ (label_ref (match_operand 1 "" ""))
+ (pc)))]
+ ""
+ "<cbz>\\t%<w>0, %l1"
+ [(set_attr "type" "branch")]
+
+)
+
+(define_insn "*tb<optab><mode>1"
+ [(set (pc) (if_then_else
+ (EQL (zero_extract:DI (match_operand:GPI 0 "register_operand" "r")
+ (const_int 1)
+ (match_operand 1 "const_int_operand" "n"))
+ (const_int 0))
+ (label_ref (match_operand 2 "" ""))
+ (pc)))
+ (clobber (match_scratch:DI 3 "=r"))]
+ ""
+ "*
+ if (get_attr_length (insn) == 8)
+ return \"ubfx\\t%<w>3, %<w>0, %1, #1\;<cbz>\\t%<w>3, %l2\";
+ return \"<tbz>\\t%<w>0, %1, %l2\";
+ "
+ [(set_attr "type" "branch")
+ (set (attr "length")
+ (if_then_else (and (ge (minus (match_dup 2) (pc)) (const_int -32768))
+ (lt (minus (match_dup 2) (pc)) (const_int 32764)))
+ (const_int 4)
+ (const_int 8)))]
+)
+
+(define_insn "*cb<optab><mode>1"
+ [(set (pc) (if_then_else (LTGE (match_operand:ALLI 0 "register_operand" "r")
+ (const_int 0))
+ (label_ref (match_operand 1 "" ""))
+ (pc)))
+ (clobber (match_scratch:DI 2 "=r"))]
+ ""
+ "*
+ if (get_attr_length (insn) == 8)
+ return \"ubfx\\t%<w>2, %<w>0, <sizem1>, #1\;<cbz>\\t%<w>2, %l1\";
+ return \"<tbz>\\t%<w>0, <sizem1>, %l1\";
+ "
+ [(set_attr "type" "branch")
+ (set (attr "length")
+ (if_then_else (and (ge (minus (match_dup 1) (pc)) (const_int -32768))
+ (lt (minus (match_dup 1) (pc)) (const_int 32764)))
+ (const_int 4)
+ (const_int 8)))]
+)
+
+;; -------------------------------------------------------------------
+;; Subroutine calls and sibcalls
+;; -------------------------------------------------------------------
+
+(define_expand "call"
+ [(parallel [(call (match_operand 0 "memory_operand" "")
+ (match_operand 1 "general_operand" ""))
+ (use (match_operand 2 "" ""))
+ (clobber (reg:DI LR_REGNUM))])]
+ ""
+ "
+ {
+ rtx callee;
+
+ /* In an untyped call, we can get NULL for operand 2. */
+ if (operands[2] == NULL)
+ operands[2] = const0_rtx;
+
+ /* Decide if we should generate indirect calls by loading the
+ 64-bit address of the callee into a register before performing
+ the branch-and-link. */
+ callee = XEXP (operands[0], 0);
+ if (GET_CODE (callee) == SYMBOL_REF
+ ? aarch64_is_long_call_p (callee)
+ : !REG_P (callee))
+ XEXP (operands[0], 0) = force_reg (Pmode, callee);
+ }"
+)
+
+(define_insn "*call_reg"
+ [(call (mem:DI (match_operand:DI 0 "register_operand" "r"))
+ (match_operand 1 "" ""))
+ (use (match_operand 2 "" ""))
+ (clobber (reg:DI LR_REGNUM))]
+ ""
+ "blr\\t%0"
+ [(set_attr "type" "call")]
+)
+
+(define_insn "*call_symbol"
+ [(call (mem:DI (match_operand:DI 0 "" ""))
+ (match_operand 1 "" ""))
+ (use (match_operand 2 "" ""))
+ (clobber (reg:DI LR_REGNUM))]
+ "GET_CODE (operands[0]) == SYMBOL_REF
+ && !aarch64_is_long_call_p (operands[0])"
+ "bl\\t%a0"
+ [(set_attr "type" "call")]
+)
+
+(define_expand "call_value"
+ [(parallel [(set (match_operand 0 "" "")
+ (call (match_operand 1 "memory_operand" "")
+ (match_operand 2 "general_operand" "")))
+ (use (match_operand 3 "" ""))
+ (clobber (reg:DI LR_REGNUM))])]
+ ""
+ "
+ {
+ rtx callee;
+
+ /* In an untyped call, we can get NULL for operand 3. */
+ if (operands[3] == NULL)
+ operands[3] = const0_rtx;
+
+ /* Decide if we should generate indirect calls by loading the
+ 64-bit address of the callee into a register before performing
+ the branch-and-link. */
+ callee = XEXP (operands[1], 0);
+ if (GET_CODE (callee) == SYMBOL_REF
+ ? aarch64_is_long_call_p (callee)
+ : !REG_P (callee))
+ XEXP (operands[1], 0) = force_reg (Pmode, callee);
+ }"
+)
+
+(define_insn "*call_value_reg"
+ [(set (match_operand 0 "" "")
+ (call (mem:DI (match_operand:DI 1 "register_operand" "r"))
+ (match_operand 2 "" "")))
+ (use (match_operand 3 "" ""))
+ (clobber (reg:DI LR_REGNUM))]
+ ""
+ "blr\\t%1"
+ [(set_attr "type" "call")]
+
+)
+
+(define_insn "*call_value_symbol"
+ [(set (match_operand 0 "" "")
+ (call (mem:DI (match_operand:DI 1 "" ""))
+ (match_operand 2 "" "")))
+ (use (match_operand 3 "" ""))
+ (clobber (reg:DI LR_REGNUM))]
+ "GET_CODE (operands[1]) == SYMBOL_REF
+ && !aarch64_is_long_call_p (operands[1])"
+ "bl\\t%a1"
+ [(set_attr "type" "call")]
+)
+
+(define_expand "sibcall"
+ [(parallel [(call (match_operand 0 "memory_operand" "")
+ (match_operand 1 "general_operand" ""))
+ (return)
+ (use (match_operand 2 "" ""))])]
+ ""
+ {
+ if (operands[2] == NULL_RTX)
+ operands[2] = const0_rtx;
+ }
+)
+
+(define_expand "sibcall_value"
+ [(parallel [(set (match_operand 0 "" "")
+ (call (match_operand 1 "memory_operand" "")
+ (match_operand 2 "general_operand" "")))
+ (return)
+ (use (match_operand 3 "" ""))])]
+ ""
+ {
+ if (operands[3] == NULL_RTX)
+ operands[3] = const0_rtx;
+ }
+)
+
+(define_insn "*sibcall_insn"
+ [(call (mem:DI (match_operand:DI 0 "" "X"))
+ (match_operand 1 "" ""))
+ (return)
+ (use (match_operand 2 "" ""))]
+ "GET_CODE (operands[0]) == SYMBOL_REF"
+ "b\\t%a0"
+ [(set_attr "type" "branch")]
+
+)
+
+(define_insn "*sibcall_value_insn"
+ [(set (match_operand 0 "" "")
+ (call (mem:DI (match_operand 1 "" "X"))
+ (match_operand 2 "" "")))
+ (return)
+ (use (match_operand 3 "" ""))]
+ "GET_CODE (operands[1]) == SYMBOL_REF"
+ "b\\t%a1"
+ [(set_attr "type" "branch")]
+)
+
+;; Call subroutine returning any type.
+
+(define_expand "untyped_call"
+ [(parallel [(call (match_operand 0 "")
+ (const_int 0))
+ (match_operand 1 "")
+ (match_operand 2 "")])]
+ ""
+{
+ int i;
+
+ emit_call_insn (GEN_CALL (operands[0], const0_rtx, NULL, const0_rtx));
+
+ for (i = 0; i < XVECLEN (operands[2], 0); i++)
+ {
+ rtx set = XVECEXP (operands[2], 0, i);
+ emit_move_insn (SET_DEST (set), SET_SRC (set));
+ }
+
+ /* The optimizer does not know that the call sets the function value
+ registers we stored in the result block. We avoid problems by
+ claiming that all hard registers are used and clobbered at this
+ point. */
+ emit_insn (gen_blockage ());
+ DONE;
+})
+
+;; -------------------------------------------------------------------
+;; Moves
+;; -------------------------------------------------------------------
+
+(define_expand "mov<mode>"
+ [(set (match_operand:SHORT 0 "nonimmediate_operand" "")
+ (match_operand:SHORT 1 "general_operand" ""))]
+ ""
+ "
+ if (GET_CODE (operands[0]) == MEM && operands[1] != const0_rtx)
+ operands[1] = force_reg (<MODE>mode, operands[1]);
+ "
+)
+
+(define_insn "*mov<mode>_aarch64"
+ [(set (match_operand:SHORT 0 "nonimmediate_operand" "=r,r, *w,r,*w, m, m, r,*w,*w")
+ (match_operand:SHORT 1 "general_operand" " r,M,D<hq>,m, m,rZ,*w,*w, r,*w"))]
+ "(register_operand (operands[0], <MODE>mode)
+ || aarch64_reg_or_zero (operands[1], <MODE>mode))"
+{
+ switch (which_alternative)
+ {
+ case 0:
+ return "mov\t%w0, %w1";
+ case 1:
+ return "mov\t%w0, %1";
+ case 2:
+ return aarch64_output_scalar_simd_mov_immediate (operands[1],
+ <MODE>mode);
+ case 3:
+ return "ldr<size>\t%w0, %1";
+ case 4:
+ return "ldr\t%<size>0, %1";
+ case 5:
+ return "str<size>\t%w1, %0";
+ case 6:
+ return "str\t%<size>1, %0";
+ case 7:
+ return "umov\t%w0, %1.<v>[0]";
+ case 8:
+ return "dup\t%0.<Vallxd>, %w1";
+ case 9:
+ return "dup\t%<Vetype>0, %1.<v>[0]";
+ default:
+ gcc_unreachable ();
+ }
+}
+ [(set_attr "type" "mov_reg,mov_imm,mov_imm,load1,load1,store1,store1,\
+ neon_from_gp<q>,neon_from_gp<q>, neon_dup")
+ (set_attr "simd" "*,*,yes,*,*,*,*,yes,yes,yes")]
+)
+
+(define_expand "mov<mode>"
+ [(set (match_operand:GPI 0 "nonimmediate_operand" "")
+ (match_operand:GPI 1 "general_operand" ""))]
+ ""
+ "
+ if (GET_CODE (operands[0]) == MEM && operands[1] != const0_rtx)
+ operands[1] = force_reg (<MODE>mode, operands[1]);
+
+ if (CONSTANT_P (operands[1]))
+ {
+ aarch64_expand_mov_immediate (operands[0], operands[1]);
+ DONE;
+ }
+ "
+)
+
+(define_insn "*movsi_aarch64"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=r,k,r,r,r,*w,m, m,r,r ,*w, r,*w")
+ (match_operand:SI 1 "aarch64_mov_operand" " r,r,k,M,m, m,rZ,*w,S,Ush,rZ,*w,*w"))]
+ "(register_operand (operands[0], SImode)
+ || aarch64_reg_or_zero (operands[1], SImode))"
+ "@
+ mov\\t%w0, %w1
+ mov\\t%w0, %w1
+ mov\\t%w0, %w1
+ mov\\t%w0, %1
+ ldr\\t%w0, %1
+ ldr\\t%s0, %1
+ str\\t%w1, %0
+ str\\t%s1, %0
+ adr\\t%x0, %a1
+ adrp\\t%x0, %A1
+ fmov\\t%s0, %w1
+ fmov\\t%w0, %s1
+ fmov\\t%s0, %s1"
+ [(set_attr "type" "mov_reg,mov_reg,mov_reg,mov_imm,load1,load1,store1,store1,\
+ adr,adr,fmov,fmov,fmov")
+ (set_attr "fp" "*,*,*,*,*,yes,*,yes,*,*,yes,yes,yes")]
+)
+
+(define_insn "*movdi_aarch64"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=r,k,r,r,r,*w,m, m,r,r, *w, r,*w,w")
+ (match_operand:DI 1 "aarch64_mov_operand" " r,r,k,N,m, m,rZ,*w,S,Ush,rZ,*w,*w,Dd"))]
+ "(register_operand (operands[0], DImode)
+ || aarch64_reg_or_zero (operands[1], DImode))"
+ "@
+ mov\\t%x0, %x1
+ mov\\t%0, %x1
+ mov\\t%x0, %1
+ mov\\t%x0, %1
+ ldr\\t%x0, %1
+ ldr\\t%d0, %1
+ str\\t%x1, %0
+ str\\t%d1, %0
+ adr\\t%x0, %a1
+ adrp\\t%x0, %A1
+ fmov\\t%d0, %x1
+ fmov\\t%x0, %d1
+ fmov\\t%d0, %d1
+ movi\\t%d0, %1"
+ [(set_attr "type" "mov_reg,mov_reg,mov_reg,mov_imm,load1,load1,store1,store1,\
+ adr,adr,fmov,fmov,fmov,fmov")
+ (set_attr "fp" "*,*,*,*,*,yes,*,yes,*,*,yes,yes,yes,*")
+ (set_attr "simd" "*,*,*,*,*,*,*,*,*,*,*,*,*,yes")]
+)
+
+(define_insn "insv_imm<mode>"
+ [(set (zero_extract:GPI (match_operand:GPI 0 "register_operand" "+r")
+ (const_int 16)
+ (match_operand:GPI 1 "const_int_operand" "n"))
+ (match_operand:GPI 2 "const_int_operand" "n"))]
+ "UINTVAL (operands[1]) < GET_MODE_BITSIZE (<MODE>mode)
+ && UINTVAL (operands[1]) % 16 == 0"
+ "movk\\t%<w>0, %X2, lsl %1"
+ [(set_attr "type" "mov_imm")]
+)
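+
+;; Illustrative only: a constant such as 0xdeadbeef can be synthesized as
+;; "mov w0, 0xbeef" followed by "movk w0, 0xdead, lsl 16"; the movk step
+;; is what the insv_imm pattern above emits.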
+
+(define_expand "movti"
+ [(set (match_operand:TI 0 "nonimmediate_operand" "")
+ (match_operand:TI 1 "general_operand" ""))]
+ ""
+ "
+ if (GET_CODE (operands[0]) == MEM && operands[1] != const0_rtx)
+ operands[1] = force_reg (TImode, operands[1]);
+ "
+)
+
+(define_insn "*movti_aarch64"
+ [(set (match_operand:TI 0
+ "nonimmediate_operand" "=r, *w,r ,*w,r ,Ump,Ump,*w,m")
+ (match_operand:TI 1
+ "aarch64_movti_operand" " rn,r ,*w,*w,Ump,r ,Z , m,*w"))]
+ "(register_operand (operands[0], TImode)
+ || aarch64_reg_or_zero (operands[1], TImode))"
+ "@
+ #
+ #
+ #
+ orr\\t%0.16b, %1.16b, %1.16b
+ ldp\\t%0, %H0, %1
+ stp\\t%1, %H1, %0
+ stp\\txzr, xzr, %0
+ ldr\\t%q0, %1
+ str\\t%q1, %0"
+ [(set_attr "type" "multiple,f_mcr,f_mrc,neon_logic_q, \
+ load2,store2,store2,f_loadd,f_stored")
+ (set_attr "length" "8,8,8,4,4,4,4,4,4")
+ (set_attr "simd" "*,*,*,yes,*,*,*,*,*")
+ (set_attr "fp" "*,*,*,*,*,*,*,yes,yes")]
+)
+
+;; Split a TImode register-register or register-immediate move into
+;; its component DImode pieces, taking care to handle overlapping
+;; source and dest registers.
+(define_split
+ [(set (match_operand:TI 0 "register_operand" "")
+ (match_operand:TI 1 "aarch64_reg_or_imm" ""))]
+ "reload_completed && aarch64_split_128bit_move_p (operands[0], operands[1])"
+ [(const_int 0)]
+{
+ aarch64_split_128bit_move (operands[0], operands[1]);
+ DONE;
+})
+
+(define_expand "mov<mode>"
+ [(set (match_operand:GPF 0 "nonimmediate_operand" "")
+ (match_operand:GPF 1 "general_operand" ""))]
+ ""
+ "
+ if (!TARGET_FLOAT)
+ {
+ sorry (\"%qs and floating point code\", \"-mgeneral-regs-only\");
+ FAIL;
+ }
+
+ if (GET_CODE (operands[0]) == MEM)
+ operands[1] = force_reg (<MODE>mode, operands[1]);
+ "
+)
+
+(define_insn "*movsf_aarch64"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=w, ?r,w,w ,w,m,r,m ,r")
+ (match_operand:SF 1 "general_operand" "?rY, w,w,Ufc,m,w,m,rY,r"))]
+ "TARGET_FLOAT && (register_operand (operands[0], SFmode)
+ || register_operand (operands[1], SFmode))"
+ "@
+ fmov\\t%s0, %w1
+ fmov\\t%w0, %s1
+ fmov\\t%s0, %s1
+ fmov\\t%s0, %1
+ ldr\\t%s0, %1
+ str\\t%s1, %0
+ ldr\\t%w0, %1
+ str\\t%w1, %0
+ mov\\t%w0, %w1"
+ [(set_attr "type" "f_mcr,f_mrc,fmov,fconsts,\
+ f_loads,f_stores,f_loads,f_stores,fmov")]
+)
+
+(define_insn "*movdf_aarch64"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=w, ?r,w,w ,w,m,r,m ,r")
+ (match_operand:DF 1 "general_operand" "?rY, w,w,Ufc,m,w,m,rY,r"))]
+ "TARGET_FLOAT && (register_operand (operands[0], DFmode)
+ || register_operand (operands[1], DFmode))"
+ "@
+ fmov\\t%d0, %x1
+ fmov\\t%x0, %d1
+ fmov\\t%d0, %d1
+ fmov\\t%d0, %1
+ ldr\\t%d0, %1
+ str\\t%d1, %0
+ ldr\\t%x0, %1
+ str\\t%x1, %0
+ mov\\t%x0, %x1"
+ [(set_attr "type" "f_mcr,f_mrc,fmov,fconstd,\
+ f_loadd,f_stored,f_loadd,f_stored,mov_reg")]
+)
+
+(define_expand "movtf"
+ [(set (match_operand:TF 0 "nonimmediate_operand" "")
+ (match_operand:TF 1 "general_operand" ""))]
+ ""
+ "
+ if (!TARGET_FLOAT)
+ {
+ sorry (\"%qs and floating point code\", \"-mgeneral-regs-only\");
+ FAIL;
+ }
+
+ if (GET_CODE (operands[0]) == MEM)
+ operands[1] = force_reg (TFmode, operands[1]);
+ "
+)
+
+(define_insn "*movtf_aarch64"
+ [(set (match_operand:TF 0
+ "nonimmediate_operand" "=w,?&r,w ,?r,w,?w,w,m,?r ,Ump")
+ (match_operand:TF 1
+ "general_operand" " w,?r, ?r,w ,Y,Y ,m,w,Ump,?rY"))]
+ "TARGET_FLOAT && (register_operand (operands[0], TFmode)
+ || register_operand (operands[1], TFmode))"
+ "@
+ orr\\t%0.16b, %1.16b, %1.16b
+ #
+ #
+ #
+ movi\\t%0.2d, #0
+ fmov\\t%s0, wzr
+ ldr\\t%q0, %1
+ str\\t%q1, %0
+ ldp\\t%0, %H0, %1
+ stp\\t%1, %H1, %0"
+ [(set_attr "type" "logic_reg,multiple,f_mcr,f_mrc,fconstd,fconstd,\
+ f_loadd,f_stored,neon_load1_2reg,neon_store1_2reg")
+ (set_attr "length" "4,8,8,8,4,4,4,4,4,4")
+ (set_attr "fp" "*,*,yes,yes,*,yes,yes,yes,*,*")
+ (set_attr "simd" "yes,*,*,*,yes,*,*,*,*,*")]
+)
+
+(define_split
+ [(set (match_operand:TF 0 "register_operand" "")
+ (match_operand:TF 1 "aarch64_reg_or_imm" ""))]
+ "reload_completed && aarch64_split_128bit_move_p (operands[0], operands[1])"
+ [(const_int 0)]
+ {
+ aarch64_split_128bit_move (operands[0], operands[1]);
+ DONE;
+ }
+)
+
+;; Operands 1 and 3 are tied together by the final condition; so we allow
+;; fairly lax checking on the second memory operation.
+(define_insn "load_pair<mode>"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (match_operand:GPI 1 "aarch64_mem_pair_operand" "Ump"))
+ (set (match_operand:GPI 2 "register_operand" "=r")
+ (match_operand:GPI 3 "memory_operand" "m"))]
+ "rtx_equal_p (XEXP (operands[3], 0),
+ plus_constant (Pmode,
+ XEXP (operands[1], 0),
+ GET_MODE_SIZE (<MODE>mode)))"
+ "ldp\\t%<w>0, %<w>2, %1"
+ [(set_attr "type" "load2")]
+)
+
+;; Operands 0 and 2 are tied together by the final condition; so we allow
+;; fairly lax checking on the second memory operation.
+(define_insn "store_pair<mode>"
+ [(set (match_operand:GPI 0 "aarch64_mem_pair_operand" "=Ump")
+ (match_operand:GPI 1 "register_operand" "r"))
+ (set (match_operand:GPI 2 "memory_operand" "=m")
+ (match_operand:GPI 3 "register_operand" "r"))]
+ "rtx_equal_p (XEXP (operands[2], 0),
+ plus_constant (Pmode,
+ XEXP (operands[0], 0),
+ GET_MODE_SIZE (<MODE>mode)))"
+ "stp\\t%<w>1, %<w>3, %0"
+ [(set_attr "type" "store2")]
+)
+
+;; Operands 1 and 3 are tied together by the final condition; so we allow
+;; fairly lax checking on the second memory operation.
+(define_insn "load_pair<mode>"
+ [(set (match_operand:GPF 0 "register_operand" "=w")
+ (match_operand:GPF 1 "aarch64_mem_pair_operand" "Ump"))
+ (set (match_operand:GPF 2 "register_operand" "=w")
+ (match_operand:GPF 3 "memory_operand" "m"))]
+ "rtx_equal_p (XEXP (operands[3], 0),
+ plus_constant (Pmode,
+ XEXP (operands[1], 0),
+ GET_MODE_SIZE (<MODE>mode)))"
+ "ldp\\t%<w>0, %<w>2, %1"
+ [(set_attr "type" "neon_load1_2reg<q>")]
+)
+
+;; Operands 0 and 2 are tied together by the final condition; so we allow
+;; fairly lax checking on the second memory operation.
+(define_insn "store_pair<mode>"
+ [(set (match_operand:GPF 0 "aarch64_mem_pair_operand" "=Ump")
+ (match_operand:GPF 1 "register_operand" "w"))
+ (set (match_operand:GPF 2 "memory_operand" "=m")
+ (match_operand:GPF 3 "register_operand" "w"))]
+ "rtx_equal_p (XEXP (operands[2], 0),
+ plus_constant (Pmode,
+ XEXP (operands[0], 0),
+ GET_MODE_SIZE (<MODE>mode)))"
+ "stp\\t%<w>1, %<w>3, %0"
+ [(set_attr "type" "neon_store1_2reg<q>")]
+)
+
+;; Load pair with writeback. This is primarily used in function epilogues
+;; when restoring [fp,lr]
+(define_insn "loadwb_pair<GPI:mode>_<P:mode>"
+ [(parallel
+ [(set (match_operand:P 0 "register_operand" "=k")
+ (plus:P (match_operand:P 1 "register_operand" "0")
+ (match_operand:P 4 "const_int_operand" "n")))
+ (set (match_operand:GPI 2 "register_operand" "=r")
+ (mem:GPI (plus:P (match_dup 1)
+ (match_dup 4))))
+ (set (match_operand:GPI 3 "register_operand" "=r")
+ (mem:GPI (plus:P (match_dup 1)
+ (match_operand:P 5 "const_int_operand" "n"))))])]
+ "INTVAL (operands[5]) == INTVAL (operands[4]) + GET_MODE_SIZE (<GPI:MODE>mode)"
+ "ldp\\t%<w>2, %<w>3, [%1], %4"
+ [(set_attr "type" "load2")]
+)
+
+;; Store pair with writeback. This is primarily used in function prologues
+;; when saving [fp,lr]
+(define_insn "storewb_pair<GPI:mode>_<P:mode>"
+ [(parallel
+ [(set (match_operand:P 0 "register_operand" "=&k")
+ (plus:P (match_operand:P 1 "register_operand" "0")
+ (match_operand:P 4 "const_int_operand" "n")))
+ (set (mem:GPI (plus:P (match_dup 0)
+ (match_dup 4)))
+ (match_operand:GPI 2 "register_operand" "r"))
+ (set (mem:GPI (plus:P (match_dup 0)
+ (match_operand:P 5 "const_int_operand" "n")))
+ (match_operand:GPI 3 "register_operand" "r"))])]
+ "INTVAL (operands[5]) == INTVAL (operands[4]) + GET_MODE_SIZE (<GPI:MODE>mode)"
+ "stp\\t%<w>2, %<w>3, [%0, %4]!"
+ [(set_attr "type" "store2")]
+)
+
+;; -------------------------------------------------------------------
+;; Sign/Zero extension
+;; -------------------------------------------------------------------
+
+(define_expand "<optab>sidi2"
+ [(set (match_operand:DI 0 "register_operand")
+ (ANY_EXTEND:DI (match_operand:SI 1 "nonimmediate_operand")))]
+ ""
+)
+
+(define_insn "*extendsidi2_aarch64"
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (sign_extend:DI (match_operand:SI 1 "nonimmediate_operand" "r,m")))]
+ ""
+ "@
+ sxtw\t%0, %w1
+ ldrsw\t%0, %1"
+ [(set_attr "type" "extend,load1")]
+)
+
+(define_insn "*zero_extendsidi2_aarch64"
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (zero_extend:DI (match_operand:SI 1 "nonimmediate_operand" "r,m")))]
+ ""
+ "@
+ uxtw\t%0, %w1
+ ldr\t%w0, %1"
+ [(set_attr "type" "extend,load1")]
+)
+
+(define_expand "<ANY_EXTEND:optab><SHORT:mode><GPI:mode>2"
+ [(set (match_operand:GPI 0 "register_operand")
+ (ANY_EXTEND:GPI (match_operand:SHORT 1 "nonimmediate_operand")))]
+ ""
+)
+
+(define_insn "*extend<SHORT:mode><GPI:mode>2_aarch64"
+ [(set (match_operand:GPI 0 "register_operand" "=r,r")
+ (sign_extend:GPI (match_operand:SHORT 1 "nonimmediate_operand" "r,m")))]
+ ""
+ "@
+ sxt<SHORT:size>\t%<GPI:w>0, %w1
+ ldrs<SHORT:size>\t%<GPI:w>0, %1"
+ [(set_attr "type" "extend,load1")]
+)
+
+(define_insn "*zero_extend<SHORT:mode><GPI:mode>2_aarch64"
+ [(set (match_operand:GPI 0 "register_operand" "=r,r,*w")
+ (zero_extend:GPI (match_operand:SHORT 1 "nonimmediate_operand" "r,m,m")))]
+ ""
+ "@
+ uxt<SHORT:size>\t%<GPI:w>0, %w1
+ ldr<SHORT:size>\t%w0, %1
+ ldr\t%<SHORT:size>0, %1"
+ [(set_attr "type" "extend,load1,load1")]
+)
+
+(define_expand "<optab>qihi2"
+ [(set (match_operand:HI 0 "register_operand")
+ (ANY_EXTEND:HI (match_operand:QI 1 "nonimmediate_operand")))]
+ ""
+)
+
+(define_insn "*<optab>qihi2_aarch64"
+ [(set (match_operand:HI 0 "register_operand" "=r,r")
+ (ANY_EXTEND:HI (match_operand:QI 1 "nonimmediate_operand" "r,m")))]
+ ""
+ "@
+ <su>xtb\t%w0, %w1
+ <ldrxt>b\t%w0, %1"
+ [(set_attr "type" "extend,load1")]
+)
+
+;; -------------------------------------------------------------------
+;; Simple arithmetic
+;; -------------------------------------------------------------------
+
+(define_expand "add<mode>3"
+ [(set
+ (match_operand:GPI 0 "register_operand" "")
+ (plus:GPI (match_operand:GPI 1 "register_operand" "")
+ (match_operand:GPI 2 "aarch64_pluslong_operand" "")))]
+ ""
+ "
+ if (! aarch64_plus_operand (operands[2], VOIDmode))
+ {
+ rtx subtarget = ((optimize && can_create_pseudo_p ())
+ ? gen_reg_rtx (<MODE>mode) : operands[0]);
+ HOST_WIDE_INT imm = INTVAL (operands[2]);
+
+ if (imm < 0)
+ imm = -(-imm & ~0xfff);
+ else
+ imm &= ~0xfff;
+
+ emit_insn (gen_add<mode>3 (subtarget, operands[1], GEN_INT (imm)));
+ operands[1] = subtarget;
+ operands[2] = GEN_INT (INTVAL (operands[2]) - imm);
+ }
+ "
+)
+
+(define_insn "*addsi3_aarch64"
+ [(set
+ (match_operand:SI 0 "register_operand" "=rk,rk,rk")
+ (plus:SI
+ (match_operand:SI 1 "register_operand" "%rk,rk,rk")
+ (match_operand:SI 2 "aarch64_plus_operand" "I,r,J")))]
+ ""
+ "@
+ add\\t%w0, %w1, %2
+ add\\t%w0, %w1, %w2
+ sub\\t%w0, %w1, #%n2"
+ [(set_attr "type" "alu_imm,alu_reg,alu_imm")]
+)
+
+;; zero_extend version of above
+(define_insn "*addsi3_aarch64_uxtw"
+ [(set
+ (match_operand:DI 0 "register_operand" "=rk,rk,rk")
+ (zero_extend:DI
+ (plus:SI (match_operand:SI 1 "register_operand" "%rk,rk,rk")
+ (match_operand:SI 2 "aarch64_plus_operand" "I,r,J"))))]
+ ""
+ "@
+ add\\t%w0, %w1, %2
+ add\\t%w0, %w1, %w2
+ sub\\t%w0, %w1, #%n2"
+ [(set_attr "type" "alu_imm,alu_reg,alu_imm")]
+)
+
+(define_insn "*adddi3_aarch64"
+ [(set
+ (match_operand:DI 0 "register_operand" "=rk,rk,rk,!w")
+ (plus:DI
+ (match_operand:DI 1 "register_operand" "%rk,rk,rk,!w")
+ (match_operand:DI 2 "aarch64_plus_operand" "I,r,J,!w")))]
+ ""
+ "@
+ add\\t%x0, %x1, %2
+ add\\t%x0, %x1, %x2
+ sub\\t%x0, %x1, #%n2
+ add\\t%d0, %d1, %d2"
+ [(set_attr "type" "alu_imm,alu_reg,alu_imm,alu_reg")
+ (set_attr "simd" "*,*,*,yes")]
+)
+
+(define_insn "*add<mode>3_compare0"
+ [(set (reg:CC_NZ CC_REGNUM)
+ (compare:CC_NZ
+ (plus:GPI (match_operand:GPI 1 "register_operand" "%r,r,r")
+ (match_operand:GPI 2 "aarch64_plus_operand" "r,I,J"))
+ (const_int 0)))
+ (set (match_operand:GPI 0 "register_operand" "=r,r,r")
+ (plus:GPI (match_dup 1) (match_dup 2)))]
+ ""
+ "@
+ adds\\t%<w>0, %<w>1, %<w>2
+ adds\\t%<w>0, %<w>1, %<w>2
+ subs\\t%<w>0, %<w>1, #%n2"
+ [(set_attr "type" "alus_reg,alus_imm,alus_imm")]
+)
+
+;; zero_extend version of above
+(define_insn "*addsi3_compare0_uxtw"
+ [(set (reg:CC_NZ CC_REGNUM)
+ (compare:CC_NZ
+ (plus:SI (match_operand:SI 1 "register_operand" "%r,r,r")
+ (match_operand:SI 2 "aarch64_plus_operand" "r,I,J"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "register_operand" "=r,r,r")
+ (zero_extend:DI (plus:SI (match_dup 1) (match_dup 2))))]
+ ""
+ "@
+ adds\\t%w0, %w1, %w2
+ adds\\t%w0, %w1, %w2
+ subs\\t%w0, %w1, #%n2"
+ [(set_attr "type" "alus_reg,alus_imm,alus_imm")]
+)
+
+(define_insn "*adds_mul_imm_<mode>"
+ [(set (reg:CC_NZ CC_REGNUM)
+ (compare:CC_NZ
+ (plus:GPI (mult:GPI
+ (match_operand:GPI 1 "register_operand" "r")
+ (match_operand:QI 2 "aarch64_pwr_2_<mode>" "n"))
+ (match_operand:GPI 3 "register_operand" "r"))
+ (const_int 0)))
+ (set (match_operand:GPI 0 "register_operand" "=r")
+ (plus:GPI (mult:GPI (match_dup 1) (match_dup 2))
+ (match_dup 3)))]
+ ""
+ "adds\\t%<w>0, %<w>3, %<w>1, lsl %p2"
+ [(set_attr "type" "alus_shift_imm")]
+)
+
+(define_insn "*subs_mul_imm_<mode>"
+ [(set (reg:CC_NZ CC_REGNUM)
+ (compare:CC_NZ
+ (minus:GPI (match_operand:GPI 1 "register_operand" "r")
+ (mult:GPI
+ (match_operand:GPI 2 "register_operand" "r")
+ (match_operand:QI 3 "aarch64_pwr_2_<mode>" "n")))
+ (const_int 0)))
+ (set (match_operand:GPI 0 "register_operand" "=r")
+ (minus:GPI (match_dup 1)
+ (mult:GPI (match_dup 2) (match_dup 3))))]
+ ""
+ "subs\\t%<w>0, %<w>1, %<w>2, lsl %p3"
+ [(set_attr "type" "alus_shift_imm")]
+)
+
+(define_insn "*adds_<optab><ALLX:mode>_<GPI:mode>"
+ [(set (reg:CC_NZ CC_REGNUM)
+ (compare:CC_NZ
+ (plus:GPI
+ (ANY_EXTEND:GPI (match_operand:ALLX 1 "register_operand" "r"))
+ (match_operand:GPI 2 "register_operand" "r"))
+ (const_int 0)))
+ (set (match_operand:GPI 0 "register_operand" "=r")
+ (plus:GPI (ANY_EXTEND:GPI (match_dup 1)) (match_dup 2)))]
+ ""
+ "adds\\t%<GPI:w>0, %<GPI:w>2, %<GPI:w>1, <su>xt<ALLX:size>"
+ [(set_attr "type" "alus_ext")]
+)
+
+(define_insn "*subs_<optab><ALLX:mode>_<GPI:mode>"
+ [(set (reg:CC_NZ CC_REGNUM)
+ (compare:CC_NZ
+ (minus:GPI (match_operand:GPI 1 "register_operand" "r")
+ (ANY_EXTEND:GPI
+ (match_operand:ALLX 2 "register_operand" "r")))
+ (const_int 0)))
+ (set (match_operand:GPI 0 "register_operand" "=r")
+ (minus:GPI (match_dup 1) (ANY_EXTEND:GPI (match_dup 2))))]
+ ""
+ "subs\\t%<GPI:w>0, %<GPI:w>1, %<GPI:w>2, <su>xt<ALLX:size>"
+ [(set_attr "type" "alus_ext")]
+)
+
+(define_insn "*adds_<optab><mode>_multp2"
+ [(set (reg:CC_NZ CC_REGNUM)
+ (compare:CC_NZ
+ (plus:GPI (ANY_EXTRACT:GPI
+ (mult:GPI (match_operand:GPI 1 "register_operand" "r")
+ (match_operand 2 "aarch64_pwr_imm3" "Up3"))
+ (match_operand 3 "const_int_operand" "n")
+ (const_int 0))
+ (match_operand:GPI 4 "register_operand" "r"))
+ (const_int 0)))
+ (set (match_operand:GPI 0 "register_operand" "=r")
+ (plus:GPI (ANY_EXTRACT:GPI (mult:GPI (match_dup 1) (match_dup 2))
+ (match_dup 3)
+ (const_int 0))
+ (match_dup 4)))]
+ "aarch64_is_extend_from_extract (<MODE>mode, operands[2], operands[3])"
+ "adds\\t%<w>0, %<w>4, %<w>1, <su>xt%e3 %p2"
+ [(set_attr "type" "alus_ext")]
+)
+
+(define_insn "*subs_<optab><mode>_multp2"
+ [(set (reg:CC_NZ CC_REGNUM)
+ (compare:CC_NZ
+ (minus:GPI (match_operand:GPI 4 "register_operand" "r")
+ (ANY_EXTRACT:GPI
+ (mult:GPI (match_operand:GPI 1 "register_operand" "r")
+ (match_operand 2 "aarch64_pwr_imm3" "Up3"))
+ (match_operand 3 "const_int_operand" "n")
+ (const_int 0)))
+ (const_int 0)))
+ (set (match_operand:GPI 0 "register_operand" "=r")
+ (minus:GPI (match_dup 4) (ANY_EXTRACT:GPI
+ (mult:GPI (match_dup 1) (match_dup 2))
+ (match_dup 3)
+ (const_int 0))))]
+ "aarch64_is_extend_from_extract (<MODE>mode, operands[2], operands[3])"
+ "subs\\t%<w>0, %<w>4, %<w>1, <su>xt%e3 %p2"
+ [(set_attr "type" "alus_ext")]
+)
+
+(define_insn "*add<mode>3nr_compare0"
+ [(set (reg:CC_NZ CC_REGNUM)
+ (compare:CC_NZ
+ (plus:GPI (match_operand:GPI 0 "register_operand" "%r,r,r")
+ (match_operand:GPI 1 "aarch64_plus_operand" "r,I,J"))
+ (const_int 0)))]
+ ""
+ "@
+ cmn\\t%<w>0, %<w>1
+ cmn\\t%<w>0, %<w>1
+ cmp\\t%<w>0, #%n1"
+ [(set_attr "type" "alus_reg,alus_imm,alus_imm")]
+)
+
+(define_insn "*compare_neg<mode>"
+ [(set (reg:CC_Z CC_REGNUM)
+ (compare:CC_Z
+ (neg:GPI (match_operand:GPI 0 "register_operand" "r"))
+ (match_operand:GPI 1 "register_operand" "r")))]
+ ""
+ "cmn\\t%<w>1, %<w>0"
+ [(set_attr "type" "alus_reg")]
+)
+
+(define_insn "*add_<shift>_<mode>"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (plus:GPI (ASHIFT:GPI (match_operand:GPI 1 "register_operand" "r")
+ (match_operand:QI 2 "aarch64_shift_imm_<mode>" "n"))
+ (match_operand:GPI 3 "register_operand" "r")))]
+ ""
+ "add\\t%<w>0, %<w>3, %<w>1, <shift> %2"
+ [(set_attr "type" "alu_shift_imm")]
+)
+
+;; zero_extend version of above
+(define_insn "*add_<shift>_si_uxtw"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI
+ (plus:SI (ASHIFT:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:QI 2 "aarch64_shift_imm_si" "n"))
+ (match_operand:SI 3 "register_operand" "r"))))]
+ ""
+ "add\\t%w0, %w3, %w1, <shift> %2"
+ [(set_attr "type" "alu_shift_imm")]
+)
+
+(define_insn "*add_mul_imm_<mode>"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (plus:GPI (mult:GPI (match_operand:GPI 1 "register_operand" "r")
+ (match_operand:QI 2 "aarch64_pwr_2_<mode>" "n"))
+ (match_operand:GPI 3 "register_operand" "r")))]
+ ""
+ "add\\t%<w>0, %<w>3, %<w>1, lsl %p2"
+ [(set_attr "type" "alu_shift_imm")]
+)
+
+(define_insn "*add_<optab><ALLX:mode>_<GPI:mode>"
+ [(set (match_operand:GPI 0 "register_operand" "=rk")
+ (plus:GPI (ANY_EXTEND:GPI (match_operand:ALLX 1 "register_operand" "r"))
+ (match_operand:GPI 2 "register_operand" "r")))]
+ ""
+ "add\\t%<GPI:w>0, %<GPI:w>2, %<GPI:w>1, <su>xt<ALLX:size>"
+ [(set_attr "type" "alu_ext")]
+)
+
+;; zero_extend version of above
+(define_insn "*add_<optab><SHORT:mode>_si_uxtw"
+ [(set (match_operand:DI 0 "register_operand" "=rk")
+ (zero_extend:DI
+ (plus:SI (ANY_EXTEND:SI (match_operand:SHORT 1 "register_operand" "r"))
+ (match_operand:GPI 2 "register_operand" "r"))))]
+ ""
+ "add\\t%w0, %w2, %w1, <su>xt<SHORT:size>"
+ [(set_attr "type" "alu_ext")]
+)
+
+(define_insn "*add_<optab><ALLX:mode>_shft_<GPI:mode>"
+ [(set (match_operand:GPI 0 "register_operand" "=rk")
+ (plus:GPI (ashift:GPI (ANY_EXTEND:GPI
+ (match_operand:ALLX 1 "register_operand" "r"))
+ (match_operand 2 "aarch64_imm3" "Ui3"))
+ (match_operand:GPI 3 "register_operand" "r")))]
+ ""
+ "add\\t%<GPI:w>0, %<GPI:w>3, %<GPI:w>1, <su>xt<ALLX:size> %2"
+ [(set_attr "type" "alu_ext")]
+)
+
+;; zero_extend version of above
+(define_insn "*add_<optab><SHORT:mode>_shft_si_uxtw"
+ [(set (match_operand:DI 0 "register_operand" "=rk")
+ (zero_extend:DI
+ (plus:SI (ashift:SI (ANY_EXTEND:SI
+ (match_operand:SHORT 1 "register_operand" "r"))
+ (match_operand 2 "aarch64_imm3" "Ui3"))
+ (match_operand:SI 3 "register_operand" "r"))))]
+ ""
+ "add\\t%w0, %w3, %w1, <su>xt<SHORT:size> %2"
+ [(set_attr "type" "alu_ext")]
+)
+
+(define_insn "*add_<optab><ALLX:mode>_mult_<GPI:mode>"
+ [(set (match_operand:GPI 0 "register_operand" "=rk")
+ (plus:GPI (mult:GPI (ANY_EXTEND:GPI
+ (match_operand:ALLX 1 "register_operand" "r"))
+ (match_operand 2 "aarch64_pwr_imm3" "Up3"))
+ (match_operand:GPI 3 "register_operand" "r")))]
+ ""
+ "add\\t%<GPI:w>0, %<GPI:w>3, %<GPI:w>1, <su>xt<ALLX:size> %p2"
+ [(set_attr "type" "alu_ext")]
+)
+
+;; zero_extend version of above
+(define_insn "*add_<optab><SHORT:mode>_mult_si_uxtw"
+ [(set (match_operand:DI 0 "register_operand" "=rk")
+ (zero_extend:DI (plus:SI (mult:SI (ANY_EXTEND:SI
+ (match_operand:SHORT 1 "register_operand" "r"))
+ (match_operand 2 "aarch64_pwr_imm3" "Up3"))
+ (match_operand:SI 3 "register_operand" "r"))))]
+ ""
+ "add\\t%w0, %w3, %w1, <su>xt<SHORT:size> %p2"
+ [(set_attr "type" "alu_ext")]
+)
+
+(define_insn "*add_<optab><mode>_multp2"
+ [(set (match_operand:GPI 0 "register_operand" "=rk")
+ (plus:GPI (ANY_EXTRACT:GPI
+ (mult:GPI (match_operand:GPI 1 "register_operand" "r")
+ (match_operand 2 "aarch64_pwr_imm3" "Up3"))
+ (match_operand 3 "const_int_operand" "n")
+ (const_int 0))
+ (match_operand:GPI 4 "register_operand" "r")))]
+ "aarch64_is_extend_from_extract (<MODE>mode, operands[2], operands[3])"
+ "add\\t%<w>0, %<w>4, %<w>1, <su>xt%e3 %p2"
+ [(set_attr "type" "alu_ext")]
+)
+
+;; zero_extend version of above
+(define_insn "*add_<optab>si_multp2_uxtw"
+ [(set (match_operand:DI 0 "register_operand" "=rk")
+ (zero_extend:DI
+ (plus:SI (ANY_EXTRACT:SI
+ (mult:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand 2 "aarch64_pwr_imm3" "Up3"))
+ (match_operand 3 "const_int_operand" "n")
+ (const_int 0))
+ (match_operand:SI 4 "register_operand" "r"))))]
+ "aarch64_is_extend_from_extract (SImode, operands[2], operands[3])"
+ "add\\t%w0, %w4, %w1, <su>xt%e3 %p2"
+ [(set_attr "type" "alu_ext")]
+)
+
+(define_insn "*add<mode>3_carryin"
+ [(set
+ (match_operand:GPI 0 "register_operand" "=r")
+ (plus:GPI (geu:GPI (reg:CC CC_REGNUM) (const_int 0))
+ (plus:GPI
+ (match_operand:GPI 1 "register_operand" "r")
+ (match_operand:GPI 2 "register_operand" "r"))))]
+ ""
+ "adc\\t%<w>0, %<w>1, %<w>2"
+ [(set_attr "type" "adc_reg")]
+)
+
+;; zero_extend version of above
+(define_insn "*addsi3_carryin_uxtw"
+ [(set
+ (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI
+ (plus:SI (geu:SI (reg:CC CC_REGNUM) (const_int 0))
+ (plus:SI
+ (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "register_operand" "r")))))]
+ ""
+ "adc\\t%w0, %w1, %w2"
+ [(set_attr "type" "adc_reg")]
+)
+
+(define_insn "*add<mode>3_carryin_alt1"
+ [(set
+ (match_operand:GPI 0 "register_operand" "=r")
+ (plus:GPI (plus:GPI
+ (match_operand:GPI 1 "register_operand" "r")
+ (match_operand:GPI 2 "register_operand" "r"))
+ (geu:GPI (reg:CC CC_REGNUM) (const_int 0))))]
+ ""
+ "adc\\t%<w>0, %<w>1, %<w>2"
+ [(set_attr "type" "adc_reg")]
+)
+
+;; zero_extend version of above
+(define_insn "*addsi3_carryin_alt1_uxtw"
+ [(set
+ (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI
+ (plus:SI (plus:SI
+ (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "register_operand" "r"))
+ (geu:SI (reg:CC CC_REGNUM) (const_int 0)))))]
+ ""
+ "adc\\t%w0, %w1, %w2"
+ [(set_attr "type" "adc_reg")]
+)
+
+(define_insn "*add<mode>3_carryin_alt2"
+ [(set
+ (match_operand:GPI 0 "register_operand" "=r")
+ (plus:GPI (plus:GPI
+ (geu:GPI (reg:CC CC_REGNUM) (const_int 0))
+ (match_operand:GPI 1 "register_operand" "r"))
+ (match_operand:GPI 2 "register_operand" "r")))]
+ ""
+ "adc\\t%<w>0, %<w>1, %<w>2"
+ [(set_attr "type" "adc_reg")]
+)
+
+;; zero_extend version of above
+(define_insn "*addsi3_carryin_alt2_uxtw"
+ [(set
+ (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI
+ (plus:SI (plus:SI
+ (geu:SI (reg:CC CC_REGNUM) (const_int 0))
+ (match_operand:SI 1 "register_operand" "r"))
+ (match_operand:SI 2 "register_operand" "r"))))]
+ ""
+ "adc\\t%w0, %w1, %w2"
+ [(set_attr "type" "adc_reg")]
+)
+
+(define_insn "*add<mode>3_carryin_alt3"
+ [(set
+ (match_operand:GPI 0 "register_operand" "=r")
+ (plus:GPI (plus:GPI
+ (geu:GPI (reg:CC CC_REGNUM) (const_int 0))
+ (match_operand:GPI 2 "register_operand" "r"))
+ (match_operand:GPI 1 "register_operand" "r")))]
+ ""
+ "adc\\t%<w>0, %<w>1, %<w>2"
+ [(set_attr "type" "adc_reg")]
+)
+
+;; zero_extend version of above
+(define_insn "*addsi3_carryin_alt3_uxtw"
+ [(set
+ (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI
+ (plus:SI (plus:SI
+ (geu:SI (reg:CC CC_REGNUM) (const_int 0))
+ (match_operand:SI 2 "register_operand" "r"))
+ (match_operand:SI 1 "register_operand" "r"))))]
+ ""
+ "adc\\t%w0, %w1, %w2"
+ [(set_attr "type" "adc_reg")]
+)
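+;; The plain carry-in pattern and the three _alt variants above all emit the
+;; same adc; they differ only in how the (geu CC 0) carry term is nested and
+;; ordered inside the plus, so that whichever canonical form the combiner
+;; happens to build for a multi-word add can still be matched.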
+
+(define_insn "*add_uxt<mode>_multp2"
+ [(set (match_operand:GPI 0 "register_operand" "=rk")
+ (plus:GPI (and:GPI
+ (mult:GPI (match_operand:GPI 1 "register_operand" "r")
+ (match_operand 2 "aarch64_pwr_imm3" "Up3"))
+ (match_operand 3 "const_int_operand" "n"))
+ (match_operand:GPI 4 "register_operand" "r")))]
+ "aarch64_uxt_size (exact_log2 (INTVAL (operands[2])), INTVAL (operands[3])) != 0"
+ "*
+ operands[3] = GEN_INT (aarch64_uxt_size (exact_log2 (INTVAL (operands[2])),
+ INTVAL (operands[3])));
+ return \"add\t%<w>0, %<w>4, %<w>1, uxt%e3 %p2\";"
+ [(set_attr "type" "alu_ext")]
+)
+
+;; zero_extend version of above
+(define_insn "*add_uxtsi_multp2_uxtw"
+ [(set (match_operand:DI 0 "register_operand" "=rk")
+ (zero_extend:DI
+ (plus:SI (and:SI
+ (mult:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand 2 "aarch64_pwr_imm3" "Up3"))
+ (match_operand 3 "const_int_operand" "n"))
+ (match_operand:SI 4 "register_operand" "r"))))]
+ "aarch64_uxt_size (exact_log2 (INTVAL (operands[2])), INTVAL (operands[3])) != 0"
+ "*
+ operands[3] = GEN_INT (aarch64_uxt_size (exact_log2 (INTVAL (operands[2])),
+ INTVAL (operands[3])));
+ return \"add\t%w0, %w4, %w1, uxt%e3 %p2\";"
+ [(set_attr "type" "alu_ext")]
+)
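+;; In the two uxt..._multp2 patterns above, aarch64_uxt_size checks that the
+;; AND mask in operand 3, taken together with the power-of-two multiplier in
+;; operand 2, describes a zero-extension of a byte, halfword or word; the C
+;; fragment then rewrites operand 3 to that width, so uxt%e3 prints uxtb,
+;; uxth or uxtw and %p2 prints log2 of the multiplier as the shift amount.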
+
+(define_insn "subsi3"
+ [(set (match_operand:SI 0 "register_operand" "=rk")
+ (minus:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "register_operand" "r")))]
+ ""
+ "sub\\t%w0, %w1, %w2"
+ [(set_attr "type" "alu_reg")]
+)
+
+;; zero_extend version of above
+(define_insn "*subsi3_uxtw"
+ [(set (match_operand:DI 0 "register_operand" "=rk")
+ (zero_extend:DI
+ (minus:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "register_operand" "r"))))]
+ ""
+ "sub\\t%w0, %w1, %w2"
+ [(set_attr "type" "alu_reg")]
+)
+
+(define_insn "subdi3"
+ [(set (match_operand:DI 0 "register_operand" "=rk,!w")
+ (minus:DI (match_operand:DI 1 "register_operand" "r,!w")
+ (match_operand:DI 2 "register_operand" "r,!w")))]
+ ""
+ "@
+ sub\\t%x0, %x1, %x2
+ sub\\t%d0, %d1, %d2"
+ [(set_attr "type" "alu_reg, neon_sub")
+ (set_attr "simd" "*,yes")]
+)
+
+(define_insn "*sub<mode>3_compare0"
+ [(set (reg:CC_NZ CC_REGNUM)
+ (compare:CC_NZ (minus:GPI (match_operand:GPI 1 "register_operand" "r")
+ (match_operand:GPI 2 "register_operand" "r"))
+ (const_int 0)))
+ (set (match_operand:GPI 0 "register_operand" "=r")
+ (minus:GPI (match_dup 1) (match_dup 2)))]
+ ""
+ "subs\\t%<w>0, %<w>1, %<w>2"
+ [(set_attr "type" "alus_reg")]
+)
+
+;; zero_extend version of above
+(define_insn "*subsi3_compare0_uxtw"
+ [(set (reg:CC_NZ CC_REGNUM)
+ (compare:CC_NZ (minus:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "register_operand" "r"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI (minus:SI (match_dup 1) (match_dup 2))))]
+ ""
+ "subs\\t%w0, %w1, %w2"
+ [(set_attr "type" "alus_reg")]
+)
+
+(define_insn "*sub_<shift>_<mode>"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (minus:GPI (match_operand:GPI 3 "register_operand" "r")
+ (ASHIFT:GPI
+ (match_operand:GPI 1 "register_operand" "r")
+ (match_operand:QI 2 "aarch64_shift_imm_<mode>" "n"))))]
+ ""
+ "sub\\t%<w>0, %<w>3, %<w>1, <shift> %2"
+ [(set_attr "type" "alu_shift_imm")]
+)
+
+;; zero_extend version of above
+(define_insn "*sub_<shift>_si_uxtw"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI
+ (minus:SI (match_operand:SI 3 "register_operand" "r")
+ (ASHIFT:SI
+ (match_operand:SI 1 "register_operand" "r")
+ (match_operand:QI 2 "aarch64_shift_imm_si" "n")))))]
+ ""
+ "sub\\t%w0, %w3, %w1, <shift> %2"
+ [(set_attr "type" "alu_shift_imm")]
+)
+
+(define_insn "*sub_mul_imm_<mode>"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (minus:GPI (match_operand:GPI 3 "register_operand" "r")
+ (mult:GPI
+ (match_operand:GPI 1 "register_operand" "r")
+ (match_operand:QI 2 "aarch64_pwr_2_<mode>" "n"))))]
+ ""
+ "sub\\t%<w>0, %<w>3, %<w>1, lsl %p2"
+ [(set_attr "type" "alu_shift_imm")]
+)
+
+;; zero_extend version of above
+(define_insn "*sub_mul_imm_si_uxtw"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI
+ (minus:SI (match_operand:SI 3 "register_operand" "r")
+ (mult:SI
+ (match_operand:SI 1 "register_operand" "r")
+ (match_operand:QI 2 "aarch64_pwr_2_si" "n")))))]
+ ""
+ "sub\\t%w0, %w3, %w1, lsl %p2"
+ [(set_attr "type" "alu_shift_imm")]
+)
+
+(define_insn "*sub_<optab><ALLX:mode>_<GPI:mode>"
+ [(set (match_operand:GPI 0 "register_operand" "=rk")
+ (minus:GPI (match_operand:GPI 1 "register_operand" "r")
+ (ANY_EXTEND:GPI
+ (match_operand:ALLX 2 "register_operand" "r"))))]
+ ""
+ "sub\\t%<GPI:w>0, %<GPI:w>1, %<GPI:w>2, <su>xt<ALLX:size>"
+ [(set_attr "type" "alu_ext")]
+)
+
+;; zero_extend version of above
+(define_insn "*sub_<optab><SHORT:mode>_si_uxtw"
+ [(set (match_operand:DI 0 "register_operand" "=rk")
+ (zero_extend:DI
+ (minus:SI (match_operand:SI 1 "register_operand" "r")
+ (ANY_EXTEND:SI
+ (match_operand:SHORT 2 "register_operand" "r")))))]
+ ""
+ "sub\\t%w0, %w1, %w2, <su>xt<SHORT:size>"
+ [(set_attr "type" "alu_ext")]
+)
+
+(define_insn "*sub_<optab><ALLX:mode>_shft_<GPI:mode>"
+ [(set (match_operand:GPI 0 "register_operand" "=rk")
+ (minus:GPI (match_operand:GPI 1 "register_operand" "r")
+ (ashift:GPI (ANY_EXTEND:GPI
+ (match_operand:ALLX 2 "register_operand" "r"))
+ (match_operand 3 "aarch64_imm3" "Ui3"))))]
+ ""
+ "sub\\t%<GPI:w>0, %<GPI:w>1, %<GPI:w>2, <su>xt<ALLX:size> %3"
+ [(set_attr "type" "alu_ext")]
+)
+
+;; zero_extend version of above
+(define_insn "*sub_<optab><SHORT:mode>_shft_si_uxtw"
+ [(set (match_operand:DI 0 "register_operand" "=rk")
+ (zero_extend:DI
+ (minus:SI (match_operand:SI 1 "register_operand" "r")
+ (ashift:SI (ANY_EXTEND:SI
+ (match_operand:SHORT 2 "register_operand" "r"))
+ (match_operand 3 "aarch64_imm3" "Ui3")))))]
+ ""
+ "sub\\t%w0, %w1, %w2, <su>xt<SHORT:size> %3"
+ [(set_attr "type" "alu_ext")]
+)
+
+(define_insn "*sub_<optab><mode>_multp2"
+ [(set (match_operand:GPI 0 "register_operand" "=rk")
+ (minus:GPI (match_operand:GPI 4 "register_operand" "r")
+ (ANY_EXTRACT:GPI
+ (mult:GPI (match_operand:GPI 1 "register_operand" "r")
+ (match_operand 2 "aarch64_pwr_imm3" "Up3"))
+ (match_operand 3 "const_int_operand" "n")
+ (const_int 0))))]
+ "aarch64_is_extend_from_extract (<MODE>mode, operands[2], operands[3])"
+ "sub\\t%<w>0, %<w>4, %<w>1, <su>xt%e3 %p2"
+ [(set_attr "type" "alu_ext")]
+)
+
+;; zero_extend version of above
+(define_insn "*sub_<optab>si_multp2_uxtw"
+ [(set (match_operand:DI 0 "register_operand" "=rk")
+ (zero_extend:DI
+ (minus:SI (match_operand:SI 4 "register_operand" "r")
+ (ANY_EXTRACT:SI
+ (mult:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand 2 "aarch64_pwr_imm3" "Up3"))
+ (match_operand 3 "const_int_operand" "n")
+ (const_int 0)))))]
+ "aarch64_is_extend_from_extract (SImode, operands[2], operands[3])"
+ "sub\\t%w0, %w4, %w1, <su>xt%e3 %p2"
+ [(set_attr "type" "alu_ext")]
+)
+
+(define_insn "*sub<mode>3_carryin"
+ [(set
+ (match_operand:GPI 0 "register_operand" "=r")
+ (minus:GPI (minus:GPI
+ (match_operand:GPI 1 "register_operand" "r")
+ (ltu:GPI (reg:CC CC_REGNUM) (const_int 0)))
+ (match_operand:GPI 2 "register_operand" "r")))]
+ ""
+ "sbc\\t%<w>0, %<w>1, %<w>2"
+ [(set_attr "type" "adc_reg")]
+)
+
+;; zero_extend version of the above
+(define_insn "*subsi3_carryin_uxtw"
+ [(set
+ (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI
+ (minus:SI (minus:SI
+ (match_operand:SI 1 "register_operand" "r")
+ (ltu:SI (reg:CC CC_REGNUM) (const_int 0)))
+ (match_operand:SI 2 "register_operand" "r"))))]
+ ""
+ "sbc\\t%w0, %w1, %w2"
+ [(set_attr "type" "adc_reg")]
+)
+
+(define_insn "*sub_uxt<mode>_multp2"
+ [(set (match_operand:GPI 0 "register_operand" "=rk")
+ (minus:GPI (match_operand:GPI 4 "register_operand" "r")
+ (and:GPI
+ (mult:GPI (match_operand:GPI 1 "register_operand" "r")
+ (match_operand 2 "aarch64_pwr_imm3" "Up3"))
+ (match_operand 3 "const_int_operand" "n"))))]
+ "aarch64_uxt_size (exact_log2 (INTVAL (operands[2])),INTVAL (operands[3])) != 0"
+ "*
+ operands[3] = GEN_INT (aarch64_uxt_size (exact_log2 (INTVAL (operands[2])),
+ INTVAL (operands[3])));
+ return \"sub\t%<w>0, %<w>4, %<w>1, uxt%e3 %p2\";"
+ [(set_attr "type" "alu_ext")]
+)
+
+;; zero_extend version of above
+(define_insn "*sub_uxtsi_multp2_uxtw"
+ [(set (match_operand:DI 0 "register_operand" "=rk")
+ (zero_extend:DI
+ (minus:SI (match_operand:SI 4 "register_operand" "r")
+ (and:SI
+ (mult:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand 2 "aarch64_pwr_imm3" "Up3"))
+ (match_operand 3 "const_int_operand" "n")))))]
+ "aarch64_uxt_size (exact_log2 (INTVAL (operands[2])),INTVAL (operands[3])) != 0"
+ "*
+ operands[3] = GEN_INT (aarch64_uxt_size (exact_log2 (INTVAL (operands[2])),
+ INTVAL (operands[3])));
+ return \"sub\t%w0, %w4, %w1, uxt%e3 %p2\";"
+ [(set_attr "type" "alu_ext")]
+)
+
+(define_insn_and_split "absdi2"
+ [(set (match_operand:DI 0 "register_operand" "=r,w")
+ (abs:DI (match_operand:DI 1 "register_operand" "r,w")))
+ (clobber (match_scratch:DI 2 "=&r,X"))]
+ ""
+ "@
+ #
+ abs\\t%d0, %d1"
+ "reload_completed
+ && GP_REGNUM_P (REGNO (operands[0]))
+ && GP_REGNUM_P (REGNO (operands[1]))"
+ [(const_int 0)]
+ {
+ emit_insn (gen_rtx_SET (VOIDmode, operands[2],
+ gen_rtx_XOR (DImode,
+ gen_rtx_ASHIFTRT (DImode,
+ operands[1],
+ GEN_INT (63)),
+ operands[1])));
+ emit_insn (gen_rtx_SET (VOIDmode,
+ operands[0],
+ gen_rtx_MINUS (DImode,
+ operands[2],
+ gen_rtx_ASHIFTRT (DImode,
+ operands[1],
+ GEN_INT (63)))));
+ DONE;
+ }
+ [(set_attr "type" "alu_reg")]
+)
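+;; For general registers the split above expands abs into the branchless
+;; form (x ^ (x >> 63)) - (x >> 63); both halves match the shifted-operand
+;; eor and sub patterns in this file, giving an eor/sub pair with an
+;; "asr 63" operand.  The SIMD alternative uses the abs instruction directly.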
+
+(define_insn "neg<mode>2"
+ [(set (match_operand:GPI 0 "register_operand" "=r,w")
+ (neg:GPI (match_operand:GPI 1 "register_operand" "r,w")))]
+ ""
+ "@
+ neg\\t%<w>0, %<w>1
+ neg\\t%<rtn>0<vas>, %<rtn>1<vas>"
+ [(set_attr "type" "alu_reg, neon_neg<q>")
+ (set_attr "simd" "*,yes")]
+)
+
+;; zero_extend version of above
+(define_insn "*negsi2_uxtw"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI (neg:SI (match_operand:SI 1 "register_operand" "r"))))]
+ ""
+ "neg\\t%w0, %w1"
+ [(set_attr "type" "alu_reg")]
+)
+
+(define_insn "*ngc<mode>"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (minus:GPI (neg:GPI (ltu:GPI (reg:CC CC_REGNUM) (const_int 0)))
+ (match_operand:GPI 1 "register_operand" "r")))]
+ ""
+ "ngc\\t%<w>0, %<w>1"
+ [(set_attr "type" "adc_reg")]
+)
+
+(define_insn "*ngcsi_uxtw"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI
+ (minus:SI (neg:SI (ltu:SI (reg:CC CC_REGNUM) (const_int 0)))
+ (match_operand:SI 1 "register_operand" "r"))))]
+ ""
+ "ngc\\t%w0, %w1"
+ [(set_attr "type" "adc_reg")]
+)
+
+(define_insn "*neg<mode>2_compare0"
+ [(set (reg:CC_NZ CC_REGNUM)
+ (compare:CC_NZ (neg:GPI (match_operand:GPI 1 "register_operand" "r"))
+ (const_int 0)))
+ (set (match_operand:GPI 0 "register_operand" "=r")
+ (neg:GPI (match_dup 1)))]
+ ""
+ "negs\\t%<w>0, %<w>1"
+ [(set_attr "type" "alus_reg")]
+)
+
+;; zero_extend version of above
+(define_insn "*negsi2_compare0_uxtw"
+ [(set (reg:CC_NZ CC_REGNUM)
+ (compare:CC_NZ (neg:SI (match_operand:SI 1 "register_operand" "r"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI (neg:SI (match_dup 1))))]
+ ""
+ "negs\\t%w0, %w1"
+ [(set_attr "type" "alus_reg")]
+)
+
+(define_insn "*neg_<shift><mode>3_compare0"
+ [(set (reg:CC_NZ CC_REGNUM)
+ (compare:CC_NZ
+ (neg:GPI (ASHIFT:GPI
+ (match_operand:GPI 1 "register_operand" "r")
+ (match_operand:QI 2 "aarch64_shift_imm_<mode>" "n")))
+ (const_int 0)))
+ (set (match_operand:GPI 0 "register_operand" "=r")
+ (neg:GPI (ASHIFT:GPI (match_dup 1) (match_dup 2))))]
+ ""
+ "negs\\t%<w>0, %<w>1, <shift> %2"
+ [(set_attr "type" "alus_shift_imm")]
+)
+
+(define_insn "*neg_<shift>_<mode>2"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (neg:GPI (ASHIFT:GPI
+ (match_operand:GPI 1 "register_operand" "r")
+ (match_operand:QI 2 "aarch64_shift_imm_<mode>" "n"))))]
+ ""
+ "neg\\t%<w>0, %<w>1, <shift> %2"
+ [(set_attr "type" "alu_shift_imm")]
+)
+
+;; zero_extend version of above
+(define_insn "*neg_<shift>_si2_uxtw"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI
+ (neg:SI (ASHIFT:SI
+ (match_operand:SI 1 "register_operand" "r")
+ (match_operand:QI 2 "aarch64_shift_imm_si" "n")))))]
+ ""
+ "neg\\t%w0, %w1, <shift> %2"
+ [(set_attr "type" "alu_shift_imm")]
+)
+
+(define_insn "*neg_mul_imm_<mode>2"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (neg:GPI (mult:GPI
+ (match_operand:GPI 1 "register_operand" "r")
+ (match_operand:QI 2 "aarch64_pwr_2_<mode>" "n"))))]
+ ""
+ "neg\\t%<w>0, %<w>1, lsl %p2"
+ [(set_attr "type" "alu_shift_imm")]
+)
+
+;; zero_extend version of above
+(define_insn "*neg_mul_imm_si2_uxtw"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI
+ (neg:SI (mult:SI
+ (match_operand:SI 1 "register_operand" "r")
+ (match_operand:QI 2 "aarch64_pwr_2_si" "n")))))]
+ ""
+ "neg\\t%w0, %w1, lsl %p2"
+ [(set_attr "type" "alu_shift_imm")]
+)
+
+(define_insn "mul<mode>3"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (mult:GPI (match_operand:GPI 1 "register_operand" "r")
+ (match_operand:GPI 2 "register_operand" "r")))]
+ ""
+ "mul\\t%<w>0, %<w>1, %<w>2"
+ [(set_attr "type" "mul")]
+)
+
+;; zero_extend version of above
+(define_insn "*mulsi3_uxtw"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI
+ (mult:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "register_operand" "r"))))]
+ ""
+ "mul\\t%w0, %w1, %w2"
+ [(set_attr "type" "mul")]
+)
+
+(define_insn "*madd<mode>"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (plus:GPI (mult:GPI (match_operand:GPI 1 "register_operand" "r")
+ (match_operand:GPI 2 "register_operand" "r"))
+ (match_operand:GPI 3 "register_operand" "r")))]
+ ""
+ "madd\\t%<w>0, %<w>1, %<w>2, %<w>3"
+ [(set_attr "type" "mla")]
+)
+
+;; zero_extend version of above
+(define_insn "*maddsi_uxtw"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI
+ (plus:SI (mult:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "register_operand" "r"))
+ (match_operand:SI 3 "register_operand" "r"))))]
+ ""
+ "madd\\t%w0, %w1, %w2, %w3"
+ [(set_attr "type" "mla")]
+)
+
+(define_insn "*msub<mode>"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (minus:GPI (match_operand:GPI 3 "register_operand" "r")
+ (mult:GPI (match_operand:GPI 1 "register_operand" "r")
+ (match_operand:GPI 2 "register_operand" "r"))))]
+ ""
+ "msub\\t%<w>0, %<w>1, %<w>2, %<w>3"
+ [(set_attr "type" "mla")]
+)
+
+;; zero_extend version of above
+(define_insn "*msubsi_uxtw"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI
+ (minus:SI (match_operand:SI 3 "register_operand" "r")
+ (mult:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "register_operand" "r")))))]
+ ""
+ "msub\\t%w0, %w1, %w2, %w3"
+ [(set_attr "type" "mla")]
+)
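+;; For example, C expressions of the form "a * b + c" and "c - a * b" on
+;; integer operands are expected to match *madd<mode> and *msub<mode>
+;; respectively, each giving a single madd or msub instead of a mul
+;; followed by an add or sub.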
+
+(define_insn "*mul<mode>_neg"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (mult:GPI (neg:GPI (match_operand:GPI 1 "register_operand" "r"))
+ (match_operand:GPI 2 "register_operand" "r")))]
+ ""
+ "mneg\\t%<w>0, %<w>1, %<w>2"
+ [(set_attr "type" "mul")]
+)
+
+;; zero_extend version of above
+(define_insn "*mulsi_neg_uxtw"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI
+ (mult:SI (neg:SI (match_operand:SI 1 "register_operand" "r"))
+ (match_operand:SI 2 "register_operand" "r"))))]
+ ""
+ "mneg\\t%w0, %w1, %w2"
+ [(set_attr "type" "mul")]
+)
+
+(define_insn "<su_optab>mulsidi3"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (mult:DI (ANY_EXTEND:DI (match_operand:SI 1 "register_operand" "r"))
+ (ANY_EXTEND:DI (match_operand:SI 2 "register_operand" "r"))))]
+ ""
+ "<su>mull\\t%0, %w1, %w2"
+ [(set_attr "type" "<su>mull")]
+)
+
+(define_insn "<su_optab>maddsidi4"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (plus:DI (mult:DI
+ (ANY_EXTEND:DI (match_operand:SI 1 "register_operand" "r"))
+ (ANY_EXTEND:DI (match_operand:SI 2 "register_operand" "r")))
+ (match_operand:DI 3 "register_operand" "r")))]
+ ""
+ "<su>maddl\\t%0, %w1, %w2, %3"
+ [(set_attr "type" "<su>mlal")]
+)
+
+(define_insn "<su_optab>msubsidi4"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (minus:DI
+ (match_operand:DI 3 "register_operand" "r")
+ (mult:DI (ANY_EXTEND:DI (match_operand:SI 1 "register_operand" "r"))
+ (ANY_EXTEND:DI
+ (match_operand:SI 2 "register_operand" "r")))))]
+ ""
+ "<su>msubl\\t%0, %w1, %w2, %3"
+ [(set_attr "type" "<su>mlal")]
+)
+
+(define_insn "*<su_optab>mulsidi_neg"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (mult:DI (neg:DI
+ (ANY_EXTEND:DI (match_operand:SI 1 "register_operand" "r")))
+ (ANY_EXTEND:DI (match_operand:SI 2 "register_operand" "r"))))]
+ ""
+ "<su>mnegl\\t%0, %w1, %w2"
+ [(set_attr "type" "<su>mull")]
+)
+
+(define_insn "<su>muldi3_highpart"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (truncate:DI
+ (lshiftrt:TI
+ (mult:TI
+ (ANY_EXTEND:TI (match_operand:DI 1 "register_operand" "r"))
+ (ANY_EXTEND:TI (match_operand:DI 2 "register_operand" "r")))
+ (const_int 64))))]
+ ""
+ "<su>mulh\\t%0, %1, %2"
+ [(set_attr "type" "<su>mull")]
+)
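+;; The highpart pattern above is the canonical RTL for the upper 64 bits of
+;; a full 64x64->128-bit multiply, roughly the C expression
+;; (int64_t) (((__int128) a * b) >> 64), and selects smulh or umulh
+;; according to the extension used.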
+
+(define_insn "<su_optab>div<mode>3"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (ANY_DIV:GPI (match_operand:GPI 1 "register_operand" "r")
+ (match_operand:GPI 2 "register_operand" "r")))]
+ ""
+ "<su>div\\t%<w>0, %<w>1, %<w>2"
+ [(set_attr "type" "<su>div")]
+)
+
+;; zero_extend version of above
+(define_insn "*<su_optab>divsi3_uxtw"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI
+ (ANY_DIV:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "register_operand" "r"))))]
+ ""
+ "<su>div\\t%w0, %w1, %w2"
+ [(set_attr "type" "<su>div")]
+)
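+;; Note that there is no integer remainder instruction; a C "%" is normally
+;; expanded as an sdiv or udiv from the pattern above followed by an msub
+;; that reconstructs the remainder as a - (a / b) * b.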
+
+;; -------------------------------------------------------------------
+;; Comparison insns
+;; -------------------------------------------------------------------
+
+(define_insn "*cmp<mode>"
+ [(set (reg:CC CC_REGNUM)
+ (compare:CC (match_operand:GPI 0 "register_operand" "r,r,r")
+ (match_operand:GPI 1 "aarch64_plus_operand" "r,I,J")))]
+ ""
+ "@
+ cmp\\t%<w>0, %<w>1
+ cmp\\t%<w>0, %<w>1
+ cmn\\t%<w>0, #%n1"
+ [(set_attr "type" "alus_reg,alus_imm,alus_imm")]
+)
+
+(define_insn "*cmp<mode>"
+ [(set (reg:CCFP CC_REGNUM)
+ (compare:CCFP (match_operand:GPF 0 "register_operand" "w,w")
+ (match_operand:GPF 1 "aarch64_fp_compare_operand" "Y,w")))]
+ "TARGET_FLOAT"
+ "@
+ fcmp\\t%<s>0, #0.0
+ fcmp\\t%<s>0, %<s>1"
+ [(set_attr "type" "fcmp<s>")]
+)
+
+(define_insn "*cmpe<mode>"
+ [(set (reg:CCFPE CC_REGNUM)
+ (compare:CCFPE (match_operand:GPF 0 "register_operand" "w,w")
+ (match_operand:GPF 1 "aarch64_fp_compare_operand" "Y,w")))]
+ "TARGET_FLOAT"
+ "@
+ fcmpe\\t%<s>0, #0.0
+ fcmpe\\t%<s>0, %<s>1"
+ [(set_attr "type" "fcmp<s>")]
+)
+
+(define_insn "*cmp_swp_<shift>_reg<mode>"
+ [(set (reg:CC_SWP CC_REGNUM)
+ (compare:CC_SWP (ASHIFT:GPI
+ (match_operand:GPI 0 "register_operand" "r")
+ (match_operand:QI 1 "aarch64_shift_imm_<mode>" "n"))
+ (match_operand:GPI 2 "aarch64_reg_or_zero" "rZ")))]
+ ""
+ "cmp\\t%<w>2, %<w>0, <shift> %1"
+ [(set_attr "type" "alus_shift_imm")]
+)
+
+(define_insn "*cmp_swp_<optab><ALLX:mode>_reg<GPI:mode>"
+ [(set (reg:CC_SWP CC_REGNUM)
+ (compare:CC_SWP (ANY_EXTEND:GPI
+ (match_operand:ALLX 0 "register_operand" "r"))
+ (match_operand:GPI 1 "register_operand" "r")))]
+ ""
+ "cmp\\t%<GPI:w>1, %<GPI:w>0, <su>xt<ALLX:size>"
+ [(set_attr "type" "alus_ext")]
+)
+
+(define_insn "*cmp_swp_<optab><ALLX:mode>_shft_<GPI:mode>"
+ [(set (reg:CC_SWP CC_REGNUM)
+ (compare:CC_SWP (ashift:GPI
+ (ANY_EXTEND:GPI
+ (match_operand:ALLX 0 "register_operand" "r"))
+ (match_operand 1 "aarch64_imm3" "Ui3"))
+ (match_operand:GPI 2 "register_operand" "r")))]
+ ""
+ "cmp\\t%<GPI:w>2, %<GPI:w>0, <su>xt<ALLX:size> %1"
+ [(set_attr "type" "alus_ext")]
+)
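+;; The CC_SWP comparisons above keep the shifted or extended operand first
+;; in the RTL, so the operands are swapped relative to the cmp instruction
+;; actually emitted ("%1, %0" / "%2, %0"); consumers of the flags therefore
+;; interpret the condition through the swapped CC_SWP mode.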
+
+;; -------------------------------------------------------------------
+;; Store-flag and conditional select insns
+;; -------------------------------------------------------------------
+
+(define_expand "cstore<mode>4"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_operator:SI 1 "aarch64_comparison_operator"
+ [(match_operand:GPI 2 "register_operand" "")
+ (match_operand:GPI 3 "aarch64_plus_operand" "")]))]
+ ""
+ "
+ operands[2] = aarch64_gen_compare_reg (GET_CODE (operands[1]), operands[2],
+ operands[3]);
+ operands[3] = const0_rtx;
+ "
+)
+
+(define_expand "cstore<mode>4"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_operator:SI 1 "aarch64_comparison_operator"
+ [(match_operand:GPF 2 "register_operand" "")
+ (match_operand:GPF 3 "register_operand" "")]))]
+ ""
+ "
+ operands[2] = aarch64_gen_compare_reg (GET_CODE (operands[1]), operands[2],
+ operands[3]);
+ operands[3] = const0_rtx;
+ "
+)
+
+(define_insn "*cstore<mode>_insn"
+ [(set (match_operand:ALLI 0 "register_operand" "=r")
+ (match_operator:ALLI 1 "aarch64_comparison_operator"
+ [(match_operand 2 "cc_register" "") (const_int 0)]))]
+ ""
+ "cset\\t%<w>0, %m1"
+ [(set_attr "type" "csel")]
+)
+
+;; zero_extend version of the above
+(define_insn "*cstoresi_insn_uxtw"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI
+ (match_operator:SI 1 "aarch64_comparison_operator"
+ [(match_operand 2 "cc_register" "") (const_int 0)])))]
+ ""
+ "cset\\t%w0, %m1"
+ [(set_attr "type" "csel")]
+)
+
+(define_insn "cstore<mode>_neg"
+ [(set (match_operand:ALLI 0 "register_operand" "=r")
+ (neg:ALLI (match_operator:ALLI 1 "aarch64_comparison_operator"
+ [(match_operand 2 "cc_register" "") (const_int 0)])))]
+ ""
+ "csetm\\t%<w>0, %m1"
+ [(set_attr "type" "csel")]
+)
+
+;; zero_extend version of the above
+(define_insn "*cstoresi_neg_uxtw"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI
+ (neg:SI (match_operator:SI 1 "aarch64_comparison_operator"
+ [(match_operand 2 "cc_register" "") (const_int 0)]))))]
+ ""
+ "csetm\\t%w0, %m1"
+ [(set_attr "type" "csel")]
+)
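+;; As a rough example, "return a < b;" on ints goes through the cstore<mode>4
+;; expander, which emits the cmp, and then matches *cstore<mode>_insn as
+;; "cset w0, lt"; the _neg variants give the 0/-1 mask form via csetm.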
+
+(define_expand "cmov<mode>6"
+ [(set (match_operand:GPI 0 "register_operand" "")
+ (if_then_else:GPI
+ (match_operator 1 "aarch64_comparison_operator"
+ [(match_operand:GPI 2 "register_operand" "")
+ (match_operand:GPI 3 "aarch64_plus_operand" "")])
+ (match_operand:GPI 4 "register_operand" "")
+ (match_operand:GPI 5 "register_operand" "")))]
+ ""
+ "
+ operands[2] = aarch64_gen_compare_reg (GET_CODE (operands[1]), operands[2],
+ operands[3]);
+ operands[3] = const0_rtx;
+ "
+)
+
+(define_expand "cmov<mode>6"
+ [(set (match_operand:GPF 0 "register_operand" "")
+ (if_then_else:GPF
+ (match_operator 1 "aarch64_comparison_operator"
+ [(match_operand:GPF 2 "register_operand" "")
+ (match_operand:GPF 3 "register_operand" "")])
+ (match_operand:GPF 4 "register_operand" "")
+ (match_operand:GPF 5 "register_operand" "")))]
+ ""
+ "
+ operands[2] = aarch64_gen_compare_reg (GET_CODE (operands[1]), operands[2],
+ operands[3]);
+ operands[3] = const0_rtx;
+ "
+)
+
+(define_insn "*cmov<mode>_insn"
+ [(set (match_operand:ALLI 0 "register_operand" "=r,r,r,r,r,r,r")
+ (if_then_else:ALLI
+ (match_operator 1 "aarch64_comparison_operator"
+ [(match_operand 2 "cc_register" "") (const_int 0)])
+ (match_operand:ALLI 3 "aarch64_reg_zero_or_m1_or_1" "rZ,rZ,UsM,rZ,Ui1,UsM,Ui1")
+ (match_operand:ALLI 4 "aarch64_reg_zero_or_m1_or_1" "rZ,UsM,rZ,Ui1,rZ,UsM,Ui1")))]
+ "!((operands[3] == const1_rtx && operands[4] == constm1_rtx)
+ || (operands[3] == constm1_rtx && operands[4] == const1_rtx))"
+ ;; Final two alternatives should be unreachable, but included for completeness
+ "@
+ csel\\t%<w>0, %<w>3, %<w>4, %m1
+ csinv\\t%<w>0, %<w>3, <w>zr, %m1
+ csinv\\t%<w>0, %<w>4, <w>zr, %M1
+ csinc\\t%<w>0, %<w>3, <w>zr, %m1
+ csinc\\t%<w>0, %<w>4, <w>zr, %M1
+ mov\\t%<w>0, -1
+ mov\\t%<w>0, 1"
+ [(set_attr "type" "csel")]
+)
+
+;; zero_extend version of above
+(define_insn "*cmovsi_insn_uxtw"
+ [(set (match_operand:DI 0 "register_operand" "=r,r,r,r,r,r,r")
+ (zero_extend:DI
+ (if_then_else:SI
+ (match_operator 1 "aarch64_comparison_operator"
+ [(match_operand 2 "cc_register" "") (const_int 0)])
+ (match_operand:SI 3 "aarch64_reg_zero_or_m1_or_1" "rZ,rZ,UsM,rZ,Ui1,UsM,Ui1")
+ (match_operand:SI 4 "aarch64_reg_zero_or_m1_or_1" "rZ,UsM,rZ,Ui1,rZ,UsM,Ui1"))))]
+ "!((operands[3] == const1_rtx && operands[4] == constm1_rtx)
+ || (operands[3] == constm1_rtx && operands[4] == const1_rtx))"
+ ;; Final two alternatives should be unreachable, but included for completeness
+ "@
+ csel\\t%w0, %w3, %w4, %m1
+ csinv\\t%w0, %w3, wzr, %m1
+ csinv\\t%w0, %w4, wzr, %M1
+ csinc\\t%w0, %w3, wzr, %m1
+ csinc\\t%w0, %w4, wzr, %M1
+ mov\\t%w0, -1
+ mov\\t%w0, 1"
+ [(set_attr "type" "csel")]
+)
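+;; A conditional expression such as "c ? x : y" with both arms in registers
+;; uses the first alternative above (csel); the remaining alternatives catch
+;; arms that are 0, 1 or -1 so that csinc or csinv against the zero register
+;; can synthesise the constant instead of materialising it separately.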
+
+(define_insn "*cmov<mode>_insn"
+ [(set (match_operand:GPF 0 "register_operand" "=w")
+ (if_then_else:GPF
+ (match_operator 1 "aarch64_comparison_operator"
+ [(match_operand 2 "cc_register" "") (const_int 0)])
+ (match_operand:GPF 3 "register_operand" "w")
+ (match_operand:GPF 4 "register_operand" "w")))]
+ "TARGET_FLOAT"
+ "fcsel\\t%<s>0, %<s>3, %<s>4, %m1"
+ [(set_attr "type" "fcsel")]
+)
+
+(define_expand "mov<mode>cc"
+ [(set (match_operand:ALLI 0 "register_operand" "")
+ (if_then_else:ALLI (match_operand 1 "aarch64_comparison_operator" "")
+ (match_operand:ALLI 2 "register_operand" "")
+ (match_operand:ALLI 3 "register_operand" "")))]
+ ""
+ {
+ rtx ccreg;
+ enum rtx_code code = GET_CODE (operands[1]);
+
+ if (code == UNEQ || code == LTGT)
+ FAIL;
+
+ ccreg = aarch64_gen_compare_reg (code, XEXP (operands[1], 0),
+ XEXP (operands[1], 1));
+ operands[1] = gen_rtx_fmt_ee (code, VOIDmode, ccreg, const0_rtx);
+ }
+)
+
+(define_expand "mov<GPF:mode><GPI:mode>cc"
+ [(set (match_operand:GPI 0 "register_operand" "")
+ (if_then_else:GPI (match_operand 1 "aarch64_comparison_operator" "")
+ (match_operand:GPF 2 "register_operand" "")
+ (match_operand:GPF 3 "register_operand" "")))]
+ ""
+ {
+ rtx ccreg;
+ enum rtx_code code = GET_CODE (operands[1]);
+
+ if (code == UNEQ || code == LTGT)
+ FAIL;
+
+ ccreg = aarch64_gen_compare_reg (code, XEXP (operands[1], 0),
+ XEXP (operands[1], 1));
+ operands[1] = gen_rtx_fmt_ee (code, VOIDmode, ccreg, const0_rtx);
+ }
+)
+
+(define_insn "*csinc2<mode>_insn"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (plus:GPI (match_operator:GPI 2 "aarch64_comparison_operator"
+ [(match_operand:CC 3 "cc_register" "") (const_int 0)])
+ (match_operand:GPI 1 "register_operand" "r")))]
+ ""
+ "csinc\\t%<w>0, %<w>1, %<w>1, %M2"
+ [(set_attr "type" "csel")]
+)
+
+(define_insn "csinc3<mode>_insn"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (if_then_else:GPI
+ (match_operator:GPI 1 "aarch64_comparison_operator"
+ [(match_operand:CC 2 "cc_register" "") (const_int 0)])
+ (plus:GPI (match_operand:GPI 3 "register_operand" "r")
+ (const_int 1))
+ (match_operand:GPI 4 "aarch64_reg_or_zero" "rZ")))]
+ ""
+ "csinc\\t%<w>0, %<w>4, %<w>3, %M1"
+ [(set_attr "type" "csel")]
+)
+
+(define_insn "*csinv3<mode>_insn"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (if_then_else:GPI
+ (match_operator:GPI 1 "aarch64_comparison_operator"
+ [(match_operand:CC 2 "cc_register" "") (const_int 0)])
+ (not:GPI (match_operand:GPI 3 "register_operand" "r"))
+ (match_operand:GPI 4 "aarch64_reg_or_zero" "rZ")))]
+ ""
+ "csinv\\t%<w>0, %<w>4, %<w>3, %M1"
+ [(set_attr "type" "csel")]
+)
+
+(define_insn "*csneg3<mode>_insn"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (if_then_else:GPI
+ (match_operator:GPI 1 "aarch64_comparison_operator"
+ [(match_operand:CC 2 "cc_register" "") (const_int 0)])
+ (neg:GPI (match_operand:GPI 3 "register_operand" "r"))
+ (match_operand:GPI 4 "aarch64_reg_or_zero" "rZ")))]
+ ""
+ "csneg\\t%<w>0, %<w>4, %<w>3, %M1"
+ [(set_attr "type" "csel")]
+)
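+;; In the csinc3/*csinv3/*csneg3 patterns above, %M prints the inverse of
+;; the comparison in operand 1; this is why operand 4 (the "else" value)
+;; appears first in the output and operand 3 supplies the incremented,
+;; inverted or negated result.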
+
+;; -------------------------------------------------------------------
+;; Logical operations
+;; -------------------------------------------------------------------
+
+(define_insn "<optab><mode>3"
+ [(set (match_operand:GPI 0 "register_operand" "=r,rk")
+ (LOGICAL:GPI (match_operand:GPI 1 "register_operand" "%r,r")
+ (match_operand:GPI 2 "aarch64_logical_operand" "r,<lconst>")))]
+ ""
+ "<logical>\\t%<w>0, %<w>1, %<w>2"
+ [(set_attr "type" "logic_reg,logic_imm")]
+)
+
+;; zero_extend version of above
+(define_insn "*<optab>si3_uxtw"
+ [(set (match_operand:DI 0 "register_operand" "=r,rk")
+ (zero_extend:DI
+ (LOGICAL:SI (match_operand:SI 1 "register_operand" "%r,r")
+ (match_operand:SI 2 "aarch64_logical_operand" "r,K"))))]
+ ""
+ "<logical>\\t%w0, %w1, %w2"
+ [(set_attr "type" "logic_reg,logic_imm")]
+)
+
+(define_insn "*and<mode>3_compare0"
+ [(set (reg:CC_NZ CC_REGNUM)
+ (compare:CC_NZ
+ (and:GPI (match_operand:GPI 1 "register_operand" "%r,r")
+ (match_operand:GPI 2 "aarch64_logical_operand" "r,<lconst>"))
+ (const_int 0)))
+ (set (match_operand:GPI 0 "register_operand" "=r,r")
+ (and:GPI (match_dup 1) (match_dup 2)))]
+ ""
+ "ands\\t%<w>0, %<w>1, %<w>2"
+ [(set_attr "type" "logics_reg,logics_imm")]
+)
+
+;; zero_extend version of above
+(define_insn "*andsi3_compare0_uxtw"
+ [(set (reg:CC_NZ CC_REGNUM)
+ (compare:CC_NZ
+ (and:SI (match_operand:SI 1 "register_operand" "%r,r")
+ (match_operand:SI 2 "aarch64_logical_operand" "r,K"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "register_operand" "=r,r")
+ (zero_extend:DI (and:SI (match_dup 1) (match_dup 2))))]
+ ""
+ "ands\\t%w0, %w1, %w2"
+ [(set_attr "type" "logics_reg,logics_imm")]
+)
+
+(define_insn "*and_<SHIFT:optab><mode>3_compare0"
+ [(set (reg:CC_NZ CC_REGNUM)
+ (compare:CC_NZ
+ (and:GPI (SHIFT:GPI
+ (match_operand:GPI 1 "register_operand" "r")
+ (match_operand:QI 2 "aarch64_shift_imm_<mode>" "n"))
+ (match_operand:GPI 3 "register_operand" "r"))
+ (const_int 0)))
+ (set (match_operand:GPI 0 "register_operand" "=r")
+ (and:GPI (SHIFT:GPI (match_dup 1) (match_dup 2)) (match_dup 3)))]
+ ""
+ "ands\\t%<w>0, %<w>3, %<w>1, <SHIFT:shift> %2"
+ [(set_attr "type" "logics_shift_imm")]
+)
+
+;; zero_extend version of above
+(define_insn "*and_<SHIFT:optab>si3_compare0_uxtw"
+ [(set (reg:CC_NZ CC_REGNUM)
+ (compare:CC_NZ
+ (and:SI (SHIFT:SI
+ (match_operand:SI 1 "register_operand" "r")
+ (match_operand:QI 2 "aarch64_shift_imm_si" "n"))
+ (match_operand:SI 3 "register_operand" "r"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI (and:SI (SHIFT:SI (match_dup 1) (match_dup 2))
+ (match_dup 3))))]
+ ""
+ "ands\\t%w0, %w3, %w1, <SHIFT:shift> %2"
+ [(set_attr "type" "logics_shift_imm")]
+)
+
+(define_insn "*<LOGICAL:optab>_<SHIFT:optab><mode>3"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (LOGICAL:GPI (SHIFT:GPI
+ (match_operand:GPI 1 "register_operand" "r")
+ (match_operand:QI 2 "aarch64_shift_imm_<mode>" "n"))
+ (match_operand:GPI 3 "register_operand" "r")))]
+ ""
+ "<LOGICAL:logical>\\t%<w>0, %<w>3, %<w>1, <SHIFT:shift> %2"
+ [(set_attr "type" "logic_shift_imm")]
+)
+
+;; zero_extend version of above
+(define_insn "*<LOGICAL:optab>_<SHIFT:optab>si3_uxtw"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI
+ (LOGICAL:SI (SHIFT:SI
+ (match_operand:SI 1 "register_operand" "r")
+ (match_operand:QI 2 "aarch64_shift_imm_si" "n"))
+ (match_operand:SI 3 "register_operand" "r"))))]
+ ""
+ "<LOGICAL:logical>\\t%w0, %w3, %w1, <SHIFT:shift> %2"
+ [(set_attr "type" "logic_shift_imm")]
+)
+
+(define_insn "one_cmpl<mode>2"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (not:GPI (match_operand:GPI 1 "register_operand" "r")))]
+ ""
+ "mvn\\t%<w>0, %<w>1"
+ [(set_attr "type" "logic_reg")]
+)
+
+(define_insn "*one_cmpl_<optab><mode>2"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (not:GPI (SHIFT:GPI (match_operand:GPI 1 "register_operand" "r")
+ (match_operand:QI 2 "aarch64_shift_imm_<mode>" "n"))))]
+ ""
+ "mvn\\t%<w>0, %<w>1, <shift> %2"
+ [(set_attr "type" "logic_shift_imm")]
+)
+
+(define_insn "*<LOGICAL:optab>_one_cmpl<mode>3"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (LOGICAL:GPI (not:GPI
+ (match_operand:GPI 1 "register_operand" "r"))
+ (match_operand:GPI 2 "register_operand" "r")))]
+ ""
+ "<LOGICAL:nlogical>\\t%<w>0, %<w>2, %<w>1"
+ [(set_attr "type" "logic_reg")]
+)
+
+(define_insn "*and_one_cmpl<mode>3_compare0"
+ [(set (reg:CC_NZ CC_REGNUM)
+ (compare:CC_NZ
+ (and:GPI (not:GPI
+ (match_operand:GPI 1 "register_operand" "r"))
+ (match_operand:GPI 2 "register_operand" "r"))
+ (const_int 0)))
+ (set (match_operand:GPI 0 "register_operand" "=r")
+ (and:GPI (not:GPI (match_dup 1)) (match_dup 2)))]
+ ""
+ "bics\\t%<w>0, %<w>2, %<w>1"
+ [(set_attr "type" "logics_reg")]
+)
+
+;; zero_extend version of above
+(define_insn "*and_one_cmplsi3_compare0_uxtw"
+ [(set (reg:CC_NZ CC_REGNUM)
+ (compare:CC_NZ
+ (and:SI (not:SI
+ (match_operand:SI 1 "register_operand" "r"))
+ (match_operand:SI 2 "register_operand" "r"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI (and:SI (not:SI (match_dup 1)) (match_dup 2))))]
+ ""
+ "bics\\t%w0, %w2, %w1"
+ [(set_attr "type" "logics_reg")]
+)
+
+(define_insn "*<LOGICAL:optab>_one_cmpl_<SHIFT:optab><mode>3"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (LOGICAL:GPI (not:GPI
+ (SHIFT:GPI
+ (match_operand:GPI 1 "register_operand" "r")
+ (match_operand:QI 2 "aarch64_shift_imm_<mode>" "n")))
+ (match_operand:GPI 3 "register_operand" "r")))]
+ ""
+ "<LOGICAL:nlogical>\\t%<w>0, %<w>3, %<w>1, <SHIFT:shift> %2"
+ [(set_attr "type" "logics_shift_imm")]
+)
+
+(define_insn "*and_one_cmpl_<SHIFT:optab><mode>3_compare0"
+ [(set (reg:CC_NZ CC_REGNUM)
+ (compare:CC_NZ
+ (and:GPI (not:GPI
+ (SHIFT:GPI
+ (match_operand:GPI 1 "register_operand" "r")
+ (match_operand:QI 2 "aarch64_shift_imm_<mode>" "n")))
+ (match_operand:GPI 3 "register_operand" "r"))
+ (const_int 0)))
+ (set (match_operand:GPI 0 "register_operand" "=r")
+ (and:GPI (not:GPI
+ (SHIFT:GPI
+ (match_dup 1) (match_dup 2))) (match_dup 3)))]
+ ""
+ "bics\\t%<w>0, %<w>3, %<w>1, <SHIFT:shift> %2"
+ [(set_attr "type" "logics_shift_imm")]
+)
+
+;; zero_extend version of above
+(define_insn "*and_one_cmpl_<SHIFT:optab>si3_compare0_uxtw"
+ [(set (reg:CC_NZ CC_REGNUM)
+ (compare:CC_NZ
+ (and:SI (not:SI
+ (SHIFT:SI
+ (match_operand:SI 1 "register_operand" "r")
+ (match_operand:QI 2 "aarch64_shift_imm_si" "n")))
+ (match_operand:SI 3 "register_operand" "r"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI (and:SI
+ (not:SI
+ (SHIFT:SI (match_dup 1) (match_dup 2))) (match_dup 3))))]
+ ""
+ "bics\\t%w0, %w3, %w1, <SHIFT:shift> %2"
+ [(set_attr "type" "logics_shift_imm")]
+)
+
+(define_insn "clz<mode>2"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (clz:GPI (match_operand:GPI 1 "register_operand" "r")))]
+ ""
+ "clz\\t%<w>0, %<w>1"
+ [(set_attr "type" "clz")]
+)
+
+(define_expand "ffs<mode>2"
+ [(match_operand:GPI 0 "register_operand")
+ (match_operand:GPI 1 "register_operand")]
+ ""
+ {
+ rtx ccreg = aarch64_gen_compare_reg (EQ, operands[1], const0_rtx);
+ rtx x = gen_rtx_NE (VOIDmode, ccreg, const0_rtx);
+
+ emit_insn (gen_rbit<mode>2 (operands[0], operands[1]));
+ emit_insn (gen_clz<mode>2 (operands[0], operands[0]));
+ emit_insn (gen_csinc3<mode>_insn (operands[0], x, ccreg, operands[0], const0_rtx));
+ DONE;
+ }
+)
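+;; The ffs expansion above first computes ctz as rbit + clz, then uses a
+;; compare against zero and csinc3<mode>_insn so the result is ctz + 1 for a
+;; non-zero input and 0 for a zero input, matching the ffs semantics.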
+
+(define_insn "clrsb<mode>2"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (unspec:GPI [(match_operand:GPI 1 "register_operand" "r")] UNSPEC_CLS))]
+ ""
+ "cls\\t%<w>0, %<w>1"
+ [(set_attr "type" "clz")]
+)
+
+(define_insn "rbit<mode>2"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (unspec:GPI [(match_operand:GPI 1 "register_operand" "r")] UNSPEC_RBIT))]
+ ""
+ "rbit\\t%<w>0, %<w>1"
+ [(set_attr "type" "rbit")]
+)
+
+(define_expand "ctz<mode>2"
+ [(match_operand:GPI 0 "register_operand")
+ (match_operand:GPI 1 "register_operand")]
+ ""
+ {
+ emit_insn (gen_rbit<mode>2 (operands[0], operands[1]));
+ emit_insn (gen_clz<mode>2 (operands[0], operands[0]));
+ DONE;
+ }
+)
+
+(define_insn "*and<mode>3nr_compare0"
+ [(set (reg:CC_NZ CC_REGNUM)
+ (compare:CC_NZ
+ (and:GPI (match_operand:GPI 0 "register_operand" "%r,r")
+ (match_operand:GPI 1 "aarch64_logical_operand" "r,<lconst>"))
+ (const_int 0)))]
+ ""
+ "tst\\t%<w>0, %<w>1"
+ [(set_attr "type" "logics_reg")]
+)
+
+(define_insn "*and_<SHIFT:optab><mode>3nr_compare0"
+ [(set (reg:CC_NZ CC_REGNUM)
+ (compare:CC_NZ
+ (and:GPI (SHIFT:GPI
+ (match_operand:GPI 0 "register_operand" "r")
+ (match_operand:QI 1 "aarch64_shift_imm_<mode>" "n"))
+ (match_operand:GPI 2 "register_operand" "r"))
+ (const_int 0)))]
+ ""
+ "tst\\t%<w>2, %<w>0, <SHIFT:shift> %1"
+ [(set_attr "type" "logics_shift_imm")]
+)
+
+;; -------------------------------------------------------------------
+;; Shifts
+;; -------------------------------------------------------------------
+
+(define_expand "<optab><mode>3"
+ [(set (match_operand:GPI 0 "register_operand")
+ (ASHIFT:GPI (match_operand:GPI 1 "register_operand")
+ (match_operand:QI 2 "nonmemory_operand")))]
+ ""
+ {
+ if (CONST_INT_P (operands[2]))
+ {
+ operands[2] = GEN_INT (INTVAL (operands[2])
+ & (GET_MODE_BITSIZE (<MODE>mode) - 1));
+
+ if (operands[2] == const0_rtx)
+ {
+ emit_insn (gen_mov<mode> (operands[0], operands[1]));
+ DONE;
+ }
+ }
+ }
+)
+
+(define_expand "ashl<mode>3"
+ [(set (match_operand:SHORT 0 "register_operand")
+ (ashift:SHORT (match_operand:SHORT 1 "register_operand")
+ (match_operand:QI 2 "nonmemory_operand")))]
+ ""
+ {
+ if (CONST_INT_P (operands[2]))
+ {
+ operands[2] = GEN_INT (INTVAL (operands[2])
+ & (GET_MODE_BITSIZE (<MODE>mode) - 1));
+
+ if (operands[2] == const0_rtx)
+ {
+ emit_insn (gen_mov<mode> (operands[0], operands[1]));
+ DONE;
+ }
+ }
+ }
+)
+
+(define_expand "rotr<mode>3"
+ [(set (match_operand:GPI 0 "register_operand")
+ (rotatert:GPI (match_operand:GPI 1 "register_operand")
+ (match_operand:QI 2 "nonmemory_operand")))]
+ ""
+ {
+ if (CONST_INT_P (operands[2]))
+ {
+ operands[2] = GEN_INT (INTVAL (operands[2])
+ & (GET_MODE_BITSIZE (<MODE>mode) - 1));
+
+ if (operands[2] == const0_rtx)
+ {
+ emit_insn (gen_mov<mode> (operands[0], operands[1]));
+ DONE;
+ }
+ }
+ }
+)
+
+(define_expand "rotl<mode>3"
+ [(set (match_operand:GPI 0 "register_operand")
+ (rotatert:GPI (match_operand:GPI 1 "register_operand")
+ (match_operand:QI 2 "nonmemory_operand")))]
+ ""
+ {
+ /* (SZ - cnt) % SZ == -cnt % SZ */
+ if (CONST_INT_P (operands[2]))
+ {
+ operands[2] = GEN_INT ((-INTVAL (operands[2]))
+ & (GET_MODE_BITSIZE (<MODE>mode) - 1));
+ if (operands[2] == const0_rtx)
+ {
+ emit_insn (gen_mov<mode> (operands[0], operands[1]));
+ DONE;
+ }
+ }
+ else
+ operands[2] = expand_simple_unop (QImode, NEG, operands[2],
+ NULL_RTX, 1);
+ }
+)
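+;; Rotate-left is implemented as rotate-right by the negated count modulo
+;; the register width, since only ror exists: a 32-bit rotl by 3 should come
+;; out as "ror w0, w1, 29", and a variable count is negated with a neg
+;; before feeding the rotatert patterns.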
+
+;; Logical left shift using SISD or Integer instruction
+(define_insn "*aarch64_ashl_sisd_or_int_<mode>3"
+ [(set (match_operand:GPI 0 "register_operand" "=w,w,r")
+ (ashift:GPI
+ (match_operand:GPI 1 "register_operand" "w,w,r")
+ (match_operand:QI 2 "aarch64_reg_or_shift_imm_<mode>" "Us<cmode>,w,rUs<cmode>")))]
+ ""
+ "@
+ shl\t%<rtn>0<vas>, %<rtn>1<vas>, %2
+ ushl\t%<rtn>0<vas>, %<rtn>1<vas>, %<rtn>2<vas>
+ lsl\t%<w>0, %<w>1, %<w>2"
+ [(set_attr "simd" "yes,yes,no")
+ (set_attr "type" "neon_shift_imm<q>, neon_shift_reg<q>,shift_reg")]
+)
+
+;; Logical right shift using SISD or Integer instruction
+(define_insn "*aarch64_lshr_sisd_or_int_<mode>3"
+ [(set (match_operand:GPI 0 "register_operand" "=w,w,r")
+ (lshiftrt:GPI
+ (match_operand:GPI 1 "register_operand" "w,w,r")
+ (match_operand:QI 2 "aarch64_reg_or_shift_imm_<mode>" "Us<cmode>,w,rUs<cmode>")))]
+ ""
+ "@
+ ushr\t%<rtn>0<vas>, %<rtn>1<vas>, %2
+ #
+ lsr\t%<w>0, %<w>1, %<w>2"
+ [(set_attr "simd" "yes,yes,no")
+ (set_attr "type" "neon_shift_imm<q>,neon_shift_reg<q>,shift_reg")]
+)
+
+(define_split
+ [(set (match_operand:DI 0 "aarch64_simd_register")
+ (lshiftrt:DI
+ (match_operand:DI 1 "aarch64_simd_register")
+ (match_operand:QI 2 "aarch64_simd_register")))]
+ "TARGET_SIMD && reload_completed"
+ [(set (match_dup 2)
+ (unspec:QI [(match_dup 2)] UNSPEC_SISD_NEG))
+ (set (match_dup 0)
+ (unspec:DI [(match_dup 1) (match_dup 2)] UNSPEC_SISD_USHL))]
+ ""
+)
+
+(define_split
+ [(set (match_operand:SI 0 "aarch64_simd_register")
+ (lshiftrt:SI
+ (match_operand:SI 1 "aarch64_simd_register")
+ (match_operand:QI 2 "aarch64_simd_register")))]
+ "TARGET_SIMD && reload_completed"
+ [(set (match_dup 2)
+ (unspec:QI [(match_dup 2)] UNSPEC_SISD_NEG))
+ (set (match_dup 0)
+ (unspec:SI [(match_dup 1) (match_dup 2)] UNSPEC_USHL_2S))]
+ ""
+)
+
+;; Arithmetic right shift using SISD or Integer instruction
+(define_insn "*aarch64_ashr_sisd_or_int_<mode>3"
+ [(set (match_operand:GPI 0 "register_operand" "=w,w,r")
+ (ashiftrt:GPI
+ (match_operand:GPI 1 "register_operand" "w,w,r")
+ (match_operand:QI 2 "aarch64_reg_or_shift_imm_di" "Us<cmode>,w,rUs<cmode>")))]
+ ""
+ "@
+ sshr\t%<rtn>0<vas>, %<rtn>1<vas>, %2
+ #
+ asr\t%<w>0, %<w>1, %<w>2"
+ [(set_attr "simd" "yes,yes,no")
+ (set_attr "type" "neon_shift_imm<q>,neon_shift_reg<q>,shift_reg")]
+)
+
+(define_split
+ [(set (match_operand:DI 0 "aarch64_simd_register")
+ (ashiftrt:DI
+ (match_operand:DI 1 "aarch64_simd_register")
+ (match_operand:QI 2 "aarch64_simd_register")))]
+ "TARGET_SIMD && reload_completed"
+ [(set (match_dup 2)
+ (unspec:QI [(match_dup 2)] UNSPEC_SISD_NEG))
+ (set (match_dup 0)
+ (unspec:DI [(match_dup 1) (match_dup 2)] UNSPEC_SISD_SSHL))]
+ ""
+)
+
+(define_split
+ [(set (match_operand:SI 0 "aarch64_simd_register")
+ (ashiftrt:SI
+ (match_operand:SI 1 "aarch64_simd_register")
+ (match_operand:QI 2 "aarch64_simd_register")))]
+ "TARGET_SIMD && reload_completed"
+ [(set (match_dup 2)
+ (unspec:QI [(match_dup 2)] UNSPEC_SISD_NEG))
+ (set (match_dup 0)
+ (unspec:SI [(match_dup 1) (match_dup 2)] UNSPEC_SSHL_2S))]
+ ""
+)
+
+(define_insn "*aarch64_sisd_ushl"
+ [(set (match_operand:DI 0 "register_operand" "=w")
+ (unspec:DI [(match_operand:DI 1 "register_operand" "w")
+ (match_operand:QI 2 "register_operand" "w")]
+ UNSPEC_SISD_USHL))]
+ "TARGET_SIMD"
+ "ushl\t%d0, %d1, %d2"
+ [(set_attr "simd" "yes")
+ (set_attr "type" "neon_shift_reg")]
+)
+
+(define_insn "*aarch64_ushl_2s"
+ [(set (match_operand:SI 0 "register_operand" "=w")
+ (unspec:SI [(match_operand:SI 1 "register_operand" "w")
+ (match_operand:QI 2 "register_operand" "w")]
+ UNSPEC_USHL_2S))]
+ "TARGET_SIMD"
+ "ushl\t%0.2s, %1.2s, %2.2s"
+ [(set_attr "simd" "yes")
+ (set_attr "type" "neon_shift_reg")]
+)
+
+(define_insn "*aarch64_sisd_sshl"
+ [(set (match_operand:DI 0 "register_operand" "=w")
+ (unspec:DI [(match_operand:DI 1 "register_operand" "w")
+ (match_operand:QI 2 "register_operand" "w")]
+ UNSPEC_SISD_SSHL))]
+ "TARGET_SIMD"
+ "sshl\t%d0, %d1, %d2"
+ [(set_attr "simd" "yes")
+ (set_attr "type" "neon_shift_reg")]
+)
+
+(define_insn "*aarch64_sshl_2s"
+ [(set (match_operand:SI 0 "register_operand" "=w")
+ (unspec:SI [(match_operand:SI 1 "register_operand" "w")
+ (match_operand:QI 2 "register_operand" "w")]
+ UNSPEC_SSHL_2S))]
+ "TARGET_SIMD"
+ "sshl\t%0.2s, %1.2s, %2.2s"
+ [(set_attr "simd" "yes")
+ (set_attr "type" "neon_shift_reg")]
+)
+
+(define_insn "*aarch64_sisd_neg_qi"
+ [(set (match_operand:QI 0 "register_operand" "=w")
+ (unspec:QI [(match_operand:QI 1 "register_operand" "w")]
+ UNSPEC_SISD_NEG))]
+ "TARGET_SIMD"
+ "neg\t%d0, %d1"
+ [(set_attr "simd" "yes")
+ (set_attr "type" "neon_neg")]
+)
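+;; The SISD alternatives above keep shifts on the vector side when the value
+;; already lives in an FP/SIMD register: immediate counts use shl/ushr/sshr
+;; on the SIMD register, while right shifts by a register count have no
+;; direct SISD encoding, so the splits negate the count (UNSPEC_SISD_NEG)
+;; and use ushl/sshl, which shift right when given a negative amount.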
+
+;; Rotate right
+(define_insn "*ror<mode>3_insn"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (rotatert:GPI
+ (match_operand:GPI 1 "register_operand" "r")
+ (match_operand:QI 2 "aarch64_reg_or_shift_imm_<mode>" "rUs<cmode>")))]
+ ""
+ "ror\\t%<w>0, %<w>1, %<w>2"
+ [(set_attr "type" "shift_reg")]
+)
+
+;; zero_extend version of above
+(define_insn "*<optab>si3_insn_uxtw"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI (SHIFT:SI
+ (match_operand:SI 1 "register_operand" "r")
+ (match_operand:QI 2 "aarch64_reg_or_shift_imm_si" "rUss"))))]
+ ""
+ "<shift>\\t%w0, %w1, %w2"
+ [(set_attr "type" "shift_reg")]
+)
+
+(define_insn "*ashl<mode>3_insn"
+ [(set (match_operand:SHORT 0 "register_operand" "=r")
+ (ashift:SHORT (match_operand:SHORT 1 "register_operand" "r")
+ (match_operand:QI 2 "aarch64_reg_or_shift_imm_si" "rUss")))]
+ ""
+ "lsl\\t%<w>0, %<w>1, %<w>2"
+ [(set_attr "type" "shift_reg")]
+)
+
+(define_insn "*<optab><mode>3_insn"
+ [(set (match_operand:SHORT 0 "register_operand" "=r")
+ (ASHIFT:SHORT (match_operand:SHORT 1 "register_operand" "r")
+ (match_operand 2 "const_int_operand" "n")))]
+ "UINTVAL (operands[2]) < GET_MODE_BITSIZE (<MODE>mode)"
+{
+ operands[3] = GEN_INT (<sizen> - UINTVAL (operands[2]));
+ return "<bfshift>\t%w0, %w1, %2, %3";
+}
+ [(set_attr "type" "bfm")]
+)
+
+(define_insn "*extr<mode>5_insn"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (ior:GPI (ashift:GPI (match_operand:GPI 1 "register_operand" "r")
+ (match_operand 3 "const_int_operand" "n"))
+ (lshiftrt:GPI (match_operand:GPI 2 "register_operand" "r")
+ (match_operand 4 "const_int_operand" "n"))))]
+ "UINTVAL (operands[3]) < GET_MODE_BITSIZE (<MODE>mode) &&
+ (UINTVAL (operands[3]) + UINTVAL (operands[4]) == GET_MODE_BITSIZE (<MODE>mode))"
+ "extr\\t%<w>0, %<w>1, %<w>2, %4"
+ [(set_attr "type" "shift_imm")]
+)
+
+;; zero_extend version of the above
+(define_insn "*extrsi5_insn_uxtw"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI
+ (ior:SI (ashift:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand 3 "const_int_operand" "n"))
+ (lshiftrt:SI (match_operand:SI 2 "register_operand" "r")
+ (match_operand 4 "const_int_operand" "n")))))]
+ "UINTVAL (operands[3]) < 32 &&
+ (UINTVAL (operands[3]) + UINTVAL (operands[4]) == 32)"
+ "extr\\t%w0, %w1, %w2, %4"
+ [(set_attr "type" "shift_imm")]
+)
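+;; For example, "(x << 16) | (y >> 48)" on 64-bit operands satisfies the
+;; condition above (16 + 48 == 64) and should emit "extr x0, x1, x2, 48";
+;; the rotate-by-immediate patterns below are effectively the special case
+;; in which both source registers are the same.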
+
+(define_insn "*ror<mode>3_insn"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (rotate:GPI (match_operand:GPI 1 "register_operand" "r")
+ (match_operand 2 "const_int_operand" "n")))]
+ "UINTVAL (operands[2]) < GET_MODE_BITSIZE (<MODE>mode)"
+{
+ operands[3] = GEN_INT (<sizen> - UINTVAL (operands[2]));
+ return "ror\\t%<w>0, %<w>1, %3";
+}
+ [(set_attr "type" "shift_imm")]
+)
+
+;; zero_extend version of the above
+(define_insn "*rorsi3_insn_uxtw"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI
+ (rotate:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand 2 "const_int_operand" "n"))))]
+ "UINTVAL (operands[2]) < 32"
+{
+ operands[3] = GEN_INT (32 - UINTVAL (operands[2]));
+ return "ror\\t%w0, %w1, %3";
+}
+ [(set_attr "type" "shift_imm")]
+)
+
+(define_insn "*<ANY_EXTEND:optab><GPI:mode>_ashl<SHORT:mode>"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (ANY_EXTEND:GPI
+ (ashift:SHORT (match_operand:SHORT 1 "register_operand" "r")
+ (match_operand 2 "const_int_operand" "n"))))]
+ "UINTVAL (operands[2]) < GET_MODE_BITSIZE (<SHORT:MODE>mode)"
+{
+ operands[3] = GEN_INT (<SHORT:sizen> - UINTVAL (operands[2]));
+ return "<su>bfiz\t%<GPI:w>0, %<GPI:w>1, %2, %3";
+}
+ [(set_attr "type" "bfm")]
+)
+
+(define_insn "*zero_extend<GPI:mode>_lshr<SHORT:mode>"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (zero_extend:GPI
+ (lshiftrt:SHORT (match_operand:SHORT 1 "register_operand" "r")
+ (match_operand 2 "const_int_operand" "n"))))]
+ "UINTVAL (operands[2]) < GET_MODE_BITSIZE (<SHORT:MODE>mode)"
+{
+ operands[3] = GEN_INT (<SHORT:sizen> - UINTVAL (operands[2]));
+ return "ubfx\t%<GPI:w>0, %<GPI:w>1, %2, %3";
+}
+ [(set_attr "type" "bfm")]
+)
+
+(define_insn "*extend<GPI:mode>_ashr<SHORT:mode>"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (sign_extend:GPI
+ (ashiftrt:SHORT (match_operand:SHORT 1 "register_operand" "r")
+ (match_operand 2 "const_int_operand" "n"))))]
+ "UINTVAL (operands[2]) < GET_MODE_BITSIZE (<SHORT:MODE>mode)"
+{
+ operands[3] = GEN_INT (<SHORT:sizen> - UINTVAL (operands[2]));
+ return "sbfx\\t%<GPI:w>0, %<GPI:w>1, %2, %3";
+}
+ [(set_attr "type" "bfm")]
+)
+
+;; -------------------------------------------------------------------
+;; Bitfields
+;; -------------------------------------------------------------------
+
+(define_expand "<optab>"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ANY_EXTRACT:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand 2 "const_int_operand" "n")
+ (match_operand 3 "const_int_operand" "n")))]
+ ""
+ ""
+)
+
+(define_insn "*<optab><mode>"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (ANY_EXTRACT:GPI (match_operand:GPI 1 "register_operand" "r")
+ (match_operand 2 "const_int_operand" "n")
+ (match_operand 3 "const_int_operand" "n")))]
+ ""
+ "<su>bfx\\t%<w>0, %<w>1, %3, %2"
+ [(set_attr "type" "bfm")]
+)
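+;; As an illustration, extracting an 8-bit field at bit 8, e.g. the C
+;; expression "(x >> 8) & 0xff", can be represented as a zero_extract of
+;; width 8 at position 8, in which case it matches the pattern above as
+;; "ubfx w0, w1, 8, 8"; the sign_extract form selects sbfx in the same way.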
+
+;; Bitfield Insert (insv)
+(define_expand "insv<mode>"
+ [(set (zero_extract:GPI (match_operand:GPI 0 "register_operand")
+ (match_operand 1 "const_int_operand")
+ (match_operand 2 "const_int_operand"))
+ (match_operand:GPI 3 "general_operand"))]
+ ""
+{
+ unsigned HOST_WIDE_INT width = UINTVAL (operands[1]);
+ unsigned HOST_WIDE_INT pos = UINTVAL (operands[2]);
+ rtx value = operands[3];
+
+ if (width == 0 || (pos + width) > GET_MODE_BITSIZE (<MODE>mode))
+ FAIL;
+
+ if (CONST_INT_P (value))
+ {
+ unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT)1 << width) - 1;
+
+ /* Prefer AND/OR for inserting all zeros or all ones. */
+ if ((UINTVAL (value) & mask) == 0
+ || (UINTVAL (value) & mask) == mask)
+ FAIL;
+
+ /* 16-bit aligned 16-bit wide insert is handled by insv_imm. */
+ if (width == 16 && (pos % 16) == 0)
+ DONE;
+ }
+ operands[3] = force_reg (<MODE>mode, value);
+})
+
+(define_insn "*insv_reg<mode>"
+ [(set (zero_extract:GPI (match_operand:GPI 0 "register_operand" "+r")
+ (match_operand 1 "const_int_operand" "n")
+ (match_operand 2 "const_int_operand" "n"))
+ (match_operand:GPI 3 "register_operand" "r"))]
+ "!(UINTVAL (operands[1]) == 0
+ || (UINTVAL (operands[2]) + UINTVAL (operands[1])
+ > GET_MODE_BITSIZE (<MODE>mode)))"
+ "bfi\\t%<w>0, %<w>3, %2, %1"
+ [(set_attr "type" "bfm")]
+)
+
+(define_insn "*extr_insv_lower_reg<mode>"
+ [(set (zero_extract:GPI (match_operand:GPI 0 "register_operand" "+r")
+ (match_operand 1 "const_int_operand" "n")
+ (const_int 0))
+ (zero_extract:GPI (match_operand:GPI 2 "register_operand" "+r")
+ (match_dup 1)
+ (match_operand 3 "const_int_operand" "n")))]
+ "!(UINTVAL (operands[1]) == 0
+ || (UINTVAL (operands[3]) + UINTVAL (operands[1])
+ > GET_MODE_BITSIZE (<MODE>mode)))"
+ "bfxil\\t%<w>0, %<w>2, %3, %1"
+ [(set_attr "type" "bfm")]
+)
+
+(define_insn "*<optab><ALLX:mode>_shft_<GPI:mode>"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (ashift:GPI (ANY_EXTEND:GPI
+ (match_operand:ALLX 1 "register_operand" "r"))
+ (match_operand 2 "const_int_operand" "n")))]
+ "UINTVAL (operands[2]) < <GPI:sizen>"
+{
+ operands[3] = (<ALLX:sizen> <= (<GPI:sizen> - UINTVAL (operands[2])))
+ ? GEN_INT (<ALLX:sizen>)
+ : GEN_INT (<GPI:sizen> - UINTVAL (operands[2]));
+ return "<su>bfiz\t%<GPI:w>0, %<GPI:w>1, %2, %3";
+}
+ [(set_attr "type" "bfm")]
+)
+
+;; XXX We should match (any_extend (ashift)) here, like (and (ashift)) below
+
+(define_insn "*andim_ashift<mode>_bfiz"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (and:GPI (ashift:GPI (match_operand:GPI 1 "register_operand" "r")
+ (match_operand 2 "const_int_operand" "n"))
+ (match_operand 3 "const_int_operand" "n")))]
+ "exact_log2 ((INTVAL (operands[3]) >> INTVAL (operands[2])) + 1) >= 0
+ && (INTVAL (operands[3]) & ((1 << INTVAL (operands[2])) - 1)) == 0"
+ "ubfiz\\t%<w>0, %<w>1, %2, %P3"
+ [(set_attr "type" "bfm")]
+)
+
+(define_insn "bswap<mode>2"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (bswap:GPI (match_operand:GPI 1 "register_operand" "r")))]
+ ""
+ "rev\\t%<w>0, %<w>1"
+ [(set_attr "type" "rev")]
+)
+
+(define_insn "bswaphi2"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (bswap:HI (match_operand:HI 1 "register_operand" "r")))]
+ ""
+ "rev16\\t%w0, %w1"
+ [(set_attr "type" "rev")]
+)
+
+;; zero_extend version of above
+(define_insn "*bswapsi2_uxtw"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI (bswap:SI (match_operand:SI 1 "register_operand" "r"))))]
+ ""
+ "rev\\t%w0, %w1"
+ [(set_attr "type" "rev")]
+)
+
+;; -------------------------------------------------------------------
+;; Floating-point intrinsics
+;; -------------------------------------------------------------------
+
+;; frint floating-point round to integral standard patterns.
+;; Expands to btrunc, ceil, floor, nearbyint, rint, round.
+
+(define_insn "<frint_pattern><mode>2"
+ [(set (match_operand:GPF 0 "register_operand" "=w")
+ (unspec:GPF [(match_operand:GPF 1 "register_operand" "w")]
+ FRINT))]
+ "TARGET_FLOAT"
+ "frint<frint_suffix>\\t%<s>0, %<s>1"
+ [(set_attr "type" "f_rint<s>")]
+)
+
+;; frcvt floating-point round to integer and convert standard patterns.
+;; Expands to lbtrunc, lceil, lfloor, lround.
+(define_insn "l<fcvt_pattern><su_optab><GPF:mode><GPI:mode>2"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (FIXUORS:GPI (unspec:GPF [(match_operand:GPF 1 "register_operand" "w")]
+ FCVT)))]
+ "TARGET_FLOAT"
+ "fcvt<frint_suffix><su>\\t%<GPI:w>0, %<GPF:s>1"
+ [(set_attr "type" "f_cvtf2i")]
+)
+
+;; fma - no throw
+
+(define_insn "fma<mode>4"
+ [(set (match_operand:GPF 0 "register_operand" "=w")
+ (fma:GPF (match_operand:GPF 1 "register_operand" "w")
+ (match_operand:GPF 2 "register_operand" "w")
+ (match_operand:GPF 3 "register_operand" "w")))]
+ "TARGET_FLOAT"
+ "fmadd\\t%<s>0, %<s>1, %<s>2, %<s>3"
+ [(set_attr "type" "fmac<s>")]
+)
+
+(define_insn "fnma<mode>4"
+ [(set (match_operand:GPF 0 "register_operand" "=w")
+ (fma:GPF (neg:GPF (match_operand:GPF 1 "register_operand" "w"))
+ (match_operand:GPF 2 "register_operand" "w")
+ (match_operand:GPF 3 "register_operand" "w")))]
+ "TARGET_FLOAT"
+ "fmsub\\t%<s>0, %<s>1, %<s>2, %<s>3"
+ [(set_attr "type" "fmac<s>")]
+)
+
+(define_insn "fms<mode>4"
+ [(set (match_operand:GPF 0 "register_operand" "=w")
+ (fma:GPF (match_operand:GPF 1 "register_operand" "w")
+ (match_operand:GPF 2 "register_operand" "w")
+ (neg:GPF (match_operand:GPF 3 "register_operand" "w"))))]
+ "TARGET_FLOAT"
+ "fnmsub\\t%<s>0, %<s>1, %<s>2, %<s>3"
+ [(set_attr "type" "fmac<s>")]
+)
+
+(define_insn "fnms<mode>4"
+ [(set (match_operand:GPF 0 "register_operand" "=w")
+ (fma:GPF (neg:GPF (match_operand:GPF 1 "register_operand" "w"))
+ (match_operand:GPF 2 "register_operand" "w")
+ (neg:GPF (match_operand:GPF 3 "register_operand" "w"))))]
+ "TARGET_FLOAT"
+ "fnmadd\\t%<s>0, %<s>1, %<s>2, %<s>3"
+ [(set_attr "type" "fmac<s>")]
+)
+
+;; If signed zeros are ignored, -(a * b + c) = -a * b - c.
+(define_insn "*fnmadd<mode>4"
+ [(set (match_operand:GPF 0 "register_operand" "=w")
+ (neg:GPF (fma:GPF (match_operand:GPF 1 "register_operand" "w")
+ (match_operand:GPF 2 "register_operand" "w")
+ (match_operand:GPF 3 "register_operand" "w"))))]
+ "!HONOR_SIGNED_ZEROS (<MODE>mode) && TARGET_FLOAT"
+ "fnmadd\\t%<s>0, %<s>1, %<s>2, %<s>3"
+ [(set_attr "type" "fmac<s>")]
+)
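+;; Summary of the patterns above: fma -> FMADD, fnma -> FMSUB,
+;; fms -> FNMSUB, fnms -> FNMADD, and neg (fma) also -> FNMADD, which is
+;; valid because -(a * b + c) = (-a) * b + (-c) once signed zeros are
+;; ignored.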
+
+;; -------------------------------------------------------------------
+;; Floating-point conversions
+;; -------------------------------------------------------------------
+
+(define_insn "extendsfdf2"
+ [(set (match_operand:DF 0 "register_operand" "=w")
+ (float_extend:DF (match_operand:SF 1 "register_operand" "w")))]
+ "TARGET_FLOAT"
+ "fcvt\\t%d0, %s1"
+ [(set_attr "type" "f_cvt")]
+)
+
+(define_insn "truncdfsf2"
+ [(set (match_operand:SF 0 "register_operand" "=w")
+ (float_truncate:SF (match_operand:DF 1 "register_operand" "w")))]
+ "TARGET_FLOAT"
+ "fcvt\\t%s0, %d1"
+ [(set_attr "type" "f_cvt")]
+)
+
+(define_insn "fix_trunc<GPF:mode><GPI:mode>2"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (fix:GPI (match_operand:GPF 1 "register_operand" "w")))]
+ "TARGET_FLOAT"
+ "fcvtzs\\t%<GPI:w>0, %<GPF:s>1"
+ [(set_attr "type" "f_cvtf2i")]
+)
+
+(define_insn "fixuns_trunc<GPF:mode><GPI:mode>2"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (unsigned_fix:GPI (match_operand:GPF 1 "register_operand" "w")))]
+ "TARGET_FLOAT"
+ "fcvtzu\\t%<GPI:w>0, %<GPF:s>1"
+ [(set_attr "type" "f_cvtf2i")]
+)
+
+(define_insn "float<GPI:mode><GPF:mode>2"
+ [(set (match_operand:GPF 0 "register_operand" "=w")
+ (float:GPF (match_operand:GPI 1 "register_operand" "r")))]
+ "TARGET_FLOAT"
+ "scvtf\\t%<GPF:s>0, %<GPI:w>1"
+ [(set_attr "type" "f_cvti2f")]
+)
+
+(define_insn "floatuns<GPI:mode><GPF:mode>2"
+ [(set (match_operand:GPF 0 "register_operand" "=w")
+ (unsigned_float:GPF (match_operand:GPI 1 "register_operand" "r")))]
+ "TARGET_FLOAT"
+ "ucvtf\\t%<GPF:s>0, %<GPI:w>1"
+ [(set_attr "type" "f_cvt")]
+)
+
+;; -------------------------------------------------------------------
+;; Floating-point arithmetic
+;; -------------------------------------------------------------------
+
+(define_insn "add<mode>3"
+ [(set (match_operand:GPF 0 "register_operand" "=w")
+ (plus:GPF
+ (match_operand:GPF 1 "register_operand" "w")
+ (match_operand:GPF 2 "register_operand" "w")))]
+ "TARGET_FLOAT"
+ "fadd\\t%<s>0, %<s>1, %<s>2"
+ [(set_attr "type" "fadd<s>")]
+)
+
+(define_insn "sub<mode>3"
+ [(set (match_operand:GPF 0 "register_operand" "=w")
+ (minus:GPF
+ (match_operand:GPF 1 "register_operand" "w")
+ (match_operand:GPF 2 "register_operand" "w")))]
+ "TARGET_FLOAT"
+ "fsub\\t%<s>0, %<s>1, %<s>2"
+ [(set_attr "type" "fadd<s>")]
+)
+
+(define_insn "mul<mode>3"
+ [(set (match_operand:GPF 0 "register_operand" "=w")
+ (mult:GPF
+ (match_operand:GPF 1 "register_operand" "w")
+ (match_operand:GPF 2 "register_operand" "w")))]
+ "TARGET_FLOAT"
+ "fmul\\t%<s>0, %<s>1, %<s>2"
+ [(set_attr "type" "fmul<s>")]
+)
+
+(define_insn "*fnmul<mode>3"
+ [(set (match_operand:GPF 0 "register_operand" "=w")
+ (mult:GPF
+ (neg:GPF (match_operand:GPF 1 "register_operand" "w"))
+ (match_operand:GPF 2 "register_operand" "w")))]
+ "TARGET_FLOAT"
+ "fnmul\\t%<s>0, %<s>1, %<s>2"
+ [(set_attr "type" "fmul<s>")]
+)
+
+(define_insn "div<mode>3"
+ [(set (match_operand:GPF 0 "register_operand" "=w")
+ (div:GPF
+ (match_operand:GPF 1 "register_operand" "w")
+ (match_operand:GPF 2 "register_operand" "w")))]
+ "TARGET_FLOAT"
+ "fdiv\\t%<s>0, %<s>1, %<s>2"
+ [(set_attr "type" "fdiv<s>")]
+)
+
+(define_insn "neg<mode>2"
+ [(set (match_operand:GPF 0 "register_operand" "=w")
+ (neg:GPF (match_operand:GPF 1 "register_operand" "w")))]
+ "TARGET_FLOAT"
+ "fneg\\t%<s>0, %<s>1"
+ [(set_attr "type" "ffarith<s>")]
+)
+
+(define_insn "sqrt<mode>2"
+ [(set (match_operand:GPF 0 "register_operand" "=w")
+ (sqrt:GPF (match_operand:GPF 1 "register_operand" "w")))]
+ "TARGET_FLOAT"
+ "fsqrt\\t%<s>0, %<s>1"
+ [(set_attr "type" "fsqrt<s>")]
+)
+
+(define_insn "abs<mode>2"
+ [(set (match_operand:GPF 0 "register_operand" "=w")
+ (abs:GPF (match_operand:GPF 1 "register_operand" "w")))]
+ "TARGET_FLOAT"
+ "fabs\\t%<s>0, %<s>1"
+ [(set_attr "type" "ffarith<s>")]
+)
+
+;; Given that smax/smin do not specify the result when either input is NaN,
+;; we could use either FMAXNM or FMAX for smax, and either FMINNM or FMIN
+;; for smin.
+
+(define_insn "smax<mode>3"
+ [(set (match_operand:GPF 0 "register_operand" "=w")
+ (smax:GPF (match_operand:GPF 1 "register_operand" "w")
+ (match_operand:GPF 2 "register_operand" "w")))]
+ "TARGET_FLOAT"
+ "fmaxnm\\t%<s>0, %<s>1, %<s>2"
+ [(set_attr "type" "f_minmax<s>")]
+)
+
+(define_insn "smin<mode>3"
+ [(set (match_operand:GPF 0 "register_operand" "=w")
+ (smin:GPF (match_operand:GPF 1 "register_operand" "w")
+ (match_operand:GPF 2 "register_operand" "w")))]
+ "TARGET_FLOAT"
+ "fminnm\\t%<s>0, %<s>1, %<s>2"
+ [(set_attr "type" "f_minmax<s>")]
+)
+
+;; -------------------------------------------------------------------
+;; Reload support
+;; -------------------------------------------------------------------
+
+(define_expand "aarch64_reload_mov<mode>"
+ [(set (match_operand:TX 0 "register_operand" "=w")
+ (match_operand:TX 1 "register_operand" "w"))
+ (clobber (match_operand:DI 2 "register_operand" "=&r"))
+ ]
+ ""
+ {
+ rtx op0 = simplify_gen_subreg (TImode, operands[0], <MODE>mode, 0);
+ rtx op1 = simplify_gen_subreg (TImode, operands[1], <MODE>mode, 0);
+ gen_aarch64_movtilow_tilow (op0, op1);
+ gen_aarch64_movdi_tihigh (operands[2], op1);
+ gen_aarch64_movtihigh_di (op0, operands[2]);
+ DONE;
+ }
+)
+
+;; The following secondary reload helper patterns are invoked
+;; after or during reload, as we don't want these patterns to start
+;; kicking in during the combiner pass.
+
+(define_insn "aarch64_movdi_<mode>low"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (truncate:DI (match_operand:TX 1 "register_operand" "w")))]
+ "reload_completed || reload_in_progress"
+ "fmov\\t%x0, %d1"
+ [(set_attr "type" "f_mrc")
+ (set_attr "length" "4")
+ ])
+
+(define_insn "aarch64_movdi_<mode>high"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (truncate:DI
+ (lshiftrt:TX (match_operand:TX 1 "register_operand" "w")
+ (const_int 64))))]
+ "reload_completed || reload_in_progress"
+ "fmov\\t%x0, %1.d[1]"
+ [(set_attr "type" "f_mrc")
+ (set_attr "length" "4")
+ ])
+
+(define_insn "aarch64_mov<mode>high_di"
+ [(set (zero_extract:TX (match_operand:TX 0 "register_operand" "+w")
+ (const_int 64) (const_int 64))
+ (zero_extend:TX (match_operand:DI 1 "register_operand" "r")))]
+ "reload_completed || reload_in_progress"
+ "fmov\\t%0.d[1], %x1"
+ [(set_attr "type" "f_mcr")
+ (set_attr "length" "4")
+ ])
+
+(define_insn "aarch64_mov<mode>low_di"
+ [(set (match_operand:TX 0 "register_operand" "=w")
+ (zero_extend:TX (match_operand:DI 1 "register_operand" "r")))]
+ "reload_completed || reload_in_progress"
+ "fmov\\t%d0, %x1"
+ [(set_attr "type" "f_mcr")
+ (set_attr "length" "4")
+ ])
+
+(define_insn "aarch64_movtilow_tilow"
+ [(set (match_operand:TI 0 "register_operand" "=w")
+ (zero_extend:TI
+ (truncate:DI (match_operand:TI 1 "register_operand" "w"))))]
+ "reload_completed || reload_in_progress"
+ "fmov\\t%d0, %d1"
+ [(set_attr "type" "f_mcr")
+ (set_attr "length" "4")
+ ])
+
+;; The operands of the high and lo_sum expressions used for the ADRP and
+;; ADD instructions deliberately have no modes.  This allows high and
+;; lo_sum to be used with the labels that define the jump tables in the
+;; rodata section.
+
+(define_expand "add_losym"
+ [(set (match_operand 0 "register_operand" "=r")
+ (lo_sum (match_operand 1 "register_operand" "r")
+ (match_operand 2 "aarch64_valid_symref" "S")))]
+ ""
+{
+ enum machine_mode mode = GET_MODE (operands[0]);
+
+ emit_insn ((mode == DImode
+ ? gen_add_losym_di
+ : gen_add_losym_si) (operands[0],
+ operands[1],
+ operands[2]));
+ DONE;
+})
+
+(define_insn "add_losym_<mode>"
+ [(set (match_operand:P 0 "register_operand" "=r")
+ (lo_sum:P (match_operand:P 1 "register_operand" "r")
+ (match_operand 2 "aarch64_valid_symref" "S")))]
+ ""
+ "add\\t%<w>0, %<w>1, :lo12:%a2"
+ [(set_attr "type" "alu_reg")]
+)
+
+(define_insn "ldr_got_small_<mode>"
+ [(set (match_operand:PTR 0 "register_operand" "=r")
+ (unspec:PTR [(mem:PTR (lo_sum:PTR
+ (match_operand:PTR 1 "register_operand" "r")
+ (match_operand:PTR 2 "aarch64_valid_symref" "S")))]
+ UNSPEC_GOTSMALLPIC))]
+ ""
+ "ldr\\t%<w>0, [%1, #:got_lo12:%a2]"
+ [(set_attr "type" "load1")]
+)
+
+(define_insn "ldr_got_small_sidi"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI
+ (unspec:SI [(mem:SI (lo_sum:DI
+ (match_operand:DI 1 "register_operand" "r")
+ (match_operand:DI 2 "aarch64_valid_symref" "S")))]
+ UNSPEC_GOTSMALLPIC)))]
+ "TARGET_ILP32"
+ "ldr\\t%w0, [%1, #:got_lo12:%a2]"
+ [(set_attr "type" "load1")]
+)
+
+(define_insn "ldr_got_tiny"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (unspec:DI [(match_operand:DI 1 "aarch64_valid_symref" "S")]
+ UNSPEC_GOTTINYPIC))]
+ ""
+ "ldr\\t%0, %L1"
+ [(set_attr "type" "load1")]
+)
+
+(define_insn "aarch64_load_tp_hard"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (unspec:DI [(const_int 0)] UNSPEC_TLS))]
+ ""
+ "mrs\\t%0, tpidr_el0"
+ [(set_attr "type" "mrs")]
+)
+
+;; The TLS ABI specifically requires that the compiler does not schedule
+;; instructions in the TLS stubs, in order to enable linker relaxation.
+;; Therefore we treat the stubs as an atomic sequence.
+(define_expand "tlsgd_small"
+ [(parallel [(set (match_operand 0 "register_operand" "")
+ (call (mem:DI (match_dup 2)) (const_int 1)))
+ (unspec:DI [(match_operand:DI 1 "aarch64_valid_symref" "")] UNSPEC_GOTSMALLTLS)
+ (clobber (reg:DI LR_REGNUM))])]
+ ""
+{
+ operands[2] = aarch64_tls_get_addr ();
+})
+
+(define_insn "*tlsgd_small"
+ [(set (match_operand 0 "register_operand" "")
+ (call (mem:DI (match_operand:DI 2 "" "")) (const_int 1)))
+ (unspec:DI [(match_operand:DI 1 "aarch64_valid_symref" "S")] UNSPEC_GOTSMALLTLS)
+ (clobber (reg:DI LR_REGNUM))
+ ]
+ ""
+ "adrp\\tx0, %A1\;add\\tx0, x0, %L1\;bl\\t%2\;nop"
+ [(set_attr "type" "call")
+ (set_attr "length" "16")])
+
+(define_insn "tlsie_small"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (unspec:DI [(match_operand:DI 1 "aarch64_tls_ie_symref" "S")]
+ UNSPEC_GOTSMALLTLS))]
+ ""
+ "adrp\\t%0, %A1\;ldr\\t%0, [%0, #%L1]"
+ [(set_attr "type" "load1")
+ (set_attr "length" "8")]
+)
+
+(define_insn "tlsle_small"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (unspec:DI [(match_operand:DI 1 "register_operand" "r")
+ (match_operand:DI 2 "aarch64_tls_le_symref" "S")]
+ UNSPEC_GOTSMALLTLS))]
+ ""
+ "add\\t%0, %1, #%G2\;add\\t%0, %0, #%L2"
+ [(set_attr "type" "alu_reg")
+ (set_attr "length" "8")]
+)
+
+(define_insn "tlsdesc_small"
+ [(set (reg:DI R0_REGNUM)
+ (unspec:DI [(match_operand:DI 0 "aarch64_valid_symref" "S")]
+ UNSPEC_TLSDESC))
+ (clobber (reg:DI LR_REGNUM))
+ (clobber (match_scratch:DI 1 "=r"))]
+ "TARGET_TLS_DESC"
+ "adrp\\tx0, %A0\;ldr\\t%1, [x0, #%L0]\;add\\tx0, x0, %L0\;.tlsdesccall\\t%0\;blr\\t%1"
+ [(set_attr "type" "call")
+ (set_attr "length" "16")])
+
+(define_insn "stack_tie"
+ [(set (mem:BLK (scratch))
+ (unspec:BLK [(match_operand:DI 0 "register_operand" "rk")
+ (match_operand:DI 1 "register_operand" "rk")]
+ UNSPEC_PRLG_STK))]
+ ""
+ ""
+ [(set_attr "length" "0")]
+)
+
+;; Named pattern for expanding thread pointer reference.
+(define_expand "get_thread_pointerdi"
+ [(match_operand:DI 0 "register_operand" "=r")]
+ ""
+{
+ rtx tmp = aarch64_load_tp (operands[0]);
+ if (tmp != operands[0])
+ emit_move_insn (operands[0], tmp);
+ DONE;
+})
+
+;; AdvSIMD Stuff
+(include "aarch64-simd.md")
+
+;; Atomic Operations
+(include "atomics.md")
diff --git a/gcc-4.9/gcc/config/aarch64/aarch64.opt b/gcc-4.9/gcc/config/aarch64/aarch64.opt
new file mode 100644
index 000000000..f5a15b729
--- /dev/null
+++ b/gcc-4.9/gcc/config/aarch64/aarch64.opt
@@ -0,0 +1,118 @@
+; Machine description for AArch64 architecture.
+; Copyright (C) 2009-2014 Free Software Foundation, Inc.
+; Contributed by ARM Ltd.
+;
+; This file is part of GCC.
+;
+; GCC is free software; you can redistribute it and/or modify it
+; under the terms of the GNU General Public License as published by
+; the Free Software Foundation; either version 3, or (at your option)
+; any later version.
+;
+; GCC is distributed in the hope that it will be useful, but
+; WITHOUT ANY WARRANTY; without even the implied warranty of
+; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+; General Public License for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with GCC; see the file COPYING3. If not see
+; <http://www.gnu.org/licenses/>.
+
+HeaderInclude
+config/aarch64/aarch64-opts.h
+
+; The TLS dialect names to use with -mtls-dialect.
+
+Enum
+Name(tls_type) Type(enum aarch64_tls_type)
+The possible TLS dialects:
+
+EnumValue
+Enum(tls_type) String(trad) Value(TLS_TRADITIONAL)
+
+EnumValue
+Enum(tls_type) String(desc) Value(TLS_DESCRIPTORS)
+
+; The code model option names for -mcmodel.
+
+Enum
+Name(cmodel) Type(enum aarch64_code_model)
+The code model option names for -mcmodel:
+
+EnumValue
+Enum(cmodel) String(tiny) Value(AARCH64_CMODEL_TINY)
+
+EnumValue
+Enum(cmodel) String(small) Value(AARCH64_CMODEL_SMALL)
+
+EnumValue
+Enum(cmodel) String(large) Value(AARCH64_CMODEL_LARGE)
+
+; The cpu/arch option names to use in cpu/arch selection.
+
+Variable
+const char *aarch64_arch_string
+
+Variable
+const char *aarch64_cpu_string
+
+Variable
+const char *aarch64_tune_string
+
+mbig-endian
+Target Report RejectNegative Mask(BIG_END)
+Assume target CPU is configured as big endian
+
+mgeneral-regs-only
+Target Report RejectNegative Mask(GENERAL_REGS_ONLY)
+Generate code which uses only the general registers
+
+mlittle-endian
+Target Report RejectNegative InverseMask(BIG_END)
+Assume target CPU is configured as little endian
+
+mcmodel=
+Target RejectNegative Joined Enum(cmodel) Var(aarch64_cmodel_var) Init(AARCH64_CMODEL_SMALL)
+Specify the code model
+
+mstrict-align
+Target Report RejectNegative Mask(STRICT_ALIGN)
+Don't assume that unaligned accesses are handled by the system
+
+momit-leaf-frame-pointer
+Target Report Save Var(flag_omit_leaf_frame_pointer) Init(1)
+Omit the frame pointer in leaf functions
+
+mtls-dialect=
+Target RejectNegative Joined Enum(tls_type) Var(aarch64_tls_dialect) Init(TLS_DESCRIPTORS)
+Specify TLS dialect
+
+march=
+Target RejectNegative ToLower Joined Var(aarch64_arch_string)
+-march=ARCH Use features of architecture ARCH
+
+mcpu=
+Target RejectNegative ToLower Joined Var(aarch64_cpu_string)
+-mcpu=CPU Use features of and optimize for CPU
+
+mtune=
+Target RejectNegative ToLower Joined Var(aarch64_tune_string)
+-mtune=CPU Optimize for CPU
+
+mabi=
+Target RejectNegative Joined Enum(aarch64_abi) Var(aarch64_abi) Init(AARCH64_ABI_DEFAULT)
+-mabi=ABI Generate code that conforms to the specified ABI
+
+mlra
+Target Report Var(aarch64_lra_flag) Init(1) Save
+Use LRA instead of reload (transitional)
+
+Enum
+Name(aarch64_abi) Type(int)
+Known AArch64 ABIs (for use with the -mabi= option):
+
+EnumValue
+Enum(aarch64_abi) String(ilp32) Value(AARCH64_ABI_ILP32)
+
+EnumValue
+Enum(aarch64_abi) String(lp64) Value(AARCH64_ABI_LP64)
diff --git a/gcc-4.9/gcc/config/aarch64/arm_neon.h b/gcc-4.9/gcc/config/aarch64/arm_neon.h
new file mode 100644
index 000000000..747a292ba
--- /dev/null
+++ b/gcc-4.9/gcc/config/aarch64/arm_neon.h
@@ -0,0 +1,25403 @@
+/* ARM NEON intrinsics include file.
+
+ Copyright (C) 2011-2014 Free Software Foundation, Inc.
+ Contributed by ARM Ltd.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef _AARCH64_NEON_H_
+#define _AARCH64_NEON_H_
+
+#include <stdint.h>
+
+#define __AARCH64_UINT64_C(__C) ((uint64_t) __C)
+#define __AARCH64_INT64_C(__C) ((int64_t) __C)
+
+typedef __builtin_aarch64_simd_qi int8x8_t
+ __attribute__ ((__vector_size__ (8)));
+typedef __builtin_aarch64_simd_hi int16x4_t
+ __attribute__ ((__vector_size__ (8)));
+typedef __builtin_aarch64_simd_si int32x2_t
+ __attribute__ ((__vector_size__ (8)));
+typedef int64_t int64x1_t;
+typedef int32_t int32x1_t;
+typedef int16_t int16x1_t;
+typedef int8_t int8x1_t;
+typedef double float64x1_t;
+typedef __builtin_aarch64_simd_sf float32x2_t
+ __attribute__ ((__vector_size__ (8)));
+typedef __builtin_aarch64_simd_poly8 poly8x8_t
+ __attribute__ ((__vector_size__ (8)));
+typedef __builtin_aarch64_simd_poly16 poly16x4_t
+ __attribute__ ((__vector_size__ (8)));
+typedef __builtin_aarch64_simd_uqi uint8x8_t
+ __attribute__ ((__vector_size__ (8)));
+typedef __builtin_aarch64_simd_uhi uint16x4_t
+ __attribute__ ((__vector_size__ (8)));
+typedef __builtin_aarch64_simd_usi uint32x2_t
+ __attribute__ ((__vector_size__ (8)));
+typedef uint64_t uint64x1_t;
+typedef uint32_t uint32x1_t;
+typedef uint16_t uint16x1_t;
+typedef uint8_t uint8x1_t;
+typedef __builtin_aarch64_simd_qi int8x16_t
+ __attribute__ ((__vector_size__ (16)));
+typedef __builtin_aarch64_simd_hi int16x8_t
+ __attribute__ ((__vector_size__ (16)));
+typedef __builtin_aarch64_simd_si int32x4_t
+ __attribute__ ((__vector_size__ (16)));
+typedef __builtin_aarch64_simd_di int64x2_t
+ __attribute__ ((__vector_size__ (16)));
+typedef __builtin_aarch64_simd_sf float32x4_t
+ __attribute__ ((__vector_size__ (16)));
+typedef __builtin_aarch64_simd_df float64x2_t
+ __attribute__ ((__vector_size__ (16)));
+typedef __builtin_aarch64_simd_poly8 poly8x16_t
+ __attribute__ ((__vector_size__ (16)));
+typedef __builtin_aarch64_simd_poly16 poly16x8_t
+ __attribute__ ((__vector_size__ (16)));
+typedef __builtin_aarch64_simd_poly64 poly64x2_t
+ __attribute__ ((__vector_size__ (16)));
+typedef __builtin_aarch64_simd_uqi uint8x16_t
+ __attribute__ ((__vector_size__ (16)));
+typedef __builtin_aarch64_simd_uhi uint16x8_t
+ __attribute__ ((__vector_size__ (16)));
+typedef __builtin_aarch64_simd_usi uint32x4_t
+ __attribute__ ((__vector_size__ (16)));
+typedef __builtin_aarch64_simd_udi uint64x2_t
+ __attribute__ ((__vector_size__ (16)));
+
+typedef float float32_t;
+typedef double float64_t;
+typedef __builtin_aarch64_simd_poly8 poly8_t;
+typedef __builtin_aarch64_simd_poly16 poly16_t;
+typedef __builtin_aarch64_simd_poly64 poly64_t;
+typedef __builtin_aarch64_simd_poly128 poly128_t;
+
+typedef struct int8x8x2_t
+{
+ int8x8_t val[2];
+} int8x8x2_t;
+
+typedef struct int8x16x2_t
+{
+ int8x16_t val[2];
+} int8x16x2_t;
+
+typedef struct int16x4x2_t
+{
+ int16x4_t val[2];
+} int16x4x2_t;
+
+typedef struct int16x8x2_t
+{
+ int16x8_t val[2];
+} int16x8x2_t;
+
+typedef struct int32x2x2_t
+{
+ int32x2_t val[2];
+} int32x2x2_t;
+
+typedef struct int32x4x2_t
+{
+ int32x4_t val[2];
+} int32x4x2_t;
+
+typedef struct int64x1x2_t
+{
+ int64x1_t val[2];
+} int64x1x2_t;
+
+typedef struct int64x2x2_t
+{
+ int64x2_t val[2];
+} int64x2x2_t;
+
+typedef struct uint8x8x2_t
+{
+ uint8x8_t val[2];
+} uint8x8x2_t;
+
+typedef struct uint8x16x2_t
+{
+ uint8x16_t val[2];
+} uint8x16x2_t;
+
+typedef struct uint16x4x2_t
+{
+ uint16x4_t val[2];
+} uint16x4x2_t;
+
+typedef struct uint16x8x2_t
+{
+ uint16x8_t val[2];
+} uint16x8x2_t;
+
+typedef struct uint32x2x2_t
+{
+ uint32x2_t val[2];
+} uint32x2x2_t;
+
+typedef struct uint32x4x2_t
+{
+ uint32x4_t val[2];
+} uint32x4x2_t;
+
+typedef struct uint64x1x2_t
+{
+ uint64x1_t val[2];
+} uint64x1x2_t;
+
+typedef struct uint64x2x2_t
+{
+ uint64x2_t val[2];
+} uint64x2x2_t;
+
+typedef struct float32x2x2_t
+{
+ float32x2_t val[2];
+} float32x2x2_t;
+
+typedef struct float32x4x2_t
+{
+ float32x4_t val[2];
+} float32x4x2_t;
+
+typedef struct float64x2x2_t
+{
+ float64x2_t val[2];
+} float64x2x2_t;
+
+typedef struct float64x1x2_t
+{
+ float64x1_t val[2];
+} float64x1x2_t;
+
+typedef struct poly8x8x2_t
+{
+ poly8x8_t val[2];
+} poly8x8x2_t;
+
+typedef struct poly8x16x2_t
+{
+ poly8x16_t val[2];
+} poly8x16x2_t;
+
+typedef struct poly16x4x2_t
+{
+ poly16x4_t val[2];
+} poly16x4x2_t;
+
+typedef struct poly16x8x2_t
+{
+ poly16x8_t val[2];
+} poly16x8x2_t;
+
+typedef struct int8x8x3_t
+{
+ int8x8_t val[3];
+} int8x8x3_t;
+
+typedef struct int8x16x3_t
+{
+ int8x16_t val[3];
+} int8x16x3_t;
+
+typedef struct int16x4x3_t
+{
+ int16x4_t val[3];
+} int16x4x3_t;
+
+typedef struct int16x8x3_t
+{
+ int16x8_t val[3];
+} int16x8x3_t;
+
+typedef struct int32x2x3_t
+{
+ int32x2_t val[3];
+} int32x2x3_t;
+
+typedef struct int32x4x3_t
+{
+ int32x4_t val[3];
+} int32x4x3_t;
+
+typedef struct int64x1x3_t
+{
+ int64x1_t val[3];
+} int64x1x3_t;
+
+typedef struct int64x2x3_t
+{
+ int64x2_t val[3];
+} int64x2x3_t;
+
+typedef struct uint8x8x3_t
+{
+ uint8x8_t val[3];
+} uint8x8x3_t;
+
+typedef struct uint8x16x3_t
+{
+ uint8x16_t val[3];
+} uint8x16x3_t;
+
+typedef struct uint16x4x3_t
+{
+ uint16x4_t val[3];
+} uint16x4x3_t;
+
+typedef struct uint16x8x3_t
+{
+ uint16x8_t val[3];
+} uint16x8x3_t;
+
+typedef struct uint32x2x3_t
+{
+ uint32x2_t val[3];
+} uint32x2x3_t;
+
+typedef struct uint32x4x3_t
+{
+ uint32x4_t val[3];
+} uint32x4x3_t;
+
+typedef struct uint64x1x3_t
+{
+ uint64x1_t val[3];
+} uint64x1x3_t;
+
+typedef struct uint64x2x3_t
+{
+ uint64x2_t val[3];
+} uint64x2x3_t;
+
+typedef struct float32x2x3_t
+{
+ float32x2_t val[3];
+} float32x2x3_t;
+
+typedef struct float32x4x3_t
+{
+ float32x4_t val[3];
+} float32x4x3_t;
+
+typedef struct float64x2x3_t
+{
+ float64x2_t val[3];
+} float64x2x3_t;
+
+typedef struct float64x1x3_t
+{
+ float64x1_t val[3];
+} float64x1x3_t;
+
+typedef struct poly8x8x3_t
+{
+ poly8x8_t val[3];
+} poly8x8x3_t;
+
+typedef struct poly8x16x3_t
+{
+ poly8x16_t val[3];
+} poly8x16x3_t;
+
+typedef struct poly16x4x3_t
+{
+ poly16x4_t val[3];
+} poly16x4x3_t;
+
+typedef struct poly16x8x3_t
+{
+ poly16x8_t val[3];
+} poly16x8x3_t;
+
+typedef struct int8x8x4_t
+{
+ int8x8_t val[4];
+} int8x8x4_t;
+
+typedef struct int8x16x4_t
+{
+ int8x16_t val[4];
+} int8x16x4_t;
+
+typedef struct int16x4x4_t
+{
+ int16x4_t val[4];
+} int16x4x4_t;
+
+typedef struct int16x8x4_t
+{
+ int16x8_t val[4];
+} int16x8x4_t;
+
+typedef struct int32x2x4_t
+{
+ int32x2_t val[4];
+} int32x2x4_t;
+
+typedef struct int32x4x4_t
+{
+ int32x4_t val[4];
+} int32x4x4_t;
+
+typedef struct int64x1x4_t
+{
+ int64x1_t val[4];
+} int64x1x4_t;
+
+typedef struct int64x2x4_t
+{
+ int64x2_t val[4];
+} int64x2x4_t;
+
+typedef struct uint8x8x4_t
+{
+ uint8x8_t val[4];
+} uint8x8x4_t;
+
+typedef struct uint8x16x4_t
+{
+ uint8x16_t val[4];
+} uint8x16x4_t;
+
+typedef struct uint16x4x4_t
+{
+ uint16x4_t val[4];
+} uint16x4x4_t;
+
+typedef struct uint16x8x4_t
+{
+ uint16x8_t val[4];
+} uint16x8x4_t;
+
+typedef struct uint32x2x4_t
+{
+ uint32x2_t val[4];
+} uint32x2x4_t;
+
+typedef struct uint32x4x4_t
+{
+ uint32x4_t val[4];
+} uint32x4x4_t;
+
+typedef struct uint64x1x4_t
+{
+ uint64x1_t val[4];
+} uint64x1x4_t;
+
+typedef struct uint64x2x4_t
+{
+ uint64x2_t val[4];
+} uint64x2x4_t;
+
+typedef struct float32x2x4_t
+{
+ float32x2_t val[4];
+} float32x2x4_t;
+
+typedef struct float32x4x4_t
+{
+ float32x4_t val[4];
+} float32x4x4_t;
+
+typedef struct float64x2x4_t
+{
+ float64x2_t val[4];
+} float64x2x4_t;
+
+typedef struct float64x1x4_t
+{
+ float64x1_t val[4];
+} float64x1x4_t;
+
+typedef struct poly8x8x4_t
+{
+ poly8x8_t val[4];
+} poly8x8x4_t;
+
+typedef struct poly8x16x4_t
+{
+ poly8x16_t val[4];
+} poly8x16x4_t;
+
+typedef struct poly16x4x4_t
+{
+ poly16x4_t val[4];
+} poly16x4x4_t;
+
+typedef struct poly16x8x4_t
+{
+ poly16x8_t val[4];
+} poly16x8x4_t;
+
+/* vget_lane internal macros. */
+
+#define __aarch64_vget_lane_any(__size, __cast_ret, __cast_a, __a, __b) \
+ (__cast_ret \
+ __builtin_aarch64_be_checked_get_lane##__size (__cast_a __a, __b))
+
+#define __aarch64_vget_lane_f32(__a, __b) \
+ __aarch64_vget_lane_any (v2sf, , , __a, __b)
+#define __aarch64_vget_lane_f64(__a, __b) (__a)
+
+#define __aarch64_vget_lane_p8(__a, __b) \
+ __aarch64_vget_lane_any (v8qi, (poly8_t), (int8x8_t), __a, __b)
+#define __aarch64_vget_lane_p16(__a, __b) \
+ __aarch64_vget_lane_any (v4hi, (poly16_t), (int16x4_t), __a, __b)
+
+#define __aarch64_vget_lane_s8(__a, __b) \
+ __aarch64_vget_lane_any (v8qi, , ,__a, __b)
+#define __aarch64_vget_lane_s16(__a, __b) \
+ __aarch64_vget_lane_any (v4hi, , ,__a, __b)
+#define __aarch64_vget_lane_s32(__a, __b) \
+ __aarch64_vget_lane_any (v2si, , ,__a, __b)
+#define __aarch64_vget_lane_s64(__a, __b) (__a)
+
+#define __aarch64_vget_lane_u8(__a, __b) \
+ __aarch64_vget_lane_any (v8qi, (uint8_t), (int8x8_t), __a, __b)
+#define __aarch64_vget_lane_u16(__a, __b) \
+ __aarch64_vget_lane_any (v4hi, (uint16_t), (int16x4_t), __a, __b)
+#define __aarch64_vget_lane_u32(__a, __b) \
+ __aarch64_vget_lane_any (v2si, (uint32_t), (int32x2_t), __a, __b)
+#define __aarch64_vget_lane_u64(__a, __b) (__a)
+
+#define __aarch64_vgetq_lane_f32(__a, __b) \
+ __aarch64_vget_lane_any (v4sf, , , __a, __b)
+#define __aarch64_vgetq_lane_f64(__a, __b) \
+ __aarch64_vget_lane_any (v2df, , , __a, __b)
+
+#define __aarch64_vgetq_lane_p8(__a, __b) \
+ __aarch64_vget_lane_any (v16qi, (poly8_t), (int8x16_t), __a, __b)
+#define __aarch64_vgetq_lane_p16(__a, __b) \
+ __aarch64_vget_lane_any (v8hi, (poly16_t), (int16x8_t), __a, __b)
+
+#define __aarch64_vgetq_lane_s8(__a, __b) \
+ __aarch64_vget_lane_any (v16qi, , ,__a, __b)
+#define __aarch64_vgetq_lane_s16(__a, __b) \
+ __aarch64_vget_lane_any (v8hi, , ,__a, __b)
+#define __aarch64_vgetq_lane_s32(__a, __b) \
+ __aarch64_vget_lane_any (v4si, , ,__a, __b)
+#define __aarch64_vgetq_lane_s64(__a, __b) \
+ __aarch64_vget_lane_any (v2di, , ,__a, __b)
+
+#define __aarch64_vgetq_lane_u8(__a, __b) \
+ __aarch64_vget_lane_any (v16qi, (uint8_t), (int8x16_t), __a, __b)
+#define __aarch64_vgetq_lane_u16(__a, __b) \
+ __aarch64_vget_lane_any (v8hi, (uint16_t), (int16x8_t), __a, __b)
+#define __aarch64_vgetq_lane_u32(__a, __b) \
+ __aarch64_vget_lane_any (v4si, (uint32_t), (int32x4_t), __a, __b)
+#define __aarch64_vgetq_lane_u64(__a, __b) \
+ __aarch64_vget_lane_any (v2di, (uint64_t), (int64x2_t), __a, __b)
+
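+/* For illustration, __aarch64_vget_lane_u16 (__a, __b) expands to
+   ((uint16_t) __builtin_aarch64_be_checked_get_lanev4hi ((int16x4_t) __a,
+							   __b)).  */
+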
+/* __aarch64_vdup_lane internal macros. */
+#define __aarch64_vdup_lane_any(__size, __q1, __q2, __a, __b) \
+ vdup##__q1##_n_##__size (__aarch64_vget##__q2##_lane_##__size (__a, __b))
+
+#define __aarch64_vdup_lane_f32(__a, __b) \
+ __aarch64_vdup_lane_any (f32, , , __a, __b)
+#define __aarch64_vdup_lane_f64(__a, __b) (__a)
+#define __aarch64_vdup_lane_p8(__a, __b) \
+ __aarch64_vdup_lane_any (p8, , , __a, __b)
+#define __aarch64_vdup_lane_p16(__a, __b) \
+ __aarch64_vdup_lane_any (p16, , , __a, __b)
+#define __aarch64_vdup_lane_s8(__a, __b) \
+ __aarch64_vdup_lane_any (s8, , , __a, __b)
+#define __aarch64_vdup_lane_s16(__a, __b) \
+ __aarch64_vdup_lane_any (s16, , , __a, __b)
+#define __aarch64_vdup_lane_s32(__a, __b) \
+ __aarch64_vdup_lane_any (s32, , , __a, __b)
+#define __aarch64_vdup_lane_s64(__a, __b) (__a)
+#define __aarch64_vdup_lane_u8(__a, __b) \
+ __aarch64_vdup_lane_any (u8, , , __a, __b)
+#define __aarch64_vdup_lane_u16(__a, __b) \
+ __aarch64_vdup_lane_any (u16, , , __a, __b)
+#define __aarch64_vdup_lane_u32(__a, __b) \
+ __aarch64_vdup_lane_any (u32, , , __a, __b)
+#define __aarch64_vdup_lane_u64(__a, __b) (__a)
+
+/* __aarch64_vdup_laneq internal macros. */
+#define __aarch64_vdup_laneq_f32(__a, __b) \
+ __aarch64_vdup_lane_any (f32, , q, __a, __b)
+#define __aarch64_vdup_laneq_f64(__a, __b) \
+ __aarch64_vdup_lane_any (f64, , q, __a, __b)
+#define __aarch64_vdup_laneq_p8(__a, __b) \
+ __aarch64_vdup_lane_any (p8, , q, __a, __b)
+#define __aarch64_vdup_laneq_p16(__a, __b) \
+ __aarch64_vdup_lane_any (p16, , q, __a, __b)
+#define __aarch64_vdup_laneq_s8(__a, __b) \
+ __aarch64_vdup_lane_any (s8, , q, __a, __b)
+#define __aarch64_vdup_laneq_s16(__a, __b) \
+ __aarch64_vdup_lane_any (s16, , q, __a, __b)
+#define __aarch64_vdup_laneq_s32(__a, __b) \
+ __aarch64_vdup_lane_any (s32, , q, __a, __b)
+#define __aarch64_vdup_laneq_s64(__a, __b) \
+ __aarch64_vdup_lane_any (s64, , q, __a, __b)
+#define __aarch64_vdup_laneq_u8(__a, __b) \
+ __aarch64_vdup_lane_any (u8, , q, __a, __b)
+#define __aarch64_vdup_laneq_u16(__a, __b) \
+ __aarch64_vdup_lane_any (u16, , q, __a, __b)
+#define __aarch64_vdup_laneq_u32(__a, __b) \
+ __aarch64_vdup_lane_any (u32, , q, __a, __b)
+#define __aarch64_vdup_laneq_u64(__a, __b) \
+ __aarch64_vdup_lane_any (u64, , q, __a, __b)
+
+/* __aarch64_vdupq_lane internal macros. */
+#define __aarch64_vdupq_lane_f32(__a, __b) \
+ __aarch64_vdup_lane_any (f32, q, , __a, __b)
+#define __aarch64_vdupq_lane_f64(__a, __b) (vdupq_n_f64 (__a))
+#define __aarch64_vdupq_lane_p8(__a, __b) \
+ __aarch64_vdup_lane_any (p8, q, , __a, __b)
+#define __aarch64_vdupq_lane_p16(__a, __b) \
+ __aarch64_vdup_lane_any (p16, q, , __a, __b)
+#define __aarch64_vdupq_lane_s8(__a, __b) \
+ __aarch64_vdup_lane_any (s8, q, , __a, __b)
+#define __aarch64_vdupq_lane_s16(__a, __b) \
+ __aarch64_vdup_lane_any (s16, q, , __a, __b)
+#define __aarch64_vdupq_lane_s32(__a, __b) \
+ __aarch64_vdup_lane_any (s32, q, , __a, __b)
+#define __aarch64_vdupq_lane_s64(__a, __b) (vdupq_n_s64 (__a))
+#define __aarch64_vdupq_lane_u8(__a, __b) \
+ __aarch64_vdup_lane_any (u8, q, , __a, __b)
+#define __aarch64_vdupq_lane_u16(__a, __b) \
+ __aarch64_vdup_lane_any (u16, q, , __a, __b)
+#define __aarch64_vdupq_lane_u32(__a, __b) \
+ __aarch64_vdup_lane_any (u32, q, , __a, __b)
+#define __aarch64_vdupq_lane_u64(__a, __b) (vdupq_n_u64 (__a))
+
+/* __aarch64_vdupq_laneq internal macros. */
+#define __aarch64_vdupq_laneq_f32(__a, __b) \
+ __aarch64_vdup_lane_any (f32, q, q, __a, __b)
+#define __aarch64_vdupq_laneq_f64(__a, __b) \
+ __aarch64_vdup_lane_any (f64, q, q, __a, __b)
+#define __aarch64_vdupq_laneq_p8(__a, __b) \
+ __aarch64_vdup_lane_any (p8, q, q, __a, __b)
+#define __aarch64_vdupq_laneq_p16(__a, __b) \
+ __aarch64_vdup_lane_any (p16, q, q, __a, __b)
+#define __aarch64_vdupq_laneq_s8(__a, __b) \
+ __aarch64_vdup_lane_any (s8, q, q, __a, __b)
+#define __aarch64_vdupq_laneq_s16(__a, __b) \
+ __aarch64_vdup_lane_any (s16, q, q, __a, __b)
+#define __aarch64_vdupq_laneq_s32(__a, __b) \
+ __aarch64_vdup_lane_any (s32, q, q, __a, __b)
+#define __aarch64_vdupq_laneq_s64(__a, __b) \
+ __aarch64_vdup_lane_any (s64, q, q, __a, __b)
+#define __aarch64_vdupq_laneq_u8(__a, __b) \
+ __aarch64_vdup_lane_any (u8, q, q, __a, __b)
+#define __aarch64_vdupq_laneq_u16(__a, __b) \
+ __aarch64_vdup_lane_any (u16, q, q, __a, __b)
+#define __aarch64_vdupq_laneq_u32(__a, __b) \
+ __aarch64_vdup_lane_any (u32, q, q, __a, __b)
+#define __aarch64_vdupq_laneq_u64(__a, __b) \
+ __aarch64_vdup_lane_any (u64, q, q, __a, __b)
+
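+/* For illustration, __aarch64_vdup_lane_s16 (__a, __b) expands to
+   vdup_n_s16 (__aarch64_vget_lane_s16 (__a, __b)), and
+   __aarch64_vdupq_laneq_u32 (__a, __b) expands to
+   vdupq_n_u32 (__aarch64_vgetq_lane_u32 (__a, __b)).  */
+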
+/* vadd */
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vadd_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return __a + __b;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vadd_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return __a + __b;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vadd_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return __a + __b;
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vadd_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return __a + __b;
+}
+
+__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+vadd_f64 (float64x1_t __a, float64x1_t __b)
+{
+ return __a + __b;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vadd_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return __a + __b;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vadd_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return __a + __b;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vadd_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return __a + __b;
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vadd_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return __a + __b;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vadd_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return __a + __b;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vaddq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __a + __b;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vaddq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __a + __b;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vaddq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __a + __b;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vaddq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return __a + __b;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vaddq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return __a + __b;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vaddq_f64 (float64x2_t __a, float64x2_t __b)
+{
+ return __a + __b;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vaddq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __a + __b;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vaddq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __a + __b;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vaddq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __a + __b;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vaddq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return __a + __b;
+}
+
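+/* vaddl: widening add -- each result element is the sum of the
+   corresponding narrow elements, produced at twice their width.  */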
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vaddl_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int16x8_t) __builtin_aarch64_saddlv8qi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vaddl_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int32x4_t) __builtin_aarch64_saddlv4hi (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vaddl_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int64x2_t) __builtin_aarch64_saddlv2si (__a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vaddl_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint16x8_t) __builtin_aarch64_uaddlv8qi ((int8x8_t) __a,
+ (int8x8_t) __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vaddl_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint32x4_t) __builtin_aarch64_uaddlv4hi ((int16x4_t) __a,
+ (int16x4_t) __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vaddl_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint64x2_t) __builtin_aarch64_uaddlv2si ((int32x2_t) __a,
+ (int32x2_t) __b);
+}
+
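+/* vaddl_high: widening add of the high halves of the 128-bit inputs.  */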
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vaddl_high_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int16x8_t) __builtin_aarch64_saddl2v16qi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vaddl_high_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int32x4_t) __builtin_aarch64_saddl2v8hi (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vaddl_high_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int64x2_t) __builtin_aarch64_saddl2v4si (__a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vaddl_high_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint16x8_t) __builtin_aarch64_uaddl2v16qi ((int8x16_t) __a,
+ (int8x16_t) __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vaddl_high_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint32x4_t) __builtin_aarch64_uaddl2v8hi ((int16x8_t) __a,
+ (int16x8_t) __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vaddl_high_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint64x2_t) __builtin_aarch64_uaddl2v4si ((int32x4_t) __a,
+ (int32x4_t) __b);
+}
+
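+/* vaddw: widening add of a wide vector and a widened narrow vector.  */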
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vaddw_s8 (int16x8_t __a, int8x8_t __b)
+{
+ return (int16x8_t) __builtin_aarch64_saddwv8qi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vaddw_s16 (int32x4_t __a, int16x4_t __b)
+{
+ return (int32x4_t) __builtin_aarch64_saddwv4hi (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vaddw_s32 (int64x2_t __a, int32x2_t __b)
+{
+ return (int64x2_t) __builtin_aarch64_saddwv2si (__a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vaddw_u8 (uint16x8_t __a, uint8x8_t __b)
+{
+ return (uint16x8_t) __builtin_aarch64_uaddwv8qi ((int16x8_t) __a,
+ (int8x8_t) __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vaddw_u16 (uint32x4_t __a, uint16x4_t __b)
+{
+ return (uint32x4_t) __builtin_aarch64_uaddwv4hi ((int32x4_t) __a,
+ (int16x4_t) __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vaddw_u32 (uint64x2_t __a, uint32x2_t __b)
+{
+ return (uint64x2_t) __builtin_aarch64_uaddwv2si ((int64x2_t) __a,
+ (int32x2_t) __b);
+}
+
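+/* vaddw_high: widening add of a wide vector and the widened high half
+   of a 128-bit narrow vector.  */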
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vaddw_high_s8 (int16x8_t __a, int8x16_t __b)
+{
+ return (int16x8_t) __builtin_aarch64_saddw2v16qi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vaddw_high_s16 (int32x4_t __a, int16x8_t __b)
+{
+ return (int32x4_t) __builtin_aarch64_saddw2v8hi (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vaddw_high_s32 (int64x2_t __a, int32x4_t __b)
+{
+ return (int64x2_t) __builtin_aarch64_saddw2v4si (__a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vaddw_high_u8 (uint16x8_t __a, uint8x16_t __b)
+{
+ return (uint16x8_t) __builtin_aarch64_uaddw2v16qi ((int16x8_t) __a,
+ (int8x16_t) __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vaddw_high_u16 (uint32x4_t __a, uint16x8_t __b)
+{
+ return (uint32x4_t) __builtin_aarch64_uaddw2v8hi ((int32x4_t) __a,
+ (int16x8_t) __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vaddw_high_u32 (uint64x2_t __a, uint32x4_t __b)
+{
+ return (uint64x2_t) __builtin_aarch64_uaddw2v4si ((int64x2_t) __a,
+ (int32x4_t) __b);
+}
+
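+/* vhadd: halving add -- each result element is (a + b) >> 1, computed
+   without intermediate overflow (the low bit of the sum is dropped).  */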
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vhadd_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t) __builtin_aarch64_shaddv8qi (__a, __b);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vhadd_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t) __builtin_aarch64_shaddv4hi (__a, __b);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vhadd_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t) __builtin_aarch64_shaddv2si (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vhadd_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t) __builtin_aarch64_uhaddv8qi ((int8x8_t) __a,
+ (int8x8_t) __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vhadd_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t) __builtin_aarch64_uhaddv4hi ((int16x4_t) __a,
+ (int16x4_t) __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vhadd_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t) __builtin_aarch64_uhaddv2si ((int32x2_t) __a,
+ (int32x2_t) __b);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vhaddq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t) __builtin_aarch64_shaddv16qi (__a, __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vhaddq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t) __builtin_aarch64_shaddv8hi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vhaddq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t) __builtin_aarch64_shaddv4si (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vhaddq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t) __builtin_aarch64_uhaddv16qi ((int8x16_t) __a,
+ (int8x16_t) __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vhaddq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t) __builtin_aarch64_uhaddv8hi ((int16x8_t) __a,
+ (int16x8_t) __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vhaddq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t) __builtin_aarch64_uhaddv4si ((int32x4_t) __a,
+ (int32x4_t) __b);
+}
+
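+/* vrhadd: rounding halving add -- each result element is (a + b + 1) >> 1,
+   computed without intermediate overflow.  */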
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vrhadd_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t) __builtin_aarch64_srhaddv8qi (__a, __b);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vrhadd_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t) __builtin_aarch64_srhaddv4hi (__a, __b);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vrhadd_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t) __builtin_aarch64_srhaddv2si (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vrhadd_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t) __builtin_aarch64_urhaddv8qi ((int8x8_t) __a,
+ (int8x8_t) __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vrhadd_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t) __builtin_aarch64_urhaddv4hi ((int16x4_t) __a,
+ (int16x4_t) __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vrhadd_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t) __builtin_aarch64_urhaddv2si ((int32x2_t) __a,
+ (int32x2_t) __b);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vrhaddq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t) __builtin_aarch64_srhaddv16qi (__a, __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vrhaddq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t) __builtin_aarch64_srhaddv8hi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vrhaddq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t) __builtin_aarch64_srhaddv4si (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vrhaddq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t) __builtin_aarch64_urhaddv16qi ((int8x16_t) __a,
+ (int8x16_t) __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vrhaddq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t) __builtin_aarch64_urhaddv8hi ((int16x8_t) __a,
+ (int16x8_t) __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vrhaddq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t) __builtin_aarch64_urhaddv4si ((int32x4_t) __a,
+ (int32x4_t) __b);
+}
+
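+/* vaddhn: add and narrow, returning the most significant half of each
+   element of the sum.  */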
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vaddhn_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int8x8_t) __builtin_aarch64_addhnv8hi (__a, __b);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vaddhn_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int16x4_t) __builtin_aarch64_addhnv4si (__a, __b);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vaddhn_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int32x2_t) __builtin_aarch64_addhnv2di (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vaddhn_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint8x8_t) __builtin_aarch64_addhnv8hi ((int16x8_t) __a,
+ (int16x8_t) __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vaddhn_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint16x4_t) __builtin_aarch64_addhnv4si ((int32x4_t) __a,
+ (int32x4_t) __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vaddhn_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return (uint32x2_t) __builtin_aarch64_addhnv2di ((int64x2_t) __a,
+ (int64x2_t) __b);
+}
+
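+/* vraddhn: rounding add and narrow to the most significant half of each
+   element of the sum.  */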
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vraddhn_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int8x8_t) __builtin_aarch64_raddhnv8hi (__a, __b);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vraddhn_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int16x4_t) __builtin_aarch64_raddhnv4si (__a, __b);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vraddhn_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int32x2_t) __builtin_aarch64_raddhnv2di (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vraddhn_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint8x8_t) __builtin_aarch64_raddhnv8hi ((int16x8_t) __a,
+ (int16x8_t) __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vraddhn_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint16x4_t) __builtin_aarch64_raddhnv4si ((int32x4_t) __a,
+ (int32x4_t) __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vraddhn_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return (uint32x2_t) __builtin_aarch64_raddhnv2di ((int64x2_t) __a,
+ (int64x2_t) __b);
+}
+
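+/* vaddhn_high: add and narrow, placing the narrowed sum in the high half
+   of the result and __a in the low half.  */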
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vaddhn_high_s16 (int8x8_t __a, int16x8_t __b, int16x8_t __c)
+{
+ return (int8x16_t) __builtin_aarch64_addhn2v8hi (__a, __b, __c);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vaddhn_high_s32 (int16x4_t __a, int32x4_t __b, int32x4_t __c)
+{
+ return (int16x8_t) __builtin_aarch64_addhn2v4si (__a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vaddhn_high_s64 (int32x2_t __a, int64x2_t __b, int64x2_t __c)
+{
+ return (int32x4_t) __builtin_aarch64_addhn2v2di (__a, __b, __c);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vaddhn_high_u16 (uint8x8_t __a, uint16x8_t __b, uint16x8_t __c)
+{
+ return (uint8x16_t) __builtin_aarch64_addhn2v8hi ((int8x8_t) __a,
+ (int16x8_t) __b,
+ (int16x8_t) __c);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vaddhn_high_u32 (uint16x4_t __a, uint32x4_t __b, uint32x4_t __c)
+{
+ return (uint16x8_t) __builtin_aarch64_addhn2v4si ((int16x4_t) __a,
+ (int32x4_t) __b,
+ (int32x4_t) __c);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vaddhn_high_u64 (uint32x2_t __a, uint64x2_t __b, uint64x2_t __c)
+{
+ return (uint32x4_t) __builtin_aarch64_addhn2v2di ((int32x2_t) __a,
+ (int64x2_t) __b,
+ (int64x2_t) __c);
+}
+
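+/* vraddhn_high: rounding variant of vaddhn_high.  */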
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vraddhn_high_s16 (int8x8_t __a, int16x8_t __b, int16x8_t __c)
+{
+ return (int8x16_t) __builtin_aarch64_raddhn2v8hi (__a, __b, __c);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vraddhn_high_s32 (int16x4_t __a, int32x4_t __b, int32x4_t __c)
+{
+ return (int16x8_t) __builtin_aarch64_raddhn2v4si (__a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vraddhn_high_s64 (int32x2_t __a, int64x2_t __b, int64x2_t __c)
+{
+ return (int32x4_t) __builtin_aarch64_raddhn2v2di (__a, __b, __c);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vraddhn_high_u16 (uint8x8_t __a, uint16x8_t __b, uint16x8_t __c)
+{
+ return (uint8x16_t) __builtin_aarch64_raddhn2v8hi ((int8x8_t) __a,
+ (int16x8_t) __b,
+ (int16x8_t) __c);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vraddhn_high_u32 (uint16x4_t __a, uint32x4_t __b, uint32x4_t __c)
+{
+ return (uint16x8_t) __builtin_aarch64_raddhn2v4si ((int16x4_t) __a,
+ (int32x4_t) __b,
+ (int32x4_t) __c);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vraddhn_high_u64 (uint32x2_t __a, uint64x2_t __b, uint64x2_t __c)
+{
+ return (uint32x4_t) __builtin_aarch64_raddhn2v2di ((int32x2_t) __a,
+ (int64x2_t) __b,
+ (int64x2_t) __c);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vdiv_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return __a / __b;
+}
+
+__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+vdiv_f64 (float64x1_t __a, float64x1_t __b)
+{
+ return __a / __b;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vdivq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return __a / __b;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vdivq_f64 (float64x2_t __a, float64x2_t __b)
+{
+ return __a / __b;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vmul_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return __a * __b;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmul_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return __a * __b;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmul_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return __a * __b;
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmul_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return __a * __b;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vmul_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return __a * __b;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmul_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return __a * __b;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmul_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return __a * __b;
+}
+
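+/* vmul_p8: polynomial (carry-less) multiplication of each pair of lanes.  */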
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vmul_p8 (poly8x8_t __a, poly8x8_t __b)
+{
+ return (poly8x8_t) __builtin_aarch64_pmulv8qi ((int8x8_t) __a,
+ (int8x8_t) __b);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vmulq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __a * __b;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmulq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __a * __b;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmulq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __a * __b;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vmulq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return __a * __b;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vmulq_f64 (float64x2_t __a, float64x2_t __b)
+{
+ return __a * __b;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vmulq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __a * __b;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmulq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __a * __b;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmulq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __a * __b;
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vmulq_p8 (poly8x16_t __a, poly8x16_t __b)
+{
+ return (poly8x16_t) __builtin_aarch64_pmulv16qi ((int8x16_t) __a,
+ (int8x16_t) __b);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vand_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return __a & __b;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vand_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return __a & __b;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vand_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return __a & __b;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vand_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return __a & __b;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vand_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return __a & __b;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vand_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return __a & __b;
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vand_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return __a & __b;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vand_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return __a & __b;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vandq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __a & __b;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vandq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __a & __b;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vandq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __a & __b;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vandq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return __a & __b;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vandq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __a & __b;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vandq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __a & __b;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vandq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __a & __b;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vandq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return __a & __b;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vorr_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return __a | __b;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vorr_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return __a | __b;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vorr_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return __a | __b;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vorr_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return __a | __b;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vorr_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return __a | __b;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vorr_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return __a | __b;
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vorr_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return __a | __b;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vorr_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return __a | __b;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vorrq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __a | __b;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vorrq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __a | __b;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vorrq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __a | __b;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vorrq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return __a | __b;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vorrq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __a | __b;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vorrq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __a | __b;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vorrq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __a | __b;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vorrq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return __a | __b;
+}
+
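+/* veor - bitwise exclusive OR of each pair of lanes.  */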
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+veor_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return __a ^ __b;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+veor_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return __a ^ __b;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+veor_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return __a ^ __b;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+veor_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return __a ^ __b;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+veor_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return __a ^ __b;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+veor_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return __a ^ __b;
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+veor_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return __a ^ __b;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+veor_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return __a ^ __b;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+veorq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __a ^ __b;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+veorq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __a ^ __b;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+veorq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __a ^ __b;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+veorq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return __a ^ __b;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+veorq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __a ^ __b;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+veorq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __a ^ __b;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+veorq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __a ^ __b;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+veorq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return __a ^ __b;
+}
+
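+/* vbic - bit clear: __a AND NOT __b.  */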
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vbic_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return __a & ~__b;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vbic_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return __a & ~__b;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vbic_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return __a & ~__b;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vbic_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return __a & ~__b;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vbic_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return __a & ~__b;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vbic_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return __a & ~__b;
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vbic_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return __a & ~__b;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vbic_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return __a & ~__b;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vbicq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __a & ~__b;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vbicq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __a & ~__b;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vbicq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __a & ~__b;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vbicq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return __a & ~__b;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vbicq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __a & ~__b;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vbicq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __a & ~__b;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vbicq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __a & ~__b;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vbicq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return __a & ~__b;
+}
+
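+/* vorn - bitwise OR NOT: __a OR NOT __b.  */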
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vorn_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return __a | ~__b;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vorn_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return __a | ~__b;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vorn_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return __a | ~__b;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vorn_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return __a | ~__b;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vorn_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return __a | ~__b;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vorn_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return __a | ~__b;
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vorn_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return __a | ~__b;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vorn_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return __a | ~__b;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vornq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __a | ~__b;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vornq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __a | ~__b;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vornq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __a | ~__b;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vornq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return __a | ~__b;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vornq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __a | ~__b;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vornq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __a | ~__b;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vornq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __a | ~__b;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vornq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return __a | ~__b;
+}
+
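+/* vsub - lane-by-lane subtraction.  */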
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vsub_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return __a - __b;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vsub_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return __a - __b;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vsub_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return __a - __b;
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vsub_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return __a - __b;
+}
+
+__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+vsub_f64 (float64x1_t __a, float64x1_t __b)
+{
+ return __a - __b;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vsub_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return __a - __b;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vsub_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return __a - __b;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vsub_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return __a - __b;
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vsub_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return __a - __b;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vsub_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return __a - __b;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vsubq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __a - __b;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vsubq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __a - __b;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vsubq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __a - __b;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vsubq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return __a - __b;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vsubq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return __a - __b;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vsubq_f64 (float64x2_t __a, float64x2_t __b)
+{
+ return __a - __b;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vsubq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __a - __b;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vsubq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __a - __b;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsubq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __a - __b;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vsubq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return __a - __b;
+}
+
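+/* vsubl/vsubw - widening subtraction.  vsubl subtracts two narrow vectors
+ and yields a double-width result; vsubw subtracts a narrow vector from an
+ already wide one.  The _high variants use the upper halves of the 128-bit
+ inputs.  */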
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vsubl_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int16x8_t) __builtin_aarch64_ssublv8qi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vsubl_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int32x4_t) __builtin_aarch64_ssublv4hi (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vsubl_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int64x2_t) __builtin_aarch64_ssublv2si (__a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vsubl_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint16x8_t) __builtin_aarch64_usublv8qi ((int8x8_t) __a,
+ (int8x8_t) __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsubl_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint32x4_t) __builtin_aarch64_usublv4hi ((int16x4_t) __a,
+ (int16x4_t) __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vsubl_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint64x2_t) __builtin_aarch64_usublv2si ((int32x2_t) __a,
+ (int32x2_t) __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vsubl_high_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int16x8_t) __builtin_aarch64_ssubl2v16qi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vsubl_high_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int32x4_t) __builtin_aarch64_ssubl2v8hi (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vsubl_high_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int64x2_t) __builtin_aarch64_ssubl2v4si (__a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vsubl_high_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint16x8_t) __builtin_aarch64_usubl2v16qi ((int8x16_t) __a,
+ (int8x16_t) __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsubl_high_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint32x4_t) __builtin_aarch64_usubl2v8hi ((int16x8_t) __a,
+ (int16x8_t) __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vsubl_high_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint64x2_t) __builtin_aarch64_usubl2v4si ((int32x4_t) __a,
+ (int32x4_t) __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vsubw_s8 (int16x8_t __a, int8x8_t __b)
+{
+ return (int16x8_t) __builtin_aarch64_ssubwv8qi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vsubw_s16 (int32x4_t __a, int16x4_t __b)
+{
+ return (int32x4_t) __builtin_aarch64_ssubwv4hi (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vsubw_s32 (int64x2_t __a, int32x2_t __b)
+{
+ return (int64x2_t) __builtin_aarch64_ssubwv2si (__a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vsubw_u8 (uint16x8_t __a, uint8x8_t __b)
+{
+ return (uint16x8_t) __builtin_aarch64_usubwv8qi ((int16x8_t) __a,
+ (int8x8_t) __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsubw_u16 (uint32x4_t __a, uint16x4_t __b)
+{
+ return (uint32x4_t) __builtin_aarch64_usubwv4hi ((int32x4_t) __a,
+ (int16x4_t) __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vsubw_u32 (uint64x2_t __a, uint32x2_t __b)
+{
+ return (uint64x2_t) __builtin_aarch64_usubwv2si ((int64x2_t) __a,
+ (int32x2_t) __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vsubw_high_s8 (int16x8_t __a, int8x16_t __b)
+{
+ return (int16x8_t) __builtin_aarch64_ssubw2v16qi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vsubw_high_s16 (int32x4_t __a, int16x8_t __b)
+{
+ return (int32x4_t) __builtin_aarch64_ssubw2v8hi (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vsubw_high_s32 (int64x2_t __a, int32x4_t __b)
+{
+ return (int64x2_t) __builtin_aarch64_ssubw2v4si (__a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vsubw_high_u8 (uint16x8_t __a, uint8x16_t __b)
+{
+ return (uint16x8_t) __builtin_aarch64_usubw2v16qi ((int16x8_t) __a,
+ (int8x16_t) __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsubw_high_u16 (uint32x4_t __a, uint16x8_t __b)
+{
+ return (uint32x4_t) __builtin_aarch64_usubw2v8hi ((int32x4_t) __a,
+ (int16x8_t) __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vsubw_high_u32 (uint64x2_t __a, uint32x4_t __b)
+{
+ return (uint64x2_t) __builtin_aarch64_usubw2v4si ((int64x2_t) __a,
+ (int32x4_t) __b);
+}
+
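+/* vqadd - saturating addition.  Results are clamped to the range of the
+ element type instead of wrapping, e.g. vqadd_s8 of 100 and 100 gives 127
+ rather than the wrapped value -56.  */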
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqadd_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t) __builtin_aarch64_sqaddv8qi (__a, __b);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqadd_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t) __builtin_aarch64_sqaddv4hi (__a, __b);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqadd_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t) __builtin_aarch64_sqaddv2si (__a, __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vqadd_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (int64x1_t) __builtin_aarch64_sqadddi (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqadd_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t) __builtin_aarch64_uqaddv8qi ((int8x8_t) __a,
+ (int8x8_t) __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqadd_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t) __builtin_aarch64_uqaddv4hi ((int16x4_t) __a,
+ (int16x4_t) __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqadd_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t) __builtin_aarch64_uqaddv2si ((int32x2_t) __a,
+ (int32x2_t) __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vqadd_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return (uint64x1_t) __builtin_aarch64_uqadddi ((int64x1_t) __a,
+ (int64x1_t) __b);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vqaddq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t) __builtin_aarch64_sqaddv16qi (__a, __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqaddq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t) __builtin_aarch64_sqaddv8hi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqaddq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t) __builtin_aarch64_sqaddv4si (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqaddq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int64x2_t) __builtin_aarch64_sqaddv2di (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vqaddq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t) __builtin_aarch64_uqaddv16qi ((int8x16_t) __a,
+ (int8x16_t) __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vqaddq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t) __builtin_aarch64_uqaddv8hi ((int16x8_t) __a,
+ (int16x8_t) __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vqaddq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t) __builtin_aarch64_uqaddv4si ((int32x4_t) __a,
+ (int32x4_t) __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vqaddq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return (uint64x2_t) __builtin_aarch64_uqaddv2di ((int64x2_t) __a,
+ (int64x2_t) __b);
+}
+
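+/* vqsub - saturating subtraction; results clamp at the element type's
+ minimum and maximum instead of wrapping.  */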
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqsub_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t) __builtin_aarch64_sqsubv8qi (__a, __b);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqsub_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t) __builtin_aarch64_sqsubv4hi (__a, __b);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqsub_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t) __builtin_aarch64_sqsubv2si (__a, __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vqsub_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (int64x1_t) __builtin_aarch64_sqsubdi (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqsub_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t) __builtin_aarch64_uqsubv8qi ((int8x8_t) __a,
+ (int8x8_t) __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqsub_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t) __builtin_aarch64_uqsubv4hi ((int16x4_t) __a,
+ (int16x4_t) __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqsub_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t) __builtin_aarch64_uqsubv2si ((int32x2_t) __a,
+ (int32x2_t) __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vqsub_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return (uint64x1_t) __builtin_aarch64_uqsubdi ((int64x1_t) __a,
+ (int64x1_t) __b);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vqsubq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t) __builtin_aarch64_sqsubv16qi (__a, __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqsubq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t) __builtin_aarch64_sqsubv8hi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqsubq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t) __builtin_aarch64_sqsubv4si (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqsubq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int64x2_t) __builtin_aarch64_sqsubv2di (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vqsubq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t) __builtin_aarch64_uqsubv16qi ((int8x16_t) __a,
+ (int8x16_t) __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vqsubq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t) __builtin_aarch64_uqsubv8hi ((int16x8_t) __a,
+ (int16x8_t) __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vqsubq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t) __builtin_aarch64_uqsubv4si ((int32x4_t) __a,
+ (int32x4_t) __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vqsubq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return (uint64x2_t) __builtin_aarch64_uqsubv2di ((int64x2_t) __a,
+ (int64x2_t) __b);
+}
+
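+/* vqneg - saturating negation.  The most negative value saturates to the
+ most positive one (e.g. INT8_MIN becomes INT8_MAX) instead of
+ overflowing.  */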
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqneg_s8 (int8x8_t __a)
+{
+ return (int8x8_t) __builtin_aarch64_sqnegv8qi (__a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqneg_s16 (int16x4_t __a)
+{
+ return (int16x4_t) __builtin_aarch64_sqnegv4hi (__a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqneg_s32 (int32x2_t __a)
+{
+ return (int32x2_t) __builtin_aarch64_sqnegv2si (__a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vqnegq_s8 (int8x16_t __a)
+{
+ return (int8x16_t) __builtin_aarch64_sqnegv16qi (__a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqnegq_s16 (int16x8_t __a)
+{
+ return (int16x8_t) __builtin_aarch64_sqnegv8hi (__a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqnegq_s32 (int32x4_t __a)
+{
+ return (int32x4_t) __builtin_aarch64_sqnegv4si (__a);
+}
+
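+/* vqabs - saturating absolute value; the most negative value maps to the
+ corresponding maximum rather than overflowing.  */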
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqabs_s8 (int8x8_t __a)
+{
+ return (int8x8_t) __builtin_aarch64_sqabsv8qi (__a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqabs_s16 (int16x4_t __a)
+{
+ return (int16x4_t) __builtin_aarch64_sqabsv4hi (__a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqabs_s32 (int32x2_t __a)
+{
+ return (int32x2_t) __builtin_aarch64_sqabsv2si (__a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vqabsq_s8 (int8x16_t __a)
+{
+ return (int8x16_t) __builtin_aarch64_sqabsv16qi (__a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqabsq_s16 (int16x8_t __a)
+{
+ return (int16x8_t) __builtin_aarch64_sqabsv8hi (__a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqabsq_s32 (int32x4_t __a)
+{
+ return (int32x4_t) __builtin_aarch64_sqabsv4si (__a);
+}
+
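+/* vqdmulh/vqrdmulh - saturating doubling multiply returning the high half;
+ the vqrdmulh forms additionally round the result.  */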
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqdmulh_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t) __builtin_aarch64_sqdmulhv4hi (__a, __b);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqdmulh_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t) __builtin_aarch64_sqdmulhv2si (__a, __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqdmulhq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t) __builtin_aarch64_sqdmulhv8hi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmulhq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t) __builtin_aarch64_sqdmulhv4si (__a, __b);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqrdmulh_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t) __builtin_aarch64_sqrdmulhv4hi (__a, __b);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqrdmulh_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t) __builtin_aarch64_sqrdmulhv2si (__a, __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqrdmulhq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t) __builtin_aarch64_sqrdmulhv8hi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqrdmulhq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t) __builtin_aarch64_sqrdmulhv4si (__a, __b);
+}
+
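+/* vcreate - build a 64-bit vector from a scalar bit pattern.  On a
+ little-endian target lane 0 comes from the least significant bits of
+ __a.  */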
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vcreate_s8 (uint64_t __a)
+{
+ return (int8x8_t) __a;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vcreate_s16 (uint64_t __a)
+{
+ return (int16x4_t) __a;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vcreate_s32 (uint64_t __a)
+{
+ return (int32x2_t) __a;
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vcreate_s64 (uint64_t __a)
+{
+ return (int64x1_t) __a;
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vcreate_f32 (uint64_t __a)
+{
+ return (float32x2_t) __a;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vcreate_u8 (uint64_t __a)
+{
+ return (uint8x8_t) __a;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vcreate_u16 (uint64_t __a)
+{
+ return (uint16x4_t) __a;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcreate_u32 (uint64_t __a)
+{
+ return (uint32x2_t) __a;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcreate_u64 (uint64_t __a)
+{
+ return (uint64x1_t) __a;
+}
+
+__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+vcreate_f64 (uint64_t __a)
+{
+ return (float64x1_t) __builtin_aarch64_createdf (__a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vcreate_p8 (uint64_t __a)
+{
+ return (poly8x8_t) __a;
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vcreate_p16 (uint64_t __a)
+{
+ return (poly16x4_t) __a;
+}
+
+/* vget_lane */
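+/* For the lane accessors below, the index __b must be a constant
+ expression in the range 0 ... (number of lanes - 1); it selects a single
+ element of __a.  */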
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vget_lane_f32 (float32x2_t __a, const int __b)
+{
+ return __aarch64_vget_lane_f32 (__a, __b);
+}
+
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vget_lane_f64 (float64x1_t __a, const int __b)
+{
+ return __aarch64_vget_lane_f64 (__a, __b);
+}
+
+__extension__ static __inline poly8_t __attribute__ ((__always_inline__))
+vget_lane_p8 (poly8x8_t __a, const int __b)
+{
+ return __aarch64_vget_lane_p8 (__a, __b);
+}
+
+__extension__ static __inline poly16_t __attribute__ ((__always_inline__))
+vget_lane_p16 (poly16x4_t __a, const int __b)
+{
+ return __aarch64_vget_lane_p16 (__a, __b);
+}
+
+__extension__ static __inline int8_t __attribute__ ((__always_inline__))
+vget_lane_s8 (int8x8_t __a, const int __b)
+{
+ return __aarch64_vget_lane_s8 (__a, __b);
+}
+
+__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+vget_lane_s16 (int16x4_t __a, const int __b)
+{
+ return __aarch64_vget_lane_s16 (__a, __b);
+}
+
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vget_lane_s32 (int32x2_t __a, const int __b)
+{
+ return __aarch64_vget_lane_s32 (__a, __b);
+}
+
+__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+vget_lane_s64 (int64x1_t __a, const int __b)
+{
+ return __aarch64_vget_lane_s64 (__a, __b);
+}
+
+__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
+vget_lane_u8 (uint8x8_t __a, const int __b)
+{
+ return __aarch64_vget_lane_u8 (__a, __b);
+}
+
+__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+vget_lane_u16 (uint16x4_t __a, const int __b)
+{
+ return __aarch64_vget_lane_u16 (__a, __b);
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vget_lane_u32 (uint32x2_t __a, const int __b)
+{
+ return __aarch64_vget_lane_u32 (__a, __b);
+}
+
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+vget_lane_u64 (uint64x1_t __a, const int __b)
+{
+ return __aarch64_vget_lane_u64 (__a, __b);
+}
+
+/* vgetq_lane */
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vgetq_lane_f32 (float32x4_t __a, const int __b)
+{
+ return __aarch64_vgetq_lane_f32 (__a, __b);
+}
+
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vgetq_lane_f64 (float64x2_t __a, const int __b)
+{
+ return __aarch64_vgetq_lane_f64 (__a, __b);
+}
+
+__extension__ static __inline poly8_t __attribute__ ((__always_inline__))
+vgetq_lane_p8 (poly8x16_t __a, const int __b)
+{
+ return __aarch64_vgetq_lane_p8 (__a, __b);
+}
+
+__extension__ static __inline poly16_t __attribute__ ((__always_inline__))
+vgetq_lane_p16 (poly16x8_t __a, const int __b)
+{
+ return __aarch64_vgetq_lane_p16 (__a, __b);
+}
+
+__extension__ static __inline int8_t __attribute__ ((__always_inline__))
+vgetq_lane_s8 (int8x16_t __a, const int __b)
+{
+ return __aarch64_vgetq_lane_s8 (__a, __b);
+}
+
+__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+vgetq_lane_s16 (int16x8_t __a, const int __b)
+{
+ return __aarch64_vgetq_lane_s16 (__a, __b);
+}
+
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vgetq_lane_s32 (int32x4_t __a, const int __b)
+{
+ return __aarch64_vgetq_lane_s32 (__a, __b);
+}
+
+__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+vgetq_lane_s64 (int64x2_t __a, const int __b)
+{
+ return __aarch64_vgetq_lane_s64 (__a, __b);
+}
+
+__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
+vgetq_lane_u8 (uint8x16_t __a, const int __b)
+{
+ return __aarch64_vgetq_lane_u8 (__a, __b);
+}
+
+__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+vgetq_lane_u16 (uint16x8_t __a, const int __b)
+{
+ return __aarch64_vgetq_lane_u16 (__a, __b);
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vgetq_lane_u32 (uint32x4_t __a, const int __b)
+{
+ return __aarch64_vgetq_lane_u32 (__a, __b);
+}
+
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+vgetq_lane_u64 (uint64x2_t __a, const int __b)
+{
+ return __aarch64_vgetq_lane_u64 (__a, __b);
+}
+
+/* vreinterpret */
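+/* The vreinterpret family relabels the bits of a vector as a different
+ element type of the same overall size; no bits are changed and no value
+ conversion is performed.  */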
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vreinterpret_p8_s8 (int8x8_t __a)
+{
+ return (poly8x8_t) __builtin_aarch64_reinterpretv8qiv8qi (__a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vreinterpret_p8_s16 (int16x4_t __a)
+{
+ return (poly8x8_t) __builtin_aarch64_reinterpretv8qiv4hi (__a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vreinterpret_p8_s32 (int32x2_t __a)
+{
+ return (poly8x8_t) __builtin_aarch64_reinterpretv8qiv2si (__a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vreinterpret_p8_s64 (int64x1_t __a)
+{
+ return (poly8x8_t) __builtin_aarch64_reinterpretv8qidi (__a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vreinterpret_p8_f32 (float32x2_t __a)
+{
+ return (poly8x8_t) __builtin_aarch64_reinterpretv8qiv2sf (__a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vreinterpret_p8_u8 (uint8x8_t __a)
+{
+ return (poly8x8_t) __builtin_aarch64_reinterpretv8qiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vreinterpret_p8_u16 (uint16x4_t __a)
+{
+ return (poly8x8_t) __builtin_aarch64_reinterpretv8qiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vreinterpret_p8_u32 (uint32x2_t __a)
+{
+ return (poly8x8_t) __builtin_aarch64_reinterpretv8qiv2si ((int32x2_t) __a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vreinterpret_p8_u64 (uint64x1_t __a)
+{
+ return (poly8x8_t) __builtin_aarch64_reinterpretv8qidi ((int64x1_t) __a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vreinterpret_p8_p16 (poly16x4_t __a)
+{
+ return (poly8x8_t) __builtin_aarch64_reinterpretv8qiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_s8 (int8x16_t __a)
+{
+ return (poly8x16_t) __builtin_aarch64_reinterpretv16qiv16qi (__a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_s16 (int16x8_t __a)
+{
+ return (poly8x16_t) __builtin_aarch64_reinterpretv16qiv8hi (__a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_s32 (int32x4_t __a)
+{
+ return (poly8x16_t) __builtin_aarch64_reinterpretv16qiv4si (__a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_s64 (int64x2_t __a)
+{
+ return (poly8x16_t) __builtin_aarch64_reinterpretv16qiv2di (__a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_f32 (float32x4_t __a)
+{
+ return (poly8x16_t) __builtin_aarch64_reinterpretv16qiv4sf (__a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_u8 (uint8x16_t __a)
+{
+ return (poly8x16_t) __builtin_aarch64_reinterpretv16qiv16qi ((int8x16_t)
+ __a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_u16 (uint16x8_t __a)
+{
+ return (poly8x16_t) __builtin_aarch64_reinterpretv16qiv8hi ((int16x8_t)
+ __a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_u32 (uint32x4_t __a)
+{
+ return (poly8x16_t) __builtin_aarch64_reinterpretv16qiv4si ((int32x4_t)
+ __a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_u64 (uint64x2_t __a)
+{
+ return (poly8x16_t) __builtin_aarch64_reinterpretv16qiv2di ((int64x2_t)
+ __a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_p16 (poly16x8_t __a)
+{
+ return (poly8x16_t) __builtin_aarch64_reinterpretv16qiv8hi ((int16x8_t)
+ __a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_s8 (int8x8_t __a)
+{
+ return (poly16x4_t) __builtin_aarch64_reinterpretv4hiv8qi (__a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_s16 (int16x4_t __a)
+{
+ return (poly16x4_t) __builtin_aarch64_reinterpretv4hiv4hi (__a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_s32 (int32x2_t __a)
+{
+ return (poly16x4_t) __builtin_aarch64_reinterpretv4hiv2si (__a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_s64 (int64x1_t __a)
+{
+ return (poly16x4_t) __builtin_aarch64_reinterpretv4hidi (__a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_f32 (float32x2_t __a)
+{
+ return (poly16x4_t) __builtin_aarch64_reinterpretv4hiv2sf (__a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_u8 (uint8x8_t __a)
+{
+ return (poly16x4_t) __builtin_aarch64_reinterpretv4hiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_u16 (uint16x4_t __a)
+{
+ return (poly16x4_t) __builtin_aarch64_reinterpretv4hiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_u32 (uint32x2_t __a)
+{
+ return (poly16x4_t) __builtin_aarch64_reinterpretv4hiv2si ((int32x2_t) __a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_u64 (uint64x1_t __a)
+{
+ return (poly16x4_t) __builtin_aarch64_reinterpretv4hidi ((int64x1_t) __a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_p8 (poly8x8_t __a)
+{
+ return (poly16x4_t) __builtin_aarch64_reinterpretv4hiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_s8 (int8x16_t __a)
+{
+ return (poly16x8_t) __builtin_aarch64_reinterpretv8hiv16qi (__a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_s16 (int16x8_t __a)
+{
+ return (poly16x8_t) __builtin_aarch64_reinterpretv8hiv8hi (__a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_s32 (int32x4_t __a)
+{
+ return (poly16x8_t) __builtin_aarch64_reinterpretv8hiv4si (__a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_s64 (int64x2_t __a)
+{
+ return (poly16x8_t) __builtin_aarch64_reinterpretv8hiv2di (__a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_f32 (float32x4_t __a)
+{
+ return (poly16x8_t) __builtin_aarch64_reinterpretv8hiv4sf (__a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_u8 (uint8x16_t __a)
+{
+ return (poly16x8_t) __builtin_aarch64_reinterpretv8hiv16qi ((int8x16_t)
+ __a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_u16 (uint16x8_t __a)
+{
+ return (poly16x8_t) __builtin_aarch64_reinterpretv8hiv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_u32 (uint32x4_t __a)
+{
+ return (poly16x8_t) __builtin_aarch64_reinterpretv8hiv4si ((int32x4_t) __a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_u64 (uint64x2_t __a)
+{
+ return (poly16x8_t) __builtin_aarch64_reinterpretv8hiv2di ((int64x2_t) __a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_p8 (poly8x16_t __a)
+{
+ return (poly16x8_t) __builtin_aarch64_reinterpretv8hiv16qi ((int8x16_t)
+ __a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_s8 (int8x8_t __a)
+{
+ return (float32x2_t) __builtin_aarch64_reinterpretv2sfv8qi (__a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_s16 (int16x4_t __a)
+{
+ return (float32x2_t) __builtin_aarch64_reinterpretv2sfv4hi (__a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_s32 (int32x2_t __a)
+{
+ return (float32x2_t) __builtin_aarch64_reinterpretv2sfv2si (__a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_s64 (int64x1_t __a)
+{
+ return (float32x2_t) __builtin_aarch64_reinterpretv2sfdi (__a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_u8 (uint8x8_t __a)
+{
+ return (float32x2_t) __builtin_aarch64_reinterpretv2sfv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_u16 (uint16x4_t __a)
+{
+ return (float32x2_t) __builtin_aarch64_reinterpretv2sfv4hi ((int16x4_t)
+ __a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_u32 (uint32x2_t __a)
+{
+ return (float32x2_t) __builtin_aarch64_reinterpretv2sfv2si ((int32x2_t)
+ __a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_u64 (uint64x1_t __a)
+{
+ return (float32x2_t) __builtin_aarch64_reinterpretv2sfdi ((int64x1_t) __a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_p8 (poly8x8_t __a)
+{
+ return (float32x2_t) __builtin_aarch64_reinterpretv2sfv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_p16 (poly16x4_t __a)
+{
+ return (float32x2_t) __builtin_aarch64_reinterpretv2sfv4hi ((int16x4_t)
+ __a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_s8 (int8x16_t __a)
+{
+ return (float32x4_t) __builtin_aarch64_reinterpretv4sfv16qi (__a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_s16 (int16x8_t __a)
+{
+ return (float32x4_t) __builtin_aarch64_reinterpretv4sfv8hi (__a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_s32 (int32x4_t __a)
+{
+ return (float32x4_t) __builtin_aarch64_reinterpretv4sfv4si (__a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_s64 (int64x2_t __a)
+{
+ return (float32x4_t) __builtin_aarch64_reinterpretv4sfv2di (__a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_u8 (uint8x16_t __a)
+{
+ return (float32x4_t) __builtin_aarch64_reinterpretv4sfv16qi ((int8x16_t)
+ __a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_u16 (uint16x8_t __a)
+{
+ return (float32x4_t) __builtin_aarch64_reinterpretv4sfv8hi ((int16x8_t)
+ __a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_u32 (uint32x4_t __a)
+{
+ return (float32x4_t) __builtin_aarch64_reinterpretv4sfv4si ((int32x4_t)
+ __a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_u64 (uint64x2_t __a)
+{
+ return (float32x4_t) __builtin_aarch64_reinterpretv4sfv2di ((int64x2_t)
+ __a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_p8 (poly8x16_t __a)
+{
+ return (float32x4_t) __builtin_aarch64_reinterpretv4sfv16qi ((int8x16_t)
+ __a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_p16 (poly16x8_t __a)
+{
+ return (float32x4_t) __builtin_aarch64_reinterpretv4sfv8hi ((int16x8_t)
+ __a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_s8 (int8x8_t __a)
+{
+ return (int64x1_t) __builtin_aarch64_reinterpretdiv8qi (__a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_s16 (int16x4_t __a)
+{
+ return (int64x1_t) __builtin_aarch64_reinterpretdiv4hi (__a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_s32 (int32x2_t __a)
+{
+ return (int64x1_t) __builtin_aarch64_reinterpretdiv2si (__a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_f32 (float32x2_t __a)
+{
+ return (int64x1_t) __builtin_aarch64_reinterpretdiv2sf (__a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_u8 (uint8x8_t __a)
+{
+ return (int64x1_t) __builtin_aarch64_reinterpretdiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_u16 (uint16x4_t __a)
+{
+ return (int64x1_t) __builtin_aarch64_reinterpretdiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_u32 (uint32x2_t __a)
+{
+ return (int64x1_t) __builtin_aarch64_reinterpretdiv2si ((int32x2_t) __a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_u64 (uint64x1_t __a)
+{
+ return (int64x1_t) __builtin_aarch64_reinterpretdidi ((int64x1_t) __a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_p8 (poly8x8_t __a)
+{
+ return (int64x1_t) __builtin_aarch64_reinterpretdiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_p16 (poly16x4_t __a)
+{
+ return (int64x1_t) __builtin_aarch64_reinterpretdiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_s8 (int8x16_t __a)
+{
+ return (int64x2_t) __builtin_aarch64_reinterpretv2div16qi (__a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_s16 (int16x8_t __a)
+{
+ return (int64x2_t) __builtin_aarch64_reinterpretv2div8hi (__a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_s32 (int32x4_t __a)
+{
+ return (int64x2_t) __builtin_aarch64_reinterpretv2div4si (__a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_f32 (float32x4_t __a)
+{
+ return (int64x2_t) __builtin_aarch64_reinterpretv2div4sf (__a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_u8 (uint8x16_t __a)
+{
+ return (int64x2_t) __builtin_aarch64_reinterpretv2div16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_u16 (uint16x8_t __a)
+{
+ return (int64x2_t) __builtin_aarch64_reinterpretv2div8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_u32 (uint32x4_t __a)
+{
+ return (int64x2_t) __builtin_aarch64_reinterpretv2div4si ((int32x4_t) __a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_u64 (uint64x2_t __a)
+{
+ return (int64x2_t) __builtin_aarch64_reinterpretv2div2di ((int64x2_t) __a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_p8 (poly8x16_t __a)
+{
+ return (int64x2_t) __builtin_aarch64_reinterpretv2div16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_p16 (poly16x8_t __a)
+{
+ return (int64x2_t) __builtin_aarch64_reinterpretv2div8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_s8 (int8x8_t __a)
+{
+ return (uint64x1_t) __builtin_aarch64_reinterpretdiv8qi (__a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_s16 (int16x4_t __a)
+{
+ return (uint64x1_t) __builtin_aarch64_reinterpretdiv4hi (__a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_s32 (int32x2_t __a)
+{
+ return (uint64x1_t) __builtin_aarch64_reinterpretdiv2si (__a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_s64 (int64x1_t __a)
+{
+ return (uint64x1_t) __builtin_aarch64_reinterpretdidi (__a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_f32 (float32x2_t __a)
+{
+ return (uint64x1_t) __builtin_aarch64_reinterpretdiv2sf (__a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_u8 (uint8x8_t __a)
+{
+ return (uint64x1_t) __builtin_aarch64_reinterpretdiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_u16 (uint16x4_t __a)
+{
+ return (uint64x1_t) __builtin_aarch64_reinterpretdiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_u32 (uint32x2_t __a)
+{
+ return (uint64x1_t) __builtin_aarch64_reinterpretdiv2si ((int32x2_t) __a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_p8 (poly8x8_t __a)
+{
+ return (uint64x1_t) __builtin_aarch64_reinterpretdiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_p16 (poly16x4_t __a)
+{
+ return (uint64x1_t) __builtin_aarch64_reinterpretdiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_s8 (int8x16_t __a)
+{
+ return (uint64x2_t) __builtin_aarch64_reinterpretv2div16qi (__a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_s16 (int16x8_t __a)
+{
+ return (uint64x2_t) __builtin_aarch64_reinterpretv2div8hi (__a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_s32 (int32x4_t __a)
+{
+ return (uint64x2_t) __builtin_aarch64_reinterpretv2div4si (__a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_s64 (int64x2_t __a)
+{
+ return (uint64x2_t) __builtin_aarch64_reinterpretv2div2di (__a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_f32 (float32x4_t __a)
+{
+ return (uint64x2_t) __builtin_aarch64_reinterpretv2div4sf (__a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_u8 (uint8x16_t __a)
+{
+ return (uint64x2_t) __builtin_aarch64_reinterpretv2div16qi ((int8x16_t)
+ __a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_u16 (uint16x8_t __a)
+{
+ return (uint64x2_t) __builtin_aarch64_reinterpretv2div8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_u32 (uint32x4_t __a)
+{
+ return (uint64x2_t) __builtin_aarch64_reinterpretv2div4si ((int32x4_t) __a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_p8 (poly8x16_t __a)
+{
+ return (uint64x2_t) __builtin_aarch64_reinterpretv2div16qi ((int8x16_t)
+ __a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_p16 (poly16x8_t __a)
+{
+ return (uint64x2_t) __builtin_aarch64_reinterpretv2div8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_s16 (int16x4_t __a)
+{
+ return (int8x8_t) __builtin_aarch64_reinterpretv8qiv4hi (__a);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_s32 (int32x2_t __a)
+{
+ return (int8x8_t) __builtin_aarch64_reinterpretv8qiv2si (__a);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_s64 (int64x1_t __a)
+{
+ return (int8x8_t) __builtin_aarch64_reinterpretv8qidi (__a);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_f32 (float32x2_t __a)
+{
+ return (int8x8_t) __builtin_aarch64_reinterpretv8qiv2sf (__a);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_u8 (uint8x8_t __a)
+{
+ return (int8x8_t) __builtin_aarch64_reinterpretv8qiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_u16 (uint16x4_t __a)
+{
+ return (int8x8_t) __builtin_aarch64_reinterpretv8qiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_u32 (uint32x2_t __a)
+{
+ return (int8x8_t) __builtin_aarch64_reinterpretv8qiv2si ((int32x2_t) __a);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_u64 (uint64x1_t __a)
+{
+ return (int8x8_t) __builtin_aarch64_reinterpretv8qidi ((int64x1_t) __a);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_p8 (poly8x8_t __a)
+{
+ return (int8x8_t) __builtin_aarch64_reinterpretv8qiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_p16 (poly16x4_t __a)
+{
+ return (int8x8_t) __builtin_aarch64_reinterpretv8qiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_s16 (int16x8_t __a)
+{
+ return (int8x16_t) __builtin_aarch64_reinterpretv16qiv8hi (__a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_s32 (int32x4_t __a)
+{
+ return (int8x16_t) __builtin_aarch64_reinterpretv16qiv4si (__a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_s64 (int64x2_t __a)
+{
+ return (int8x16_t) __builtin_aarch64_reinterpretv16qiv2di (__a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_f32 (float32x4_t __a)
+{
+ return (int8x16_t) __builtin_aarch64_reinterpretv16qiv4sf (__a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_u8 (uint8x16_t __a)
+{
+ return (int8x16_t) __builtin_aarch64_reinterpretv16qiv16qi ((int8x16_t)
+ __a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_u16 (uint16x8_t __a)
+{
+ return (int8x16_t) __builtin_aarch64_reinterpretv16qiv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_u32 (uint32x4_t __a)
+{
+ return (int8x16_t) __builtin_aarch64_reinterpretv16qiv4si ((int32x4_t) __a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_u64 (uint64x2_t __a)
+{
+ return (int8x16_t) __builtin_aarch64_reinterpretv16qiv2di ((int64x2_t) __a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_p8 (poly8x16_t __a)
+{
+ return (int8x16_t) __builtin_aarch64_reinterpretv16qiv16qi ((int8x16_t)
+ __a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_p16 (poly16x8_t __a)
+{
+ return (int8x16_t) __builtin_aarch64_reinterpretv16qiv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_s8 (int8x8_t __a)
+{
+ return (int16x4_t) __builtin_aarch64_reinterpretv4hiv8qi (__a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_s32 (int32x2_t __a)
+{
+ return (int16x4_t) __builtin_aarch64_reinterpretv4hiv2si (__a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_s64 (int64x1_t __a)
+{
+ return (int16x4_t) __builtin_aarch64_reinterpretv4hidi (__a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_f32 (float32x2_t __a)
+{
+ return (int16x4_t) __builtin_aarch64_reinterpretv4hiv2sf (__a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_u8 (uint8x8_t __a)
+{
+ return (int16x4_t) __builtin_aarch64_reinterpretv4hiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_u16 (uint16x4_t __a)
+{
+ return (int16x4_t) __builtin_aarch64_reinterpretv4hiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_u32 (uint32x2_t __a)
+{
+ return (int16x4_t) __builtin_aarch64_reinterpretv4hiv2si ((int32x2_t) __a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_u64 (uint64x1_t __a)
+{
+ return (int16x4_t) __builtin_aarch64_reinterpretv4hidi ((int64x1_t) __a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_p8 (poly8x8_t __a)
+{
+ return (int16x4_t) __builtin_aarch64_reinterpretv4hiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_p16 (poly16x4_t __a)
+{
+ return (int16x4_t) __builtin_aarch64_reinterpretv4hiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_s8 (int8x16_t __a)
+{
+ return (int16x8_t) __builtin_aarch64_reinterpretv8hiv16qi (__a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_s32 (int32x4_t __a)
+{
+ return (int16x8_t) __builtin_aarch64_reinterpretv8hiv4si (__a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_s64 (int64x2_t __a)
+{
+ return (int16x8_t) __builtin_aarch64_reinterpretv8hiv2di (__a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_f32 (float32x4_t __a)
+{
+ return (int16x8_t) __builtin_aarch64_reinterpretv8hiv4sf (__a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_u8 (uint8x16_t __a)
+{
+ return (int16x8_t) __builtin_aarch64_reinterpretv8hiv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_u16 (uint16x8_t __a)
+{
+ return (int16x8_t) __builtin_aarch64_reinterpretv8hiv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_u32 (uint32x4_t __a)
+{
+ return (int16x8_t) __builtin_aarch64_reinterpretv8hiv4si ((int32x4_t) __a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_u64 (uint64x2_t __a)
+{
+ return (int16x8_t) __builtin_aarch64_reinterpretv8hiv2di ((int64x2_t) __a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_p8 (poly8x16_t __a)
+{
+ return (int16x8_t) __builtin_aarch64_reinterpretv8hiv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_p16 (poly16x8_t __a)
+{
+ return (int16x8_t) __builtin_aarch64_reinterpretv8hiv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_s8 (int8x8_t __a)
+{
+ return (int32x2_t) __builtin_aarch64_reinterpretv2siv8qi (__a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_s16 (int16x4_t __a)
+{
+ return (int32x2_t) __builtin_aarch64_reinterpretv2siv4hi (__a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_s64 (int64x1_t __a)
+{
+ return (int32x2_t) __builtin_aarch64_reinterpretv2sidi (__a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_f32 (float32x2_t __a)
+{
+ return (int32x2_t) __builtin_aarch64_reinterpretv2siv2sf (__a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_u8 (uint8x8_t __a)
+{
+ return (int32x2_t) __builtin_aarch64_reinterpretv2siv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_u16 (uint16x4_t __a)
+{
+ return (int32x2_t) __builtin_aarch64_reinterpretv2siv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_u32 (uint32x2_t __a)
+{
+ return (int32x2_t) __builtin_aarch64_reinterpretv2siv2si ((int32x2_t) __a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_u64 (uint64x1_t __a)
+{
+ return (int32x2_t) __builtin_aarch64_reinterpretv2sidi ((int64x1_t) __a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_p8 (poly8x8_t __a)
+{
+ return (int32x2_t) __builtin_aarch64_reinterpretv2siv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_p16 (poly16x4_t __a)
+{
+ return (int32x2_t) __builtin_aarch64_reinterpretv2siv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_s8 (int8x16_t __a)
+{
+ return (int32x4_t) __builtin_aarch64_reinterpretv4siv16qi (__a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_s16 (int16x8_t __a)
+{
+ return (int32x4_t) __builtin_aarch64_reinterpretv4siv8hi (__a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_s64 (int64x2_t __a)
+{
+ return (int32x4_t) __builtin_aarch64_reinterpretv4siv2di (__a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_f32 (float32x4_t __a)
+{
+ return (int32x4_t) __builtin_aarch64_reinterpretv4siv4sf (__a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_u8 (uint8x16_t __a)
+{
+ return (int32x4_t) __builtin_aarch64_reinterpretv4siv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_u16 (uint16x8_t __a)
+{
+ return (int32x4_t) __builtin_aarch64_reinterpretv4siv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_u32 (uint32x4_t __a)
+{
+ return (int32x4_t) __builtin_aarch64_reinterpretv4siv4si ((int32x4_t) __a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_u64 (uint64x2_t __a)
+{
+ return (int32x4_t) __builtin_aarch64_reinterpretv4siv2di ((int64x2_t) __a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_p8 (poly8x16_t __a)
+{
+ return (int32x4_t) __builtin_aarch64_reinterpretv4siv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_p16 (poly16x8_t __a)
+{
+ return (int32x4_t) __builtin_aarch64_reinterpretv4siv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_s8 (int8x8_t __a)
+{
+ return (uint8x8_t) __builtin_aarch64_reinterpretv8qiv8qi (__a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_s16 (int16x4_t __a)
+{
+ return (uint8x8_t) __builtin_aarch64_reinterpretv8qiv4hi (__a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_s32 (int32x2_t __a)
+{
+ return (uint8x8_t) __builtin_aarch64_reinterpretv8qiv2si (__a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_s64 (int64x1_t __a)
+{
+ return (uint8x8_t) __builtin_aarch64_reinterpretv8qidi (__a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_f32 (float32x2_t __a)
+{
+ return (uint8x8_t) __builtin_aarch64_reinterpretv8qiv2sf (__a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_u16 (uint16x4_t __a)
+{
+ return (uint8x8_t) __builtin_aarch64_reinterpretv8qiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_u32 (uint32x2_t __a)
+{
+ return (uint8x8_t) __builtin_aarch64_reinterpretv8qiv2si ((int32x2_t) __a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_u64 (uint64x1_t __a)
+{
+ return (uint8x8_t) __builtin_aarch64_reinterpretv8qidi ((int64x1_t) __a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_p8 (poly8x8_t __a)
+{
+ return (uint8x8_t) __builtin_aarch64_reinterpretv8qiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_p16 (poly16x4_t __a)
+{
+ return (uint8x8_t) __builtin_aarch64_reinterpretv8qiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_u8_s8 (int8x16_t __a)
+{
+ return (uint8x16_t) __builtin_aarch64_reinterpretv16qiv16qi (__a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_u8_s16 (int16x8_t __a)
+{
+ return (uint8x16_t) __builtin_aarch64_reinterpretv16qiv8hi (__a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_u8_s32 (int32x4_t __a)
+{
+ return (uint8x16_t) __builtin_aarch64_reinterpretv16qiv4si (__a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_u8_s64 (int64x2_t __a)
+{
+ return (uint8x16_t) __builtin_aarch64_reinterpretv16qiv2di (__a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_u8_f32 (float32x4_t __a)
+{
+ return (uint8x16_t) __builtin_aarch64_reinterpretv16qiv4sf (__a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_u8_u16 (uint16x8_t __a)
+{
+ return (uint8x16_t) __builtin_aarch64_reinterpretv16qiv8hi ((int16x8_t)
+ __a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_u8_u32 (uint32x4_t __a)
+{
+ return (uint8x16_t) __builtin_aarch64_reinterpretv16qiv4si ((int32x4_t)
+ __a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_u8_u64 (uint64x2_t __a)
+{
+ return (uint8x16_t) __builtin_aarch64_reinterpretv16qiv2di ((int64x2_t)
+ __a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_u8_p8 (poly8x16_t __a)
+{
+ return (uint8x16_t) __builtin_aarch64_reinterpretv16qiv16qi ((int8x16_t)
+ __a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_u8_p16 (poly16x8_t __a)
+{
+ return (uint8x16_t) __builtin_aarch64_reinterpretv16qiv8hi ((int16x8_t)
+ __a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_s8 (int8x8_t __a)
+{
+ return (uint16x4_t) __builtin_aarch64_reinterpretv4hiv8qi (__a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_s16 (int16x4_t __a)
+{
+ return (uint16x4_t) __builtin_aarch64_reinterpretv4hiv4hi (__a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_s32 (int32x2_t __a)
+{
+ return (uint16x4_t) __builtin_aarch64_reinterpretv4hiv2si (__a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_s64 (int64x1_t __a)
+{
+ return (uint16x4_t) __builtin_aarch64_reinterpretv4hidi (__a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_f32 (float32x2_t __a)
+{
+ return (uint16x4_t) __builtin_aarch64_reinterpretv4hiv2sf (__a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_u8 (uint8x8_t __a)
+{
+ return (uint16x4_t) __builtin_aarch64_reinterpretv4hiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_u32 (uint32x2_t __a)
+{
+ return (uint16x4_t) __builtin_aarch64_reinterpretv4hiv2si ((int32x2_t) __a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_u64 (uint64x1_t __a)
+{
+ return (uint16x4_t) __builtin_aarch64_reinterpretv4hidi ((int64x1_t) __a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_p8 (poly8x8_t __a)
+{
+ return (uint16x4_t) __builtin_aarch64_reinterpretv4hiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_p16 (poly16x4_t __a)
+{
+ return (uint16x4_t) __builtin_aarch64_reinterpretv4hiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_s8 (int8x16_t __a)
+{
+ return (uint16x8_t) __builtin_aarch64_reinterpretv8hiv16qi (__a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_s16 (int16x8_t __a)
+{
+ return (uint16x8_t) __builtin_aarch64_reinterpretv8hiv8hi (__a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_s32 (int32x4_t __a)
+{
+ return (uint16x8_t) __builtin_aarch64_reinterpretv8hiv4si (__a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_s64 (int64x2_t __a)
+{
+ return (uint16x8_t) __builtin_aarch64_reinterpretv8hiv2di (__a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_f32 (float32x4_t __a)
+{
+ return (uint16x8_t) __builtin_aarch64_reinterpretv8hiv4sf (__a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_u8 (uint8x16_t __a)
+{
+ return (uint16x8_t) __builtin_aarch64_reinterpretv8hiv16qi ((int8x16_t)
+ __a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_u32 (uint32x4_t __a)
+{
+ return (uint16x8_t) __builtin_aarch64_reinterpretv8hiv4si ((int32x4_t) __a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_u64 (uint64x2_t __a)
+{
+ return (uint16x8_t) __builtin_aarch64_reinterpretv8hiv2di ((int64x2_t) __a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_p8 (poly8x16_t __a)
+{
+ return (uint16x8_t) __builtin_aarch64_reinterpretv8hiv16qi ((int8x16_t)
+ __a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_p16 (poly16x8_t __a)
+{
+ return (uint16x8_t) __builtin_aarch64_reinterpretv8hiv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_s8 (int8x8_t __a)
+{
+ return (uint32x2_t) __builtin_aarch64_reinterpretv2siv8qi (__a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_s16 (int16x4_t __a)
+{
+ return (uint32x2_t) __builtin_aarch64_reinterpretv2siv4hi (__a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_s32 (int32x2_t __a)
+{
+ return (uint32x2_t) __builtin_aarch64_reinterpretv2siv2si (__a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_s64 (int64x1_t __a)
+{
+ return (uint32x2_t) __builtin_aarch64_reinterpretv2sidi (__a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_f32 (float32x2_t __a)
+{
+ return (uint32x2_t) __builtin_aarch64_reinterpretv2siv2sf (__a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_u8 (uint8x8_t __a)
+{
+ return (uint32x2_t) __builtin_aarch64_reinterpretv2siv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_u16 (uint16x4_t __a)
+{
+ return (uint32x2_t) __builtin_aarch64_reinterpretv2siv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_u64 (uint64x1_t __a)
+{
+ return (uint32x2_t) __builtin_aarch64_reinterpretv2sidi ((int64x1_t) __a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_p8 (poly8x8_t __a)
+{
+ return (uint32x2_t) __builtin_aarch64_reinterpretv2siv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_p16 (poly16x4_t __a)
+{
+ return (uint32x2_t) __builtin_aarch64_reinterpretv2siv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_s8 (int8x16_t __a)
+{
+ return (uint32x4_t) __builtin_aarch64_reinterpretv4siv16qi (__a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_s16 (int16x8_t __a)
+{
+ return (uint32x4_t) __builtin_aarch64_reinterpretv4siv8hi (__a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_s32 (int32x4_t __a)
+{
+ return (uint32x4_t) __builtin_aarch64_reinterpretv4siv4si (__a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_s64 (int64x2_t __a)
+{
+ return (uint32x4_t) __builtin_aarch64_reinterpretv4siv2di (__a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_f32 (float32x4_t __a)
+{
+ return (uint32x4_t) __builtin_aarch64_reinterpretv4siv4sf (__a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_u8 (uint8x16_t __a)
+{
+ return (uint32x4_t) __builtin_aarch64_reinterpretv4siv16qi ((int8x16_t)
+ __a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_u16 (uint16x8_t __a)
+{
+ return (uint32x4_t) __builtin_aarch64_reinterpretv4siv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_u64 (uint64x2_t __a)
+{
+ return (uint32x4_t) __builtin_aarch64_reinterpretv4siv2di ((int64x2_t) __a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_p8 (poly8x16_t __a)
+{
+ return (uint32x4_t) __builtin_aarch64_reinterpretv4siv16qi ((int8x16_t)
+ __a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_p16 (poly16x8_t __a)
+{
+ return (uint32x4_t) __builtin_aarch64_reinterpretv4siv8hi ((int16x8_t) __a);
+}
+
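+/* Helper for the vget_low_* intrinsics below: reinterpret the 128-bit
+   argument as uint64x2_t, extract lane 0 (the low 64 bits) and
+   reinterpret that back to the corresponding 64-bit vector type.
+   The macro is #undef'd again after the vget_low_* definitions.  */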
+#define __GET_LOW(__TYPE) \
+ uint64x2_t tmp = vreinterpretq_u64_##__TYPE (__a); \
+ uint64_t lo = vgetq_lane_u64 (tmp, 0); \
+ return vreinterpret_##__TYPE##_u64 (lo);
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vget_low_f32 (float32x4_t __a)
+{
+ __GET_LOW (f32);
+}
+
+__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+vget_low_f64 (float64x2_t __a)
+{
+ return vgetq_lane_f64 (__a, 0);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vget_low_p8 (poly8x16_t __a)
+{
+ __GET_LOW (p8);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vget_low_p16 (poly16x8_t __a)
+{
+ __GET_LOW (p16);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vget_low_s8 (int8x16_t __a)
+{
+ __GET_LOW (s8);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vget_low_s16 (int16x8_t __a)
+{
+ __GET_LOW (s16);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vget_low_s32 (int32x4_t __a)
+{
+ __GET_LOW (s32);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vget_low_s64 (int64x2_t __a)
+{
+ return vgetq_lane_s64 (__a, 0);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vget_low_u8 (uint8x16_t __a)
+{
+ __GET_LOW (u8);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vget_low_u16 (uint16x8_t __a)
+{
+ __GET_LOW (u16);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vget_low_u32 (uint32x4_t __a)
+{
+ __GET_LOW (u32);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vget_low_u64 (uint64x2_t __a)
+{
+ return vgetq_lane_u64 (__a, 0);
+}
+
+#undef __GET_LOW
+
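+/* vcombine_* concatenates two 64-bit vectors into one 128-bit vector;
+   the first argument supplies the low half of the result and the
+   second the high half.  */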
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vcombine_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x16_t) __builtin_aarch64_combinev8qi (__a, __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vcombine_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x8_t) __builtin_aarch64_combinev4hi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vcombine_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x4_t) __builtin_aarch64_combinev2si (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vcombine_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (int64x2_t) __builtin_aarch64_combinedi (__a, __b);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vcombine_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (float32x4_t) __builtin_aarch64_combinev2sf (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcombine_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x16_t) __builtin_aarch64_combinev8qi ((int8x8_t) __a,
+ (int8x8_t) __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vcombine_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x8_t) __builtin_aarch64_combinev4hi ((int16x4_t) __a,
+ (int16x4_t) __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcombine_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x4_t) __builtin_aarch64_combinev2si ((int32x2_t) __a,
+ (int32x2_t) __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcombine_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return (uint64x2_t) __builtin_aarch64_combinedi ((int64x1_t) __a,
+ (int64x1_t) __b);
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vcombine_f64 (float64x1_t __a, float64x1_t __b)
+{
+ return (float64x2_t) __builtin_aarch64_combinedf (__a, __b);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vcombine_p8 (poly8x8_t __a, poly8x8_t __b)
+{
+ return (poly8x16_t) __builtin_aarch64_combinev8qi ((int8x8_t) __a,
+ (int8x8_t) __b);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vcombine_p16 (poly16x4_t __a, poly16x4_t __b)
+{
+ return (poly16x8_t) __builtin_aarch64_combinev4hi ((int16x4_t) __a,
+ (int16x4_t) __b);
+}
+
+/* Start of temporary inline asm implementations. */
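+/* In the extended asm below, the "=w" and "w" constraints select
+   FP/SIMD registers; where an intrinsic accumulates into its first
+   argument, the "0" matching constraint ties that input to the output
+   register.  */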
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vaba_s8 (int8x8_t a, int8x8_t b, int8x8_t c)
+{
+ int8x8_t result;
+ __asm__ ("saba %0.8b,%2.8b,%3.8b"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vaba_s16 (int16x4_t a, int16x4_t b, int16x4_t c)
+{
+ int16x4_t result;
+ __asm__ ("saba %0.4h,%2.4h,%3.4h"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vaba_s32 (int32x2_t a, int32x2_t b, int32x2_t c)
+{
+ int32x2_t result;
+ __asm__ ("saba %0.2s,%2.2s,%3.2s"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vaba_u8 (uint8x8_t a, uint8x8_t b, uint8x8_t c)
+{
+ uint8x8_t result;
+ __asm__ ("uaba %0.8b,%2.8b,%3.8b"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vaba_u16 (uint16x4_t a, uint16x4_t b, uint16x4_t c)
+{
+ uint16x4_t result;
+ __asm__ ("uaba %0.4h,%2.4h,%3.4h"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vaba_u32 (uint32x2_t a, uint32x2_t b, uint32x2_t c)
+{
+ uint32x2_t result;
+ __asm__ ("uaba %0.2s,%2.2s,%3.2s"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
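+/* The *_high variants use the "2" instruction forms (e.g. SABAL2),
+   which read the upper halves of their 128-bit source operands.  */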
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vabal_high_s8 (int16x8_t a, int8x16_t b, int8x16_t c)
+{
+ int16x8_t result;
+ __asm__ ("sabal2 %0.8h,%2.16b,%3.16b"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vabal_high_s16 (int32x4_t a, int16x8_t b, int16x8_t c)
+{
+ int32x4_t result;
+ __asm__ ("sabal2 %0.4s,%2.8h,%3.8h"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vabal_high_s32 (int64x2_t a, int32x4_t b, int32x4_t c)
+{
+ int64x2_t result;
+ __asm__ ("sabal2 %0.2d,%2.4s,%3.4s"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vabal_high_u8 (uint16x8_t a, uint8x16_t b, uint8x16_t c)
+{
+ uint16x8_t result;
+ __asm__ ("uabal2 %0.8h,%2.16b,%3.16b"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vabal_high_u16 (uint32x4_t a, uint16x8_t b, uint16x8_t c)
+{
+ uint32x4_t result;
+ __asm__ ("uabal2 %0.4s,%2.8h,%3.8h"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vabal_high_u32 (uint64x2_t a, uint32x4_t b, uint32x4_t c)
+{
+ uint64x2_t result;
+ __asm__ ("uabal2 %0.2d,%2.4s,%3.4s"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vabal_s8 (int16x8_t a, int8x8_t b, int8x8_t c)
+{
+ int16x8_t result;
+ __asm__ ("sabal %0.8h,%2.8b,%3.8b"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vabal_s16 (int32x4_t a, int16x4_t b, int16x4_t c)
+{
+ int32x4_t result;
+ __asm__ ("sabal %0.4s,%2.4h,%3.4h"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vabal_s32 (int64x2_t a, int32x2_t b, int32x2_t c)
+{
+ int64x2_t result;
+ __asm__ ("sabal %0.2d,%2.2s,%3.2s"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vabal_u8 (uint16x8_t a, uint8x8_t b, uint8x8_t c)
+{
+ uint16x8_t result;
+ __asm__ ("uabal %0.8h,%2.8b,%3.8b"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vabal_u16 (uint32x4_t a, uint16x4_t b, uint16x4_t c)
+{
+ uint32x4_t result;
+ __asm__ ("uabal %0.4s,%2.4h,%3.4h"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vabal_u32 (uint64x2_t a, uint32x2_t b, uint32x2_t c)
+{
+ uint64x2_t result;
+ __asm__ ("uabal %0.2d,%2.2s,%3.2s"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vabaq_s8 (int8x16_t a, int8x16_t b, int8x16_t c)
+{
+ int8x16_t result;
+ __asm__ ("saba %0.16b,%2.16b,%3.16b"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vabaq_s16 (int16x8_t a, int16x8_t b, int16x8_t c)
+{
+ int16x8_t result;
+ __asm__ ("saba %0.8h,%2.8h,%3.8h"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vabaq_s32 (int32x4_t a, int32x4_t b, int32x4_t c)
+{
+ int32x4_t result;
+ __asm__ ("saba %0.4s,%2.4s,%3.4s"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vabaq_u8 (uint8x16_t a, uint8x16_t b, uint8x16_t c)
+{
+ uint8x16_t result;
+ __asm__ ("uaba %0.16b,%2.16b,%3.16b"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vabaq_u16 (uint16x8_t a, uint16x8_t b, uint16x8_t c)
+{
+ uint16x8_t result;
+ __asm__ ("uaba %0.8h,%2.8h,%3.8h"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vabaq_u32 (uint32x4_t a, uint32x4_t b, uint32x4_t c)
+{
+ uint32x4_t result;
+ __asm__ ("uaba %0.4s,%2.4s,%3.4s"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vabd_f32 (float32x2_t a, float32x2_t b)
+{
+ float32x2_t result;
+ __asm__ ("fabd %0.2s, %1.2s, %2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vabd_s8 (int8x8_t a, int8x8_t b)
+{
+ int8x8_t result;
+ __asm__ ("sabd %0.8b, %1.8b, %2.8b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vabd_s16 (int16x4_t a, int16x4_t b)
+{
+ int16x4_t result;
+ __asm__ ("sabd %0.4h, %1.4h, %2.4h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vabd_s32 (int32x2_t a, int32x2_t b)
+{
+ int32x2_t result;
+ __asm__ ("sabd %0.2s, %1.2s, %2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vabd_u8 (uint8x8_t a, uint8x8_t b)
+{
+ uint8x8_t result;
+ __asm__ ("uabd %0.8b, %1.8b, %2.8b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vabd_u16 (uint16x4_t a, uint16x4_t b)
+{
+ uint16x4_t result;
+ __asm__ ("uabd %0.4h, %1.4h, %2.4h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vabd_u32 (uint32x2_t a, uint32x2_t b)
+{
+ uint32x2_t result;
+ __asm__ ("uabd %0.2s, %1.2s, %2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vabdd_f64 (float64_t a, float64_t b)
+{
+ float64_t result;
+ __asm__ ("fabd %d0, %d1, %d2"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vabdl_high_s8 (int8x16_t a, int8x16_t b)
+{
+ int16x8_t result;
+ __asm__ ("sabdl2 %0.8h,%1.16b,%2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vabdl_high_s16 (int16x8_t a, int16x8_t b)
+{
+ int32x4_t result;
+ __asm__ ("sabdl2 %0.4s,%1.8h,%2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vabdl_high_s32 (int32x4_t a, int32x4_t b)
+{
+ int64x2_t result;
+ __asm__ ("sabdl2 %0.2d,%1.4s,%2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vabdl_high_u8 (uint8x16_t a, uint8x16_t b)
+{
+ uint16x8_t result;
+ __asm__ ("uabdl2 %0.8h,%1.16b,%2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vabdl_high_u16 (uint16x8_t a, uint16x8_t b)
+{
+ uint32x4_t result;
+ __asm__ ("uabdl2 %0.4s,%1.8h,%2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vabdl_high_u32 (uint32x4_t a, uint32x4_t b)
+{
+ uint64x2_t result;
+ __asm__ ("uabdl2 %0.2d,%1.4s,%2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vabdl_s8 (int8x8_t a, int8x8_t b)
+{
+ int16x8_t result;
+ __asm__ ("sabdl %0.8h, %1.8b, %2.8b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vabdl_s16 (int16x4_t a, int16x4_t b)
+{
+ int32x4_t result;
+ __asm__ ("sabdl %0.4s, %1.4h, %2.4h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vabdl_s32 (int32x2_t a, int32x2_t b)
+{
+ int64x2_t result;
+ __asm__ ("sabdl %0.2d, %1.2s, %2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vabdl_u8 (uint8x8_t a, uint8x8_t b)
+{
+ uint16x8_t result;
+ __asm__ ("uabdl %0.8h, %1.8b, %2.8b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vabdl_u16 (uint16x4_t a, uint16x4_t b)
+{
+ uint32x4_t result;
+ __asm__ ("uabdl %0.4s, %1.4h, %2.4h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vabdl_u32 (uint32x2_t a, uint32x2_t b)
+{
+ uint64x2_t result;
+ __asm__ ("uabdl %0.2d, %1.2s, %2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vabdq_f32 (float32x4_t a, float32x4_t b)
+{
+ float32x4_t result;
+ __asm__ ("fabd %0.4s, %1.4s, %2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vabdq_f64 (float64x2_t a, float64x2_t b)
+{
+ float64x2_t result;
+ __asm__ ("fabd %0.2d, %1.2d, %2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vabdq_s8 (int8x16_t a, int8x16_t b)
+{
+ int8x16_t result;
+ __asm__ ("sabd %0.16b, %1.16b, %2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vabdq_s16 (int16x8_t a, int16x8_t b)
+{
+ int16x8_t result;
+ __asm__ ("sabd %0.8h, %1.8h, %2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vabdq_s32 (int32x4_t a, int32x4_t b)
+{
+ int32x4_t result;
+ __asm__ ("sabd %0.4s, %1.4s, %2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vabdq_u8 (uint8x16_t a, uint8x16_t b)
+{
+ uint8x16_t result;
+ __asm__ ("uabd %0.16b, %1.16b, %2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vabdq_u16 (uint16x8_t a, uint16x8_t b)
+{
+ uint16x8_t result;
+ __asm__ ("uabd %0.8h, %1.8h, %2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vabdq_u32 (uint32x4_t a, uint32x4_t b)
+{
+ uint32x4_t result;
+ __asm__ ("uabd %0.4s, %1.4s, %2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vabds_f32 (float32_t a, float32_t b)
+{
+ float32_t result;
+ __asm__ ("fabd %s0, %s1, %s2"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
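+/* vaddlv_* and vaddlvq_* map to SADDLV/UADDLV, which sum every lane of
+   the source vector and return the result widened to the next larger
+   element size.  */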
+__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+vaddlv_s8 (int8x8_t a)
+{
+ int16_t result;
+ __asm__ ("saddlv %h0,%1.8b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vaddlv_s16 (int16x4_t a)
+{
+ int32_t result;
+ __asm__ ("saddlv %s0,%1.4h"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+vaddlv_u8 (uint8x8_t a)
+{
+ uint16_t result;
+ __asm__ ("uaddlv %h0,%1.8b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vaddlv_u16 (uint16x4_t a)
+{
+ uint32_t result;
+ __asm__ ("uaddlv %s0,%1.4h"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+vaddlvq_s8 (int8x16_t a)
+{
+ int16_t result;
+ __asm__ ("saddlv %h0,%1.16b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vaddlvq_s16 (int16x8_t a)
+{
+ int32_t result;
+ __asm__ ("saddlv %s0,%1.8h"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+vaddlvq_s32 (int32x4_t a)
+{
+ int64_t result;
+ __asm__ ("saddlv %d0,%1.4s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+vaddlvq_u8 (uint8x16_t a)
+{
+ uint16_t result;
+ __asm__ ("uaddlv %h0,%1.16b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vaddlvq_u16 (uint16x8_t a)
+{
+ uint32_t result;
+ __asm__ ("uaddlv %s0,%1.8h"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+vaddlvq_u32 (uint32x4_t a)
+{
+ uint64_t result;
+ __asm__ ("uaddlv %d0,%1.4s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
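+/* vcls_* maps to CLS, which counts, per lane, the consecutive bits
+   below the sign bit that are equal to it; vcnt_* maps to CNT, the
+   per-byte population count.  */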
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vcls_s8 (int8x8_t a)
+{
+ int8x8_t result;
+ __asm__ ("cls %0.8b,%1.8b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vcls_s16 (int16x4_t a)
+{
+ int16x4_t result;
+ __asm__ ("cls %0.4h,%1.4h"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vcls_s32 (int32x2_t a)
+{
+ int32x2_t result;
+ __asm__ ("cls %0.2s,%1.2s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vclsq_s8 (int8x16_t a)
+{
+ int8x16_t result;
+ __asm__ ("cls %0.16b,%1.16b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vclsq_s16 (int16x8_t a)
+{
+ int16x8_t result;
+ __asm__ ("cls %0.8h,%1.8h"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vclsq_s32 (int32x4_t a)
+{
+ int32x4_t result;
+ __asm__ ("cls %0.4s,%1.4s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vcnt_p8 (poly8x8_t a)
+{
+ poly8x8_t result;
+ __asm__ ("cnt %0.8b,%1.8b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vcnt_s8 (int8x8_t a)
+{
+ int8x8_t result;
+ __asm__ ("cnt %0.8b,%1.8b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vcnt_u8 (uint8x8_t a)
+{
+ uint8x8_t result;
+ __asm__ ("cnt %0.8b,%1.8b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vcntq_p8 (poly8x16_t a)
+{
+ poly8x16_t result;
+ __asm__ ("cnt %0.16b,%1.16b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vcntq_s8 (int8x16_t a)
+{
+ int8x16_t result;
+ __asm__ ("cnt %0.16b,%1.16b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcntq_u8 (uint8x16_t a)
+{
+ uint8x16_t result;
+ __asm__ ("cnt %0.16b,%1.16b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
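+/* The lane indices below are passed through "i" constraints straight
+   to the INS instruction, so they must be integer constant
+   expressions; hence these operations are statement-expression macros
+   rather than inline functions.  */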
+#define vcopyq_lane_f32(a, b, c, d) \
+ __extension__ \
+ ({ \
+ float32x4_t c_ = (c); \
+ float32x4_t a_ = (a); \
+ float32x4_t result; \
+ __asm__ ("ins %0.s[%2], %3.s[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "i"(b), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vcopyq_lane_f64(a, b, c, d) \
+ __extension__ \
+ ({ \
+ float64x2_t c_ = (c); \
+ float64x2_t a_ = (a); \
+ float64x2_t result; \
+ __asm__ ("ins %0.d[%2], %3.d[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "i"(b), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vcopyq_lane_p8(a, b, c, d) \
+ __extension__ \
+ ({ \
+ poly8x16_t c_ = (c); \
+ poly8x16_t a_ = (a); \
+ poly8x16_t result; \
+ __asm__ ("ins %0.b[%2], %3.b[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "i"(b), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vcopyq_lane_p16(a, b, c, d) \
+ __extension__ \
+ ({ \
+ poly16x8_t c_ = (c); \
+ poly16x8_t a_ = (a); \
+ poly16x8_t result; \
+ __asm__ ("ins %0.h[%2], %3.h[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "i"(b), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vcopyq_lane_s8(a, b, c, d) \
+ __extension__ \
+ ({ \
+ int8x16_t c_ = (c); \
+ int8x16_t a_ = (a); \
+ int8x16_t result; \
+ __asm__ ("ins %0.b[%2], %3.b[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "i"(b), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vcopyq_lane_s16(a, b, c, d) \
+ __extension__ \
+ ({ \
+ int16x8_t c_ = (c); \
+ int16x8_t a_ = (a); \
+ int16x8_t result; \
+ __asm__ ("ins %0.h[%2], %3.h[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "i"(b), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vcopyq_lane_s32(a, b, c, d) \
+ __extension__ \
+ ({ \
+ int32x4_t c_ = (c); \
+ int32x4_t a_ = (a); \
+ int32x4_t result; \
+ __asm__ ("ins %0.s[%2], %3.s[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "i"(b), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vcopyq_lane_s64(a, b, c, d) \
+ __extension__ \
+ ({ \
+ int64x2_t c_ = (c); \
+ int64x2_t a_ = (a); \
+ int64x2_t result; \
+ __asm__ ("ins %0.d[%2], %3.d[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "i"(b), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vcopyq_lane_u8(a, b, c, d) \
+ __extension__ \
+ ({ \
+ uint8x16_t c_ = (c); \
+ uint8x16_t a_ = (a); \
+ uint8x16_t result; \
+ __asm__ ("ins %0.b[%2], %3.b[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "i"(b), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vcopyq_lane_u16(a, b, c, d) \
+ __extension__ \
+ ({ \
+ uint16x8_t c_ = (c); \
+ uint16x8_t a_ = (a); \
+ uint16x8_t result; \
+ __asm__ ("ins %0.h[%2], %3.h[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "i"(b), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vcopyq_lane_u32(a, b, c, d) \
+ __extension__ \
+ ({ \
+ uint32x4_t c_ = (c); \
+ uint32x4_t a_ = (a); \
+ uint32x4_t result; \
+ __asm__ ("ins %0.s[%2], %3.s[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "i"(b), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vcopyq_lane_u64(a, b, c, d) \
+ __extension__ \
+ ({ \
+ uint64x2_t c_ = (c); \
+ uint64x2_t a_ = (a); \
+ uint64x2_t result; \
+ __asm__ ("ins %0.d[%2], %3.d[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "i"(b), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+/* vcvt_f16_f32 not supported */
+
+/* vcvt_f32_f16 not supported */
+
+/* vcvt_high_f16_f32 not supported */
+
+/* vcvt_high_f32_f16 not supported */
+
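+/* Forward declaration of vdup_n_f32, which is defined further down in
+   this file.  */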
+static float32x2_t vdup_n_f32 (float32_t);
+
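+/* The fixed-point conversion macros pass their second argument (the
+   number of fractional bits) as an immediate to SCVTF, UCVTF, FCVTZS
+   and FCVTZU, so it must be a constant expression.  */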
+#define vcvt_n_f32_s32(a, b) \
+ __extension__ \
+ ({ \
+ int32x2_t a_ = (a); \
+ float32x2_t result; \
+ __asm__ ("scvtf %0.2s, %1.2s, #%2" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vcvt_n_f32_u32(a, b) \
+ __extension__ \
+ ({ \
+ uint32x2_t a_ = (a); \
+ float32x2_t result; \
+ __asm__ ("ucvtf %0.2s, %1.2s, #%2" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vcvt_n_s32_f32(a, b) \
+ __extension__ \
+ ({ \
+ float32x2_t a_ = (a); \
+ int32x2_t result; \
+ __asm__ ("fcvtzs %0.2s, %1.2s, #%2" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vcvt_n_u32_f32(a, b) \
+ __extension__ \
+ ({ \
+ float32x2_t a_ = (a); \
+ uint32x2_t result; \
+ __asm__ ("fcvtzu %0.2s, %1.2s, #%2" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vcvtd_n_f64_s64(a, b) \
+ __extension__ \
+ ({ \
+ int64_t a_ = (a); \
+ float64_t result; \
+ __asm__ ("scvtf %d0,%d1,%2" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vcvtd_n_f64_u64(a, b) \
+ __extension__ \
+ ({ \
+ uint64_t a_ = (a); \
+ float64_t result; \
+ __asm__ ("ucvtf %d0,%d1,%2" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vcvtd_n_s64_f64(a, b) \
+ __extension__ \
+ ({ \
+ float64_t a_ = (a); \
+ int64_t result; \
+ __asm__ ("fcvtzs %d0,%d1,%2" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vcvtd_n_u64_f64(a, b) \
+ __extension__ \
+ ({ \
+ float64_t a_ = (a); \
+ uint64_t result; \
+ __asm__ ("fcvtzu %d0,%d1,%2" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vcvtq_n_f32_s32(a, b) \
+ __extension__ \
+ ({ \
+ int32x4_t a_ = (a); \
+ float32x4_t result; \
+ __asm__ ("scvtf %0.4s, %1.4s, #%2" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vcvtq_n_f32_u32(a, b) \
+ __extension__ \
+ ({ \
+ uint32x4_t a_ = (a); \
+ float32x4_t result; \
+ __asm__ ("ucvtf %0.4s, %1.4s, #%2" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vcvtq_n_f64_s64(a, b) \
+ __extension__ \
+ ({ \
+ int64x2_t a_ = (a); \
+ float64x2_t result; \
+ __asm__ ("scvtf %0.2d, %1.2d, #%2" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vcvtq_n_f64_u64(a, b) \
+ __extension__ \
+ ({ \
+ uint64x2_t a_ = (a); \
+ float64x2_t result; \
+ __asm__ ("ucvtf %0.2d, %1.2d, #%2" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vcvtq_n_s32_f32(a, b) \
+ __extension__ \
+ ({ \
+ float32x4_t a_ = (a); \
+ int32x4_t result; \
+ __asm__ ("fcvtzs %0.4s, %1.4s, #%2" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vcvtq_n_s64_f64(a, b) \
+ __extension__ \
+ ({ \
+ float64x2_t a_ = (a); \
+ int64x2_t result; \
+ __asm__ ("fcvtzs %0.2d, %1.2d, #%2" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vcvtq_n_u32_f32(a, b) \
+ __extension__ \
+ ({ \
+ float32x4_t a_ = (a); \
+ uint32x4_t result; \
+ __asm__ ("fcvtzu %0.4s, %1.4s, #%2" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vcvtq_n_u64_f64(a, b) \
+ __extension__ \
+ ({ \
+ float64x2_t a_ = (a); \
+ uint64x2_t result; \
+ __asm__ ("fcvtzu %0.2d, %1.2d, #%2" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vcvts_n_f32_s32(a, b) \
+ __extension__ \
+ ({ \
+ int32_t a_ = (a); \
+ float32_t result; \
+ __asm__ ("scvtf %s0,%s1,%2" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vcvts_n_f32_u32(a, b) \
+ __extension__ \
+ ({ \
+ uint32_t a_ = (a); \
+ float32_t result; \
+ __asm__ ("ucvtf %s0,%s1,%2" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vcvts_n_s32_f32(a, b) \
+ __extension__ \
+ ({ \
+ float32_t a_ = (a); \
+ int32_t result; \
+ __asm__ ("fcvtzs %s0,%s1,%2" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vcvts_n_u32_f32(a, b) \
+ __extension__ \
+ ({ \
+ float32_t a_ = (a); \
+ uint32_t result; \
+ __asm__ ("fcvtzu %s0,%s1,%2" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
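+/* vcvtx_* maps to FCVTXN, the double-to-single narrowing conversion
+   that uses the round-to-odd rounding mode.  */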
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vcvtx_f32_f64 (float64x2_t a)
+{
+ float32x2_t result;
+ __asm__ ("fcvtxn %0.2s,%1.2d"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vcvtx_high_f32_f64 (float32x2_t a, float64x2_t b)
+{
+ float32x4_t result;
+ __asm__ ("fcvtxn2 %0.4s,%1.2d"
+ : "=w"(result)
+ : "w" (b), "0"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vcvtxd_f32_f64 (float64_t a)
+{
+ float32_t result;
+ __asm__ ("fcvtxn %s0,%d1"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
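+/* The vext_* and vextq_* macros emit the byte-wise EXT instruction, so
+   the constant element index is scaled by the element size in the asm
+   template (e.g. "#%3*4" for 32-bit elements).  */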
+#define vext_f32(a, b, c) \
+ __extension__ \
+ ({ \
+ float32x2_t b_ = (b); \
+ float32x2_t a_ = (a); \
+ float32x2_t result; \
+ __asm__ ("ext %0.8b, %1.8b, %2.8b, #%3*4" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vext_f64(a, b, c) \
+ __extension__ \
+ ({ \
+ float64x1_t b_ = (b); \
+ float64x1_t a_ = (a); \
+ float64x1_t result; \
+ __asm__ ("ext %0.8b, %1.8b, %2.8b, #%3*8" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vext_p8(a, b, c) \
+ __extension__ \
+ ({ \
+ poly8x8_t b_ = (b); \
+ poly8x8_t a_ = (a); \
+ poly8x8_t result; \
+ __asm__ ("ext %0.8b,%1.8b,%2.8b,%3" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vext_p16(a, b, c) \
+ __extension__ \
+ ({ \
+ poly16x4_t b_ = (b); \
+ poly16x4_t a_ = (a); \
+ poly16x4_t result; \
+ __asm__ ("ext %0.8b, %1.8b, %2.8b, #%3*2" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vext_s8(a, b, c) \
+ __extension__ \
+ ({ \
+ int8x8_t b_ = (b); \
+ int8x8_t a_ = (a); \
+ int8x8_t result; \
+ __asm__ ("ext %0.8b,%1.8b,%2.8b,%3" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vext_s16(a, b, c) \
+ __extension__ \
+ ({ \
+ int16x4_t b_ = (b); \
+ int16x4_t a_ = (a); \
+ int16x4_t result; \
+ __asm__ ("ext %0.8b, %1.8b, %2.8b, #%3*2" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vext_s32(a, b, c) \
+ __extension__ \
+ ({ \
+ int32x2_t b_ = (b); \
+ int32x2_t a_ = (a); \
+ int32x2_t result; \
+ __asm__ ("ext %0.8b, %1.8b, %2.8b, #%3*4" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vext_s64(a, b, c) \
+ __extension__ \
+ ({ \
+ int64x1_t b_ = (b); \
+ int64x1_t a_ = (a); \
+ int64x1_t result; \
+ __asm__ ("ext %0.8b, %1.8b, %2.8b, #%3*8" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vext_u8(a, b, c) \
+ __extension__ \
+ ({ \
+ uint8x8_t b_ = (b); \
+ uint8x8_t a_ = (a); \
+ uint8x8_t result; \
+ __asm__ ("ext %0.8b,%1.8b,%2.8b,%3" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vext_u16(a, b, c) \
+ __extension__ \
+ ({ \
+ uint16x4_t b_ = (b); \
+ uint16x4_t a_ = (a); \
+ uint16x4_t result; \
+ __asm__ ("ext %0.8b, %1.8b, %2.8b, #%3*2" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vext_u32(a, b, c) \
+ __extension__ \
+ ({ \
+ uint32x2_t b_ = (b); \
+ uint32x2_t a_ = (a); \
+ uint32x2_t result; \
+ __asm__ ("ext %0.8b, %1.8b, %2.8b, #%3*4" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vext_u64(a, b, c) \
+ __extension__ \
+ ({ \
+ uint64x1_t b_ = (b); \
+ uint64x1_t a_ = (a); \
+ uint64x1_t result; \
+ __asm__ ("ext %0.8b, %1.8b, %2.8b, #%3*8" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vextq_f32(a, b, c) \
+ __extension__ \
+ ({ \
+ float32x4_t b_ = (b); \
+ float32x4_t a_ = (a); \
+ float32x4_t result; \
+ __asm__ ("ext %0.16b, %1.16b, %2.16b, #%3*4" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vextq_f64(a, b, c) \
+ __extension__ \
+ ({ \
+ float64x2_t b_ = (b); \
+ float64x2_t a_ = (a); \
+ float64x2_t result; \
+ __asm__ ("ext %0.16b, %1.16b, %2.16b, #%3*8" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vextq_p8(a, b, c) \
+ __extension__ \
+ ({ \
+ poly8x16_t b_ = (b); \
+ poly8x16_t a_ = (a); \
+ poly8x16_t result; \
+ __asm__ ("ext %0.16b, %1.16b, %2.16b, #%3" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vextq_p16(a, b, c) \
+ __extension__ \
+ ({ \
+ poly16x8_t b_ = (b); \
+ poly16x8_t a_ = (a); \
+ poly16x8_t result; \
+ __asm__ ("ext %0.16b, %1.16b, %2.16b, #%3*2" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vextq_s8(a, b, c) \
+ __extension__ \
+ ({ \
+ int8x16_t b_ = (b); \
+ int8x16_t a_ = (a); \
+ int8x16_t result; \
+ __asm__ ("ext %0.16b, %1.16b, %2.16b, #%3" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vextq_s16(a, b, c) \
+ __extension__ \
+ ({ \
+ int16x8_t b_ = (b); \
+ int16x8_t a_ = (a); \
+ int16x8_t result; \
+ __asm__ ("ext %0.16b, %1.16b, %2.16b, #%3*2" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vextq_s32(a, b, c) \
+ __extension__ \
+ ({ \
+ int32x4_t b_ = (b); \
+ int32x4_t a_ = (a); \
+ int32x4_t result; \
+ __asm__ ("ext %0.16b, %1.16b, %2.16b, #%3*4" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vextq_s64(a, b, c) \
+ __extension__ \
+ ({ \
+ int64x2_t b_ = (b); \
+ int64x2_t a_ = (a); \
+ int64x2_t result; \
+ __asm__ ("ext %0.16b, %1.16b, %2.16b, #%3*8" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vextq_u8(a, b, c) \
+ __extension__ \
+ ({ \
+ uint8x16_t b_ = (b); \
+ uint8x16_t a_ = (a); \
+ uint8x16_t result; \
+ __asm__ ("ext %0.16b, %1.16b, %2.16b, #%3" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vextq_u16(a, b, c) \
+ __extension__ \
+ ({ \
+ uint16x8_t b_ = (b); \
+ uint16x8_t a_ = (a); \
+ uint16x8_t result; \
+ __asm__ ("ext %0.16b, %1.16b, %2.16b, #%3*2" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vextq_u32(a, b, c) \
+ __extension__ \
+ ({ \
+ uint32x4_t b_ = (b); \
+ uint32x4_t a_ = (a); \
+ uint32x4_t result; \
+ __asm__ ("ext %0.16b, %1.16b, %2.16b, #%3*4" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vextq_u64(a, b, c) \
+ __extension__ \
+ ({ \
+ uint64x2_t b_ = (b); \
+ uint64x2_t a_ = (a); \
+ uint64x2_t result; \
+ __asm__ ("ext %0.16b, %1.16b, %2.16b, #%3*8" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
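(Editorial aside, not part of the patch: the vext family above wraps the EXT instruction, which returns lanes n.. of the first operand followed by the leading lanes of the second; the "#%3*2", "#%3*4" and "#%3*8" forms scale the lane index to a byte offset. A minimal usage sketch follows, assuming an AArch64 toolchain with this arm_neon.h on the include path; the name rotate_window is illustrative only.)

#include <arm_neon.h>

/* Hypothetical helper: bytes 3..15 of 'lo' followed by bytes 0..2 of 'hi',
   i.e. a 16-byte window into the concatenation lo:hi.  The last argument
   must be an integer constant expression in [0, 15].  */
static uint8x16_t
rotate_window (uint8x16_t lo, uint8x16_t hi)
{
  return vextq_u8 (lo, hi, 3);
}
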
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vfma_f32 (float32x2_t a, float32x2_t b, float32x2_t c)
+{
+ float32x2_t result;
+ __asm__ ("fmla %0.2s,%2.2s,%3.2s"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vfmaq_f32 (float32x4_t a, float32x4_t b, float32x4_t c)
+{
+ float32x4_t result;
+ __asm__ ("fmla %0.4s,%2.4s,%3.4s"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vfmaq_f64 (float64x2_t a, float64x2_t b, float64x2_t c)
+{
+ float64x2_t result;
+ __asm__ ("fmla %0.2d,%2.2d,%3.2d"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vfma_n_f32 (float32x2_t a, float32x2_t b, float32_t c)
+{
+ float32x2_t result;
+ __asm__ ("fmla %0.2s, %2.2s, %3.s[0]"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vfmaq_n_f32 (float32x4_t a, float32x4_t b, float32_t c)
+{
+ float32x4_t result;
+ __asm__ ("fmla %0.4s, %2.4s, %3.s[0]"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vfmaq_n_f64 (float64x2_t a, float64x2_t b, float64_t c)
+{
+ float64x2_t result;
+ __asm__ ("fmla %0.2d, %2.2d, %3.d[0]"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vfms_f32 (float32x2_t a, float32x2_t b, float32x2_t c)
+{
+ float32x2_t result;
+ __asm__ ("fmls %0.2s,%2.2s,%3.2s"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vfmsq_f32 (float32x4_t a, float32x4_t b, float32x4_t c)
+{
+ float32x4_t result;
+ __asm__ ("fmls %0.4s,%2.4s,%3.4s"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vfmsq_f64 (float64x2_t a, float64x2_t b, float64x2_t c)
+{
+ float64x2_t result;
+ __asm__ ("fmls %0.2d,%2.2d,%3.2d"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vget_high_f32 (float32x4_t a)
+{
+ float32x2_t result;
+ __asm__ ("ins %0.d[0], %1.d[1]"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+vget_high_f64 (float64x2_t a)
+{
+ float64x1_t result;
+ __asm__ ("ins %0.d[0], %1.d[1]"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vget_high_p8 (poly8x16_t a)
+{
+ poly8x8_t result;
+ __asm__ ("ins %0.d[0], %1.d[1]"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vget_high_p16 (poly16x8_t a)
+{
+ poly16x4_t result;
+ __asm__ ("ins %0.d[0], %1.d[1]"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vget_high_s8 (int8x16_t a)
+{
+ int8x8_t result;
+ __asm__ ("ins %0.d[0], %1.d[1]"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vget_high_s16 (int16x8_t a)
+{
+ int16x4_t result;
+ __asm__ ("ins %0.d[0], %1.d[1]"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vget_high_s32 (int32x4_t a)
+{
+ int32x2_t result;
+ __asm__ ("ins %0.d[0], %1.d[1]"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vget_high_s64 (int64x2_t a)
+{
+ int64x1_t result;
+ __asm__ ("ins %0.d[0], %1.d[1]"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vget_high_u8 (uint8x16_t a)
+{
+ uint8x8_t result;
+ __asm__ ("ins %0.d[0], %1.d[1]"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vget_high_u16 (uint16x8_t a)
+{
+ uint16x4_t result;
+ __asm__ ("ins %0.d[0], %1.d[1]"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vget_high_u32 (uint32x4_t a)
+{
+ uint32x2_t result;
+ __asm__ ("ins %0.d[0], %1.d[1]"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vget_high_u64 (uint64x2_t a)
+{
+ uint64x1_t result;
+ __asm__ ("ins %0.d[0], %1.d[1]"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vhsub_s8 (int8x8_t a, int8x8_t b)
+{
+ int8x8_t result;
+ __asm__ ("shsub %0.8b, %1.8b, %2.8b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vhsub_s16 (int16x4_t a, int16x4_t b)
+{
+ int16x4_t result;
+ __asm__ ("shsub %0.4h, %1.4h, %2.4h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vhsub_s32 (int32x2_t a, int32x2_t b)
+{
+ int32x2_t result;
+ __asm__ ("shsub %0.2s, %1.2s, %2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vhsub_u8 (uint8x8_t a, uint8x8_t b)
+{
+ uint8x8_t result;
+ __asm__ ("uhsub %0.8b, %1.8b, %2.8b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vhsub_u16 (uint16x4_t a, uint16x4_t b)
+{
+ uint16x4_t result;
+ __asm__ ("uhsub %0.4h, %1.4h, %2.4h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vhsub_u32 (uint32x2_t a, uint32x2_t b)
+{
+ uint32x2_t result;
+ __asm__ ("uhsub %0.2s, %1.2s, %2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vhsubq_s8 (int8x16_t a, int8x16_t b)
+{
+ int8x16_t result;
+ __asm__ ("shsub %0.16b, %1.16b, %2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vhsubq_s16 (int16x8_t a, int16x8_t b)
+{
+ int16x8_t result;
+ __asm__ ("shsub %0.8h, %1.8h, %2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vhsubq_s32 (int32x4_t a, int32x4_t b)
+{
+ int32x4_t result;
+ __asm__ ("shsub %0.4s, %1.4s, %2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vhsubq_u8 (uint8x16_t a, uint8x16_t b)
+{
+ uint8x16_t result;
+ __asm__ ("uhsub %0.16b, %1.16b, %2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vhsubq_u16 (uint16x8_t a, uint16x8_t b)
+{
+ uint16x8_t result;
+ __asm__ ("uhsub %0.8h, %1.8h, %2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vhsubq_u32 (uint32x4_t a, uint32x4_t b)
+{
+ uint32x4_t result;
+ __asm__ ("uhsub %0.4s, %1.4s, %2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vld1_dup_f32 (const float32_t * a)
+{
+ float32x2_t result;
+ __asm__ ("ld1r {%0.2s}, %1"
+ : "=w"(result)
+ : "Utv"(*a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+vld1_dup_f64 (const float64_t * a)
+{
+ float64x1_t result;
+ __asm__ ("ld1r {%0.1d}, %1"
+ : "=w"(result)
+ : "Utv"(*a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vld1_dup_p8 (const poly8_t * a)
+{
+ poly8x8_t result;
+ __asm__ ("ld1r {%0.8b}, %1"
+ : "=w"(result)
+ : "Utv"(*a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vld1_dup_p16 (const poly16_t * a)
+{
+ poly16x4_t result;
+ __asm__ ("ld1r {%0.4h}, %1"
+ : "=w"(result)
+ : "Utv"(*a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vld1_dup_s8 (const int8_t * a)
+{
+ int8x8_t result;
+ __asm__ ("ld1r {%0.8b}, %1"
+ : "=w"(result)
+ : "Utv"(*a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vld1_dup_s16 (const int16_t * a)
+{
+ int16x4_t result;
+ __asm__ ("ld1r {%0.4h}, %1"
+ : "=w"(result)
+ : "Utv"(*a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vld1_dup_s32 (const int32_t * a)
+{
+ int32x2_t result;
+ __asm__ ("ld1r {%0.2s}, %1"
+ : "=w"(result)
+ : "Utv"(*a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vld1_dup_s64 (const int64_t * a)
+{
+ int64x1_t result;
+ __asm__ ("ld1r {%0.1d}, %1"
+ : "=w"(result)
+ : "Utv"(*a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vld1_dup_u8 (const uint8_t * a)
+{
+ uint8x8_t result;
+ __asm__ ("ld1r {%0.8b}, %1"
+ : "=w"(result)
+ : "Utv"(*a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vld1_dup_u16 (const uint16_t * a)
+{
+ uint16x4_t result;
+ __asm__ ("ld1r {%0.4h}, %1"
+ : "=w"(result)
+ : "Utv"(*a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vld1_dup_u32 (const uint32_t * a)
+{
+ uint32x2_t result;
+ __asm__ ("ld1r {%0.2s}, %1"
+ : "=w"(result)
+ : "Utv"(*a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vld1_dup_u64 (const uint64_t * a)
+{
+ uint64x1_t result;
+ __asm__ ("ld1r {%0.1d}, %1"
+ : "=w"(result)
+ : "Utv"(*a)
+ : /* No clobbers */);
+ return result;
+}
+
+#define vld1_lane_f32(a, b, c) \
+ __extension__ \
+ ({ \
+ float32x2_t b_ = (b); \
+ const float32_t * a_ = (a); \
+ float32x2_t result; \
+ __asm__ ("ld1 {%0.s}[%1], %2" \
+ : "=w"(result) \
+ : "i" (c), "Utv"(*a_), "0"(b_) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vld1_lane_f64(a, b, c) \
+ __extension__ \
+ ({ \
+ float64x1_t b_ = (b); \
+ const float64_t * a_ = (a); \
+ float64x1_t result; \
+ __asm__ ("ld1 {%0.d}[%1], %2" \
+ : "=w"(result) \
+ : "i" (c), "Utv"(*a_), "0"(b_) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vld1_lane_p8(a, b, c) \
+ __extension__ \
+ ({ \
+ poly8x8_t b_ = (b); \
+ const poly8_t * a_ = (a); \
+ poly8x8_t result; \
+ __asm__ ("ld1 {%0.b}[%1], %2" \
+ : "=w"(result) \
+ : "i" (c), "Utv"(*a_), "0"(b_) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vld1_lane_p16(a, b, c) \
+ __extension__ \
+ ({ \
+ poly16x4_t b_ = (b); \
+ const poly16_t * a_ = (a); \
+ poly16x4_t result; \
+ __asm__ ("ld1 {%0.h}[%1], %2" \
+ : "=w"(result) \
+ : "i" (c), "Utv"(*a_), "0"(b_) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vld1_lane_s8(a, b, c) \
+ __extension__ \
+ ({ \
+ int8x8_t b_ = (b); \
+ const int8_t * a_ = (a); \
+ int8x8_t result; \
+ __asm__ ("ld1 {%0.b}[%1], %2" \
+ : "=w"(result) \
+ : "i" (c), "Utv"(*a_), "0"(b_) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vld1_lane_s16(a, b, c) \
+ __extension__ \
+ ({ \
+ int16x4_t b_ = (b); \
+ const int16_t * a_ = (a); \
+ int16x4_t result; \
+ __asm__ ("ld1 {%0.h}[%1], %2" \
+ : "=w"(result) \
+ : "i" (c), "Utv"(*a_), "0"(b_) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vld1_lane_s32(a, b, c) \
+ __extension__ \
+ ({ \
+ int32x2_t b_ = (b); \
+ const int32_t * a_ = (a); \
+ int32x2_t result; \
+ __asm__ ("ld1 {%0.s}[%1], %2" \
+ : "=w"(result) \
+ : "i" (c), "Utv"(*a_), "0"(b_) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vld1_lane_s64(a, b, c) \
+ __extension__ \
+ ({ \
+ int64x1_t b_ = (b); \
+ const int64_t * a_ = (a); \
+ int64x1_t result; \
+ __asm__ ("ld1 {%0.d}[%1], %2" \
+ : "=w"(result) \
+ : "i" (c), "Utv"(*a_), "0"(b_) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vld1_lane_u8(a, b, c) \
+ __extension__ \
+ ({ \
+ uint8x8_t b_ = (b); \
+ const uint8_t * a_ = (a); \
+ uint8x8_t result; \
+ __asm__ ("ld1 {%0.b}[%1], %2" \
+ : "=w"(result) \
+ : "i" (c), "Utv"(*a_), "0"(b_) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vld1_lane_u16(a, b, c) \
+ __extension__ \
+ ({ \
+ uint16x4_t b_ = (b); \
+ const uint16_t * a_ = (a); \
+ uint16x4_t result; \
+ __asm__ ("ld1 {%0.h}[%1], %2" \
+ : "=w"(result) \
+ : "i" (c), "Utv"(*a_), "0"(b_) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vld1_lane_u32(a, b, c) \
+ __extension__ \
+ ({ \
+ uint32x2_t b_ = (b); \
+ const uint32_t * a_ = (a); \
+ uint32x2_t result; \
+ __asm__ ("ld1 {%0.s}[%1], %2" \
+ : "=w"(result) \
+ : "i" (c), "Utv"(*a_), "0"(b_) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vld1_lane_u64(a, b, c) \
+ __extension__ \
+ ({ \
+ uint64x1_t b_ = (b); \
+ const uint64_t * a_ = (a); \
+ uint64x1_t result; \
+ __asm__ ("ld1 {%0.d}[%1], %2" \
+ : "=w"(result) \
+ : "i" (c), "Utv"(*a_), "0"(b_) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vld1q_dup_f32 (const float32_t * a)
+{
+ float32x4_t result;
+ __asm__ ("ld1r {%0.4s}, %1"
+ : "=w"(result)
+ : "Utv"(*a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vld1q_dup_f64 (const float64_t * a)
+{
+ float64x2_t result;
+ __asm__ ("ld1r {%0.2d}, %1"
+ : "=w"(result)
+ : "Utv"(*a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vld1q_dup_p8 (const poly8_t * a)
+{
+ poly8x16_t result;
+ __asm__ ("ld1r {%0.16b}, %1"
+ : "=w"(result)
+ : "Utv"(*a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vld1q_dup_p16 (const poly16_t * a)
+{
+ poly16x8_t result;
+ __asm__ ("ld1r {%0.8h}, %1"
+ : "=w"(result)
+ : "Utv"(*a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vld1q_dup_s8 (const int8_t * a)
+{
+ int8x16_t result;
+ __asm__ ("ld1r {%0.16b}, %1"
+ : "=w"(result)
+ : "Utv"(*a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vld1q_dup_s16 (const int16_t * a)
+{
+ int16x8_t result;
+ __asm__ ("ld1r {%0.8h}, %1"
+ : "=w"(result)
+ : "Utv"(*a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vld1q_dup_s32 (const int32_t * a)
+{
+ int32x4_t result;
+ __asm__ ("ld1r {%0.4s}, %1"
+ : "=w"(result)
+ : "Utv"(*a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vld1q_dup_s64 (const int64_t * a)
+{
+ int64x2_t result;
+ __asm__ ("ld1r {%0.2d}, %1"
+ : "=w"(result)
+ : "Utv"(*a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vld1q_dup_u8 (const uint8_t * a)
+{
+ uint8x16_t result;
+ __asm__ ("ld1r {%0.16b}, %1"
+ : "=w"(result)
+ : "Utv"(*a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vld1q_dup_u16 (const uint16_t * a)
+{
+ uint16x8_t result;
+ __asm__ ("ld1r {%0.8h}, %1"
+ : "=w"(result)
+ : "Utv"(*a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vld1q_dup_u32 (const uint32_t * a)
+{
+ uint32x4_t result;
+ __asm__ ("ld1r {%0.4s}, %1"
+ : "=w"(result)
+ : "Utv"(*a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vld1q_dup_u64 (const uint64_t * a)
+{
+ uint64x2_t result;
+ __asm__ ("ld1r {%0.2d}, %1"
+ : "=w"(result)
+ : "Utv"(*a)
+ : /* No clobbers */);
+ return result;
+}
+
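(Editorial aside, not taken from the patch: the ld1r-based vld1q_dup_* loads above pair naturally with the vfmaq_f32 wrapper defined earlier in this hunk, broadcasting a scalar once and fusing it into a per-lane multiply-add. The helper name axpy4 and its pointer parameter are assumptions for the sketch.)

#include <arm_neon.h>

/* Hypothetical axpy step: acc[i] += (*alpha) * x[i] for four float lanes.  */
static float32x4_t
axpy4 (float32x4_t acc, float32x4_t x, const float *alpha)
{
  float32x4_t a = vld1q_dup_f32 (alpha);  /* ld1r: replicate *alpha into all lanes */
  return vfmaq_f32 (acc, x, a);           /* fmla: acc + x * a, per lane */
}
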
+#define vld1q_lane_f32(a, b, c) \
+ __extension__ \
+ ({ \
+ float32x4_t b_ = (b); \
+ const float32_t * a_ = (a); \
+ float32x4_t result; \
+ __asm__ ("ld1 {%0.s}[%1], %2" \
+ : "=w"(result) \
+ : "i"(c), "Utv"(*a_), "0"(b_) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vld1q_lane_f64(a, b, c) \
+ __extension__ \
+ ({ \
+ float64x2_t b_ = (b); \
+ const float64_t * a_ = (a); \
+ float64x2_t result; \
+ __asm__ ("ld1 {%0.d}[%1], %2" \
+ : "=w"(result) \
+ : "i"(c), "Utv"(*a_), "0"(b_) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vld1q_lane_p8(a, b, c) \
+ __extension__ \
+ ({ \
+ poly8x16_t b_ = (b); \
+ const poly8_t * a_ = (a); \
+ poly8x16_t result; \
+ __asm__ ("ld1 {%0.b}[%1], %2" \
+ : "=w"(result) \
+ : "i"(c), "Utv"(*a_), "0"(b_) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vld1q_lane_p16(a, b, c) \
+ __extension__ \
+ ({ \
+ poly16x8_t b_ = (b); \
+ const poly16_t * a_ = (a); \
+ poly16x8_t result; \
+ __asm__ ("ld1 {%0.h}[%1], %2" \
+ : "=w"(result) \
+ : "i"(c), "Utv"(*a_), "0"(b_) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vld1q_lane_s8(a, b, c) \
+ __extension__ \
+ ({ \
+ int8x16_t b_ = (b); \
+ const int8_t * a_ = (a); \
+ int8x16_t result; \
+ __asm__ ("ld1 {%0.b}[%1], %2" \
+ : "=w"(result) \
+ : "i"(c), "Utv"(*a_), "0"(b_) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vld1q_lane_s16(a, b, c) \
+ __extension__ \
+ ({ \
+ int16x8_t b_ = (b); \
+ const int16_t * a_ = (a); \
+ int16x8_t result; \
+ __asm__ ("ld1 {%0.h}[%1], %2" \
+ : "=w"(result) \
+ : "i"(c), "Utv"(*a_), "0"(b_) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vld1q_lane_s32(a, b, c) \
+ __extension__ \
+ ({ \
+ int32x4_t b_ = (b); \
+ const int32_t * a_ = (a); \
+ int32x4_t result; \
+ __asm__ ("ld1 {%0.s}[%1], %2" \
+ : "=w"(result) \
+ : "i"(c), "Utv"(*a_), "0"(b_) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vld1q_lane_s64(a, b, c) \
+ __extension__ \
+ ({ \
+ int64x2_t b_ = (b); \
+ const int64_t * a_ = (a); \
+ int64x2_t result; \
+ __asm__ ("ld1 {%0.d}[%1], %2" \
+ : "=w"(result) \
+ : "i"(c), "Utv"(*a_), "0"(b_) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vld1q_lane_u8(a, b, c) \
+ __extension__ \
+ ({ \
+ uint8x16_t b_ = (b); \
+ const uint8_t * a_ = (a); \
+ uint8x16_t result; \
+ __asm__ ("ld1 {%0.b}[%1], %2" \
+ : "=w"(result) \
+ : "i"(c), "Utv"(*a_), "0"(b_) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vld1q_lane_u16(a, b, c) \
+ __extension__ \
+ ({ \
+ uint16x8_t b_ = (b); \
+ const uint16_t * a_ = (a); \
+ uint16x8_t result; \
+ __asm__ ("ld1 {%0.h}[%1], %2" \
+ : "=w"(result) \
+ : "i"(c), "Utv"(*a_), "0"(b_) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vld1q_lane_u32(a, b, c) \
+ __extension__ \
+ ({ \
+ uint32x4_t b_ = (b); \
+ const uint32_t * a_ = (a); \
+ uint32x4_t result; \
+ __asm__ ("ld1 {%0.s}[%1], %2" \
+ : "=w"(result) \
+ : "i"(c), "Utv"(*a_), "0"(b_) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vld1q_lane_u64(a, b, c) \
+ __extension__ \
+ ({ \
+ uint64x2_t b_ = (b); \
+ const uint64_t * a_ = (a); \
+ uint64x2_t result; \
+ __asm__ ("ld1 {%0.d}[%1], %2" \
+ : "=w"(result) \
+ : "i"(c), "Utv"(*a_), "0"(b_) \
+ : /* No clobbers */); \
+ result; \
+ })
+
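(Editorial sketch, not part of the diff: the lane-load macros just defined feed the lane index through an "i" constraint, so it must be an integer constant expression, which is why these are statement-expression macros rather than functions. patch_lane2 is an illustrative name.)

#include <arm_neon.h>

/* Hypothetical helper: load *p into lane 2 of 'v', leaving the other three
   lanes unchanged (ld1 {v.s}[2], [p]).  */
static float32x4_t
patch_lane2 (float32x4_t v, const float *p)
{
  return vld1q_lane_f32 (p, v, 2);
}
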
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmla_n_f32 (float32x2_t a, float32x2_t b, float32_t c)
+{
+ float32x2_t result;
+ float32x2_t t1;
+ __asm__ ("fmul %1.2s, %3.2s, %4.s[0]; fadd %0.2s, %0.2s, %1.2s"
+ : "=w"(result), "=w"(t1)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmla_n_s16 (int16x4_t a, int16x4_t b, int16_t c)
+{
+ int16x4_t result;
+ __asm__ ("mla %0.4h,%2.4h,%3.h[0]"
+ : "=w"(result)
+ : "0"(a), "w"(b), "x"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmla_n_s32 (int32x2_t a, int32x2_t b, int32_t c)
+{
+ int32x2_t result;
+ __asm__ ("mla %0.2s,%2.2s,%3.s[0]"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmla_n_u16 (uint16x4_t a, uint16x4_t b, uint16_t c)
+{
+ uint16x4_t result;
+ __asm__ ("mla %0.4h,%2.4h,%3.h[0]"
+ : "=w"(result)
+ : "0"(a), "w"(b), "x"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmla_n_u32 (uint32x2_t a, uint32x2_t b, uint32_t c)
+{
+ uint32x2_t result;
+ __asm__ ("mla %0.2s,%2.2s,%3.s[0]"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vmla_s8 (int8x8_t a, int8x8_t b, int8x8_t c)
+{
+ int8x8_t result;
+ __asm__ ("mla %0.8b, %2.8b, %3.8b"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmla_s16 (int16x4_t a, int16x4_t b, int16x4_t c)
+{
+ int16x4_t result;
+ __asm__ ("mla %0.4h, %2.4h, %3.4h"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmla_s32 (int32x2_t a, int32x2_t b, int32x2_t c)
+{
+ int32x2_t result;
+ __asm__ ("mla %0.2s, %2.2s, %3.2s"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vmla_u8 (uint8x8_t a, uint8x8_t b, uint8x8_t c)
+{
+ uint8x8_t result;
+ __asm__ ("mla %0.8b, %2.8b, %3.8b"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmla_u16 (uint16x4_t a, uint16x4_t b, uint16x4_t c)
+{
+ uint16x4_t result;
+ __asm__ ("mla %0.4h, %2.4h, %3.4h"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmla_u32 (uint32x2_t a, uint32x2_t b, uint32x2_t c)
+{
+ uint32x2_t result;
+ __asm__ ("mla %0.2s, %2.2s, %3.2s"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+#define vmlal_high_lane_s16(a, b, c, d) \
+ __extension__ \
+ ({ \
+ int16x8_t c_ = (c); \
+ int16x8_t b_ = (b); \
+ int32x4_t a_ = (a); \
+ int32x4_t result; \
+ __asm__ ("smlal2 %0.4s, %2.8h, %3.h[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "x"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmlal_high_lane_s32(a, b, c, d) \
+ __extension__ \
+ ({ \
+ int32x4_t c_ = (c); \
+ int32x4_t b_ = (b); \
+ int64x2_t a_ = (a); \
+ int64x2_t result; \
+ __asm__ ("smlal2 %0.2d, %2.4s, %3.s[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmlal_high_lane_u16(a, b, c, d) \
+ __extension__ \
+ ({ \
+ uint16x8_t c_ = (c); \
+ uint16x8_t b_ = (b); \
+ uint32x4_t a_ = (a); \
+ uint32x4_t result; \
+ __asm__ ("umlal2 %0.4s, %2.8h, %3.h[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "x"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmlal_high_lane_u32(a, b, c, d) \
+ __extension__ \
+ ({ \
+ uint32x4_t c_ = (c); \
+ uint32x4_t b_ = (b); \
+ uint64x2_t a_ = (a); \
+ uint64x2_t result; \
+ __asm__ ("umlal2 %0.2d, %2.4s, %3.s[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmlal_high_laneq_s16(a, b, c, d) \
+ __extension__ \
+ ({ \
+ int16x8_t c_ = (c); \
+ int16x8_t b_ = (b); \
+ int32x4_t a_ = (a); \
+ int32x4_t result; \
+ __asm__ ("smlal2 %0.4s, %2.8h, %3.h[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "x"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmlal_high_laneq_s32(a, b, c, d) \
+ __extension__ \
+ ({ \
+ int32x4_t c_ = (c); \
+ int32x4_t b_ = (b); \
+ int64x2_t a_ = (a); \
+ int64x2_t result; \
+ __asm__ ("smlal2 %0.2d, %2.4s, %3.s[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmlal_high_laneq_u16(a, b, c, d) \
+ __extension__ \
+ ({ \
+ uint16x8_t c_ = (c); \
+ uint16x8_t b_ = (b); \
+ uint32x4_t a_ = (a); \
+ uint32x4_t result; \
+ __asm__ ("umlal2 %0.4s, %2.8h, %3.h[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "x"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmlal_high_laneq_u32(a, b, c, d) \
+ __extension__ \
+ ({ \
+ uint32x4_t c_ = (c); \
+ uint32x4_t b_ = (b); \
+ uint64x2_t a_ = (a); \
+ uint64x2_t result; \
+ __asm__ ("umlal2 %0.2d, %2.4s, %3.s[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlal_high_n_s16 (int32x4_t a, int16x8_t b, int16_t c)
+{
+ int32x4_t result;
+ __asm__ ("smlal2 %0.4s,%2.8h,%3.h[0]"
+ : "=w"(result)
+ : "0"(a), "w"(b), "x"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmlal_high_n_s32 (int64x2_t a, int32x4_t b, int32_t c)
+{
+ int64x2_t result;
+ __asm__ ("smlal2 %0.2d,%2.4s,%3.s[0]"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlal_high_n_u16 (uint32x4_t a, uint16x8_t b, uint16_t c)
+{
+ uint32x4_t result;
+ __asm__ ("umlal2 %0.4s,%2.8h,%3.h[0]"
+ : "=w"(result)
+ : "0"(a), "w"(b), "x"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmlal_high_n_u32 (uint64x2_t a, uint32x4_t b, uint32_t c)
+{
+ uint64x2_t result;
+ __asm__ ("umlal2 %0.2d,%2.4s,%3.s[0]"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmlal_high_s8 (int16x8_t a, int8x16_t b, int8x16_t c)
+{
+ int16x8_t result;
+ __asm__ ("smlal2 %0.8h,%2.16b,%3.16b"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlal_high_s16 (int32x4_t a, int16x8_t b, int16x8_t c)
+{
+ int32x4_t result;
+ __asm__ ("smlal2 %0.4s,%2.8h,%3.8h"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmlal_high_s32 (int64x2_t a, int32x4_t b, int32x4_t c)
+{
+ int64x2_t result;
+ __asm__ ("smlal2 %0.2d,%2.4s,%3.4s"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmlal_high_u8 (uint16x8_t a, uint8x16_t b, uint8x16_t c)
+{
+ uint16x8_t result;
+ __asm__ ("umlal2 %0.8h,%2.16b,%3.16b"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlal_high_u16 (uint32x4_t a, uint16x8_t b, uint16x8_t c)
+{
+ uint32x4_t result;
+ __asm__ ("umlal2 %0.4s,%2.8h,%3.8h"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmlal_high_u32 (uint64x2_t a, uint32x4_t b, uint32x4_t c)
+{
+ uint64x2_t result;
+ __asm__ ("umlal2 %0.2d,%2.4s,%3.4s"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+#define vmlal_lane_s16(a, b, c, d) \
+ __extension__ \
+ ({ \
+ int16x4_t c_ = (c); \
+ int16x4_t b_ = (b); \
+ int32x4_t a_ = (a); \
+ int32x4_t result; \
+ __asm__ ("smlal %0.4s,%2.4h,%3.h[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "x"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmlal_lane_s32(a, b, c, d) \
+ __extension__ \
+ ({ \
+ int32x2_t c_ = (c); \
+ int32x2_t b_ = (b); \
+ int64x2_t a_ = (a); \
+ int64x2_t result; \
+ __asm__ ("smlal %0.2d,%2.2s,%3.s[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmlal_lane_u16(a, b, c, d) \
+ __extension__ \
+ ({ \
+ uint16x4_t c_ = (c); \
+ uint16x4_t b_ = (b); \
+ uint32x4_t a_ = (a); \
+ uint32x4_t result; \
+ __asm__ ("umlal %0.4s,%2.4h,%3.h[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "x"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmlal_lane_u32(a, b, c, d) \
+ __extension__ \
+ ({ \
+ uint32x2_t c_ = (c); \
+ uint32x2_t b_ = (b); \
+ uint64x2_t a_ = (a); \
+ uint64x2_t result; \
+ __asm__ ("umlal %0.2d, %2.2s, %3.s[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmlal_laneq_s16(a, b, c, d) \
+ __extension__ \
+ ({ \
+ int16x8_t c_ = (c); \
+ int16x4_t b_ = (b); \
+ int32x4_t a_ = (a); \
+ int32x4_t result; \
+ __asm__ ("smlal %0.4s, %2.4h, %3.h[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "x"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmlal_laneq_s32(a, b, c, d) \
+ __extension__ \
+ ({ \
+ int32x4_t c_ = (c); \
+ int32x2_t b_ = (b); \
+ int64x2_t a_ = (a); \
+ int64x2_t result; \
+ __asm__ ("smlal %0.2d, %2.2s, %3.s[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmlal_laneq_u16(a, b, c, d) \
+ __extension__ \
+ ({ \
+ uint16x8_t c_ = (c); \
+ uint16x4_t b_ = (b); \
+ uint32x4_t a_ = (a); \
+ uint32x4_t result; \
+ __asm__ ("umlal %0.4s, %2.4h, %3.h[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "x"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmlal_laneq_u32(a, b, c, d) \
+ __extension__ \
+ ({ \
+ uint32x4_t c_ = (c); \
+ uint32x2_t b_ = (b); \
+ uint64x2_t a_ = (a); \
+ uint64x2_t result; \
+ __asm__ ("umlal %0.2d, %2.2s, %3.s[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlal_n_s16 (int32x4_t a, int16x4_t b, int16_t c)
+{
+ int32x4_t result;
+ __asm__ ("smlal %0.4s,%2.4h,%3.h[0]"
+ : "=w"(result)
+ : "0"(a), "w"(b), "x"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmlal_n_s32 (int64x2_t a, int32x2_t b, int32_t c)
+{
+ int64x2_t result;
+ __asm__ ("smlal %0.2d,%2.2s,%3.s[0]"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlal_n_u16 (uint32x4_t a, uint16x4_t b, uint16_t c)
+{
+ uint32x4_t result;
+ __asm__ ("umlal %0.4s,%2.4h,%3.h[0]"
+ : "=w"(result)
+ : "0"(a), "w"(b), "x"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmlal_n_u32 (uint64x2_t a, uint32x2_t b, uint32_t c)
+{
+ uint64x2_t result;
+ __asm__ ("umlal %0.2d,%2.2s,%3.s[0]"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmlal_s8 (int16x8_t a, int8x8_t b, int8x8_t c)
+{
+ int16x8_t result;
+ __asm__ ("smlal %0.8h,%2.8b,%3.8b"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlal_s16 (int32x4_t a, int16x4_t b, int16x4_t c)
+{
+ int32x4_t result;
+ __asm__ ("smlal %0.4s,%2.4h,%3.4h"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmlal_s32 (int64x2_t a, int32x2_t b, int32x2_t c)
+{
+ int64x2_t result;
+ __asm__ ("smlal %0.2d,%2.2s,%3.2s"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmlal_u8 (uint16x8_t a, uint8x8_t b, uint8x8_t c)
+{
+ uint16x8_t result;
+ __asm__ ("umlal %0.8h,%2.8b,%3.8b"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlal_u16 (uint32x4_t a, uint16x4_t b, uint16x4_t c)
+{
+ uint32x4_t result;
+ __asm__ ("umlal %0.4s,%2.4h,%3.4h"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmlal_u32 (uint64x2_t a, uint32x2_t b, uint32x2_t c)
+{
+ uint64x2_t result;
+ __asm__ ("umlal %0.2d,%2.2s,%3.2s"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
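(Editorial sketch under stated assumptions, not from the patch: the widening multiply-accumulate wrappers above map to smlal/umlal, so 8-bit products accumulate into 16-bit lanes without intermediate overflow. weighted_sum is an assumed name.)

#include <arm_neon.h>

/* Hypothetical kernel step: acc[i] += pixels[i] * weights[i], with each
   8-bit product widened to 16 bits per lane (umlal).  */
static uint16x8_t
weighted_sum (uint16x8_t acc, uint8x8_t pixels, uint8x8_t weights)
{
  return vmlal_u8 (acc, pixels, weights);
}
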
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vmlaq_n_f32 (float32x4_t a, float32x4_t b, float32_t c)
+{
+ float32x4_t result;
+ float32x4_t t1;
+ __asm__ ("fmul %1.4s, %3.4s, %4.s[0]; fadd %0.4s, %0.4s, %1.4s"
+ : "=w"(result), "=w"(t1)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vmlaq_n_f64 (float64x2_t a, float64x2_t b, float64_t c)
+{
+ float64x2_t result;
+ float64x2_t t1;
+ __asm__ ("fmul %1.2d, %3.2d, %4.d[0]; fadd %0.2d, %0.2d, %1.2d"
+ : "=w"(result), "=w"(t1)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmlaq_n_s16 (int16x8_t a, int16x8_t b, int16_t c)
+{
+ int16x8_t result;
+ __asm__ ("mla %0.8h,%2.8h,%3.h[0]"
+ : "=w"(result)
+ : "0"(a), "w"(b), "x"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlaq_n_s32 (int32x4_t a, int32x4_t b, int32_t c)
+{
+ int32x4_t result;
+ __asm__ ("mla %0.4s,%2.4s,%3.s[0]"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmlaq_n_u16 (uint16x8_t a, uint16x8_t b, uint16_t c)
+{
+ uint16x8_t result;
+ __asm__ ("mla %0.8h,%2.8h,%3.h[0]"
+ : "=w"(result)
+ : "0"(a), "w"(b), "x"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlaq_n_u32 (uint32x4_t a, uint32x4_t b, uint32_t c)
+{
+ uint32x4_t result;
+ __asm__ ("mla %0.4s,%2.4s,%3.s[0]"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vmlaq_s8 (int8x16_t a, int8x16_t b, int8x16_t c)
+{
+ int8x16_t result;
+ __asm__ ("mla %0.16b, %2.16b, %3.16b"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmlaq_s16 (int16x8_t a, int16x8_t b, int16x8_t c)
+{
+ int16x8_t result;
+ __asm__ ("mla %0.8h, %2.8h, %3.8h"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlaq_s32 (int32x4_t a, int32x4_t b, int32x4_t c)
+{
+ int32x4_t result;
+ __asm__ ("mla %0.4s, %2.4s, %3.4s"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vmlaq_u8 (uint8x16_t a, uint8x16_t b, uint8x16_t c)
+{
+ uint8x16_t result;
+ __asm__ ("mla %0.16b, %2.16b, %3.16b"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmlaq_u16 (uint16x8_t a, uint16x8_t b, uint16x8_t c)
+{
+ uint16x8_t result;
+ __asm__ ("mla %0.8h, %2.8h, %3.8h"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlaq_u32 (uint32x4_t a, uint32x4_t b, uint32x4_t c)
+{
+ uint32x4_t result;
+ __asm__ ("mla %0.4s, %2.4s, %3.4s"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmls_n_f32 (float32x2_t a, float32x2_t b, float32_t c)
+{
+ float32x2_t result;
+ float32x2_t t1;
+ __asm__ ("fmul %1.2s, %3.2s, %4.s[0]; fsub %0.2s, %0.2s, %1.2s"
+ : "=w"(result), "=w"(t1)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmls_n_s16 (int16x4_t a, int16x4_t b, int16_t c)
+{
+ int16x4_t result;
+ __asm__ ("mls %0.4h, %2.4h, %3.h[0]"
+ : "=w"(result)
+ : "0"(a), "w"(b), "x"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmls_n_s32 (int32x2_t a, int32x2_t b, int32_t c)
+{
+ int32x2_t result;
+ __asm__ ("mls %0.2s, %2.2s, %3.s[0]"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmls_n_u16 (uint16x4_t a, uint16x4_t b, uint16_t c)
+{
+ uint16x4_t result;
+ __asm__ ("mls %0.4h, %2.4h, %3.h[0]"
+ : "=w"(result)
+ : "0"(a), "w"(b), "x"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmls_n_u32 (uint32x2_t a, uint32x2_t b, uint32_t c)
+{
+ uint32x2_t result;
+ __asm__ ("mls %0.2s, %2.2s, %3.s[0]"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vmls_s8 (int8x8_t a, int8x8_t b, int8x8_t c)
+{
+ int8x8_t result;
+ __asm__ ("mls %0.8b,%2.8b,%3.8b"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmls_s16 (int16x4_t a, int16x4_t b, int16x4_t c)
+{
+ int16x4_t result;
+ __asm__ ("mls %0.4h,%2.4h,%3.4h"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmls_s32 (int32x2_t a, int32x2_t b, int32x2_t c)
+{
+ int32x2_t result;
+ __asm__ ("mls %0.2s,%2.2s,%3.2s"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vmls_u8 (uint8x8_t a, uint8x8_t b, uint8x8_t c)
+{
+ uint8x8_t result;
+ __asm__ ("mls %0.8b,%2.8b,%3.8b"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmls_u16 (uint16x4_t a, uint16x4_t b, uint16x4_t c)
+{
+ uint16x4_t result;
+ __asm__ ("mls %0.4h,%2.4h,%3.4h"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmls_u32 (uint32x2_t a, uint32x2_t b, uint32x2_t c)
+{
+ uint32x2_t result;
+ __asm__ ("mls %0.2s,%2.2s,%3.2s"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+#define vmlsl_high_lane_s16(a, b, c, d) \
+ __extension__ \
+ ({ \
+ int16x8_t c_ = (c); \
+ int16x8_t b_ = (b); \
+ int32x4_t a_ = (a); \
+ int32x4_t result; \
+ __asm__ ("smlsl2 %0.4s, %2.8h, %3.h[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "x"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmlsl_high_lane_s32(a, b, c, d) \
+ __extension__ \
+ ({ \
+ int32x4_t c_ = (c); \
+ int32x4_t b_ = (b); \
+ int64x2_t a_ = (a); \
+ int64x2_t result; \
+ __asm__ ("smlsl2 %0.2d, %2.4s, %3.s[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmlsl_high_lane_u16(a, b, c, d) \
+ __extension__ \
+ ({ \
+ uint16x8_t c_ = (c); \
+ uint16x8_t b_ = (b); \
+ uint32x4_t a_ = (a); \
+ uint32x4_t result; \
+ __asm__ ("umlsl2 %0.4s, %2.8h, %3.h[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "x"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmlsl_high_lane_u32(a, b, c, d) \
+ __extension__ \
+ ({ \
+ uint32x4_t c_ = (c); \
+ uint32x4_t b_ = (b); \
+ uint64x2_t a_ = (a); \
+ uint64x2_t result; \
+ __asm__ ("umlsl2 %0.2d, %2.4s, %3.s[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmlsl_high_laneq_s16(a, b, c, d) \
+ __extension__ \
+ ({ \
+ int16x8_t c_ = (c); \
+ int16x8_t b_ = (b); \
+ int32x4_t a_ = (a); \
+ int32x4_t result; \
+ __asm__ ("smlsl2 %0.4s, %2.8h, %3.h[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "x"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmlsl_high_laneq_s32(a, b, c, d) \
+ __extension__ \
+ ({ \
+ int32x4_t c_ = (c); \
+ int32x4_t b_ = (b); \
+ int64x2_t a_ = (a); \
+ int64x2_t result; \
+ __asm__ ("smlsl2 %0.2d, %2.4s, %3.s[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmlsl_high_laneq_u16(a, b, c, d) \
+ __extension__ \
+ ({ \
+ uint16x8_t c_ = (c); \
+ uint16x8_t b_ = (b); \
+ uint32x4_t a_ = (a); \
+ uint32x4_t result; \
+ __asm__ ("umlsl2 %0.4s, %2.8h, %3.h[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "x"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmlsl_high_laneq_u32(a, b, c, d) \
+ __extension__ \
+ ({ \
+ uint32x4_t c_ = (c); \
+ uint32x4_t b_ = (b); \
+ uint64x2_t a_ = (a); \
+ uint64x2_t result; \
+ __asm__ ("umlsl2 %0.2d, %2.4s, %3.s[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlsl_high_n_s16 (int32x4_t a, int16x8_t b, int16_t c)
+{
+ int32x4_t result;
+ __asm__ ("smlsl2 %0.4s, %2.8h, %3.h[0]"
+ : "=w"(result)
+ : "0"(a), "w"(b), "x"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmlsl_high_n_s32 (int64x2_t a, int32x4_t b, int32_t c)
+{
+ int64x2_t result;
+ __asm__ ("smlsl2 %0.2d, %2.4s, %3.s[0]"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlsl_high_n_u16 (uint32x4_t a, uint16x8_t b, uint16_t c)
+{
+ uint32x4_t result;
+ __asm__ ("umlsl2 %0.4s, %2.8h, %3.h[0]"
+ : "=w"(result)
+ : "0"(a), "w"(b), "x"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmlsl_high_n_u32 (uint64x2_t a, uint32x4_t b, uint32_t c)
+{
+ uint64x2_t result;
+ __asm__ ("umlsl2 %0.2d, %2.4s, %3.s[0]"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmlsl_high_s8 (int16x8_t a, int8x16_t b, int8x16_t c)
+{
+ int16x8_t result;
+ __asm__ ("smlsl2 %0.8h,%2.16b,%3.16b"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlsl_high_s16 (int32x4_t a, int16x8_t b, int16x8_t c)
+{
+ int32x4_t result;
+ __asm__ ("smlsl2 %0.4s,%2.8h,%3.8h"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmlsl_high_s32 (int64x2_t a, int32x4_t b, int32x4_t c)
+{
+ int64x2_t result;
+ __asm__ ("smlsl2 %0.2d,%2.4s,%3.4s"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmlsl_high_u8 (uint16x8_t a, uint8x16_t b, uint8x16_t c)
+{
+ uint16x8_t result;
+ __asm__ ("umlsl2 %0.8h,%2.16b,%3.16b"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlsl_high_u16 (uint32x4_t a, uint16x8_t b, uint16x8_t c)
+{
+ uint32x4_t result;
+ __asm__ ("umlsl2 %0.4s,%2.8h,%3.8h"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmlsl_high_u32 (uint64x2_t a, uint32x4_t b, uint32x4_t c)
+{
+ uint64x2_t result;
+ __asm__ ("umlsl2 %0.2d,%2.4s,%3.4s"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+#define vmlsl_lane_s16(a, b, c, d) \
+ __extension__ \
+ ({ \
+ int16x4_t c_ = (c); \
+ int16x4_t b_ = (b); \
+ int32x4_t a_ = (a); \
+ int32x4_t result; \
+ __asm__ ("smlsl %0.4s, %2.4h, %3.h[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "x"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmlsl_lane_s32(a, b, c, d) \
+ __extension__ \
+ ({ \
+ int32x2_t c_ = (c); \
+ int32x2_t b_ = (b); \
+ int64x2_t a_ = (a); \
+ int64x2_t result; \
+ __asm__ ("smlsl %0.2d, %2.2s, %3.s[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmlsl_lane_u16(a, b, c, d) \
+ __extension__ \
+ ({ \
+ uint16x4_t c_ = (c); \
+ uint16x4_t b_ = (b); \
+ uint32x4_t a_ = (a); \
+ uint32x4_t result; \
+ __asm__ ("umlsl %0.4s, %2.4h, %3.h[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "x"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmlsl_lane_u32(a, b, c, d) \
+ __extension__ \
+ ({ \
+ uint32x2_t c_ = (c); \
+ uint32x2_t b_ = (b); \
+ uint64x2_t a_ = (a); \
+ uint64x2_t result; \
+ __asm__ ("umlsl %0.2d, %2.2s, %3.s[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmlsl_laneq_s16(a, b, c, d) \
+ __extension__ \
+ ({ \
+ int16x8_t c_ = (c); \
+ int16x4_t b_ = (b); \
+ int32x4_t a_ = (a); \
+ int32x4_t result; \
+ __asm__ ("smlsl %0.4s, %2.4h, %3.h[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "x"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmlsl_laneq_s32(a, b, c, d) \
+ __extension__ \
+ ({ \
+ int32x4_t c_ = (c); \
+ int32x2_t b_ = (b); \
+ int64x2_t a_ = (a); \
+ int64x2_t result; \
+ __asm__ ("smlsl %0.2d, %2.2s, %3.s[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmlsl_laneq_u16(a, b, c, d) \
+ __extension__ \
+ ({ \
+ uint16x8_t c_ = (c); \
+ uint16x4_t b_ = (b); \
+ uint32x4_t a_ = (a); \
+ uint32x4_t result; \
+ __asm__ ("umlsl %0.4s, %2.4h, %3.h[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "x"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmlsl_laneq_u32(a, b, c, d) \
+ __extension__ \
+ ({ \
+ uint32x4_t c_ = (c); \
+ uint32x2_t b_ = (b); \
+ uint64x2_t a_ = (a); \
+ uint64x2_t result; \
+ __asm__ ("umlsl %0.2d, %2.2s, %3.s[%4]" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlsl_n_s16 (int32x4_t a, int16x4_t b, int16_t c)
+{
+ int32x4_t result;
+ __asm__ ("smlsl %0.4s, %2.4h, %3.h[0]"
+ : "=w"(result)
+ : "0"(a), "w"(b), "x"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmlsl_n_s32 (int64x2_t a, int32x2_t b, int32_t c)
+{
+ int64x2_t result;
+ __asm__ ("smlsl %0.2d, %2.2s, %3.s[0]"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlsl_n_u16 (uint32x4_t a, uint16x4_t b, uint16_t c)
+{
+ uint32x4_t result;
+ __asm__ ("umlsl %0.4s, %2.4h, %3.h[0]"
+ : "=w"(result)
+ : "0"(a), "w"(b), "x"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmlsl_n_u32 (uint64x2_t a, uint32x2_t b, uint32_t c)
+{
+ uint64x2_t result;
+ __asm__ ("umlsl %0.2d, %2.2s, %3.s[0]"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmlsl_s8 (int16x8_t a, int8x8_t b, int8x8_t c)
+{
+ int16x8_t result;
+ __asm__ ("smlsl %0.8h, %2.8b, %3.8b"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlsl_s16 (int32x4_t a, int16x4_t b, int16x4_t c)
+{
+ int32x4_t result;
+ __asm__ ("smlsl %0.4s, %2.4h, %3.4h"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmlsl_s32 (int64x2_t a, int32x2_t b, int32x2_t c)
+{
+ int64x2_t result;
+ __asm__ ("smlsl %0.2d, %2.2s, %3.2s"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmlsl_u8 (uint16x8_t a, uint8x8_t b, uint8x8_t c)
+{
+ uint16x8_t result;
+ __asm__ ("umlsl %0.8h, %2.8b, %3.8b"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlsl_u16 (uint32x4_t a, uint16x4_t b, uint16x4_t c)
+{
+ uint32x4_t result;
+ __asm__ ("umlsl %0.4s, %2.4h, %3.4h"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmlsl_u32 (uint64x2_t a, uint32x2_t b, uint32x2_t c)
+{
+ uint64x2_t result;
+ __asm__ ("umlsl %0.2d, %2.2s, %3.2s"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vmlsq_n_f32 (float32x4_t a, float32x4_t b, float32_t c)
+{
+ float32x4_t result;
+ float32x4_t t1;
+ __asm__ ("fmul %1.4s, %3.4s, %4.s[0]; fsub %0.4s, %0.4s, %1.4s"
+ : "=w"(result), "=w"(t1)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vmlsq_n_f64 (float64x2_t a, float64x2_t b, float64_t c)
+{
+ float64x2_t result;
+ float64x2_t t1;
+ __asm__ ("fmul %1.2d, %3.2d, %4.d[0]; fsub %0.2d, %0.2d, %1.2d"
+ : "=w"(result), "=w"(t1)
+ : "0"(a), "w"(b), "x"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmlsq_n_s16 (int16x8_t a, int16x8_t b, int16_t c)
+{
+ int16x8_t result;
+ __asm__ ("mls %0.8h, %2.8h, %3.h[0]"
+ : "=w"(result)
+ : "0"(a), "w"(b), "x"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlsq_n_s32 (int32x4_t a, int32x4_t b, int32_t c)
+{
+ int32x4_t result;
+ __asm__ ("mls %0.4s, %2.4s, %3.s[0]"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmlsq_n_u16 (uint16x8_t a, uint16x8_t b, uint16_t c)
+{
+ uint16x8_t result;
+ __asm__ ("mls %0.8h, %2.8h, %3.h[0]"
+ : "=w"(result)
+ : "0"(a), "w"(b), "x"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlsq_n_u32 (uint32x4_t a, uint32x4_t b, uint32_t c)
+{
+ uint32x4_t result;
+ __asm__ ("mls %0.4s, %2.4s, %3.s[0]"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vmlsq_s8 (int8x16_t a, int8x16_t b, int8x16_t c)
+{
+ int8x16_t result;
+ __asm__ ("mls %0.16b,%2.16b,%3.16b"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmlsq_s16 (int16x8_t a, int16x8_t b, int16x8_t c)
+{
+ int16x8_t result;
+ __asm__ ("mls %0.8h,%2.8h,%3.8h"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlsq_s32 (int32x4_t a, int32x4_t b, int32x4_t c)
+{
+ int32x4_t result;
+ __asm__ ("mls %0.4s,%2.4s,%3.4s"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vmlsq_u8 (uint8x16_t a, uint8x16_t b, uint8x16_t c)
+{
+ uint8x16_t result;
+ __asm__ ("mls %0.16b,%2.16b,%3.16b"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmlsq_u16 (uint16x8_t a, uint16x8_t b, uint16x8_t c)
+{
+ uint16x8_t result;
+ __asm__ ("mls %0.8h,%2.8h,%3.8h"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlsq_u32 (uint32x4_t a, uint32x4_t b, uint32x4_t c)
+{
+ uint32x4_t result;
+ __asm__ ("mls %0.4s,%2.4s,%3.4s"
+ : "=w"(result)
+ : "0"(a), "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmovl_high_s8 (int8x16_t a)
+{
+ int16x8_t result;
+ __asm__ ("sshll2 %0.8h,%1.16b,#0"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmovl_high_s16 (int16x8_t a)
+{
+ int32x4_t result;
+ __asm__ ("sshll2 %0.4s,%1.8h,#0"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmovl_high_s32 (int32x4_t a)
+{
+ int64x2_t result;
+ __asm__ ("sshll2 %0.2d,%1.4s,#0"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmovl_high_u8 (uint8x16_t a)
+{
+ uint16x8_t result;
+ __asm__ ("ushll2 %0.8h,%1.16b,#0"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmovl_high_u16 (uint16x8_t a)
+{
+ uint32x4_t result;
+ __asm__ ("ushll2 %0.4s,%1.8h,#0"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmovl_high_u32 (uint32x4_t a)
+{
+ uint64x2_t result;
+ __asm__ ("ushll2 %0.2d,%1.4s,#0"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmovl_s8 (int8x8_t a)
+{
+ int16x8_t result;
+ __asm__ ("sshll %0.8h,%1.8b,#0"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmovl_s16 (int16x4_t a)
+{
+ int32x4_t result;
+ __asm__ ("sshll %0.4s,%1.4h,#0"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmovl_s32 (int32x2_t a)
+{
+ int64x2_t result;
+ __asm__ ("sshll %0.2d,%1.2s,#0"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmovl_u8 (uint8x8_t a)
+{
+ uint16x8_t result;
+ __asm__ ("ushll %0.8h,%1.8b,#0"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmovl_u16 (uint16x4_t a)
+{
+ uint32x4_t result;
+ __asm__ ("ushll %0.4s,%1.4h,#0"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmovl_u32 (uint32x2_t a)
+{
+ uint64x2_t result;
+ __asm__ ("ushll %0.2d,%1.2s,#0"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
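+/* vmovn_high_*: narrow B into the upper half of the result (XTN2); the
+   lower half is taken unchanged from A.  */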
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vmovn_high_s16 (int8x8_t a, int16x8_t b)
+{
+ int8x16_t result = vcombine_s8 (a, vcreate_s8 (__AARCH64_UINT64_C (0x0)));
+ __asm__ ("xtn2 %0.16b,%1.8h"
+ : "+w"(result)
+ : "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmovn_high_s32 (int16x4_t a, int32x4_t b)
+{
+ int16x8_t result = vcombine_s16 (a, vcreate_s16 (__AARCH64_UINT64_C (0x0)));
+ __asm__ ("xtn2 %0.8h,%1.4s"
+ : "+w"(result)
+ : "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmovn_high_s64 (int32x2_t a, int64x2_t b)
+{
+ int32x4_t result = vcombine_s32 (a, vcreate_s32 (__AARCH64_UINT64_C (0x0)));
+ __asm__ ("xtn2 %0.4s,%1.2d"
+ : "+w"(result)
+ : "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vmovn_high_u16 (uint8x8_t a, uint16x8_t b)
+{
+ uint8x16_t result = vcombine_u8 (a, vcreate_u8 (__AARCH64_UINT64_C (0x0)));
+ __asm__ ("xtn2 %0.16b,%1.8h"
+ : "+w"(result)
+ : "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmovn_high_u32 (uint16x4_t a, uint32x4_t b)
+{
+ uint16x8_t result = vcombine_u16 (a, vcreate_u16 (__AARCH64_UINT64_C (0x0)));
+ __asm__ ("xtn2 %0.8h,%1.4s"
+ : "+w"(result)
+ : "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmovn_high_u64 (uint32x2_t a, uint64x2_t b)
+{
+ uint32x4_t result = vcombine_u32 (a, vcreate_u32 (__AARCH64_UINT64_C (0x0)));
+ __asm__ ("xtn2 %0.4s,%1.2d"
+ : "+w"(result)
+ : "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vmovn_s16 (int16x8_t a)
+{
+ int8x8_t result;
+ __asm__ ("xtn %0.8b,%1.8h"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmovn_s32 (int32x4_t a)
+{
+ int16x4_t result;
+ __asm__ ("xtn %0.4h,%1.4s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmovn_s64 (int64x2_t a)
+{
+ int32x2_t result;
+ __asm__ ("xtn %0.2s,%1.2d"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vmovn_u16 (uint16x8_t a)
+{
+ uint8x8_t result;
+ __asm__ ("xtn %0.8b,%1.8h"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmovn_u32 (uint32x4_t a)
+{
+ uint16x4_t result;
+ __asm__ ("xtn %0.4h,%1.4s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmovn_u64 (uint64x2_t a)
+{
+ uint32x2_t result;
+ __asm__ ("xtn %0.2s,%1.2d"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
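+/* Multiply a vector by a scalar using the by-element form of MUL/FMUL.
+   16-bit element indices can only encode registers v0-v15, hence the "x"
+   constraint on those operands; 32-bit forms may use any register ("w").  */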
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmul_n_f32 (float32x2_t a, float32_t b)
+{
+ float32x2_t result;
+ __asm__ ("fmul %0.2s,%1.2s,%2.s[0]"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmul_n_s16 (int16x4_t a, int16_t b)
+{
+ int16x4_t result;
+ __asm__ ("mul %0.4h,%1.4h,%2.h[0]"
+ : "=w"(result)
+ : "w"(a), "x"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmul_n_s32 (int32x2_t a, int32_t b)
+{
+ int32x2_t result;
+ __asm__ ("mul %0.2s,%1.2s,%2.s[0]"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmul_n_u16 (uint16x4_t a, uint16_t b)
+{
+ uint16x4_t result;
+ __asm__ ("mul %0.4h,%1.4h,%2.h[0]"
+ : "=w"(result)
+ : "w"(a), "x"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmul_n_u32 (uint32x2_t a, uint32_t b)
+{
+ uint32x2_t result;
+ __asm__ ("mul %0.2s,%1.2s,%2.s[0]"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
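+/* The *_lane_* variants are macros rather than inline functions so that
+   the lane number reaches the asm as an immediate ("i" constraint).  */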
+#define vmuld_lane_f64(a, b, c) \
+ __extension__ \
+ ({ \
+ float64x2_t b_ = (b); \
+ float64_t a_ = (a); \
+ float64_t result; \
+ __asm__ ("fmul %d0,%d1,%2.d[%3]" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmull_high_lane_s16(a, b, c) \
+ __extension__ \
+ ({ \
+ int16x8_t b_ = (b); \
+ int16x8_t a_ = (a); \
+ int32x4_t result; \
+ __asm__ ("smull2 %0.4s, %1.8h, %2.h[%3]" \
+ : "=w"(result) \
+ : "w"(a_), "x"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmull_high_lane_s32(a, b, c) \
+ __extension__ \
+ ({ \
+ int32x4_t b_ = (b); \
+ int32x4_t a_ = (a); \
+ int64x2_t result; \
+ __asm__ ("smull2 %0.2d, %1.4s, %2.s[%3]" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmull_high_lane_u16(a, b, c) \
+ __extension__ \
+ ({ \
+ uint16x8_t b_ = (b); \
+ uint16x8_t a_ = (a); \
+ uint32x4_t result; \
+ __asm__ ("umull2 %0.4s, %1.8h, %2.h[%3]" \
+ : "=w"(result) \
+ : "w"(a_), "x"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmull_high_lane_u32(a, b, c) \
+ __extension__ \
+ ({ \
+ uint32x4_t b_ = (b); \
+ uint32x4_t a_ = (a); \
+ uint64x2_t result; \
+ __asm__ ("umull2 %0.2d, %1.4s, %2.s[%3]" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmull_high_laneq_s16(a, b, c) \
+ __extension__ \
+ ({ \
+ int16x8_t b_ = (b); \
+ int16x8_t a_ = (a); \
+ int32x4_t result; \
+ __asm__ ("smull2 %0.4s, %1.8h, %2.h[%3]" \
+ : "=w"(result) \
+ : "w"(a_), "x"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmull_high_laneq_s32(a, b, c) \
+ __extension__ \
+ ({ \
+ int32x4_t b_ = (b); \
+ int32x4_t a_ = (a); \
+ int64x2_t result; \
+ __asm__ ("smull2 %0.2d, %1.4s, %2.s[%3]" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmull_high_laneq_u16(a, b, c) \
+ __extension__ \
+ ({ \
+ uint16x8_t b_ = (b); \
+ uint16x8_t a_ = (a); \
+ uint32x4_t result; \
+ __asm__ ("umull2 %0.4s, %1.8h, %2.h[%3]" \
+ : "=w"(result) \
+ : "w"(a_), "x"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmull_high_laneq_u32(a, b, c) \
+ __extension__ \
+ ({ \
+ uint32x4_t b_ = (b); \
+ uint32x4_t a_ = (a); \
+ uint64x2_t result; \
+ __asm__ ("umull2 %0.2d, %1.4s, %2.s[%3]" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
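+/* Widening multiplies of the upper vector halves (SMULL2/UMULL2/PMULL2).  */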
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmull_high_n_s16 (int16x8_t a, int16_t b)
+{
+ int32x4_t result;
+ __asm__ ("smull2 %0.4s,%1.8h,%2.h[0]"
+ : "=w"(result)
+ : "w"(a), "x"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmull_high_n_s32 (int32x4_t a, int32_t b)
+{
+ int64x2_t result;
+ __asm__ ("smull2 %0.2d,%1.4s,%2.s[0]"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmull_high_n_u16 (uint16x8_t a, uint16_t b)
+{
+ uint32x4_t result;
+ __asm__ ("umull2 %0.4s,%1.8h,%2.h[0]"
+ : "=w"(result)
+ : "w"(a), "x"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmull_high_n_u32 (uint32x4_t a, uint32_t b)
+{
+ uint64x2_t result;
+ __asm__ ("umull2 %0.2d,%1.4s,%2.s[0]"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vmull_high_p8 (poly8x16_t a, poly8x16_t b)
+{
+ poly16x8_t result;
+ __asm__ ("pmull2 %0.8h,%1.16b,%2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmull_high_s8 (int8x16_t a, int8x16_t b)
+{
+ int16x8_t result;
+ __asm__ ("smull2 %0.8h,%1.16b,%2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmull_high_s16 (int16x8_t a, int16x8_t b)
+{
+ int32x4_t result;
+ __asm__ ("smull2 %0.4s,%1.8h,%2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmull_high_s32 (int32x4_t a, int32x4_t b)
+{
+ int64x2_t result;
+ __asm__ ("smull2 %0.2d,%1.4s,%2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmull_high_u8 (uint8x16_t a, uint8x16_t b)
+{
+ uint16x8_t result;
+ __asm__ ("umull2 %0.8h,%1.16b,%2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmull_high_u16 (uint16x8_t a, uint16x8_t b)
+{
+ uint32x4_t result;
+ __asm__ ("umull2 %0.4s,%1.8h,%2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmull_high_u32 (uint32x4_t a, uint32x4_t b)
+{
+ uint64x2_t result;
+ __asm__ ("umull2 %0.2d,%1.4s,%2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+#define vmull_lane_s16(a, b, c) \
+ __extension__ \
+ ({ \
+ int16x4_t b_ = (b); \
+ int16x4_t a_ = (a); \
+ int32x4_t result; \
+ __asm__ ("smull %0.4s,%1.4h,%2.h[%3]" \
+ : "=w"(result) \
+ : "w"(a_), "x"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmull_lane_s32(a, b, c) \
+ __extension__ \
+ ({ \
+ int32x2_t b_ = (b); \
+ int32x2_t a_ = (a); \
+ int64x2_t result; \
+ __asm__ ("smull %0.2d,%1.2s,%2.s[%3]" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmull_lane_u16(a, b, c) \
+ __extension__ \
+ ({ \
+ uint16x4_t b_ = (b); \
+ uint16x4_t a_ = (a); \
+ uint32x4_t result; \
+ __asm__ ("umull %0.4s,%1.4h,%2.h[%3]" \
+ : "=w"(result) \
+ : "w"(a_), "x"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmull_lane_u32(a, b, c) \
+ __extension__ \
+ ({ \
+ uint32x2_t b_ = (b); \
+ uint32x2_t a_ = (a); \
+ uint64x2_t result; \
+ __asm__ ("umull %0.2d, %1.2s, %2.s[%3]" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmull_laneq_s16(a, b, c) \
+ __extension__ \
+ ({ \
+ int16x8_t b_ = (b); \
+ int16x4_t a_ = (a); \
+ int32x4_t result; \
+ __asm__ ("smull %0.4s, %1.4h, %2.h[%3]" \
+ : "=w"(result) \
+ : "w"(a_), "x"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmull_laneq_s32(a, b, c) \
+ __extension__ \
+ ({ \
+ int32x4_t b_ = (b); \
+ int32x2_t a_ = (a); \
+ int64x2_t result; \
+ __asm__ ("smull %0.2d, %1.2s, %2.s[%3]" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmull_laneq_u16(a, b, c) \
+ __extension__ \
+ ({ \
+ uint16x8_t b_ = (b); \
+ uint16x4_t a_ = (a); \
+ uint32x4_t result; \
+ __asm__ ("umull %0.4s, %1.4h, %2.h[%3]" \
+ : "=w"(result) \
+ : "w"(a_), "x"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmull_laneq_u32(a, b, c) \
+ __extension__ \
+ ({ \
+ uint32x4_t b_ = (b); \
+ uint32x2_t a_ = (a); \
+ uint64x2_t result; \
+ __asm__ ("umull %0.2d, %1.2s, %2.s[%3]" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmull_n_s16 (int16x4_t a, int16_t b)
+{
+ int32x4_t result;
+ __asm__ ("smull %0.4s,%1.4h,%2.h[0]"
+ : "=w"(result)
+ : "w"(a), "x"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmull_n_s32 (int32x2_t a, int32_t b)
+{
+ int64x2_t result;
+ __asm__ ("smull %0.2d,%1.2s,%2.s[0]"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmull_n_u16 (uint16x4_t a, uint16_t b)
+{
+ uint32x4_t result;
+ __asm__ ("umull %0.4s,%1.4h,%2.h[0]"
+ : "=w"(result)
+ : "w"(a), "x"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmull_n_u32 (uint32x2_t a, uint32_t b)
+{
+ uint64x2_t result;
+ __asm__ ("umull %0.2d,%1.2s,%2.s[0]"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vmull_p8 (poly8x8_t a, poly8x8_t b)
+{
+ poly16x8_t result;
+ __asm__ ("pmull %0.8h, %1.8b, %2.8b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmull_s8 (int8x8_t a, int8x8_t b)
+{
+ int16x8_t result;
+ __asm__ ("smull %0.8h, %1.8b, %2.8b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmull_s16 (int16x4_t a, int16x4_t b)
+{
+ int32x4_t result;
+ __asm__ ("smull %0.4s, %1.4h, %2.4h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmull_s32 (int32x2_t a, int32x2_t b)
+{
+ int64x2_t result;
+ __asm__ ("smull %0.2d, %1.2s, %2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmull_u8 (uint8x8_t a, uint8x8_t b)
+{
+ uint16x8_t result;
+ __asm__ ("umull %0.8h, %1.8b, %2.8b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmull_u16 (uint16x4_t a, uint16x4_t b)
+{
+ uint32x4_t result;
+ __asm__ ("umull %0.4s, %1.4h, %2.4h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmull_u32 (uint32x2_t a, uint32x2_t b)
+{
+ uint64x2_t result;
+ __asm__ ("umull %0.2d, %1.2s, %2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vmulq_n_f32 (float32x4_t a, float32_t b)
+{
+ float32x4_t result;
+ __asm__ ("fmul %0.4s,%1.4s,%2.s[0]"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vmulq_n_f64 (float64x2_t a, float64_t b)
+{
+ float64x2_t result;
+ __asm__ ("fmul %0.2d,%1.2d,%2.d[0]"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmulq_n_s16 (int16x8_t a, int16_t b)
+{
+ int16x8_t result;
+ __asm__ ("mul %0.8h,%1.8h,%2.h[0]"
+ : "=w"(result)
+ : "w"(a), "x"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmulq_n_s32 (int32x4_t a, int32_t b)
+{
+ int32x4_t result;
+ __asm__ ("mul %0.4s,%1.4s,%2.s[0]"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmulq_n_u16 (uint16x8_t a, uint16_t b)
+{
+ uint16x8_t result;
+ __asm__ ("mul %0.8h,%1.8h,%2.h[0]"
+ : "=w"(result)
+ : "w"(a), "x"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmulq_n_u32 (uint32x4_t a, uint32_t b)
+{
+ uint32x4_t result;
+ __asm__ ("mul %0.4s,%1.4s,%2.s[0]"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+#define vmuls_lane_f32(a, b, c) \
+ __extension__ \
+ ({ \
+ float32x4_t b_ = (b); \
+ float32_t a_ = (a); \
+ float32_t result; \
+ __asm__ ("fmul %s0,%s1,%2.s[%3]" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
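+/* FMULX behaves like FMUL except that multiplying infinity by zero
+   returns 2.0 with the appropriate sign instead of the default NaN.  */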
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmulx_f32 (float32x2_t a, float32x2_t b)
+{
+ float32x2_t result;
+ __asm__ ("fmulx %0.2s,%1.2s,%2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+#define vmulx_lane_f32(a, b, c) \
+ __extension__ \
+ ({ \
+ float32x4_t b_ = (b); \
+ float32x2_t a_ = (a); \
+ float32x2_t result; \
+ __asm__ ("fmulx %0.2s,%1.2s,%2.s[%3]" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vmulxd_f64 (float64_t a, float64_t b)
+{
+ float64_t result;
+ __asm__ ("fmulx %d0, %d1, %d2"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vmulxq_f32 (float32x4_t a, float32x4_t b)
+{
+ float32x4_t result;
+ __asm__ ("fmulx %0.4s,%1.4s,%2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vmulxq_f64 (float64x2_t a, float64x2_t b)
+{
+ float64x2_t result;
+ __asm__ ("fmulx %0.2d,%1.2d,%2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+#define vmulxq_lane_f32(a, b, c) \
+ __extension__ \
+ ({ \
+ float32x4_t b_ = (b); \
+ float32x4_t a_ = (a); \
+ float32x4_t result; \
+ __asm__ ("fmulx %0.4s,%1.4s,%2.s[%3]" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vmulxq_lane_f64(a, b, c) \
+ __extension__ \
+ ({ \
+ float64x2_t b_ = (b); \
+ float64x2_t a_ = (a); \
+ float64x2_t result; \
+ __asm__ ("fmulx %0.2d,%1.2d,%2.d[%3]" \
+ : "=w"(result) \
+ : "w"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vmulxs_f32 (float32_t a, float32_t b)
+{
+ float32_t result;
+ __asm__ ("fmulx %s0, %s1, %s2"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
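+/* Bitwise NOT (MVN); the .8b/.16b arrangement only selects the register
+   width, the operation itself is bitwise.  */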
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vmvn_p8 (poly8x8_t a)
+{
+ poly8x8_t result;
+ __asm__ ("mvn %0.8b,%1.8b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vmvn_s8 (int8x8_t a)
+{
+ int8x8_t result;
+ __asm__ ("mvn %0.8b,%1.8b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmvn_s16 (int16x4_t a)
+{
+ int16x4_t result;
+ __asm__ ("mvn %0.8b,%1.8b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmvn_s32 (int32x2_t a)
+{
+ int32x2_t result;
+ __asm__ ("mvn %0.8b,%1.8b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vmvn_u8 (uint8x8_t a)
+{
+ uint8x8_t result;
+ __asm__ ("mvn %0.8b,%1.8b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmvn_u16 (uint16x4_t a)
+{
+ uint16x4_t result;
+ __asm__ ("mvn %0.8b,%1.8b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmvn_u32 (uint32x2_t a)
+{
+ uint32x2_t result;
+ __asm__ ("mvn %0.8b,%1.8b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vmvnq_p8 (poly8x16_t a)
+{
+ poly8x16_t result;
+ __asm__ ("mvn %0.16b,%1.16b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vmvnq_s8 (int8x16_t a)
+{
+ int8x16_t result;
+ __asm__ ("mvn %0.16b,%1.16b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmvnq_s16 (int16x8_t a)
+{
+ int16x8_t result;
+ __asm__ ("mvn %0.16b,%1.16b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmvnq_s32 (int32x4_t a)
+{
+ int32x4_t result;
+ __asm__ ("mvn %0.16b,%1.16b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vmvnq_u8 (uint8x16_t a)
+{
+ uint8x16_t result;
+ __asm__ ("mvn %0.16b,%1.16b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmvnq_u16 (uint16x8_t a)
+{
+ uint16x8_t result;
+ __asm__ ("mvn %0.16b,%1.16b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmvnq_u32 (uint32x4_t a)
+{
+ uint32x4_t result;
+ __asm__ ("mvn %0.16b,%1.16b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+
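+/* Pairwise add-accumulate long: add adjacent element pairs of B, widened,
+   into the accumulator A (SADALP/UADALP).  */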
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vpadal_s8 (int16x4_t a, int8x8_t b)
+{
+ int16x4_t result;
+ __asm__ ("sadalp %0.4h,%2.8b"
+ : "=w"(result)
+ : "0"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vpadal_s16 (int32x2_t a, int16x4_t b)
+{
+ int32x2_t result;
+ __asm__ ("sadalp %0.2s,%2.4h"
+ : "=w"(result)
+ : "0"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vpadal_s32 (int64x1_t a, int32x2_t b)
+{
+ int64x1_t result;
+ __asm__ ("sadalp %0.1d,%2.2s"
+ : "=w"(result)
+ : "0"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vpadal_u8 (uint16x4_t a, uint8x8_t b)
+{
+ uint16x4_t result;
+ __asm__ ("uadalp %0.4h,%2.8b"
+ : "=w"(result)
+ : "0"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vpadal_u16 (uint32x2_t a, uint16x4_t b)
+{
+ uint32x2_t result;
+ __asm__ ("uadalp %0.2s,%2.4h"
+ : "=w"(result)
+ : "0"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vpadal_u32 (uint64x1_t a, uint32x2_t b)
+{
+ uint64x1_t result;
+ __asm__ ("uadalp %0.1d,%2.2s"
+ : "=w"(result)
+ : "0"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vpadalq_s8 (int16x8_t a, int8x16_t b)
+{
+ int16x8_t result;
+ __asm__ ("sadalp %0.8h,%2.16b"
+ : "=w"(result)
+ : "0"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vpadalq_s16 (int32x4_t a, int16x8_t b)
+{
+ int32x4_t result;
+ __asm__ ("sadalp %0.4s,%2.8h"
+ : "=w"(result)
+ : "0"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vpadalq_s32 (int64x2_t a, int32x4_t b)
+{
+ int64x2_t result;
+ __asm__ ("sadalp %0.2d,%2.4s"
+ : "=w"(result)
+ : "0"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vpadalq_u8 (uint16x8_t a, uint8x16_t b)
+{
+ uint16x8_t result;
+ __asm__ ("uadalp %0.8h,%2.16b"
+ : "=w"(result)
+ : "0"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vpadalq_u16 (uint32x4_t a, uint16x8_t b)
+{
+ uint32x4_t result;
+ __asm__ ("uadalp %0.4s,%2.8h"
+ : "=w"(result)
+ : "0"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vpadalq_u32 (uint64x2_t a, uint32x4_t b)
+{
+ uint64x2_t result;
+ __asm__ ("uadalp %0.2d,%2.4s"
+ : "=w"(result)
+ : "0"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
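+/* Pairwise add of adjacent elements (FADDP/ADDP).  */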
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vpadd_f32 (float32x2_t a, float32x2_t b)
+{
+ float32x2_t result;
+ __asm__ ("faddp %0.2s,%1.2s,%2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
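+/* These integer forms use the compiler's addp builtins directly rather
+   than inline asm, so they remain visible to the optimizers.  */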
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vpadd_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return __builtin_aarch64_addpv8qi (__a, __b);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vpadd_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return __builtin_aarch64_addpv4hi (__a, __b);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vpadd_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return __builtin_aarch64_addpv2si (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vpadd_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t) __builtin_aarch64_addpv8qi ((int8x8_t) __a,
+ (int8x8_t) __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vpadd_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t) __builtin_aarch64_addpv4hi ((int16x4_t) __a,
+ (int16x4_t) __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vpadd_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t) __builtin_aarch64_addpv2si ((int32x2_t) __a,
+ (int32x2_t) __b);
+}
+
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vpaddd_f64 (float64x2_t a)
+{
+ float64_t result;
+ __asm__ ("faddp %d0,%1.2d"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
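+/* Pairwise add long: add adjacent element pairs, widening the result
+   (SADDLP/UADDLP).  */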
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vpaddl_s8 (int8x8_t a)
+{
+ int16x4_t result;
+ __asm__ ("saddlp %0.4h,%1.8b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vpaddl_s16 (int16x4_t a)
+{
+ int32x2_t result;
+ __asm__ ("saddlp %0.2s,%1.4h"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vpaddl_s32 (int32x2_t a)
+{
+ int64x1_t result;
+ __asm__ ("saddlp %0.1d,%1.2s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vpaddl_u8 (uint8x8_t a)
+{
+ uint16x4_t result;
+ __asm__ ("uaddlp %0.4h,%1.8b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vpaddl_u16 (uint16x4_t a)
+{
+ uint32x2_t result;
+ __asm__ ("uaddlp %0.2s,%1.4h"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vpaddl_u32 (uint32x2_t a)
+{
+ uint64x1_t result;
+ __asm__ ("uaddlp %0.1d,%1.2s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vpaddlq_s8 (int8x16_t a)
+{
+ int16x8_t result;
+ __asm__ ("saddlp %0.8h,%1.16b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vpaddlq_s16 (int16x8_t a)
+{
+ int32x4_t result;
+ __asm__ ("saddlp %0.4s,%1.8h"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vpaddlq_s32 (int32x4_t a)
+{
+ int64x2_t result;
+ __asm__ ("saddlp %0.2d,%1.4s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vpaddlq_u8 (uint8x16_t a)
+{
+ uint16x8_t result;
+ __asm__ ("uaddlp %0.8h,%1.16b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vpaddlq_u16 (uint16x8_t a)
+{
+ uint32x4_t result;
+ __asm__ ("uaddlp %0.4s,%1.8h"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vpaddlq_u32 (uint32x4_t a)
+{
+ uint64x2_t result;
+ __asm__ ("uaddlp %0.2d,%1.4s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vpaddq_f32 (float32x4_t a, float32x4_t b)
+{
+ float32x4_t result;
+ __asm__ ("faddp %0.4s,%1.4s,%2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vpaddq_f64 (float64x2_t a, float64x2_t b)
+{
+ float64x2_t result;
+ __asm__ ("faddp %0.2d,%1.2d,%2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vpaddq_s8 (int8x16_t a, int8x16_t b)
+{
+ int8x16_t result;
+ __asm__ ("addp %0.16b,%1.16b,%2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vpaddq_s16 (int16x8_t a, int16x8_t b)
+{
+ int16x8_t result;
+ __asm__ ("addp %0.8h,%1.8h,%2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vpaddq_s32 (int32x4_t a, int32x4_t b)
+{
+ int32x4_t result;
+ __asm__ ("addp %0.4s,%1.4s,%2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vpaddq_s64 (int64x2_t a, int64x2_t b)
+{
+ int64x2_t result;
+ __asm__ ("addp %0.2d,%1.2d,%2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vpaddq_u8 (uint8x16_t a, uint8x16_t b)
+{
+ uint8x16_t result;
+ __asm__ ("addp %0.16b,%1.16b,%2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vpaddq_u16 (uint16x8_t a, uint16x8_t b)
+{
+ uint16x8_t result;
+ __asm__ ("addp %0.8h,%1.8h,%2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vpaddq_u32 (uint32x4_t a, uint32x4_t b)
+{
+ uint32x4_t result;
+ __asm__ ("addp %0.4s,%1.4s,%2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vpaddq_u64 (uint64x2_t a, uint64x2_t b)
+{
+ uint64x2_t result;
+ __asm__ ("addp %0.2d,%1.2d,%2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vpadds_f32 (float32x2_t a)
+{
+ float32_t result;
+ __asm__ ("faddp %s0,%1.2s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
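+/* Pairwise maximum/minimum.  The *nm variants use FMAXNMP/FMINNMP, which
+   follow the IEEE 754-2008 maxNum/minNum rules when one operand is a
+   quiet NaN.  */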
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vpmax_f32 (float32x2_t a, float32x2_t b)
+{
+ float32x2_t result;
+ __asm__ ("fmaxp %0.2s, %1.2s, %2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vpmax_s8 (int8x8_t a, int8x8_t b)
+{
+ int8x8_t result;
+ __asm__ ("smaxp %0.8b, %1.8b, %2.8b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vpmax_s16 (int16x4_t a, int16x4_t b)
+{
+ int16x4_t result;
+ __asm__ ("smaxp %0.4h, %1.4h, %2.4h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vpmax_s32 (int32x2_t a, int32x2_t b)
+{
+ int32x2_t result;
+ __asm__ ("smaxp %0.2s, %1.2s, %2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vpmax_u8 (uint8x8_t a, uint8x8_t b)
+{
+ uint8x8_t result;
+ __asm__ ("umaxp %0.8b, %1.8b, %2.8b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vpmax_u16 (uint16x4_t a, uint16x4_t b)
+{
+ uint16x4_t result;
+ __asm__ ("umaxp %0.4h, %1.4h, %2.4h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vpmax_u32 (uint32x2_t a, uint32x2_t b)
+{
+ uint32x2_t result;
+ __asm__ ("umaxp %0.2s, %1.2s, %2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vpmaxnm_f32 (float32x2_t a, float32x2_t b)
+{
+ float32x2_t result;
+ __asm__ ("fmaxnmp %0.2s,%1.2s,%2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vpmaxnmq_f32 (float32x4_t a, float32x4_t b)
+{
+ float32x4_t result;
+ __asm__ ("fmaxnmp %0.4s,%1.4s,%2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vpmaxnmq_f64 (float64x2_t a, float64x2_t b)
+{
+ float64x2_t result;
+ __asm__ ("fmaxnmp %0.2d,%1.2d,%2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vpmaxnmqd_f64 (float64x2_t a)
+{
+ float64_t result;
+ __asm__ ("fmaxnmp %d0,%1.2d"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vpmaxnms_f32 (float32x2_t a)
+{
+ float32_t result;
+ __asm__ ("fmaxnmp %s0,%1.2s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vpmaxq_f32 (float32x4_t a, float32x4_t b)
+{
+ float32x4_t result;
+ __asm__ ("fmaxp %0.4s, %1.4s, %2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vpmaxq_f64 (float64x2_t a, float64x2_t b)
+{
+ float64x2_t result;
+ __asm__ ("fmaxp %0.2d, %1.2d, %2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vpmaxq_s8 (int8x16_t a, int8x16_t b)
+{
+ int8x16_t result;
+ __asm__ ("smaxp %0.16b, %1.16b, %2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vpmaxq_s16 (int16x8_t a, int16x8_t b)
+{
+ int16x8_t result;
+ __asm__ ("smaxp %0.8h, %1.8h, %2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vpmaxq_s32 (int32x4_t a, int32x4_t b)
+{
+ int32x4_t result;
+ __asm__ ("smaxp %0.4s, %1.4s, %2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vpmaxq_u8 (uint8x16_t a, uint8x16_t b)
+{
+ uint8x16_t result;
+ __asm__ ("umaxp %0.16b, %1.16b, %2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vpmaxq_u16 (uint16x8_t a, uint16x8_t b)
+{
+ uint16x8_t result;
+ __asm__ ("umaxp %0.8h, %1.8h, %2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vpmaxq_u32 (uint32x4_t a, uint32x4_t b)
+{
+ uint32x4_t result;
+ __asm__ ("umaxp %0.4s, %1.4s, %2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vpmaxqd_f64 (float64x2_t a)
+{
+ float64_t result;
+ __asm__ ("fmaxp %d0,%1.2d"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vpmaxs_f32 (float32x2_t a)
+{
+ float32_t result;
+ __asm__ ("fmaxp %s0,%1.2s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vpmin_f32 (float32x2_t a, float32x2_t b)
+{
+ float32x2_t result;
+ __asm__ ("fminp %0.2s, %1.2s, %2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vpmin_s8 (int8x8_t a, int8x8_t b)
+{
+ int8x8_t result;
+ __asm__ ("sminp %0.8b, %1.8b, %2.8b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vpmin_s16 (int16x4_t a, int16x4_t b)
+{
+ int16x4_t result;
+ __asm__ ("sminp %0.4h, %1.4h, %2.4h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vpmin_s32 (int32x2_t a, int32x2_t b)
+{
+ int32x2_t result;
+ __asm__ ("sminp %0.2s, %1.2s, %2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vpmin_u8 (uint8x8_t a, uint8x8_t b)
+{
+ uint8x8_t result;
+ __asm__ ("uminp %0.8b, %1.8b, %2.8b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vpmin_u16 (uint16x4_t a, uint16x4_t b)
+{
+ uint16x4_t result;
+ __asm__ ("uminp %0.4h, %1.4h, %2.4h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vpmin_u32 (uint32x2_t a, uint32x2_t b)
+{
+ uint32x2_t result;
+ __asm__ ("uminp %0.2s, %1.2s, %2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vpminnm_f32 (float32x2_t a, float32x2_t b)
+{
+ float32x2_t result;
+ __asm__ ("fminnmp %0.2s,%1.2s,%2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vpminnmq_f32 (float32x4_t a, float32x4_t b)
+{
+ float32x4_t result;
+ __asm__ ("fminnmp %0.4s,%1.4s,%2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vpminnmq_f64 (float64x2_t a, float64x2_t b)
+{
+ float64x2_t result;
+ __asm__ ("fminnmp %0.2d,%1.2d,%2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vpminnmqd_f64 (float64x2_t a)
+{
+ float64_t result;
+ __asm__ ("fminnmp %d0,%1.2d"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vpminnms_f32 (float32x2_t a)
+{
+ float32_t result;
+ __asm__ ("fminnmp %s0,%1.2s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vpminq_f32 (float32x4_t a, float32x4_t b)
+{
+ float32x4_t result;
+ __asm__ ("fminp %0.4s, %1.4s, %2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vpminq_f64 (float64x2_t a, float64x2_t b)
+{
+ float64x2_t result;
+ __asm__ ("fminp %0.2d, %1.2d, %2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vpminq_s8 (int8x16_t a, int8x16_t b)
+{
+ int8x16_t result;
+ __asm__ ("sminp %0.16b, %1.16b, %2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vpminq_s16 (int16x8_t a, int16x8_t b)
+{
+ int16x8_t result;
+ __asm__ ("sminp %0.8h, %1.8h, %2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vpminq_s32 (int32x4_t a, int32x4_t b)
+{
+ int32x4_t result;
+ __asm__ ("sminp %0.4s, %1.4s, %2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vpminq_u8 (uint8x16_t a, uint8x16_t b)
+{
+ uint8x16_t result;
+ __asm__ ("uminp %0.16b, %1.16b, %2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vpminq_u16 (uint16x8_t a, uint16x8_t b)
+{
+ uint16x8_t result;
+ __asm__ ("uminp %0.8h, %1.8h, %2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vpminq_u32 (uint32x4_t a, uint32x4_t b)
+{
+ uint32x4_t result;
+ __asm__ ("uminp %0.4s, %1.4s, %2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vpminqd_f64 (float64x2_t a)
+{
+ float64_t result;
+ __asm__ ("fminp %d0,%1.2d"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vpmins_f32 (float32x2_t a)
+{
+ float32_t result;
+ __asm__ ("fminp %s0,%1.2s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
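+/* Saturating doubling multiply returning the high half (SQDMULH), with
+   the scalar broadcast via the by-element form; the 16-bit forms need the
+   "x" constraint (v0-v15) on the indexed operand.  */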
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqdmulh_n_s16 (int16x4_t a, int16_t b)
+{
+ int16x4_t result;
+ __asm__ ("sqdmulh %0.4h,%1.4h,%2.h[0]"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqdmulh_n_s32 (int32x2_t a, int32_t b)
+{
+ int32x2_t result;
+ __asm__ ("sqdmulh %0.2s,%1.2s,%2.s[0]"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqdmulhq_n_s16 (int16x8_t a, int16_t b)
+{
+ int16x8_t result;
+ __asm__ ("sqdmulh %0.8h,%1.8h,%2.h[0]"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmulhq_n_s32 (int32x4_t a, int32_t b)
+{
+ int32x4_t result;
+ __asm__ ("sqdmulh %0.4s,%1.4s,%2.s[0]"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
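+/* Saturating narrow into the upper half of the result; the lower half is
+   taken from A (SQXTN2/UQXTN2/SQXTUN2).  */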
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vqmovn_high_s16 (int8x8_t a, int16x8_t b)
+{
+ int8x16_t result = vcombine_s8 (a, vcreate_s8 (__AARCH64_UINT64_C (0x0)));
+ __asm__ ("sqxtn2 %0.16b, %1.8h"
+ : "+w"(result)
+ : "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqmovn_high_s32 (int16x4_t a, int32x4_t b)
+{
+ int16x8_t result = vcombine_s16 (a, vcreate_s16 (__AARCH64_UINT64_C (0x0)));
+ __asm__ ("sqxtn2 %0.8h, %1.4s"
+ : "+w"(result)
+ : "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqmovn_high_s64 (int32x2_t a, int64x2_t b)
+{
+ int32x4_t result = vcombine_s32 (a, vcreate_s32 (__AARCH64_UINT64_C (0x0)));
+ __asm__ ("sqxtn2 %0.4s, %1.2d"
+ : "+w"(result)
+ : "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vqmovn_high_u16 (uint8x8_t a, uint16x8_t b)
+{
+ uint8x16_t result = vcombine_u8 (a, vcreate_u8 (__AARCH64_UINT64_C (0x0)));
+ __asm__ ("uqxtn2 %0.16b, %1.8h"
+ : "+w"(result)
+ : "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vqmovn_high_u32 (uint16x4_t a, uint32x4_t b)
+{
+ uint16x8_t result = vcombine_u16 (a, vcreate_u16 (__AARCH64_UINT64_C (0x0)));
+ __asm__ ("uqxtn2 %0.8h, %1.4s"
+ : "+w"(result)
+ : "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vqmovn_high_u64 (uint32x2_t a, uint64x2_t b)
+{
+ uint32x4_t result = vcombine_u32 (a, vcreate_u32 (__AARCH64_UINT64_C (0x0)));
+ __asm__ ("uqxtn2 %0.4s, %1.2d"
+ : "+w"(result)
+ : "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vqmovun_high_s16 (uint8x8_t a, int16x8_t b)
+{
+ uint8x16_t result = vcombine_u8 (a, vcreate_u8 (__AARCH64_UINT64_C (0x0)));
+ __asm__ ("sqxtun2 %0.16b, %1.8h"
+ : "+w"(result)
+ : "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vqmovun_high_s32 (uint16x4_t a, int32x4_t b)
+{
+ uint16x8_t result = vcombine_u16 (a, vcreate_u16 (__AARCH64_UINT64_C (0x0)));
+ __asm__ ("sqxtun2 %0.8h, %1.4s"
+ : "+w"(result)
+ : "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vqmovun_high_s64 (uint32x2_t a, int64x2_t b)
+{
+ uint32x4_t result = vcombine_u32 (a, vcreate_u32 (__AARCH64_UINT64_C (0x0)));
+ __asm__ ("sqxtun2 %0.4s, %1.2d"
+ : "+w"(result)
+ : "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqrdmulh_n_s16 (int16x4_t a, int16_t b)
+{
+ int16x4_t result;
+ __asm__ ("sqrdmulh %0.4h,%1.4h,%2.h[0]"
+ : "=w"(result)
+ : "w"(a), "x"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqrdmulh_n_s32 (int32x2_t a, int32_t b)
+{
+ int32x2_t result;
+ __asm__ ("sqrdmulh %0.2s,%1.2s,%2.s[0]"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqrdmulhq_n_s16 (int16x8_t a, int16_t b)
+{
+ int16x8_t result;
+ __asm__ ("sqrdmulh %0.8h,%1.8h,%2.h[0]"
+ : "=w"(result)
+ : "w"(a), "x"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqrdmulhq_n_s32 (int32x4_t a, int32_t b)
+{
+ int32x4_t result;
+ __asm__ ("sqrdmulh %0.4s,%1.4s,%2.s[0]"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
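+/* Saturating shift right and narrow (optionally rounding and/or with
+   unsigned saturation) into the upper half of the result.  The shift
+   amount must be a compile-time constant, hence these are macros.  */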
+#define vqrshrn_high_n_s16(a, b, c) \
+ __extension__ \
+ ({ \
+ int16x8_t b_ = (b); \
+ int8x8_t a_ = (a); \
+ int8x16_t result = vcombine_s8 \
+ (a_, vcreate_s8 \
+ (__AARCH64_UINT64_C (0x0))); \
+ __asm__ ("sqrshrn2 %0.16b, %1.8h, #%2" \
+ : "+w"(result) \
+ : "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vqrshrn_high_n_s32(a, b, c) \
+ __extension__ \
+ ({ \
+ int32x4_t b_ = (b); \
+ int16x4_t a_ = (a); \
+ int16x8_t result = vcombine_s16 \
+ (a_, vcreate_s16 \
+ (__AARCH64_UINT64_C (0x0))); \
+ __asm__ ("sqrshrn2 %0.8h, %1.4s, #%2" \
+ : "+w"(result) \
+ : "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vqrshrn_high_n_s64(a, b, c) \
+ __extension__ \
+ ({ \
+ int64x2_t b_ = (b); \
+ int32x2_t a_ = (a); \
+ int32x4_t result = vcombine_s32 \
+ (a_, vcreate_s32 \
+ (__AARCH64_UINT64_C (0x0))); \
+ __asm__ ("sqrshrn2 %0.4s, %1.2d, #%2" \
+ : "+w"(result) \
+ : "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vqrshrn_high_n_u16(a, b, c) \
+ __extension__ \
+ ({ \
+ uint16x8_t b_ = (b); \
+ uint8x8_t a_ = (a); \
+ uint8x16_t result = vcombine_u8 \
+ (a_, vcreate_u8 \
+ (__AARCH64_UINT64_C (0x0))); \
+ __asm__ ("uqrshrn2 %0.16b, %1.8h, #%2" \
+ : "+w"(result) \
+ : "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vqrshrn_high_n_u32(a, b, c) \
+ __extension__ \
+ ({ \
+ uint32x4_t b_ = (b); \
+ uint16x4_t a_ = (a); \
+ uint16x8_t result = vcombine_u16 \
+ (a_, vcreate_u16 \
+ (__AARCH64_UINT64_C (0x0))); \
+ __asm__ ("uqrshrn2 %0.8h, %1.4s, #%2" \
+ : "+w"(result) \
+ : "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vqrshrn_high_n_u64(a, b, c) \
+ __extension__ \
+ ({ \
+ uint64x2_t b_ = (b); \
+ uint32x2_t a_ = (a); \
+ uint32x4_t result = vcombine_u32 \
+ (a_, vcreate_u32 \
+ (__AARCH64_UINT64_C (0x0))); \
+ __asm__ ("uqrshrn2 %0.4s, %1.2d, #%2" \
+ : "+w"(result) \
+ : "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vqrshrun_high_n_s16(a, b, c) \
+ __extension__ \
+ ({ \
+ int16x8_t b_ = (b); \
+ uint8x8_t a_ = (a); \
+ uint8x16_t result = vcombine_u8 \
+ (a_, vcreate_u8 \
+ (__AARCH64_UINT64_C (0x0))); \
+ __asm__ ("sqrshrun2 %0.16b, %1.8h, #%2" \
+ : "+w"(result) \
+ : "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vqrshrun_high_n_s32(a, b, c) \
+ __extension__ \
+ ({ \
+ int32x4_t b_ = (b); \
+ uint16x4_t a_ = (a); \
+ uint16x8_t result = vcombine_u16 \
+ (a_, vcreate_u16 \
+ (__AARCH64_UINT64_C (0x0))); \
+ __asm__ ("sqrshrun2 %0.8h, %1.4s, #%2" \
+ : "+w"(result) \
+ : "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vqrshrun_high_n_s64(a, b, c) \
+ __extension__ \
+ ({ \
+ int64x2_t b_ = (b); \
+ uint32x2_t a_ = (a); \
+ uint32x4_t result = vcombine_u32 \
+ (a_, vcreate_u32 \
+ (__AARCH64_UINT64_C (0x0))); \
+ __asm__ ("sqrshrun2 %0.4s, %1.2d, #%2" \
+ : "+w"(result) \
+ : "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vqshrn_high_n_s16(a, b, c) \
+ __extension__ \
+ ({ \
+ int16x8_t b_ = (b); \
+ int8x8_t a_ = (a); \
+ int8x16_t result = vcombine_s8 \
+ (a_, vcreate_s8 \
+ (__AARCH64_UINT64_C (0x0))); \
+ __asm__ ("sqshrn2 %0.16b, %1.8h, #%2" \
+ : "+w"(result) \
+ : "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vqshrn_high_n_s32(a, b, c) \
+ __extension__ \
+ ({ \
+ int32x4_t b_ = (b); \
+ int16x4_t a_ = (a); \
+ int16x8_t result = vcombine_s16 \
+ (a_, vcreate_s16 \
+ (__AARCH64_UINT64_C (0x0))); \
+ __asm__ ("sqshrn2 %0.8h, %1.4s, #%2" \
+ : "+w"(result) \
+ : "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vqshrn_high_n_s64(a, b, c) \
+ __extension__ \
+ ({ \
+ int64x2_t b_ = (b); \
+ int32x2_t a_ = (a); \
+ int32x4_t result = vcombine_s32 \
+ (a_, vcreate_s32 \
+ (__AARCH64_UINT64_C (0x0))); \
+ __asm__ ("sqshrn2 %0.4s, %1.2d, #%2" \
+ : "+w"(result) \
+ : "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vqshrn_high_n_u16(a, b, c) \
+ __extension__ \
+ ({ \
+ uint16x8_t b_ = (b); \
+ uint8x8_t a_ = (a); \
+ uint8x16_t result = vcombine_u8 \
+ (a_, vcreate_u8 \
+ (__AARCH64_UINT64_C (0x0))); \
+ __asm__ ("uqshrn2 %0.16b, %1.8h, #%2" \
+ : "+w"(result) \
+ : "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vqshrn_high_n_u32(a, b, c) \
+ __extension__ \
+ ({ \
+ uint32x4_t b_ = (b); \
+ uint16x4_t a_ = (a); \
+ uint16x8_t result = vcombine_u16 \
+ (a_, vcreate_u16 \
+ (__AARCH64_UINT64_C (0x0))); \
+ __asm__ ("uqshrn2 %0.8h, %1.4s, #%2" \
+ : "+w"(result) \
+ : "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vqshrn_high_n_u64(a, b, c) \
+ __extension__ \
+ ({ \
+ uint64x2_t b_ = (b); \
+ uint32x2_t a_ = (a); \
+ uint32x4_t result = vcombine_u32 \
+ (a_, vcreate_u32 \
+ (__AARCH64_UINT64_C (0x0))); \
+ __asm__ ("uqshrn2 %0.4s, %1.2d, #%2" \
+ : "+w"(result) \
+ : "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vqshrun_high_n_s16(a, b, c) \
+ __extension__ \
+ ({ \
+ int16x8_t b_ = (b); \
+ uint8x8_t a_ = (a); \
+ uint8x16_t result = vcombine_u8 \
+ (a_, vcreate_u8 \
+ (__AARCH64_UINT64_C (0x0))); \
+ __asm__ ("sqshrun2 %0.16b, %1.8h, #%2" \
+ : "+w"(result) \
+ : "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vqshrun_high_n_s32(a, b, c) \
+ __extension__ \
+ ({ \
+ int32x4_t b_ = (b); \
+ uint16x4_t a_ = (a); \
+ uint16x8_t result = vcombine_u16 \
+ (a_, vcreate_u16 \
+ (__AARCH64_UINT64_C (0x0))); \
+ __asm__ ("sqshrun2 %0.8h, %1.4s, #%2" \
+ : "+w"(result) \
+ : "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vqshrun_high_n_s64(a, b, c) \
+ __extension__ \
+ ({ \
+ int64x2_t b_ = (b); \
+ uint32x2_t a_ = (a); \
+ uint32x4_t result = vcombine_u32 \
+ (a_, vcreate_u32 \
+ (__AARCH64_UINT64_C (0x0))); \
+ __asm__ ("sqshrun2 %0.4s, %1.2d, #%2" \
+ : "+w"(result) \
+ : "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
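For reference, a minimal usage sketch of the saturating shift-right-narrow-high pattern defined above: the low half is produced with the plain narrowing form and the high half with the *_high_n form, packing two int16x8_t values into one int8x16_t. The helper name pack_q4_s16_to_s8 and the shift count of 4 are illustrative, not part of the header.

    #include <arm_neon.h>

    /* Illustrative helper: saturate, shift right by 4 and narrow two
       int16x8_t vectors into a single int8x16_t.  */
    static int8x16_t
    pack_q4_s16_to_s8 (int16x8_t lo, int16x8_t hi)
    {
      int8x8_t low_half = vqshrn_n_s16 (lo, 4);     /* sqshrn  -> lanes 0-7   */
      return vqshrn_high_n_s16 (low_half, hi, 4);   /* sqshrn2 -> lanes 8-15  */
    }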
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vrbit_s8 (int8x8_t a)
+{
+ int8x8_t result;
+ __asm__ ("rbit %0.8b,%1.8b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vrbit_u8 (uint8x8_t a)
+{
+ uint8x8_t result;
+ __asm__ ("rbit %0.8b,%1.8b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vrbitq_s8 (int8x16_t a)
+{
+ int8x16_t result;
+ __asm__ ("rbit %0.16b,%1.16b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vrbitq_u8 (uint8x16_t a)
+{
+ uint8x16_t result;
+ __asm__ ("rbit %0.16b,%1.16b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vrecpe_u32 (uint32x2_t a)
+{
+ uint32x2_t result;
+ __asm__ ("urecpe %0.2s,%1.2s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vrecpeq_u32 (uint32x4_t a)
+{
+ uint32x4_t result;
+ __asm__ ("urecpe %0.4s,%1.4s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vrev16_p8 (poly8x8_t a)
+{
+ poly8x8_t result;
+ __asm__ ("rev16 %0.8b,%1.8b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vrev16_s8 (int8x8_t a)
+{
+ int8x8_t result;
+ __asm__ ("rev16 %0.8b,%1.8b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vrev16_u8 (uint8x8_t a)
+{
+ uint8x8_t result;
+ __asm__ ("rev16 %0.8b,%1.8b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vrev16q_p8 (poly8x16_t a)
+{
+ poly8x16_t result;
+ __asm__ ("rev16 %0.16b,%1.16b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vrev16q_s8 (int8x16_t a)
+{
+ int8x16_t result;
+ __asm__ ("rev16 %0.16b,%1.16b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vrev16q_u8 (uint8x16_t a)
+{
+ uint8x16_t result;
+ __asm__ ("rev16 %0.16b,%1.16b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vrev32_p8 (poly8x8_t a)
+{
+ poly8x8_t result;
+ __asm__ ("rev32 %0.8b,%1.8b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vrev32_p16 (poly16x4_t a)
+{
+ poly16x4_t result;
+ __asm__ ("rev32 %0.4h,%1.4h"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vrev32_s8 (int8x8_t a)
+{
+ int8x8_t result;
+ __asm__ ("rev32 %0.8b,%1.8b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vrev32_s16 (int16x4_t a)
+{
+ int16x4_t result;
+ __asm__ ("rev32 %0.4h,%1.4h"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vrev32_u8 (uint8x8_t a)
+{
+ uint8x8_t result;
+ __asm__ ("rev32 %0.8b,%1.8b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vrev32_u16 (uint16x4_t a)
+{
+ uint16x4_t result;
+ __asm__ ("rev32 %0.4h,%1.4h"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vrev32q_p8 (poly8x16_t a)
+{
+ poly8x16_t result;
+ __asm__ ("rev32 %0.16b,%1.16b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vrev32q_p16 (poly16x8_t a)
+{
+ poly16x8_t result;
+ __asm__ ("rev32 %0.8h,%1.8h"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vrev32q_s8 (int8x16_t a)
+{
+ int8x16_t result;
+ __asm__ ("rev32 %0.16b,%1.16b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vrev32q_s16 (int16x8_t a)
+{
+ int16x8_t result;
+ __asm__ ("rev32 %0.8h,%1.8h"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vrev32q_u8 (uint8x16_t a)
+{
+ uint8x16_t result;
+ __asm__ ("rev32 %0.16b,%1.16b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vrev32q_u16 (uint16x8_t a)
+{
+ uint16x8_t result;
+ __asm__ ("rev32 %0.8h,%1.8h"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vrev64_f32 (float32x2_t a)
+{
+ float32x2_t result;
+ __asm__ ("rev64 %0.2s,%1.2s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vrev64_p8 (poly8x8_t a)
+{
+ poly8x8_t result;
+ __asm__ ("rev64 %0.8b,%1.8b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vrev64_p16 (poly16x4_t a)
+{
+ poly16x4_t result;
+ __asm__ ("rev64 %0.4h,%1.4h"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vrev64_s8 (int8x8_t a)
+{
+ int8x8_t result;
+ __asm__ ("rev64 %0.8b,%1.8b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vrev64_s16 (int16x4_t a)
+{
+ int16x4_t result;
+ __asm__ ("rev64 %0.4h,%1.4h"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vrev64_s32 (int32x2_t a)
+{
+ int32x2_t result;
+ __asm__ ("rev64 %0.2s,%1.2s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vrev64_u8 (uint8x8_t a)
+{
+ uint8x8_t result;
+ __asm__ ("rev64 %0.8b,%1.8b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vrev64_u16 (uint16x4_t a)
+{
+ uint16x4_t result;
+ __asm__ ("rev64 %0.4h,%1.4h"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vrev64_u32 (uint32x2_t a)
+{
+ uint32x2_t result;
+ __asm__ ("rev64 %0.2s,%1.2s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vrev64q_f32 (float32x4_t a)
+{
+ float32x4_t result;
+ __asm__ ("rev64 %0.4s,%1.4s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vrev64q_p8 (poly8x16_t a)
+{
+ poly8x16_t result;
+ __asm__ ("rev64 %0.16b,%1.16b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vrev64q_p16 (poly16x8_t a)
+{
+ poly16x8_t result;
+ __asm__ ("rev64 %0.8h,%1.8h"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vrev64q_s8 (int8x16_t a)
+{
+ int8x16_t result;
+ __asm__ ("rev64 %0.16b,%1.16b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vrev64q_s16 (int16x8_t a)
+{
+ int16x8_t result;
+ __asm__ ("rev64 %0.8h,%1.8h"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vrev64q_s32 (int32x4_t a)
+{
+ int32x4_t result;
+ __asm__ ("rev64 %0.4s,%1.4s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vrev64q_u8 (uint8x16_t a)
+{
+ uint8x16_t result;
+ __asm__ ("rev64 %0.16b,%1.16b"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vrev64q_u16 (uint16x8_t a)
+{
+ uint16x8_t result;
+ __asm__ ("rev64 %0.8h,%1.8h"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vrev64q_u32 (uint32x4_t a)
+{
+ uint32x4_t result;
+ __asm__ ("rev64 %0.4s,%1.4s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
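For reference, a short usage sketch of the element-reversal intrinsics above: vrev32q_u8 reverses the bytes inside every 32-bit word, which is the usual bulk endianness swap for packed 32-bit data. The function name bswap32x4 is illustrative.

    /* Illustrative: byte-swap each of the four 32-bit words held in a
       16-byte vector (32-bit endianness conversion).  */
    static uint8x16_t
    bswap32x4 (uint8x16_t bytes)
    {
      return vrev32q_u8 (bytes);   /* rev32: reverse bytes within each word */
    }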
+#define vrshrn_high_n_s16(a, b, c) \
+ __extension__ \
+ ({ \
+ int16x8_t b_ = (b); \
+ int8x8_t a_ = (a); \
+ int8x16_t result = vcombine_s8 \
+ (a_, vcreate_s8 \
+ (__AARCH64_UINT64_C (0x0))); \
+ __asm__ ("rshrn2 %0.16b,%1.8h,#%2" \
+ : "+w"(result) \
+ : "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vrshrn_high_n_s32(a, b, c) \
+ __extension__ \
+ ({ \
+ int32x4_t b_ = (b); \
+ int16x4_t a_ = (a); \
+ int16x8_t result = vcombine_s16 \
+ (a_, vcreate_s16 \
+ (__AARCH64_UINT64_C (0x0))); \
+ __asm__ ("rshrn2 %0.8h,%1.4s,#%2" \
+ : "+w"(result) \
+ : "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vrshrn_high_n_s64(a, b, c) \
+ __extension__ \
+ ({ \
+ int64x2_t b_ = (b); \
+ int32x2_t a_ = (a); \
+ int32x4_t result = vcombine_s32 \
+ (a_, vcreate_s32 \
+ (__AARCH64_UINT64_C (0x0))); \
+ __asm__ ("rshrn2 %0.4s,%1.2d,#%2" \
+ : "+w"(result) \
+ : "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vrshrn_high_n_u16(a, b, c) \
+ __extension__ \
+ ({ \
+ uint16x8_t b_ = (b); \
+ uint8x8_t a_ = (a); \
+ uint8x16_t result = vcombine_u8 \
+ (a_, vcreate_u8 \
+ (__AARCH64_UINT64_C (0x0))); \
+ __asm__ ("rshrn2 %0.16b,%1.8h,#%2" \
+ : "+w"(result) \
+ : "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vrshrn_high_n_u32(a, b, c) \
+ __extension__ \
+ ({ \
+ uint32x4_t b_ = (b); \
+ uint16x4_t a_ = (a); \
+ uint16x8_t result = vcombine_u16 \
+ (a_, vcreate_u16 \
+ (__AARCH64_UINT64_C (0x0))); \
+ __asm__ ("rshrn2 %0.8h,%1.4s,#%2" \
+ : "+w"(result) \
+ : "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vrshrn_high_n_u64(a, b, c) \
+ __extension__ \
+ ({ \
+ uint64x2_t b_ = (b); \
+ uint32x2_t a_ = (a); \
+ uint32x4_t result = vcombine_u32 \
+ (a_, vcreate_u32 \
+ (__AARCH64_UINT64_C (0x0))); \
+ __asm__ ("rshrn2 %0.4s,%1.2d,#%2" \
+ : "+w"(result) \
+ : "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vrshrn_n_s16(a, b) \
+ __extension__ \
+ ({ \
+ int16x8_t a_ = (a); \
+ int8x8_t result; \
+ __asm__ ("rshrn %0.8b,%1.8h,%2" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vrshrn_n_s32(a, b) \
+ __extension__ \
+ ({ \
+ int32x4_t a_ = (a); \
+ int16x4_t result; \
+ __asm__ ("rshrn %0.4h,%1.4s,%2" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vrshrn_n_s64(a, b) \
+ __extension__ \
+ ({ \
+ int64x2_t a_ = (a); \
+ int32x2_t result; \
+ __asm__ ("rshrn %0.2s,%1.2d,%2" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vrshrn_n_u16(a, b) \
+ __extension__ \
+ ({ \
+ uint16x8_t a_ = (a); \
+ uint8x8_t result; \
+ __asm__ ("rshrn %0.8b,%1.8h,%2" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vrshrn_n_u32(a, b) \
+ __extension__ \
+ ({ \
+ uint32x4_t a_ = (a); \
+ uint16x4_t result; \
+ __asm__ ("rshrn %0.4h,%1.4s,%2" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vrshrn_n_u64(a, b) \
+ __extension__ \
+ ({ \
+ uint64x2_t a_ = (a); \
+ uint32x2_t result; \
+ __asm__ ("rshrn %0.2s,%1.2d,%2" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
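For reference, a sketch of the rounding shift-right-narrow form defined above, as it is typically used to scale 16-bit intermediates back down to 8 bits; the helper name and the Q8 scaling are illustrative.

    /* Illustrative: rescale 16-bit accumulators (e.g. after a widening
       multiply by Q8 coefficients) to 8 bits with rounding.  */
    static uint8x8_t
    narrow_q8_u16 (uint16x8_t acc)
    {
      return vrshrn_n_u16 (acc, 8);   /* rounding shift right by 8, narrow to u8 */
    }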
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vrsqrte_f32 (float32x2_t a)
+{
+ float32x2_t result;
+ __asm__ ("frsqrte %0.2s,%1.2s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+vrsqrte_f64 (float64x1_t a)
+{
+ float64x1_t result;
+ __asm__ ("frsqrte %d0,%d1"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vrsqrte_u32 (uint32x2_t a)
+{
+ uint32x2_t result;
+ __asm__ ("ursqrte %0.2s,%1.2s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vrsqrted_f64 (float64_t a)
+{
+ float64_t result;
+ __asm__ ("frsqrte %d0,%d1"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vrsqrteq_f32 (float32x4_t a)
+{
+ float32x4_t result;
+ __asm__ ("frsqrte %0.4s,%1.4s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vrsqrteq_f64 (float64x2_t a)
+{
+ float64x2_t result;
+ __asm__ ("frsqrte %0.2d,%1.2d"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vrsqrteq_u32 (uint32x4_t a)
+{
+ uint32x4_t result;
+ __asm__ ("ursqrte %0.4s,%1.4s"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vrsqrtes_f32 (float32_t a)
+{
+ float32_t result;
+ __asm__ ("frsqrte %s0,%s1"
+ : "=w"(result)
+ : "w"(a)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vrsqrts_f32 (float32x2_t a, float32x2_t b)
+{
+ float32x2_t result;
+ __asm__ ("frsqrts %0.2s,%1.2s,%2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vrsqrtsd_f64 (float64_t a, float64_t b)
+{
+ float64_t result;
+ __asm__ ("frsqrts %d0,%d1,%d2"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vrsqrtsq_f32 (float32x4_t a, float32x4_t b)
+{
+ float32x4_t result;
+ __asm__ ("frsqrts %0.4s,%1.4s,%2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vrsqrtsq_f64 (float64x2_t a, float64x2_t b)
+{
+ float64x2_t result;
+ __asm__ ("frsqrts %0.2d,%1.2d,%2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vrsqrtss_f32 (float32_t a, float32_t b)
+{
+ float32_t result;
+ __asm__ ("frsqrts %s0,%s1,%s2"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vrsrtsq_f64 (float64x2_t a, float64x2_t b)
+{
+ float64x2_t result;
+ __asm__ ("frsqrts %0.2d,%1.2d,%2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
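For reference, the usual way vrsqrte_f32 and vrsqrts_f32 above are combined: the initial estimate is refined with Newton-Raphson steps, since frsqrts computes (3 - a*b)/2. The helper name rsqrt2_f32 and the choice of two iterations are illustrative.

    /* Illustrative: per-lane reciprocal square root, refined with two
       Newton-Raphson steps (x <- x * (3 - d*x*x)/2).  */
    static float32x2_t
    rsqrt2_f32 (float32x2_t d)
    {
      float32x2_t x = vrsqrte_f32 (d);                      /* initial estimate */
      x = vmul_f32 (x, vrsqrts_f32 (vmul_f32 (d, x), x));   /* refinement 1 */
      x = vmul_f32 (x, vrsqrts_f32 (vmul_f32 (d, x), x));   /* refinement 2 */
      return x;
    }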
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vrsubhn_high_s16 (int8x8_t a, int16x8_t b, int16x8_t c)
+{
+ int8x16_t result = vcombine_s8 (a, vcreate_s8 (__AARCH64_UINT64_C (0x0)));
+ __asm__ ("rsubhn2 %0.16b, %1.8h, %2.8h"
+ : "+w"(result)
+ : "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vrsubhn_high_s32 (int16x4_t a, int32x4_t b, int32x4_t c)
+{
+ int16x8_t result = vcombine_s16 (a, vcreate_s16 (__AARCH64_UINT64_C (0x0)));
+ __asm__ ("rsubhn2 %0.8h, %1.4s, %2.4s"
+ : "+w"(result)
+ : "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vrsubhn_high_s64 (int32x2_t a, int64x2_t b, int64x2_t c)
+{
+ int32x4_t result = vcombine_s32 (a, vcreate_s32 (__AARCH64_UINT64_C (0x0)));
+ __asm__ ("rsubhn2 %0.4s, %1.2d, %2.2d"
+ : "+w"(result)
+ : "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vrsubhn_high_u16 (uint8x8_t a, uint16x8_t b, uint16x8_t c)
+{
+ uint8x16_t result = vcombine_u8 (a, vcreate_u8 (__AARCH64_UINT64_C (0x0)));
+ __asm__ ("rsubhn2 %0.16b, %1.8h, %2.8h"
+ : "+w"(result)
+ : "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vrsubhn_high_u32 (uint16x4_t a, uint32x4_t b, uint32x4_t c)
+{
+ uint16x8_t result = vcombine_u16 (a, vcreate_u16 (__AARCH64_UINT64_C (0x0)));
+ __asm__ ("rsubhn2 %0.8h, %1.4s, %2.4s"
+ : "+w"(result)
+ : "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vrsubhn_high_u64 (uint32x2_t a, uint64x2_t b, uint64x2_t c)
+{
+ uint32x4_t result = vcombine_u32 (a, vcreate_u32 (__AARCH64_UINT64_C (0x0)));
+ __asm__ ("rsubhn2 %0.4s, %1.2d, %2.2d"
+ : "+w"(result)
+ : "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vrsubhn_s16 (int16x8_t a, int16x8_t b)
+{
+ int8x8_t result;
+ __asm__ ("rsubhn %0.8b, %1.8h, %2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vrsubhn_s32 (int32x4_t a, int32x4_t b)
+{
+ int16x4_t result;
+ __asm__ ("rsubhn %0.4h, %1.4s, %2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vrsubhn_s64 (int64x2_t a, int64x2_t b)
+{
+ int32x2_t result;
+ __asm__ ("rsubhn %0.2s, %1.2d, %2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vrsubhn_u16 (uint16x8_t a, uint16x8_t b)
+{
+ uint8x8_t result;
+ __asm__ ("rsubhn %0.8b, %1.8h, %2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vrsubhn_u32 (uint32x4_t a, uint32x4_t b)
+{
+ uint16x4_t result;
+ __asm__ ("rsubhn %0.4h, %1.4s, %2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vrsubhn_u64 (uint64x2_t a, uint64x2_t b)
+{
+ uint32x2_t result;
+ __asm__ ("rsubhn %0.2s, %1.2d, %2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+#define vset_lane_f32(a, b, c) \
+ __extension__ \
+ ({ \
+ float32x2_t b_ = (b); \
+ float32_t a_ = (a); \
+ float32x2_t result; \
+ __asm__ ("ins %0.s[%3], %w1" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vset_lane_f64(a, b, c) \
+ __extension__ \
+ ({ \
+ float64x1_t b_ = (b); \
+ float64_t a_ = (a); \
+ float64x1_t result; \
+ __asm__ ("ins %0.d[%3], %x1" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vset_lane_p8(a, b, c) \
+ __extension__ \
+ ({ \
+ poly8x8_t b_ = (b); \
+ poly8_t a_ = (a); \
+ poly8x8_t result; \
+ __asm__ ("ins %0.b[%3], %w1" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vset_lane_p16(a, b, c) \
+ __extension__ \
+ ({ \
+ poly16x4_t b_ = (b); \
+ poly16_t a_ = (a); \
+ poly16x4_t result; \
+ __asm__ ("ins %0.h[%3], %w1" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vset_lane_s8(a, b, c) \
+ __extension__ \
+ ({ \
+ int8x8_t b_ = (b); \
+ int8_t a_ = (a); \
+ int8x8_t result; \
+ __asm__ ("ins %0.b[%3], %w1" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vset_lane_s16(a, b, c) \
+ __extension__ \
+ ({ \
+ int16x4_t b_ = (b); \
+ int16_t a_ = (a); \
+ int16x4_t result; \
+ __asm__ ("ins %0.h[%3], %w1" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vset_lane_s32(a, b, c) \
+ __extension__ \
+ ({ \
+ int32x2_t b_ = (b); \
+ int32_t a_ = (a); \
+ int32x2_t result; \
+ __asm__ ("ins %0.s[%3], %w1" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vset_lane_s64(a, b, c) \
+ __extension__ \
+ ({ \
+ int64x1_t b_ = (b); \
+ int64_t a_ = (a); \
+ int64x1_t result; \
+ __asm__ ("ins %0.d[%3], %x1" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vset_lane_u8(a, b, c) \
+ __extension__ \
+ ({ \
+ uint8x8_t b_ = (b); \
+ uint8_t a_ = (a); \
+ uint8x8_t result; \
+ __asm__ ("ins %0.b[%3], %w1" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vset_lane_u16(a, b, c) \
+ __extension__ \
+ ({ \
+ uint16x4_t b_ = (b); \
+ uint16_t a_ = (a); \
+ uint16x4_t result; \
+ __asm__ ("ins %0.h[%3], %w1" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vset_lane_u32(a, b, c) \
+ __extension__ \
+ ({ \
+ uint32x2_t b_ = (b); \
+ uint32_t a_ = (a); \
+ uint32x2_t result; \
+ __asm__ ("ins %0.s[%3], %w1" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vset_lane_u64(a, b, c) \
+ __extension__ \
+ ({ \
+ uint64x1_t b_ = (b); \
+ uint64_t a_ = (a); \
+ uint64x1_t result; \
+ __asm__ ("ins %0.d[%3], %x1" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vsetq_lane_f32(a, b, c) \
+ __extension__ \
+ ({ \
+ float32x4_t b_ = (b); \
+ float32_t a_ = (a); \
+ float32x4_t result; \
+ __asm__ ("ins %0.s[%3], %w1" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vsetq_lane_f64(a, b, c) \
+ __extension__ \
+ ({ \
+ float64x2_t b_ = (b); \
+ float64_t a_ = (a); \
+ float64x2_t result; \
+ __asm__ ("ins %0.d[%3], %x1" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vsetq_lane_p8(a, b, c) \
+ __extension__ \
+ ({ \
+ poly8x16_t b_ = (b); \
+ poly8_t a_ = (a); \
+ poly8x16_t result; \
+ __asm__ ("ins %0.b[%3], %w1" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vsetq_lane_p16(a, b, c) \
+ __extension__ \
+ ({ \
+ poly16x8_t b_ = (b); \
+ poly16_t a_ = (a); \
+ poly16x8_t result; \
+ __asm__ ("ins %0.h[%3], %w1" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vsetq_lane_s8(a, b, c) \
+ __extension__ \
+ ({ \
+ int8x16_t b_ = (b); \
+ int8_t a_ = (a); \
+ int8x16_t result; \
+ __asm__ ("ins %0.b[%3], %w1" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vsetq_lane_s16(a, b, c) \
+ __extension__ \
+ ({ \
+ int16x8_t b_ = (b); \
+ int16_t a_ = (a); \
+ int16x8_t result; \
+ __asm__ ("ins %0.h[%3], %w1" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vsetq_lane_s32(a, b, c) \
+ __extension__ \
+ ({ \
+ int32x4_t b_ = (b); \
+ int32_t a_ = (a); \
+ int32x4_t result; \
+ __asm__ ("ins %0.s[%3], %w1" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vsetq_lane_s64(a, b, c) \
+ __extension__ \
+ ({ \
+ int64x2_t b_ = (b); \
+ int64_t a_ = (a); \
+ int64x2_t result; \
+ __asm__ ("ins %0.d[%3], %x1" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vsetq_lane_u8(a, b, c) \
+ __extension__ \
+ ({ \
+ uint8x16_t b_ = (b); \
+ uint8_t a_ = (a); \
+ uint8x16_t result; \
+ __asm__ ("ins %0.b[%3], %w1" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vsetq_lane_u16(a, b, c) \
+ __extension__ \
+ ({ \
+ uint16x8_t b_ = (b); \
+ uint16_t a_ = (a); \
+ uint16x8_t result; \
+ __asm__ ("ins %0.h[%3], %w1" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vsetq_lane_u32(a, b, c) \
+ __extension__ \
+ ({ \
+ uint32x4_t b_ = (b); \
+ uint32_t a_ = (a); \
+ uint32x4_t result; \
+ __asm__ ("ins %0.s[%3], %w1" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vsetq_lane_u64(a, b, c) \
+ __extension__ \
+ ({ \
+ uint64x2_t b_ = (b); \
+ uint64_t a_ = (a); \
+ uint64x2_t result; \
+ __asm__ ("ins %0.d[%3], %x1" \
+ : "=w"(result) \
+ : "r"(a_), "0"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
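For reference, a minimal sketch of the lane-insert macros above; the name replace_lane3 and the lane index 3 are illustrative (the index must be a compile-time constant).

    /* Illustrative: overwrite lane 3 of a float32x4_t with a scalar.  */
    static float32x4_t
    replace_lane3 (float32x4_t v, float32_t x)
    {
      return vsetq_lane_f32 (x, v, 3);   /* ins v.s[3], x */
    }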
+#define vshrn_high_n_s16(a, b, c) \
+ __extension__ \
+ ({ \
+ int16x8_t b_ = (b); \
+ int8x8_t a_ = (a); \
+ int8x16_t result = vcombine_s8 \
+ (a_, vcreate_s8 \
+ (__AARCH64_UINT64_C (0x0))); \
+ __asm__ ("shrn2 %0.16b,%1.8h,#%2" \
+ : "+w"(result) \
+ : "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vshrn_high_n_s32(a, b, c) \
+ __extension__ \
+ ({ \
+ int32x4_t b_ = (b); \
+ int16x4_t a_ = (a); \
+ int16x8_t result = vcombine_s16 \
+ (a_, vcreate_s16 \
+ (__AARCH64_UINT64_C (0x0))); \
+ __asm__ ("shrn2 %0.8h,%1.4s,#%2" \
+ : "+w"(result) \
+ : "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vshrn_high_n_s64(a, b, c) \
+ __extension__ \
+ ({ \
+ int64x2_t b_ = (b); \
+ int32x2_t a_ = (a); \
+ int32x4_t result = vcombine_s32 \
+ (a_, vcreate_s32 \
+ (__AARCH64_UINT64_C (0x0))); \
+ __asm__ ("shrn2 %0.4s,%1.2d,#%2" \
+ : "+w"(result) \
+ : "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vshrn_high_n_u16(a, b, c) \
+ __extension__ \
+ ({ \
+ uint16x8_t b_ = (b); \
+ uint8x8_t a_ = (a); \
+ uint8x16_t result = vcombine_u8 \
+ (a_, vcreate_u8 \
+ (__AARCH64_UINT64_C (0x0))); \
+ __asm__ ("shrn2 %0.16b,%1.8h,#%2" \
+ : "+w"(result) \
+ : "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vshrn_high_n_u32(a, b, c) \
+ __extension__ \
+ ({ \
+ uint32x4_t b_ = (b); \
+ uint16x4_t a_ = (a); \
+ uint16x8_t result = vcombine_u16 \
+ (a_, vcreate_u16 \
+ (__AARCH64_UINT64_C (0x0))); \
+ __asm__ ("shrn2 %0.8h,%1.4s,#%2" \
+ : "+w"(result) \
+ : "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vshrn_high_n_u64(a, b, c) \
+ __extension__ \
+ ({ \
+ uint64x2_t b_ = (b); \
+ uint32x2_t a_ = (a); \
+ uint32x4_t result = vcombine_u32 \
+ (a_, vcreate_u32 \
+ (__AARCH64_UINT64_C (0x0))); \
+ __asm__ ("shrn2 %0.4s,%1.2d,#%2" \
+ : "+w"(result) \
+ : "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vshrn_n_s16(a, b) \
+ __extension__ \
+ ({ \
+ int16x8_t a_ = (a); \
+ int8x8_t result; \
+ __asm__ ("shrn %0.8b,%1.8h,%2" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vshrn_n_s32(a, b) \
+ __extension__ \
+ ({ \
+ int32x4_t a_ = (a); \
+ int16x4_t result; \
+ __asm__ ("shrn %0.4h,%1.4s,%2" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vshrn_n_s64(a, b) \
+ __extension__ \
+ ({ \
+ int64x2_t a_ = (a); \
+ int32x2_t result; \
+ __asm__ ("shrn %0.2s,%1.2d,%2" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vshrn_n_u16(a, b) \
+ __extension__ \
+ ({ \
+ uint16x8_t a_ = (a); \
+ uint8x8_t result; \
+ __asm__ ("shrn %0.8b,%1.8h,%2" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vshrn_n_u32(a, b) \
+ __extension__ \
+ ({ \
+ uint32x4_t a_ = (a); \
+ uint16x4_t result; \
+ __asm__ ("shrn %0.4h,%1.4s,%2" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vshrn_n_u64(a, b) \
+ __extension__ \
+ ({ \
+ uint64x2_t a_ = (a); \
+ uint32x2_t result; \
+ __asm__ ("shrn %0.2s,%1.2d,%2" \
+ : "=w"(result) \
+ : "w"(a_), "i"(b) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vsli_n_p8(a, b, c) \
+ __extension__ \
+ ({ \
+ poly8x8_t b_ = (b); \
+ poly8x8_t a_ = (a); \
+ poly8x8_t result; \
+ __asm__ ("sli %0.8b,%2.8b,%3" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vsli_n_p16(a, b, c) \
+ __extension__ \
+ ({ \
+ poly16x4_t b_ = (b); \
+ poly16x4_t a_ = (a); \
+ poly16x4_t result; \
+ __asm__ ("sli %0.4h,%2.4h,%3" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vsliq_n_p8(a, b, c) \
+ __extension__ \
+ ({ \
+ poly8x16_t b_ = (b); \
+ poly8x16_t a_ = (a); \
+ poly8x16_t result; \
+ __asm__ ("sli %0.16b,%2.16b,%3" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vsliq_n_p16(a, b, c) \
+ __extension__ \
+ ({ \
+ poly16x8_t b_ = (b); \
+ poly16x8_t a_ = (a); \
+ poly16x8_t result; \
+ __asm__ ("sli %0.8h,%2.8h,%3" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vsri_n_p8(a, b, c) \
+ __extension__ \
+ ({ \
+ poly8x8_t b_ = (b); \
+ poly8x8_t a_ = (a); \
+ poly8x8_t result; \
+ __asm__ ("sri %0.8b,%2.8b,%3" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vsri_n_p16(a, b, c) \
+ __extension__ \
+ ({ \
+ poly16x4_t b_ = (b); \
+ poly16x4_t a_ = (a); \
+ poly16x4_t result; \
+ __asm__ ("sri %0.4h,%2.4h,%3" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vsriq_n_p8(a, b, c) \
+ __extension__ \
+ ({ \
+ poly8x16_t b_ = (b); \
+ poly8x16_t a_ = (a); \
+ poly8x16_t result; \
+ __asm__ ("sri %0.16b,%2.16b,%3" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vsriq_n_p16(a, b, c) \
+ __extension__ \
+ ({ \
+ poly16x8_t b_ = (b); \
+ poly16x8_t a_ = (a); \
+ poly16x8_t result; \
+ __asm__ ("sri %0.8h,%2.8h,%3" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers */); \
+ result; \
+ })
+
+#define vst1_lane_f32(a, b, c) \
+ __extension__ \
+ ({ \
+ float32x2_t b_ = (b); \
+ float32_t * a_ = (a); \
+ __asm__ ("st1 {%1.s}[%2],[%0]" \
+ : \
+ : "r"(a_), "w"(b_), "i"(c) \
+ : "memory"); \
+ })
+
+#define vst1_lane_f64(a, b, c) \
+ __extension__ \
+ ({ \
+ float64x1_t b_ = (b); \
+ float64_t * a_ = (a); \
+ __asm__ ("st1 {%1.d}[%2],[%0]" \
+ : \
+ : "r"(a_), "w"(b_), "i"(c) \
+ : "memory"); \
+ })
+
+#define vst1_lane_p8(a, b, c) \
+ __extension__ \
+ ({ \
+ poly8x8_t b_ = (b); \
+ poly8_t * a_ = (a); \
+ __asm__ ("st1 {%1.b}[%2],[%0]" \
+ : \
+ : "r"(a_), "w"(b_), "i"(c) \
+ : "memory"); \
+ })
+
+#define vst1_lane_p16(a, b, c) \
+ __extension__ \
+ ({ \
+ poly16x4_t b_ = (b); \
+ poly16_t * a_ = (a); \
+ __asm__ ("st1 {%1.h}[%2],[%0]" \
+ : \
+ : "r"(a_), "w"(b_), "i"(c) \
+ : "memory"); \
+ })
+
+#define vst1_lane_s8(a, b, c) \
+ __extension__ \
+ ({ \
+ int8x8_t b_ = (b); \
+ int8_t * a_ = (a); \
+ __asm__ ("st1 {%1.b}[%2],[%0]" \
+ : \
+ : "r"(a_), "w"(b_), "i"(c) \
+ : "memory"); \
+ })
+
+#define vst1_lane_s16(a, b, c) \
+ __extension__ \
+ ({ \
+ int16x4_t b_ = (b); \
+ int16_t * a_ = (a); \
+ __asm__ ("st1 {%1.h}[%2],[%0]" \
+ : \
+ : "r"(a_), "w"(b_), "i"(c) \
+ : "memory"); \
+ })
+
+#define vst1_lane_s32(a, b, c) \
+ __extension__ \
+ ({ \
+ int32x2_t b_ = (b); \
+ int32_t * a_ = (a); \
+ __asm__ ("st1 {%1.s}[%2],[%0]" \
+ : \
+ : "r"(a_), "w"(b_), "i"(c) \
+ : "memory"); \
+ })
+
+#define vst1_lane_s64(a, b, c) \
+ __extension__ \
+ ({ \
+ int64x1_t b_ = (b); \
+ int64_t * a_ = (a); \
+ __asm__ ("st1 {%1.d}[%2],[%0]" \
+ : \
+ : "r"(a_), "w"(b_), "i"(c) \
+ : "memory"); \
+ })
+
+#define vst1_lane_u8(a, b, c) \
+ __extension__ \
+ ({ \
+ uint8x8_t b_ = (b); \
+ uint8_t * a_ = (a); \
+ __asm__ ("st1 {%1.b}[%2],[%0]" \
+ : \
+ : "r"(a_), "w"(b_), "i"(c) \
+ : "memory"); \
+ })
+
+#define vst1_lane_u16(a, b, c) \
+ __extension__ \
+ ({ \
+ uint16x4_t b_ = (b); \
+ uint16_t * a_ = (a); \
+ __asm__ ("st1 {%1.h}[%2],[%0]" \
+ : \
+ : "r"(a_), "w"(b_), "i"(c) \
+ : "memory"); \
+ })
+
+#define vst1_lane_u32(a, b, c) \
+ __extension__ \
+ ({ \
+ uint32x2_t b_ = (b); \
+ uint32_t * a_ = (a); \
+ __asm__ ("st1 {%1.s}[%2],[%0]" \
+ : \
+ : "r"(a_), "w"(b_), "i"(c) \
+ : "memory"); \
+ })
+
+#define vst1_lane_u64(a, b, c) \
+ __extension__ \
+ ({ \
+ uint64x1_t b_ = (b); \
+ uint64_t * a_ = (a); \
+ __asm__ ("st1 {%1.d}[%2],[%0]" \
+ : \
+ : "r"(a_), "w"(b_), "i"(c) \
+ : "memory"); \
+ })
+
+#define vst1q_lane_f32(a, b, c) \
+ __extension__ \
+ ({ \
+ float32x4_t b_ = (b); \
+ float32_t * a_ = (a); \
+ __asm__ ("st1 {%1.s}[%2],[%0]" \
+ : \
+ : "r"(a_), "w"(b_), "i"(c) \
+ : "memory"); \
+ })
+
+#define vst1q_lane_f64(a, b, c) \
+ __extension__ \
+ ({ \
+ float64x2_t b_ = (b); \
+ float64_t * a_ = (a); \
+ __asm__ ("st1 {%1.d}[%2],[%0]" \
+ : \
+ : "r"(a_), "w"(b_), "i"(c) \
+ : "memory"); \
+ })
+
+#define vst1q_lane_p8(a, b, c) \
+ __extension__ \
+ ({ \
+ poly8x16_t b_ = (b); \
+ poly8_t * a_ = (a); \
+ __asm__ ("st1 {%1.b}[%2],[%0]" \
+ : \
+ : "r"(a_), "w"(b_), "i"(c) \
+ : "memory"); \
+ })
+
+#define vst1q_lane_p16(a, b, c) \
+ __extension__ \
+ ({ \
+ poly16x8_t b_ = (b); \
+ poly16_t * a_ = (a); \
+ __asm__ ("st1 {%1.h}[%2],[%0]" \
+ : \
+ : "r"(a_), "w"(b_), "i"(c) \
+ : "memory"); \
+ })
+
+#define vst1q_lane_s8(a, b, c) \
+ __extension__ \
+ ({ \
+ int8x16_t b_ = (b); \
+ int8_t * a_ = (a); \
+ __asm__ ("st1 {%1.b}[%2],[%0]" \
+ : \
+ : "r"(a_), "w"(b_), "i"(c) \
+ : "memory"); \
+ })
+
+#define vst1q_lane_s16(a, b, c) \
+ __extension__ \
+ ({ \
+ int16x8_t b_ = (b); \
+ int16_t * a_ = (a); \
+ __asm__ ("st1 {%1.h}[%2],[%0]" \
+ : \
+ : "r"(a_), "w"(b_), "i"(c) \
+ : "memory"); \
+ })
+
+#define vst1q_lane_s32(a, b, c) \
+ __extension__ \
+ ({ \
+ int32x4_t b_ = (b); \
+ int32_t * a_ = (a); \
+ __asm__ ("st1 {%1.s}[%2],[%0]" \
+ : \
+ : "r"(a_), "w"(b_), "i"(c) \
+ : "memory"); \
+ })
+
+#define vst1q_lane_s64(a, b, c) \
+ __extension__ \
+ ({ \
+ int64x2_t b_ = (b); \
+ int64_t * a_ = (a); \
+ __asm__ ("st1 {%1.d}[%2],[%0]" \
+ : \
+ : "r"(a_), "w"(b_), "i"(c) \
+ : "memory"); \
+ })
+
+#define vst1q_lane_u8(a, b, c) \
+ __extension__ \
+ ({ \
+ uint8x16_t b_ = (b); \
+ uint8_t * a_ = (a); \
+ __asm__ ("st1 {%1.b}[%2],[%0]" \
+ : \
+ : "r"(a_), "w"(b_), "i"(c) \
+ : "memory"); \
+ })
+
+#define vst1q_lane_u16(a, b, c) \
+ __extension__ \
+ ({ \
+ uint16x8_t b_ = (b); \
+ uint16_t * a_ = (a); \
+ __asm__ ("st1 {%1.h}[%2],[%0]" \
+ : \
+ : "r"(a_), "w"(b_), "i"(c) \
+ : "memory"); \
+ })
+
+#define vst1q_lane_u32(a, b, c) \
+ __extension__ \
+ ({ \
+ uint32x4_t b_ = (b); \
+ uint32_t * a_ = (a); \
+ __asm__ ("st1 {%1.s}[%2],[%0]" \
+ : \
+ : "r"(a_), "w"(b_), "i"(c) \
+ : "memory"); \
+ })
+
+#define vst1q_lane_u64(a, b, c) \
+ __extension__ \
+ ({ \
+ uint64x2_t b_ = (b); \
+ uint64_t * a_ = (a); \
+ __asm__ ("st1 {%1.d}[%2],[%0]" \
+ : \
+ : "r"(a_), "w"(b_), "i"(c) \
+ : "memory"); \
+ })
+
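For reference, a sketch of the single-lane store macros above; store_lane2_u32 and the lane index are illustrative.

    /* Illustrative: store only lane 2 of a uint32x4_t to memory.  */
    static void
    store_lane2_u32 (uint32_t *p, uint32x4_t v)
    {
      vst1q_lane_u32 (p, v, 2);   /* st1 {v.s}[2], [p] */
    }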
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vsubhn_high_s16 (int8x8_t a, int16x8_t b, int16x8_t c)
+{
+ int8x16_t result = vcombine_s8 (a, vcreate_s8 (__AARCH64_UINT64_C (0x0)));
+ __asm__ ("subhn2 %0.16b, %1.8h, %2.8h"
+ : "+w"(result)
+ : "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vsubhn_high_s32 (int16x4_t a, int32x4_t b, int32x4_t c)
+{
+ int16x8_t result = vcombine_s16 (a, vcreate_s16 (__AARCH64_UINT64_C (0x0)));
+ __asm__ ("subhn2 %0.8h, %1.4s, %2.4s"
+ : "+w"(result)
+ : "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vsubhn_high_s64 (int32x2_t a, int64x2_t b, int64x2_t c)
+{
+ int32x4_t result = vcombine_s32 (a, vcreate_s32 (__AARCH64_UINT64_C (0x0)));
+ __asm__ ("subhn2 %0.4s, %1.2d, %2.2d"
+ : "+w"(result)
+ : "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vsubhn_high_u16 (uint8x8_t a, uint16x8_t b, uint16x8_t c)
+{
+ uint8x16_t result = vcombine_u8 (a, vcreate_u8 (__AARCH64_UINT64_C (0x0)));
+ __asm__ ("subhn2 %0.16b, %1.8h, %2.8h"
+ : "+w"(result)
+ : "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vsubhn_high_u32 (uint16x4_t a, uint32x4_t b, uint32x4_t c)
+{
+ uint16x8_t result = vcombine_u16 (a, vcreate_u16 (__AARCH64_UINT64_C (0x0)));
+ __asm__ ("subhn2 %0.8h, %1.4s, %2.4s"
+ : "+w"(result)
+ : "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsubhn_high_u64 (uint32x2_t a, uint64x2_t b, uint64x2_t c)
+{
+ uint32x4_t result = vcombine_u32 (a, vcreate_u32 (__AARCH64_UINT64_C (0x0)));
+ __asm__ ("subhn2 %0.4s, %1.2d, %2.2d"
+ : "+w"(result)
+ : "w"(b), "w"(c)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vsubhn_s16 (int16x8_t a, int16x8_t b)
+{
+ int8x8_t result;
+ __asm__ ("subhn %0.8b, %1.8h, %2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vsubhn_s32 (int32x4_t a, int32x4_t b)
+{
+ int16x4_t result;
+ __asm__ ("subhn %0.4h, %1.4s, %2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vsubhn_s64 (int64x2_t a, int64x2_t b)
+{
+ int32x2_t result;
+ __asm__ ("subhn %0.2s, %1.2d, %2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vsubhn_u16 (uint16x8_t a, uint16x8_t b)
+{
+ uint8x8_t result;
+ __asm__ ("subhn %0.8b, %1.8h, %2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vsubhn_u32 (uint32x4_t a, uint32x4_t b)
+{
+ uint16x4_t result;
+ __asm__ ("subhn %0.4h, %1.4s, %2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vsubhn_u64 (uint64x2_t a, uint64x2_t b)
+{
+ uint32x2_t result;
+ __asm__ ("subhn %0.2s, %1.2d, %2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vtrn1_f32 (float32x2_t a, float32x2_t b)
+{
+ float32x2_t result;
+ __asm__ ("trn1 %0.2s,%1.2s,%2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vtrn1_p8 (poly8x8_t a, poly8x8_t b)
+{
+ poly8x8_t result;
+ __asm__ ("trn1 %0.8b,%1.8b,%2.8b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vtrn1_p16 (poly16x4_t a, poly16x4_t b)
+{
+ poly16x4_t result;
+ __asm__ ("trn1 %0.4h,%1.4h,%2.4h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vtrn1_s8 (int8x8_t a, int8x8_t b)
+{
+ int8x8_t result;
+ __asm__ ("trn1 %0.8b,%1.8b,%2.8b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vtrn1_s16 (int16x4_t a, int16x4_t b)
+{
+ int16x4_t result;
+ __asm__ ("trn1 %0.4h,%1.4h,%2.4h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vtrn1_s32 (int32x2_t a, int32x2_t b)
+{
+ int32x2_t result;
+ __asm__ ("trn1 %0.2s,%1.2s,%2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtrn1_u8 (uint8x8_t a, uint8x8_t b)
+{
+ uint8x8_t result;
+ __asm__ ("trn1 %0.8b,%1.8b,%2.8b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vtrn1_u16 (uint16x4_t a, uint16x4_t b)
+{
+ uint16x4_t result;
+ __asm__ ("trn1 %0.4h,%1.4h,%2.4h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vtrn1_u32 (uint32x2_t a, uint32x2_t b)
+{
+ uint32x2_t result;
+ __asm__ ("trn1 %0.2s,%1.2s,%2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vtrn1q_f32 (float32x4_t a, float32x4_t b)
+{
+ float32x4_t result;
+ __asm__ ("trn1 %0.4s,%1.4s,%2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vtrn1q_f64 (float64x2_t a, float64x2_t b)
+{
+ float64x2_t result;
+ __asm__ ("trn1 %0.2d,%1.2d,%2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vtrn1q_p8 (poly8x16_t a, poly8x16_t b)
+{
+ poly8x16_t result;
+ __asm__ ("trn1 %0.16b,%1.16b,%2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vtrn1q_p16 (poly16x8_t a, poly16x8_t b)
+{
+ poly16x8_t result;
+ __asm__ ("trn1 %0.8h,%1.8h,%2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vtrn1q_s8 (int8x16_t a, int8x16_t b)
+{
+ int8x16_t result;
+ __asm__ ("trn1 %0.16b,%1.16b,%2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vtrn1q_s16 (int16x8_t a, int16x8_t b)
+{
+ int16x8_t result;
+ __asm__ ("trn1 %0.8h,%1.8h,%2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vtrn1q_s32 (int32x4_t a, int32x4_t b)
+{
+ int32x4_t result;
+ __asm__ ("trn1 %0.4s,%1.4s,%2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vtrn1q_s64 (int64x2_t a, int64x2_t b)
+{
+ int64x2_t result;
+ __asm__ ("trn1 %0.2d,%1.2d,%2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vtrn1q_u8 (uint8x16_t a, uint8x16_t b)
+{
+ uint8x16_t result;
+ __asm__ ("trn1 %0.16b,%1.16b,%2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vtrn1q_u16 (uint16x8_t a, uint16x8_t b)
+{
+ uint16x8_t result;
+ __asm__ ("trn1 %0.8h,%1.8h,%2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vtrn1q_u32 (uint32x4_t a, uint32x4_t b)
+{
+ uint32x4_t result;
+ __asm__ ("trn1 %0.4s,%1.4s,%2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vtrn1q_u64 (uint64x2_t a, uint64x2_t b)
+{
+ uint64x2_t result;
+ __asm__ ("trn1 %0.2d,%1.2d,%2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vtrn2_f32 (float32x2_t a, float32x2_t b)
+{
+ float32x2_t result;
+ __asm__ ("trn2 %0.2s,%1.2s,%2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vtrn2_p8 (poly8x8_t a, poly8x8_t b)
+{
+ poly8x8_t result;
+ __asm__ ("trn2 %0.8b,%1.8b,%2.8b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vtrn2_p16 (poly16x4_t a, poly16x4_t b)
+{
+ poly16x4_t result;
+ __asm__ ("trn2 %0.4h,%1.4h,%2.4h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vtrn2_s8 (int8x8_t a, int8x8_t b)
+{
+ int8x8_t result;
+ __asm__ ("trn2 %0.8b,%1.8b,%2.8b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vtrn2_s16 (int16x4_t a, int16x4_t b)
+{
+ int16x4_t result;
+ __asm__ ("trn2 %0.4h,%1.4h,%2.4h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vtrn2_s32 (int32x2_t a, int32x2_t b)
+{
+ int32x2_t result;
+ __asm__ ("trn2 %0.2s,%1.2s,%2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtrn2_u8 (uint8x8_t a, uint8x8_t b)
+{
+ uint8x8_t result;
+ __asm__ ("trn2 %0.8b,%1.8b,%2.8b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vtrn2_u16 (uint16x4_t a, uint16x4_t b)
+{
+ uint16x4_t result;
+ __asm__ ("trn2 %0.4h,%1.4h,%2.4h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vtrn2_u32 (uint32x2_t a, uint32x2_t b)
+{
+ uint32x2_t result;
+ __asm__ ("trn2 %0.2s,%1.2s,%2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vtrn2q_f32 (float32x4_t a, float32x4_t b)
+{
+ float32x4_t result;
+ __asm__ ("trn2 %0.4s,%1.4s,%2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vtrn2q_f64 (float64x2_t a, float64x2_t b)
+{
+ float64x2_t result;
+ __asm__ ("trn2 %0.2d,%1.2d,%2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vtrn2q_p8 (poly8x16_t a, poly8x16_t b)
+{
+ poly8x16_t result;
+ __asm__ ("trn2 %0.16b,%1.16b,%2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vtrn2q_p16 (poly16x8_t a, poly16x8_t b)
+{
+ poly16x8_t result;
+ __asm__ ("trn2 %0.8h,%1.8h,%2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vtrn2q_s8 (int8x16_t a, int8x16_t b)
+{
+ int8x16_t result;
+ __asm__ ("trn2 %0.16b,%1.16b,%2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vtrn2q_s16 (int16x8_t a, int16x8_t b)
+{
+ int16x8_t result;
+ __asm__ ("trn2 %0.8h,%1.8h,%2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vtrn2q_s32 (int32x4_t a, int32x4_t b)
+{
+ int32x4_t result;
+ __asm__ ("trn2 %0.4s,%1.4s,%2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vtrn2q_s64 (int64x2_t a, int64x2_t b)
+{
+ int64x2_t result;
+ __asm__ ("trn2 %0.2d,%1.2d,%2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vtrn2q_u8 (uint8x16_t a, uint8x16_t b)
+{
+ uint8x16_t result;
+ __asm__ ("trn2 %0.16b,%1.16b,%2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vtrn2q_u16 (uint16x8_t a, uint16x8_t b)
+{
+ uint16x8_t result;
+ __asm__ ("trn2 %0.8h,%1.8h,%2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vtrn2q_u32 (uint32x4_t a, uint32x4_t b)
+{
+ uint32x4_t result;
+ __asm__ ("trn2 %0.4s,%1.4s,%2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vtrn2q_u64 (uint64x2_t a, uint64x2_t b)
+{
+ uint64x2_t result;
+ __asm__ ("trn2 %0.2d,%1.2d,%2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
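For reference, a sketch of how the trn1/trn2 pair above is normally used together: applied to two rows they perform the 2x2 block interleave that larger matrix transposes are built from. The helper name trn_rows_u16 is illustrative.

    /* Illustrative: interleave two rows; trn1 combines the even-indexed
       lanes of the inputs, trn2 the odd-indexed lanes.  */
    static void
    trn_rows_u16 (uint16x8_t r0, uint16x8_t r1,
                  uint16x8_t *t0, uint16x8_t *t1)
    {
      *t0 = vtrn1q_u16 (r0, r1);
      *t1 = vtrn2q_u16 (r0, r1);
    }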
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtst_p8 (poly8x8_t a, poly8x8_t b)
+{
+ uint8x8_t result;
+ __asm__ ("cmtst %0.8b, %1.8b, %2.8b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vtst_p16 (poly16x4_t a, poly16x4_t b)
+{
+ uint16x4_t result;
+ __asm__ ("cmtst %0.4h, %1.4h, %2.4h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vtstq_p8 (poly8x16_t a, poly8x16_t b)
+{
+ uint8x16_t result;
+ __asm__ ("cmtst %0.16b, %1.16b, %2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vtstq_p16 (poly16x8_t a, poly16x8_t b)
+{
+ uint16x8_t result;
+ __asm__ ("cmtst %0.8h, %1.8h, %2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vuzp1_f32 (float32x2_t a, float32x2_t b)
+{
+ float32x2_t result;
+ __asm__ ("uzp1 %0.2s,%1.2s,%2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vuzp1_p8 (poly8x8_t a, poly8x8_t b)
+{
+ poly8x8_t result;
+ __asm__ ("uzp1 %0.8b,%1.8b,%2.8b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vuzp1_p16 (poly16x4_t a, poly16x4_t b)
+{
+ poly16x4_t result;
+ __asm__ ("uzp1 %0.4h,%1.4h,%2.4h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vuzp1_s8 (int8x8_t a, int8x8_t b)
+{
+ int8x8_t result;
+ __asm__ ("uzp1 %0.8b,%1.8b,%2.8b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vuzp1_s16 (int16x4_t a, int16x4_t b)
+{
+ int16x4_t result;
+ __asm__ ("uzp1 %0.4h,%1.4h,%2.4h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vuzp1_s32 (int32x2_t a, int32x2_t b)
+{
+ int32x2_t result;
+ __asm__ ("uzp1 %0.2s,%1.2s,%2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vuzp1_u8 (uint8x8_t a, uint8x8_t b)
+{
+ uint8x8_t result;
+ __asm__ ("uzp1 %0.8b,%1.8b,%2.8b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vuzp1_u16 (uint16x4_t a, uint16x4_t b)
+{
+ uint16x4_t result;
+ __asm__ ("uzp1 %0.4h,%1.4h,%2.4h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vuzp1_u32 (uint32x2_t a, uint32x2_t b)
+{
+ uint32x2_t result;
+ __asm__ ("uzp1 %0.2s,%1.2s,%2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vuzp1q_f32 (float32x4_t a, float32x4_t b)
+{
+ float32x4_t result;
+ __asm__ ("uzp1 %0.4s,%1.4s,%2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vuzp1q_f64 (float64x2_t a, float64x2_t b)
+{
+ float64x2_t result;
+ __asm__ ("uzp1 %0.2d,%1.2d,%2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vuzp1q_p8 (poly8x16_t a, poly8x16_t b)
+{
+ poly8x16_t result;
+ __asm__ ("uzp1 %0.16b,%1.16b,%2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vuzp1q_p16 (poly16x8_t a, poly16x8_t b)
+{
+ poly16x8_t result;
+ __asm__ ("uzp1 %0.8h,%1.8h,%2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vuzp1q_s8 (int8x16_t a, int8x16_t b)
+{
+ int8x16_t result;
+ __asm__ ("uzp1 %0.16b,%1.16b,%2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vuzp1q_s16 (int16x8_t a, int16x8_t b)
+{
+ int16x8_t result;
+ __asm__ ("uzp1 %0.8h,%1.8h,%2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vuzp1q_s32 (int32x4_t a, int32x4_t b)
+{
+ int32x4_t result;
+ __asm__ ("uzp1 %0.4s,%1.4s,%2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vuzp1q_s64 (int64x2_t a, int64x2_t b)
+{
+ int64x2_t result;
+ __asm__ ("uzp1 %0.2d,%1.2d,%2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vuzp1q_u8 (uint8x16_t a, uint8x16_t b)
+{
+ uint8x16_t result;
+ __asm__ ("uzp1 %0.16b,%1.16b,%2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vuzp1q_u16 (uint16x8_t a, uint16x8_t b)
+{
+ uint16x8_t result;
+ __asm__ ("uzp1 %0.8h,%1.8h,%2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vuzp1q_u32 (uint32x4_t a, uint32x4_t b)
+{
+ uint32x4_t result;
+ __asm__ ("uzp1 %0.4s,%1.4s,%2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vuzp1q_u64 (uint64x2_t a, uint64x2_t b)
+{
+ uint64x2_t result;
+ __asm__ ("uzp1 %0.2d,%1.2d,%2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vuzp2_f32 (float32x2_t a, float32x2_t b)
+{
+ float32x2_t result;
+ __asm__ ("uzp2 %0.2s,%1.2s,%2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vuzp2_p8 (poly8x8_t a, poly8x8_t b)
+{
+ poly8x8_t result;
+ __asm__ ("uzp2 %0.8b,%1.8b,%2.8b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vuzp2_p16 (poly16x4_t a, poly16x4_t b)
+{
+ poly16x4_t result;
+ __asm__ ("uzp2 %0.4h,%1.4h,%2.4h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vuzp2_s8 (int8x8_t a, int8x8_t b)
+{
+ int8x8_t result;
+ __asm__ ("uzp2 %0.8b,%1.8b,%2.8b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vuzp2_s16 (int16x4_t a, int16x4_t b)
+{
+ int16x4_t result;
+ __asm__ ("uzp2 %0.4h,%1.4h,%2.4h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vuzp2_s32 (int32x2_t a, int32x2_t b)
+{
+ int32x2_t result;
+ __asm__ ("uzp2 %0.2s,%1.2s,%2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vuzp2_u8 (uint8x8_t a, uint8x8_t b)
+{
+ uint8x8_t result;
+ __asm__ ("uzp2 %0.8b,%1.8b,%2.8b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vuzp2_u16 (uint16x4_t a, uint16x4_t b)
+{
+ uint16x4_t result;
+ __asm__ ("uzp2 %0.4h,%1.4h,%2.4h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vuzp2_u32 (uint32x2_t a, uint32x2_t b)
+{
+ uint32x2_t result;
+ __asm__ ("uzp2 %0.2s,%1.2s,%2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vuzp2q_f32 (float32x4_t a, float32x4_t b)
+{
+ float32x4_t result;
+ __asm__ ("uzp2 %0.4s,%1.4s,%2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vuzp2q_f64 (float64x2_t a, float64x2_t b)
+{
+ float64x2_t result;
+ __asm__ ("uzp2 %0.2d,%1.2d,%2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vuzp2q_p8 (poly8x16_t a, poly8x16_t b)
+{
+ poly8x16_t result;
+ __asm__ ("uzp2 %0.16b,%1.16b,%2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vuzp2q_p16 (poly16x8_t a, poly16x8_t b)
+{
+ poly16x8_t result;
+ __asm__ ("uzp2 %0.8h,%1.8h,%2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vuzp2q_s8 (int8x16_t a, int8x16_t b)
+{
+ int8x16_t result;
+ __asm__ ("uzp2 %0.16b,%1.16b,%2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vuzp2q_s16 (int16x8_t a, int16x8_t b)
+{
+ int16x8_t result;
+ __asm__ ("uzp2 %0.8h,%1.8h,%2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vuzp2q_s32 (int32x4_t a, int32x4_t b)
+{
+ int32x4_t result;
+ __asm__ ("uzp2 %0.4s,%1.4s,%2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vuzp2q_s64 (int64x2_t a, int64x2_t b)
+{
+ int64x2_t result;
+ __asm__ ("uzp2 %0.2d,%1.2d,%2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vuzp2q_u8 (uint8x16_t a, uint8x16_t b)
+{
+ uint8x16_t result;
+ __asm__ ("uzp2 %0.16b,%1.16b,%2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vuzp2q_u16 (uint16x8_t a, uint16x8_t b)
+{
+ uint16x8_t result;
+ __asm__ ("uzp2 %0.8h,%1.8h,%2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vuzp2q_u32 (uint32x4_t a, uint32x4_t b)
+{
+ uint32x4_t result;
+ __asm__ ("uzp2 %0.4s,%1.4s,%2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vuzp2q_u64 (uint64x2_t a, uint64x2_t b)
+{
+ uint64x2_t result;
+ __asm__ ("uzp2 %0.2d,%1.2d,%2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vzip1_f32 (float32x2_t a, float32x2_t b)
+{
+ float32x2_t result;
+ __asm__ ("zip1 %0.2s,%1.2s,%2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vzip1_p8 (poly8x8_t a, poly8x8_t b)
+{
+ poly8x8_t result;
+ __asm__ ("zip1 %0.8b,%1.8b,%2.8b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vzip1_p16 (poly16x4_t a, poly16x4_t b)
+{
+ poly16x4_t result;
+ __asm__ ("zip1 %0.4h,%1.4h,%2.4h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vzip1_s8 (int8x8_t a, int8x8_t b)
+{
+ int8x8_t result;
+ __asm__ ("zip1 %0.8b,%1.8b,%2.8b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vzip1_s16 (int16x4_t a, int16x4_t b)
+{
+ int16x4_t result;
+ __asm__ ("zip1 %0.4h,%1.4h,%2.4h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vzip1_s32 (int32x2_t a, int32x2_t b)
+{
+ int32x2_t result;
+ __asm__ ("zip1 %0.2s,%1.2s,%2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vzip1_u8 (uint8x8_t a, uint8x8_t b)
+{
+ uint8x8_t result;
+ __asm__ ("zip1 %0.8b,%1.8b,%2.8b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vzip1_u16 (uint16x4_t a, uint16x4_t b)
+{
+ uint16x4_t result;
+ __asm__ ("zip1 %0.4h,%1.4h,%2.4h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vzip1_u32 (uint32x2_t a, uint32x2_t b)
+{
+ uint32x2_t result;
+ __asm__ ("zip1 %0.2s,%1.2s,%2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vzip1q_f32 (float32x4_t a, float32x4_t b)
+{
+ float32x4_t result;
+ __asm__ ("zip1 %0.4s,%1.4s,%2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vzip1q_f64 (float64x2_t a, float64x2_t b)
+{
+ float64x2_t result;
+ __asm__ ("zip1 %0.2d,%1.2d,%2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vzip1q_p8 (poly8x16_t a, poly8x16_t b)
+{
+ poly8x16_t result;
+ __asm__ ("zip1 %0.16b,%1.16b,%2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vzip1q_p16 (poly16x8_t a, poly16x8_t b)
+{
+ poly16x8_t result;
+ __asm__ ("zip1 %0.8h,%1.8h,%2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vzip1q_s8 (int8x16_t a, int8x16_t b)
+{
+ int8x16_t result;
+ __asm__ ("zip1 %0.16b,%1.16b,%2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vzip1q_s16 (int16x8_t a, int16x8_t b)
+{
+ int16x8_t result;
+ __asm__ ("zip1 %0.8h,%1.8h,%2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vzip1q_s32 (int32x4_t a, int32x4_t b)
+{
+ int32x4_t result;
+ __asm__ ("zip1 %0.4s,%1.4s,%2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vzip1q_s64 (int64x2_t a, int64x2_t b)
+{
+ int64x2_t result;
+ __asm__ ("zip1 %0.2d,%1.2d,%2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vzip1q_u8 (uint8x16_t a, uint8x16_t b)
+{
+ uint8x16_t result;
+ __asm__ ("zip1 %0.16b,%1.16b,%2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vzip1q_u16 (uint16x8_t a, uint16x8_t b)
+{
+ uint16x8_t result;
+ __asm__ ("zip1 %0.8h,%1.8h,%2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vzip1q_u32 (uint32x4_t a, uint32x4_t b)
+{
+ uint32x4_t result;
+ __asm__ ("zip1 %0.4s,%1.4s,%2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vzip1q_u64 (uint64x2_t a, uint64x2_t b)
+{
+ uint64x2_t result;
+ __asm__ ("zip1 %0.2d,%1.2d,%2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vzip2_f32 (float32x2_t a, float32x2_t b)
+{
+ float32x2_t result;
+ __asm__ ("zip2 %0.2s,%1.2s,%2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vzip2_p8 (poly8x8_t a, poly8x8_t b)
+{
+ poly8x8_t result;
+ __asm__ ("zip2 %0.8b,%1.8b,%2.8b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vzip2_p16 (poly16x4_t a, poly16x4_t b)
+{
+ poly16x4_t result;
+ __asm__ ("zip2 %0.4h,%1.4h,%2.4h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vzip2_s8 (int8x8_t a, int8x8_t b)
+{
+ int8x8_t result;
+ __asm__ ("zip2 %0.8b,%1.8b,%2.8b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vzip2_s16 (int16x4_t a, int16x4_t b)
+{
+ int16x4_t result;
+ __asm__ ("zip2 %0.4h,%1.4h,%2.4h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vzip2_s32 (int32x2_t a, int32x2_t b)
+{
+ int32x2_t result;
+ __asm__ ("zip2 %0.2s,%1.2s,%2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vzip2_u8 (uint8x8_t a, uint8x8_t b)
+{
+ uint8x8_t result;
+ __asm__ ("zip2 %0.8b,%1.8b,%2.8b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vzip2_u16 (uint16x4_t a, uint16x4_t b)
+{
+ uint16x4_t result;
+ __asm__ ("zip2 %0.4h,%1.4h,%2.4h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vzip2_u32 (uint32x2_t a, uint32x2_t b)
+{
+ uint32x2_t result;
+ __asm__ ("zip2 %0.2s,%1.2s,%2.2s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vzip2q_f32 (float32x4_t a, float32x4_t b)
+{
+ float32x4_t result;
+ __asm__ ("zip2 %0.4s,%1.4s,%2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vzip2q_f64 (float64x2_t a, float64x2_t b)
+{
+ float64x2_t result;
+ __asm__ ("zip2 %0.2d,%1.2d,%2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vzip2q_p8 (poly8x16_t a, poly8x16_t b)
+{
+ poly8x16_t result;
+ __asm__ ("zip2 %0.16b,%1.16b,%2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vzip2q_p16 (poly16x8_t a, poly16x8_t b)
+{
+ poly16x8_t result;
+ __asm__ ("zip2 %0.8h,%1.8h,%2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vzip2q_s8 (int8x16_t a, int8x16_t b)
+{
+ int8x16_t result;
+ __asm__ ("zip2 %0.16b,%1.16b,%2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vzip2q_s16 (int16x8_t a, int16x8_t b)
+{
+ int16x8_t result;
+ __asm__ ("zip2 %0.8h,%1.8h,%2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vzip2q_s32 (int32x4_t a, int32x4_t b)
+{
+ int32x4_t result;
+ __asm__ ("zip2 %0.4s,%1.4s,%2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vzip2q_s64 (int64x2_t a, int64x2_t b)
+{
+ int64x2_t result;
+ __asm__ ("zip2 %0.2d,%1.2d,%2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vzip2q_u8 (uint8x16_t a, uint8x16_t b)
+{
+ uint8x16_t result;
+ __asm__ ("zip2 %0.16b,%1.16b,%2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vzip2q_u16 (uint16x8_t a, uint16x8_t b)
+{
+ uint16x8_t result;
+ __asm__ ("zip2 %0.8h,%1.8h,%2.8h"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vzip2q_u32 (uint32x4_t a, uint32x4_t b)
+{
+ uint32x4_t result;
+ __asm__ ("zip2 %0.4s,%1.4s,%2.4s"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vzip2q_u64 (uint64x2_t a, uint64x2_t b)
+{
+ uint64x2_t result;
+ __asm__ ("zip2 %0.2d,%1.2d,%2.2d"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
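+
+/* Illustrative note (not part of the original header): the UZP1/UZP2 and
+   ZIP1/ZIP2 wrappers above follow the A64 lane semantics.  Assuming
+   int16x4_t a = {a0, a1, a2, a3} and b = {b0, b1, b2, b3}:
+
+     vuzp1_s16 (a, b)  ->  {a0, a2, b0, b2}   (even-indexed elements)
+     vuzp2_s16 (a, b)  ->  {a1, a3, b1, b3}   (odd-indexed elements)
+     vzip1_s16 (a, b)  ->  {a0, b0, a1, b1}   (interleave low halves)
+     vzip2_s16 (a, b)  ->  {a2, b2, a3, b3}   (interleave high halves)  */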
+
+/* End of temporary inline asm implementations. */
+
+/* Start of temporary inline asm for vldn, vstn and friends. */
+
+/* Create struct element types for duplicating loads.
+
+ Create 2 element structures of:
+
+ +------+----+----+----+----+
+ |      | 8  | 16 | 32 | 64 |
+ +------+----+----+----+----+
+ |int   | Y  | Y  | N  | N  |
+ +------+----+----+----+----+
+ |uint  | Y  | Y  | N  | N  |
+ +------+----+----+----+----+
+ |float | -  | -  | N  | N  |
+ +------+----+----+----+----+
+ |poly  | Y  | Y  | -  | -  |
+ +------+----+----+----+----+
+
+ Create 3 element structures of:
+
+ +------+----+----+----+----+
+ |      | 8  | 16 | 32 | 64 |
+ +------+----+----+----+----+
+ |int   | Y  | Y  | Y  | Y  |
+ +------+----+----+----+----+
+ |uint  | Y  | Y  | Y  | Y  |
+ +------+----+----+----+----+
+ |float | -  | -  | Y  | Y  |
+ +------+----+----+----+----+
+ |poly  | Y  | Y  | -  | -  |
+ +------+----+----+----+----+
+
+ Create 4 element structures of:
+
+ +------+----+----+----+----+
+ |      | 8  | 16 | 32 | 64 |
+ +------+----+----+----+----+
+ |int   | Y  | N  | N  | Y  |
+ +------+----+----+----+----+
+ |uint  | Y  | N  | N  | Y  |
+ +------+----+----+----+----+
+ |float | -  | -  | N  | Y  |
+ +------+----+----+----+----+
+ |poly  | Y  | N  | -  | -  |
+ +------+----+----+----+----+
+
+ This is required for casting the memory reference.  */
+#define __STRUCTN(t, sz, nelem) \
+ typedef struct t ## sz ## x ## nelem ## _t { \
+ t ## sz ## _t val[nelem]; \
+ } t ## sz ## x ## nelem ## _t;
+
+/* 2-element structs. */
+__STRUCTN (int, 8, 2)
+__STRUCTN (int, 16, 2)
+__STRUCTN (uint, 8, 2)
+__STRUCTN (uint, 16, 2)
+__STRUCTN (poly, 8, 2)
+__STRUCTN (poly, 16, 2)
+/* 3-element structs. */
+__STRUCTN (int, 8, 3)
+__STRUCTN (int, 16, 3)
+__STRUCTN (int, 32, 3)
+__STRUCTN (int, 64, 3)
+__STRUCTN (uint, 8, 3)
+__STRUCTN (uint, 16, 3)
+__STRUCTN (uint, 32, 3)
+__STRUCTN (uint, 64, 3)
+__STRUCTN (float, 32, 3)
+__STRUCTN (float, 64, 3)
+__STRUCTN (poly, 8, 3)
+__STRUCTN (poly, 16, 3)
+/* 4-element structs. */
+__STRUCTN (int, 8, 4)
+__STRUCTN (int, 64, 4)
+__STRUCTN (uint, 8, 4)
+__STRUCTN (uint, 64, 4)
+__STRUCTN (poly, 8, 4)
+__STRUCTN (float, 64, 4)
+#undef __STRUCTN
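+
+/* Illustrative note (not part of the original header): each __STRUCTN
+   invocation above expands to a plain aggregate that exists only so the
+   memory operand can be cast to the right size.  For example,
+   __STRUCTN (int, 8, 2) produces:
+
+     typedef struct int8x2_t {
+       int8_t val[2];
+     } int8x2_t;  */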
+
+#define __LD2R_FUNC(rettype, structtype, ptrtype, \
+ regsuffix, funcsuffix, Q) \
+ __extension__ static __inline rettype \
+ __attribute__ ((__always_inline__)) \
+ vld2 ## Q ## _dup_ ## funcsuffix (const ptrtype *ptr) \
+ { \
+ rettype result; \
+ __asm__ ("ld2r {v16." #regsuffix ", v17." #regsuffix "}, %1\n\t" \
+ "st1 {v16." #regsuffix ", v17." #regsuffix "}, %0\n\t" \
+ : "=Q"(result) \
+ : "Q"(*(const structtype *)ptr) \
+ : "memory", "v16", "v17"); \
+ return result; \
+ }
+
+__LD2R_FUNC (float32x2x2_t, float32x2_t, float32_t, 2s, f32,)
+__LD2R_FUNC (float64x1x2_t, float64x2_t, float64_t, 1d, f64,)
+__LD2R_FUNC (poly8x8x2_t, poly8x2_t, poly8_t, 8b, p8,)
+__LD2R_FUNC (poly16x4x2_t, poly16x2_t, poly16_t, 4h, p16,)
+__LD2R_FUNC (int8x8x2_t, int8x2_t, int8_t, 8b, s8,)
+__LD2R_FUNC (int16x4x2_t, int16x2_t, int16_t, 4h, s16,)
+__LD2R_FUNC (int32x2x2_t, int32x2_t, int32_t, 2s, s32,)
+__LD2R_FUNC (int64x1x2_t, int64x2_t, int64_t, 1d, s64,)
+__LD2R_FUNC (uint8x8x2_t, uint8x2_t, uint8_t, 8b, u8,)
+__LD2R_FUNC (uint16x4x2_t, uint16x2_t, uint16_t, 4h, u16,)
+__LD2R_FUNC (uint32x2x2_t, uint32x2_t, uint32_t, 2s, u32,)
+__LD2R_FUNC (uint64x1x2_t, uint64x2_t, uint64_t, 1d, u64,)
+__LD2R_FUNC (float32x4x2_t, float32x2_t, float32_t, 4s, f32, q)
+__LD2R_FUNC (float64x2x2_t, float64x2_t, float64_t, 2d, f64, q)
+__LD2R_FUNC (poly8x16x2_t, poly8x2_t, poly8_t, 16b, p8, q)
+__LD2R_FUNC (poly16x8x2_t, poly16x2_t, poly16_t, 8h, p16, q)
+__LD2R_FUNC (int8x16x2_t, int8x2_t, int8_t, 16b, s8, q)
+__LD2R_FUNC (int16x8x2_t, int16x2_t, int16_t, 8h, s16, q)
+__LD2R_FUNC (int32x4x2_t, int32x2_t, int32_t, 4s, s32, q)
+__LD2R_FUNC (int64x2x2_t, int64x2_t, int64_t, 2d, s64, q)
+__LD2R_FUNC (uint8x16x2_t, uint8x2_t, uint8_t, 16b, u8, q)
+__LD2R_FUNC (uint16x8x2_t, uint16x2_t, uint16_t, 8h, u16, q)
+__LD2R_FUNC (uint32x4x2_t, uint32x2_t, uint32_t, 4s, u32, q)
+__LD2R_FUNC (uint64x2x2_t, uint64x2_t, uint64_t, 2d, u64, q)
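+
+/* Illustrative note (not part of the original header): the LD2R-based
+   functions above load one {elem0, elem1} pair from memory and replicate it
+   across every lane of the two result vectors.  Assuming
+   int32_t buf[2] = {1, 2}:
+
+     int32x2x2_t r = vld2_dup_s32 (buf);
+
+   then r.val[0] is {1, 1} and r.val[1] is {2, 2}.  */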
+
+#define __LD2_LANE_FUNC(rettype, ptrtype, regsuffix, \
+ lnsuffix, funcsuffix, Q) \
+ __extension__ static __inline rettype \
+ __attribute__ ((__always_inline__)) \
+ vld2 ## Q ## _lane_ ## funcsuffix (const ptrtype *ptr, \
+ rettype b, const int c) \
+ { \
+ rettype result; \
+ __asm__ ("ld1 {v16." #regsuffix ", v17." #regsuffix "}, %1\n\t" \
+ "ld2 {v16." #lnsuffix ", v17." #lnsuffix "}[%3], %2\n\t" \
+ "st1 {v16." #regsuffix ", v17." #regsuffix "}, %0\n\t" \
+ : "=Q"(result) \
+ : "Q"(b), "Q"(*(const rettype *)ptr), "i"(c) \
+ : "memory", "v16", "v17"); \
+ return result; \
+ }
+
+__LD2_LANE_FUNC (int8x8x2_t, uint8_t, 8b, b, s8,)
+__LD2_LANE_FUNC (float32x2x2_t, float32_t, 2s, s, f32,)
+__LD2_LANE_FUNC (float64x1x2_t, float64_t, 1d, d, f64,)
+__LD2_LANE_FUNC (poly8x8x2_t, poly8_t, 8b, b, p8,)
+__LD2_LANE_FUNC (poly16x4x2_t, poly16_t, 4h, h, p16,)
+__LD2_LANE_FUNC (int16x4x2_t, int16_t, 4h, h, s16,)
+__LD2_LANE_FUNC (int32x2x2_t, int32_t, 2s, s, s32,)
+__LD2_LANE_FUNC (int64x1x2_t, int64_t, 1d, d, s64,)
+__LD2_LANE_FUNC (uint8x8x2_t, uint8_t, 8b, b, u8,)
+__LD2_LANE_FUNC (uint16x4x2_t, uint16_t, 4h, h, u16,)
+__LD2_LANE_FUNC (uint32x2x2_t, uint32_t, 2s, s, u32,)
+__LD2_LANE_FUNC (uint64x1x2_t, uint64_t, 1d, d, u64,)
+__LD2_LANE_FUNC (float32x4x2_t, float32_t, 4s, s, f32, q)
+__LD2_LANE_FUNC (float64x2x2_t, float64_t, 2d, d, f64, q)
+__LD2_LANE_FUNC (poly8x16x2_t, poly8_t, 16b, b, p8, q)
+__LD2_LANE_FUNC (poly16x8x2_t, poly16_t, 8h, h, p16, q)
+__LD2_LANE_FUNC (int8x16x2_t, int8_t, 16b, b, s8, q)
+__LD2_LANE_FUNC (int16x8x2_t, int16_t, 8h, h, s16, q)
+__LD2_LANE_FUNC (int32x4x2_t, int32_t, 4s, s, s32, q)
+__LD2_LANE_FUNC (int64x2x2_t, int64_t, 2d, d, s64, q)
+__LD2_LANE_FUNC (uint8x16x2_t, uint8_t, 16b, b, u8, q)
+__LD2_LANE_FUNC (uint16x8x2_t, uint16_t, 8h, h, u16, q)
+__LD2_LANE_FUNC (uint32x4x2_t, uint32_t, 4s, s, u32, q)
+__LD2_LANE_FUNC (uint64x2x2_t, uint64_t, 2d, d, u64, q)
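+
+/* Illustrative note (not part of the original header): the LD2-lane
+   functions above overwrite a single lane of an existing pair of vectors
+   and leave the remaining lanes untouched.  Assuming int16_t buf[2] and an
+   initialised int16x4x2_t v:
+
+     v = vld2_lane_s16 (buf, v, 1);
+
+   loads buf[0] into lane 1 of v.val[0] and buf[1] into lane 1 of
+   v.val[1].  */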
+
+#define __LD3R_FUNC(rettype, structtype, ptrtype, \
+ regsuffix, funcsuffix, Q) \
+ __extension__ static __inline rettype \
+ __attribute__ ((__always_inline__)) \
+ vld3 ## Q ## _dup_ ## funcsuffix (const ptrtype *ptr) \
+ { \
+ rettype result; \
+ __asm__ ("ld3r {v16." #regsuffix " - v18." #regsuffix "}, %1\n\t" \
+ "st1 {v16." #regsuffix " - v18." #regsuffix "}, %0\n\t" \
+ : "=Q"(result) \
+ : "Q"(*(const structtype *)ptr) \
+ : "memory", "v16", "v17", "v18"); \
+ return result; \
+ }
+
+__LD3R_FUNC (float32x2x3_t, float32x3_t, float32_t, 2s, f32,)
+__LD3R_FUNC (float64x1x3_t, float64x3_t, float64_t, 1d, f64,)
+__LD3R_FUNC (poly8x8x3_t, poly8x3_t, poly8_t, 8b, p8,)
+__LD3R_FUNC (poly16x4x3_t, poly16x3_t, poly16_t, 4h, p16,)
+__LD3R_FUNC (int8x8x3_t, int8x3_t, int8_t, 8b, s8,)
+__LD3R_FUNC (int16x4x3_t, int16x3_t, int16_t, 4h, s16,)
+__LD3R_FUNC (int32x2x3_t, int32x3_t, int32_t, 2s, s32,)
+__LD3R_FUNC (int64x1x3_t, int64x3_t, int64_t, 1d, s64,)
+__LD3R_FUNC (uint8x8x3_t, uint8x3_t, uint8_t, 8b, u8,)
+__LD3R_FUNC (uint16x4x3_t, uint16x3_t, uint16_t, 4h, u16,)
+__LD3R_FUNC (uint32x2x3_t, uint32x3_t, uint32_t, 2s, u32,)
+__LD3R_FUNC (uint64x1x3_t, uint64x3_t, uint64_t, 1d, u64,)
+__LD3R_FUNC (float32x4x3_t, float32x3_t, float32_t, 4s, f32, q)
+__LD3R_FUNC (float64x2x3_t, float64x3_t, float64_t, 2d, f64, q)
+__LD3R_FUNC (poly8x16x3_t, poly8x3_t, poly8_t, 16b, p8, q)
+__LD3R_FUNC (poly16x8x3_t, poly16x3_t, poly16_t, 8h, p16, q)
+__LD3R_FUNC (int8x16x3_t, int8x3_t, int8_t, 16b, s8, q)
+__LD3R_FUNC (int16x8x3_t, int16x3_t, int16_t, 8h, s16, q)
+__LD3R_FUNC (int32x4x3_t, int32x3_t, int32_t, 4s, s32, q)
+__LD3R_FUNC (int64x2x3_t, int64x3_t, int64_t, 2d, s64, q)
+__LD3R_FUNC (uint8x16x3_t, uint8x3_t, uint8_t, 16b, u8, q)
+__LD3R_FUNC (uint16x8x3_t, uint16x3_t, uint16_t, 8h, u16, q)
+__LD3R_FUNC (uint32x4x3_t, uint32x3_t, uint32_t, 4s, u32, q)
+__LD3R_FUNC (uint64x2x3_t, uint64x3_t, uint64_t, 2d, u64, q)
+
+#define __LD3_LANE_FUNC(rettype, ptrtype, regsuffix, \
+ lnsuffix, funcsuffix, Q) \
+ __extension__ static __inline rettype \
+ __attribute__ ((__always_inline__)) \
+ vld3 ## Q ## _lane_ ## funcsuffix (const ptrtype *ptr, \
+ rettype b, const int c) \
+ { \
+ rettype result; \
+ __asm__ ("ld1 {v16." #regsuffix " - v18." #regsuffix "}, %1\n\t" \
+ "ld3 {v16." #lnsuffix " - v18." #lnsuffix "}[%3], %2\n\t" \
+ "st1 {v16." #regsuffix " - v18." #regsuffix "}, %0\n\t" \
+ : "=Q"(result) \
+ : "Q"(b), "Q"(*(const rettype *)ptr), "i"(c) \
+ : "memory", "v16", "v17", "v18"); \
+ return result; \
+ }
+
+__LD3_LANE_FUNC (int8x8x3_t, uint8_t, 8b, b, s8,)
+__LD3_LANE_FUNC (float32x2x3_t, float32_t, 2s, s, f32,)
+__LD3_LANE_FUNC (float64x1x3_t, float64_t, 1d, d, f64,)
+__LD3_LANE_FUNC (poly8x8x3_t, poly8_t, 8b, b, p8,)
+__LD3_LANE_FUNC (poly16x4x3_t, poly16_t, 4h, h, p16,)
+__LD3_LANE_FUNC (int16x4x3_t, int16_t, 4h, h, s16,)
+__LD3_LANE_FUNC (int32x2x3_t, int32_t, 2s, s, s32,)
+__LD3_LANE_FUNC (int64x1x3_t, int64_t, 1d, d, s64,)
+__LD3_LANE_FUNC (uint8x8x3_t, uint8_t, 8b, b, u8,)
+__LD3_LANE_FUNC (uint16x4x3_t, uint16_t, 4h, h, u16,)
+__LD3_LANE_FUNC (uint32x2x3_t, uint32_t, 2s, s, u32,)
+__LD3_LANE_FUNC (uint64x1x3_t, uint64_t, 1d, d, u64,)
+__LD3_LANE_FUNC (float32x4x3_t, float32_t, 4s, s, f32, q)
+__LD3_LANE_FUNC (float64x2x3_t, float64_t, 2d, d, f64, q)
+__LD3_LANE_FUNC (poly8x16x3_t, poly8_t, 16b, b, p8, q)
+__LD3_LANE_FUNC (poly16x8x3_t, poly16_t, 8h, h, p16, q)
+__LD3_LANE_FUNC (int8x16x3_t, int8_t, 16b, b, s8, q)
+__LD3_LANE_FUNC (int16x8x3_t, int16_t, 8h, h, s16, q)
+__LD3_LANE_FUNC (int32x4x3_t, int32_t, 4s, s, s32, q)
+__LD3_LANE_FUNC (int64x2x3_t, int64_t, 2d, d, s64, q)
+__LD3_LANE_FUNC (uint8x16x3_t, uint8_t, 16b, b, u8, q)
+__LD3_LANE_FUNC (uint16x8x3_t, uint16_t, 8h, h, u16, q)
+__LD3_LANE_FUNC (uint32x4x3_t, uint32_t, 4s, s, u32, q)
+__LD3_LANE_FUNC (uint64x2x3_t, uint64_t, 2d, d, u64, q)
+
+#define __LD4R_FUNC(rettype, structtype, ptrtype, \
+ regsuffix, funcsuffix, Q) \
+ __extension__ static __inline rettype \
+ __attribute__ ((__always_inline__)) \
+ vld4 ## Q ## _dup_ ## funcsuffix (const ptrtype *ptr) \
+ { \
+ rettype result; \
+ __asm__ ("ld4r {v16." #regsuffix " - v19." #regsuffix "}, %1\n\t" \
+ "st1 {v16." #regsuffix " - v19." #regsuffix "}, %0\n\t" \
+ : "=Q"(result) \
+ : "Q"(*(const structtype *)ptr) \
+ : "memory", "v16", "v17", "v18", "v19"); \
+ return result; \
+ }
+
+__LD4R_FUNC (float32x2x4_t, float32x4_t, float32_t, 2s, f32,)
+__LD4R_FUNC (float64x1x4_t, float64x4_t, float64_t, 1d, f64,)
+__LD4R_FUNC (poly8x8x4_t, poly8x4_t, poly8_t, 8b, p8,)
+__LD4R_FUNC (poly16x4x4_t, poly16x4_t, poly16_t, 4h, p16,)
+__LD4R_FUNC (int8x8x4_t, int8x4_t, int8_t, 8b, s8,)
+__LD4R_FUNC (int16x4x4_t, int16x4_t, int16_t, 4h, s16,)
+__LD4R_FUNC (int32x2x4_t, int32x4_t, int32_t, 2s, s32,)
+__LD4R_FUNC (int64x1x4_t, int64x4_t, int64_t, 1d, s64,)
+__LD4R_FUNC (uint8x8x4_t, uint8x4_t, uint8_t, 8b, u8,)
+__LD4R_FUNC (uint16x4x4_t, uint16x4_t, uint16_t, 4h, u16,)
+__LD4R_FUNC (uint32x2x4_t, uint32x4_t, uint32_t, 2s, u32,)
+__LD4R_FUNC (uint64x1x4_t, uint64x4_t, uint64_t, 1d, u64,)
+__LD4R_FUNC (float32x4x4_t, float32x4_t, float32_t, 4s, f32, q)
+__LD4R_FUNC (float64x2x4_t, float64x4_t, float64_t, 2d, f64, q)
+__LD4R_FUNC (poly8x16x4_t, poly8x4_t, poly8_t, 16b, p8, q)
+__LD4R_FUNC (poly16x8x4_t, poly16x4_t, poly16_t, 8h, p16, q)
+__LD4R_FUNC (int8x16x4_t, int8x4_t, int8_t, 16b, s8, q)
+__LD4R_FUNC (int16x8x4_t, int16x4_t, int16_t, 8h, s16, q)
+__LD4R_FUNC (int32x4x4_t, int32x4_t, int32_t, 4s, s32, q)
+__LD4R_FUNC (int64x2x4_t, int64x4_t, int64_t, 2d, s64, q)
+__LD4R_FUNC (uint8x16x4_t, uint8x4_t, uint8_t, 16b, u8, q)
+__LD4R_FUNC (uint16x8x4_t, uint16x4_t, uint16_t, 8h, u16, q)
+__LD4R_FUNC (uint32x4x4_t, uint32x4_t, uint32_t, 4s, u32, q)
+__LD4R_FUNC (uint64x2x4_t, uint64x4_t, uint64_t, 2d, u64, q)
+
+#define __LD4_LANE_FUNC(rettype, ptrtype, regsuffix, \
+ lnsuffix, funcsuffix, Q) \
+ __extension__ static __inline rettype \
+ __attribute__ ((__always_inline__)) \
+ vld4 ## Q ## _lane_ ## funcsuffix (const ptrtype *ptr, \
+ rettype b, const int c) \
+ { \
+ rettype result; \
+ __asm__ ("ld1 {v16." #regsuffix " - v19." #regsuffix "}, %1\n\t" \
+ "ld4 {v16." #lnsuffix " - v19." #lnsuffix "}[%3], %2\n\t" \
+ "st1 {v16." #regsuffix " - v19." #regsuffix "}, %0\n\t" \
+ : "=Q"(result) \
+ : "Q"(b), "Q"(*(const rettype *)ptr), "i"(c) \
+ : "memory", "v16", "v17", "v18", "v19"); \
+ return result; \
+ }
+
+__LD4_LANE_FUNC (int8x8x4_t, uint8_t, 8b, b, s8,)
+__LD4_LANE_FUNC (float32x2x4_t, float32_t, 2s, s, f32,)
+__LD4_LANE_FUNC (float64x1x4_t, float64_t, 1d, d, f64,)
+__LD4_LANE_FUNC (poly8x8x4_t, poly8_t, 8b, b, p8,)
+__LD4_LANE_FUNC (poly16x4x4_t, poly16_t, 4h, h, p16,)
+__LD4_LANE_FUNC (int16x4x4_t, int16_t, 4h, h, s16,)
+__LD4_LANE_FUNC (int32x2x4_t, int32_t, 2s, s, s32,)
+__LD4_LANE_FUNC (int64x1x4_t, int64_t, 1d, d, s64,)
+__LD4_LANE_FUNC (uint8x8x4_t, uint8_t, 8b, b, u8,)
+__LD4_LANE_FUNC (uint16x4x4_t, uint16_t, 4h, h, u16,)
+__LD4_LANE_FUNC (uint32x2x4_t, uint32_t, 2s, s, u32,)
+__LD4_LANE_FUNC (uint64x1x4_t, uint64_t, 1d, d, u64,)
+__LD4_LANE_FUNC (float32x4x4_t, float32_t, 4s, s, f32, q)
+__LD4_LANE_FUNC (float64x2x4_t, float64_t, 2d, d, f64, q)
+__LD4_LANE_FUNC (poly8x16x4_t, poly8_t, 16b, b, p8, q)
+__LD4_LANE_FUNC (poly16x8x4_t, poly16_t, 8h, h, p16, q)
+__LD4_LANE_FUNC (int8x16x4_t, int8_t, 16b, b, s8, q)
+__LD4_LANE_FUNC (int16x8x4_t, int16_t, 8h, h, s16, q)
+__LD4_LANE_FUNC (int32x4x4_t, int32_t, 4s, s, s32, q)
+__LD4_LANE_FUNC (int64x2x4_t, int64_t, 2d, d, s64, q)
+__LD4_LANE_FUNC (uint8x16x4_t, uint8_t, 16b, b, u8, q)
+__LD4_LANE_FUNC (uint16x8x4_t, uint16_t, 8h, h, u16, q)
+__LD4_LANE_FUNC (uint32x4x4_t, uint32_t, 4s, s, u32, q)
+__LD4_LANE_FUNC (uint64x2x4_t, uint64_t, 2d, d, u64, q)
+
+#define __ST2_LANE_FUNC(intype, ptrtype, regsuffix, \
+ lnsuffix, funcsuffix, Q) \
+ typedef struct { ptrtype __x[2]; } __ST2_LANE_STRUCTURE_##intype; \
+ __extension__ static __inline void \
+ __attribute__ ((__always_inline__)) \
+ vst2 ## Q ## _lane_ ## funcsuffix (ptrtype *ptr, \
+ intype b, const int c) \
+ { \
+ __ST2_LANE_STRUCTURE_##intype *__p = \
+ (__ST2_LANE_STRUCTURE_##intype *)ptr; \
+ __asm__ ("ld1 {v16." #regsuffix ", v17." #regsuffix "}, %1\n\t" \
+ "st2 {v16." #lnsuffix ", v17." #lnsuffix "}[%2], %0\n\t" \
+ : "=Q"(*__p) \
+ : "Q"(b), "i"(c) \
+ : "v16", "v17"); \
+ }
+
+__ST2_LANE_FUNC (int8x8x2_t, int8_t, 8b, b, s8,)
+__ST2_LANE_FUNC (float32x2x2_t, float32_t, 2s, s, f32,)
+__ST2_LANE_FUNC (float64x1x2_t, float64_t, 1d, d, f64,)
+__ST2_LANE_FUNC (poly8x8x2_t, poly8_t, 8b, b, p8,)
+__ST2_LANE_FUNC (poly16x4x2_t, poly16_t, 4h, h, p16,)
+__ST2_LANE_FUNC (int16x4x2_t, int16_t, 4h, h, s16,)
+__ST2_LANE_FUNC (int32x2x2_t, int32_t, 2s, s, s32,)
+__ST2_LANE_FUNC (int64x1x2_t, int64_t, 1d, d, s64,)
+__ST2_LANE_FUNC (uint8x8x2_t, uint8_t, 8b, b, u8,)
+__ST2_LANE_FUNC (uint16x4x2_t, uint16_t, 4h, h, u16,)
+__ST2_LANE_FUNC (uint32x2x2_t, uint32_t, 2s, s, u32,)
+__ST2_LANE_FUNC (uint64x1x2_t, uint64_t, 1d, d, u64,)
+__ST2_LANE_FUNC (float32x4x2_t, float32_t, 4s, s, f32, q)
+__ST2_LANE_FUNC (float64x2x2_t, float64_t, 2d, d, f64, q)
+__ST2_LANE_FUNC (poly8x16x2_t, poly8_t, 16b, b, p8, q)
+__ST2_LANE_FUNC (poly16x8x2_t, poly16_t, 8h, h, p16, q)
+__ST2_LANE_FUNC (int8x16x2_t, int8_t, 16b, b, s8, q)
+__ST2_LANE_FUNC (int16x8x2_t, int16_t, 8h, h, s16, q)
+__ST2_LANE_FUNC (int32x4x2_t, int32_t, 4s, s, s32, q)
+__ST2_LANE_FUNC (int64x2x2_t, int64_t, 2d, d, s64, q)
+__ST2_LANE_FUNC (uint8x16x2_t, uint8_t, 16b, b, u8, q)
+__ST2_LANE_FUNC (uint16x8x2_t, uint16_t, 8h, h, u16, q)
+__ST2_LANE_FUNC (uint32x4x2_t, uint32_t, 4s, s, u32, q)
+__ST2_LANE_FUNC (uint64x2x2_t, uint64_t, 2d, d, u64, q)
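+
+/* Illustrative note (not part of the original header): the ST2-lane
+   functions above store one lane from each of the two source vectors as an
+   interleaved pair.  Assuming int16_t buf[2] and an int16x4x2_t v:
+
+     vst2_lane_s16 (buf, v, 3);
+
+   writes lane 3 of v.val[0] to buf[0] and lane 3 of v.val[1] to buf[1].  */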
+
+#define __ST3_LANE_FUNC(intype, ptrtype, regsuffix, \
+ lnsuffix, funcsuffix, Q) \
+ typedef struct { ptrtype __x[3]; } __ST3_LANE_STRUCTURE_##intype; \
+ __extension__ static __inline void \
+ __attribute__ ((__always_inline__)) \
+ vst3 ## Q ## _lane_ ## funcsuffix (ptrtype *ptr, \
+ intype b, const int c) \
+ { \
+ __ST3_LANE_STRUCTURE_##intype *__p = \
+ (__ST3_LANE_STRUCTURE_##intype *)ptr; \
+ __asm__ ("ld1 {v16." #regsuffix " - v18." #regsuffix "}, %1\n\t" \
+ "st3 {v16." #lnsuffix " - v18." #lnsuffix "}[%2], %0\n\t" \
+ : "=Q"(*__p) \
+ : "Q"(b), "i"(c) \
+ : "v16", "v17", "v18"); \
+ }
+
+__ST3_LANE_FUNC (int8x8x3_t, int8_t, 8b, b, s8,)
+__ST3_LANE_FUNC (float32x2x3_t, float32_t, 2s, s, f32,)
+__ST3_LANE_FUNC (float64x1x3_t, float64_t, 1d, d, f64,)
+__ST3_LANE_FUNC (poly8x8x3_t, poly8_t, 8b, b, p8,)
+__ST3_LANE_FUNC (poly16x4x3_t, poly16_t, 4h, h, p16,)
+__ST3_LANE_FUNC (int16x4x3_t, int16_t, 4h, h, s16,)
+__ST3_LANE_FUNC (int32x2x3_t, int32_t, 2s, s, s32,)
+__ST3_LANE_FUNC (int64x1x3_t, int64_t, 1d, d, s64,)
+__ST3_LANE_FUNC (uint8x8x3_t, uint8_t, 8b, b, u8,)
+__ST3_LANE_FUNC (uint16x4x3_t, uint16_t, 4h, h, u16,)
+__ST3_LANE_FUNC (uint32x2x3_t, uint32_t, 2s, s, u32,)
+__ST3_LANE_FUNC (uint64x1x3_t, uint64_t, 1d, d, u64,)
+__ST3_LANE_FUNC (float32x4x3_t, float32_t, 4s, s, f32, q)
+__ST3_LANE_FUNC (float64x2x3_t, float64_t, 2d, d, f64, q)
+__ST3_LANE_FUNC (poly8x16x3_t, poly8_t, 16b, b, p8, q)
+__ST3_LANE_FUNC (poly16x8x3_t, poly16_t, 8h, h, p16, q)
+__ST3_LANE_FUNC (int8x16x3_t, int8_t, 16b, b, s8, q)
+__ST3_LANE_FUNC (int16x8x3_t, int16_t, 8h, h, s16, q)
+__ST3_LANE_FUNC (int32x4x3_t, int32_t, 4s, s, s32, q)
+__ST3_LANE_FUNC (int64x2x3_t, int64_t, 2d, d, s64, q)
+__ST3_LANE_FUNC (uint8x16x3_t, uint8_t, 16b, b, u8, q)
+__ST3_LANE_FUNC (uint16x8x3_t, uint16_t, 8h, h, u16, q)
+__ST3_LANE_FUNC (uint32x4x3_t, uint32_t, 4s, s, u32, q)
+__ST3_LANE_FUNC (uint64x2x3_t, uint64_t, 2d, d, u64, q)
+
+#define __ST4_LANE_FUNC(intype, ptrtype, regsuffix, \
+ lnsuffix, funcsuffix, Q) \
+ typedef struct { ptrtype __x[4]; } __ST4_LANE_STRUCTURE_##intype; \
+ __extension__ static __inline void \
+ __attribute__ ((__always_inline__)) \
+ vst4 ## Q ## _lane_ ## funcsuffix (ptrtype *ptr, \
+ intype b, const int c) \
+ { \
+ __ST4_LANE_STRUCTURE_##intype *__p = \
+ (__ST4_LANE_STRUCTURE_##intype *)ptr; \
+ __asm__ ("ld1 {v16." #regsuffix " - v19." #regsuffix "}, %1\n\t" \
+ "st4 {v16." #lnsuffix " - v19." #lnsuffix "}[%2], %0\n\t" \
+ : "=Q"(*__p) \
+ : "Q"(b), "i"(c) \
+ : "v16", "v17", "v18", "v19"); \
+ }
+
+__ST4_LANE_FUNC (int8x8x4_t, int8_t, 8b, b, s8,)
+__ST4_LANE_FUNC (float32x2x4_t, float32_t, 2s, s, f32,)
+__ST4_LANE_FUNC (float64x1x4_t, float64_t, 1d, d, f64,)
+__ST4_LANE_FUNC (poly8x8x4_t, poly8_t, 8b, b, p8,)
+__ST4_LANE_FUNC (poly16x4x4_t, poly16_t, 4h, h, p16,)
+__ST4_LANE_FUNC (int16x4x4_t, int16_t, 4h, h, s16,)
+__ST4_LANE_FUNC (int32x2x4_t, int32_t, 2s, s, s32,)
+__ST4_LANE_FUNC (int64x1x4_t, int64_t, 1d, d, s64,)
+__ST4_LANE_FUNC (uint8x8x4_t, uint8_t, 8b, b, u8,)
+__ST4_LANE_FUNC (uint16x4x4_t, uint16_t, 4h, h, u16,)
+__ST4_LANE_FUNC (uint32x2x4_t, uint32_t, 2s, s, u32,)
+__ST4_LANE_FUNC (uint64x1x4_t, uint64_t, 1d, d, u64,)
+__ST4_LANE_FUNC (float32x4x4_t, float32_t, 4s, s, f32, q)
+__ST4_LANE_FUNC (float64x2x4_t, float64_t, 2d, d, f64, q)
+__ST4_LANE_FUNC (poly8x16x4_t, poly8_t, 16b, b, p8, q)
+__ST4_LANE_FUNC (poly16x8x4_t, poly16_t, 8h, h, p16, q)
+__ST4_LANE_FUNC (int8x16x4_t, int8_t, 16b, b, s8, q)
+__ST4_LANE_FUNC (int16x8x4_t, int16_t, 8h, h, s16, q)
+__ST4_LANE_FUNC (int32x4x4_t, int32_t, 4s, s, s32, q)
+__ST4_LANE_FUNC (int64x2x4_t, int64_t, 2d, d, s64, q)
+__ST4_LANE_FUNC (uint8x16x4_t, uint8_t, 16b, b, u8, q)
+__ST4_LANE_FUNC (uint16x8x4_t, uint16_t, 8h, h, u16, q)
+__ST4_LANE_FUNC (uint32x4x4_t, uint32_t, 4s, s, u32, q)
+__ST4_LANE_FUNC (uint64x2x4_t, uint64_t, 2d, d, u64, q)
+
+__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+vaddlv_s32 (int32x2_t a)
+{
+ int64_t result;
+ __asm__ ("saddlp %0.1d, %1.2s" : "=w"(result) : "w"(a) : );
+ return result;
+}
+
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+vaddlv_u32 (uint32x2_t a)
+{
+ uint64_t result;
+ __asm__ ("uaddlp %0.1d, %1.2s" : "=w"(result) : "w"(a) : );
+ return result;
+}
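+
+/* Illustrative note (not part of the original header): for a two-element
+   vector the pairwise add-long used above is the same as an across-vector
+   add-long, so with int32x2_t a = {x, y} the call vaddlv_s32 (a) yields
+   (int64_t) x + (int64_t) y, with the sum formed in 64 bits.  */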
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vpaddd_s64 (int64x2_t __a)
+{
+ return __builtin_aarch64_addpdi (__a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqdmulh_laneq_s16 (int16x4_t __a, int16x8_t __b, const int __c)
+{
+ return __builtin_aarch64_sqdmulh_laneqv4hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqdmulh_laneq_s32 (int32x2_t __a, int32x4_t __b, const int __c)
+{
+ return __builtin_aarch64_sqdmulh_laneqv2si (__a, __b, __c);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqdmulhq_laneq_s16 (int16x8_t __a, int16x8_t __b, const int __c)
+{
+ return __builtin_aarch64_sqdmulh_laneqv8hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmulhq_laneq_s32 (int32x4_t __a, int32x4_t __b, const int __c)
+{
+ return __builtin_aarch64_sqdmulh_laneqv4si (__a, __b, __c);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqrdmulh_laneq_s16 (int16x4_t __a, int16x8_t __b, const int __c)
+{
+ return __builtin_aarch64_sqrdmulh_laneqv4hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqrdmulh_laneq_s32 (int32x2_t __a, int32x4_t __b, const int __c)
+{
+ return __builtin_aarch64_sqrdmulh_laneqv2si (__a, __b, __c);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqrdmulhq_laneq_s16 (int16x8_t __a, int16x8_t __b, const int __c)
+{
+ return __builtin_aarch64_sqrdmulh_laneqv8hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqrdmulhq_laneq_s32 (int32x4_t __a, int32x4_t __b, const int __c)
+{
+ return __builtin_aarch64_sqrdmulh_laneqv4si (__a, __b, __c);
+}
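+
+/* Illustrative note (not part of the original header): the *_laneq_*
+   variants above take the multiplier from lane __c of a 128-bit vector.
+   For 16-bit elements, vqdmulh_laneq_s16 (a, b, c) computes per lane
+   roughly sat ((2 * a[i] * b[c]) >> 16), and vqrdmulh_laneq_s16 adds a
+   rounding constant of 0x8000 before the shift.  */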
+
+/* Table intrinsics. */
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vqtbl1_p8 (poly8x16_t a, uint8x8_t b)
+{
+ poly8x8_t result;
+ __asm__ ("tbl %0.8b, {%1.16b}, %2.8b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqtbl1_s8 (int8x16_t a, uint8x8_t b)
+{
+ int8x8_t result;
+ __asm__ ("tbl %0.8b, {%1.16b}, %2.8b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqtbl1_u8 (uint8x16_t a, uint8x8_t b)
+{
+ uint8x8_t result;
+ __asm__ ("tbl %0.8b, {%1.16b}, %2.8b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vqtbl1q_p8 (poly8x16_t a, uint8x16_t b)
+{
+ poly8x16_t result;
+ __asm__ ("tbl %0.16b, {%1.16b}, %2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vqtbl1q_s8 (int8x16_t a, uint8x16_t b)
+{
+ int8x16_t result;
+ __asm__ ("tbl %0.16b, {%1.16b}, %2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vqtbl1q_u8 (uint8x16_t a, uint8x16_t b)
+{
+ uint8x16_t result;
+ __asm__ ("tbl %0.16b, {%1.16b}, %2.16b"
+ : "=w"(result)
+ : "w"(a), "w"(b)
+ : /* No clobbers */);
+ return result;
+}
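+
+/* Illustrative note (not part of the original header): TBL treats each byte
+   of the index vector as a byte index into the table, and an index beyond
+   the table (16 or more for a single-register table) produces 0.  For
+   example, with uint8x8_t idx = {0, 15, 16, 255, 1, 2, 3, 4},
+   vqtbl1_u8 (tab, idx) returns {tab[0], tab[15], 0, 0, tab[1], tab[2],
+   tab[3], tab[4]}.  */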
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqtbl2_s8 (int8x16x2_t tab, uint8x8_t idx)
+{
+ int8x8_t result;
+ __asm__ ("ld1 {v16.16b, v17.16b}, %1\n\t"
+ "tbl %0.8b, {v16.16b, v17.16b}, %2.8b\n\t"
+ :"=w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17");
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqtbl2_u8 (uint8x16x2_t tab, uint8x8_t idx)
+{
+ uint8x8_t result;
+ __asm__ ("ld1 {v16.16b, v17.16b}, %1\n\t"
+ "tbl %0.8b, {v16.16b, v17.16b}, %2.8b\n\t"
+ :"=w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17");
+ return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vqtbl2_p8 (poly8x16x2_t tab, uint8x8_t idx)
+{
+ poly8x8_t result;
+ __asm__ ("ld1 {v16.16b, v17.16b}, %1\n\t"
+ "tbl %0.8b, {v16.16b, v17.16b}, %2.8b\n\t"
+ :"=w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17");
+ return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vqtbl2q_s8 (int8x16x2_t tab, uint8x16_t idx)
+{
+ int8x16_t result;
+ __asm__ ("ld1 {v16.16b, v17.16b}, %1\n\t"
+ "tbl %0.16b, {v16.16b, v17.16b}, %2.16b\n\t"
+ :"=w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17");
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vqtbl2q_u8 (uint8x16x2_t tab, uint8x16_t idx)
+{
+ uint8x16_t result;
+ __asm__ ("ld1 {v16.16b, v17.16b}, %1\n\t"
+ "tbl %0.16b, {v16.16b, v17.16b}, %2.16b\n\t"
+ :"=w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17");
+ return result;
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vqtbl2q_p8 (poly8x16x2_t tab, uint8x16_t idx)
+{
+ poly8x16_t result;
+ __asm__ ("ld1 {v16.16b, v17.16b}, %1\n\t"
+ "tbl %0.16b, {v16.16b, v17.16b}, %2.16b\n\t"
+ :"=w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17");
+ return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqtbl3_s8 (int8x16x3_t tab, uint8x8_t idx)
+{
+ int8x8_t result;
+ __asm__ ("ld1 {v16.16b - v18.16b}, %1\n\t"
+ "tbl %0.8b, {v16.16b - v18.16b}, %2.8b\n\t"
+ :"=w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17", "v18");
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqtbl3_u8 (uint8x16x3_t tab, uint8x8_t idx)
+{
+ uint8x8_t result;
+ __asm__ ("ld1 {v16.16b - v18.16b}, %1\n\t"
+ "tbl %0.8b, {v16.16b - v18.16b}, %2.8b\n\t"
+ :"=w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17", "v18");
+ return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vqtbl3_p8 (poly8x16x3_t tab, uint8x8_t idx)
+{
+ poly8x8_t result;
+ __asm__ ("ld1 {v16.16b - v18.16b}, %1\n\t"
+ "tbl %0.8b, {v16.16b - v18.16b}, %2.8b\n\t"
+ :"=w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17", "v18");
+ return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vqtbl3q_s8 (int8x16x3_t tab, uint8x16_t idx)
+{
+ int8x16_t result;
+ __asm__ ("ld1 {v16.16b - v18.16b}, %1\n\t"
+ "tbl %0.16b, {v16.16b - v18.16b}, %2.16b\n\t"
+ :"=w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17", "v18");
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vqtbl3q_u8 (uint8x16x3_t tab, uint8x16_t idx)
+{
+ uint8x16_t result;
+ __asm__ ("ld1 {v16.16b - v18.16b}, %1\n\t"
+ "tbl %0.16b, {v16.16b - v18.16b}, %2.16b\n\t"
+ :"=w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17", "v18");
+ return result;
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vqtbl3q_p8 (poly8x16x3_t tab, uint8x16_t idx)
+{
+ poly8x16_t result;
+ __asm__ ("ld1 {v16.16b - v18.16b}, %1\n\t"
+ "tbl %0.16b, {v16.16b - v18.16b}, %2.16b\n\t"
+ :"=w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17", "v18");
+ return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqtbl4_s8 (int8x16x4_t tab, uint8x8_t idx)
+{
+ int8x8_t result;
+ __asm__ ("ld1 {v16.16b - v19.16b}, %1\n\t"
+ "tbl %0.8b, {v16.16b - v19.16b}, %2.8b\n\t"
+ :"=w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17", "v18", "v19");
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqtbl4_u8 (uint8x16x4_t tab, uint8x8_t idx)
+{
+ uint8x8_t result;
+ __asm__ ("ld1 {v16.16b - v19.16b}, %1\n\t"
+ "tbl %0.8b, {v16.16b - v19.16b}, %2.8b\n\t"
+ :"=w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17", "v18", "v19");
+ return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vqtbl4_p8 (poly8x16x4_t tab, uint8x8_t idx)
+{
+ poly8x8_t result;
+ __asm__ ("ld1 {v16.16b - v19.16b}, %1\n\t"
+ "tbl %0.8b, {v16.16b - v19.16b}, %2.8b\n\t"
+ :"=w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17", "v18", "v19");
+ return result;
+}
+
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vqtbl4q_s8 (int8x16x4_t tab, uint8x16_t idx)
+{
+ int8x16_t result;
+ __asm__ ("ld1 {v16.16b - v19.16b}, %1\n\t"
+ "tbl %0.16b, {v16.16b - v19.16b}, %2.16b\n\t"
+ :"=w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17", "v18", "v19");
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vqtbl4q_u8 (uint8x16x4_t tab, uint8x16_t idx)
+{
+ uint8x16_t result;
+ __asm__ ("ld1 {v16.16b - v19.16b}, %1\n\t"
+ "tbl %0.16b, {v16.16b - v19.16b}, %2.16b\n\t"
+ :"=w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17", "v18", "v19");
+ return result;
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vqtbl4q_p8 (poly8x16x4_t tab, uint8x16_t idx)
+{
+ poly8x16_t result;
+ __asm__ ("ld1 {v16.16b - v19.16b}, %1\n\t"
+ "tbl %0.16b, {v16.16b - v19.16b}, %2.16b\n\t"
+ :"=w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17", "v18", "v19");
+ return result;
+}
+
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqtbx1_s8 (int8x8_t r, int8x16_t tab, uint8x8_t idx)
+{
+ int8x8_t result = r;
+ __asm__ ("tbx %0.8b,{%1.16b},%2.8b"
+ : "+w"(result)
+ : "w"(tab), "w"(idx)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqtbx1_u8 (uint8x8_t r, uint8x16_t tab, uint8x8_t idx)
+{
+ uint8x8_t result = r;
+ __asm__ ("tbx %0.8b,{%1.16b},%2.8b"
+ : "+w"(result)
+ : "w"(tab), "w"(idx)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vqtbx1_p8 (poly8x8_t r, poly8x16_t tab, uint8x8_t idx)
+{
+ poly8x8_t result = r;
+ __asm__ ("tbx %0.8b,{%1.16b},%2.8b"
+ : "+w"(result)
+ : "w"(tab), "w"(idx)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vqtbx1q_s8 (int8x16_t r, int8x16_t tab, uint8x16_t idx)
+{
+ int8x16_t result = r;
+ __asm__ ("tbx %0.16b,{%1.16b},%2.16b"
+ : "+w"(result)
+ : "w"(tab), "w"(idx)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vqtbx1q_u8 (uint8x16_t r, uint8x16_t tab, uint8x16_t idx)
+{
+ uint8x16_t result = r;
+ __asm__ ("tbx %0.16b,{%1.16b},%2.16b"
+ : "+w"(result)
+ : "w"(tab), "w"(idx)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vqtbx1q_p8 (poly8x16_t r, poly8x16_t tab, uint8x16_t idx)
+{
+ poly8x16_t result = r;
+ __asm__ ("tbx %0.16b,{%1.16b},%2.16b"
+ : "+w"(result)
+ : "w"(tab), "w"(idx)
+ : /* No clobbers */);
+ return result;
+}
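+
+/* Illustrative note (not part of the original header): TBX differs from TBL
+   only for out-of-range indices; instead of writing 0 it leaves the
+   corresponding byte of the destination (the r argument above) unchanged,
+   which is why these wrappers initialise result from r and use a "+w"
+   operand.  For example, if lane 2 of idx is 200, lane 2 of
+   vqtbx1_u8 (r, tab, idx) keeps r[2].  */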
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqtbx2_s8 (int8x8_t r, int8x16x2_t tab, uint8x8_t idx)
+{
+ int8x8_t result = r;
+ __asm__ ("ld1 {v16.16b, v17.16b}, %1\n\t"
+ "tbx %0.8b, {v16.16b, v17.16b}, %2.8b\n\t"
+ :"+w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17");
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqtbx2_u8 (uint8x8_t r, uint8x16x2_t tab, uint8x8_t idx)
+{
+ uint8x8_t result = r;
+ __asm__ ("ld1 {v16.16b, v17.16b}, %1\n\t"
+ "tbx %0.8b, {v16.16b, v17.16b}, %2.8b\n\t"
+ :"+w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17");
+ return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vqtbx2_p8 (poly8x8_t r, poly8x16x2_t tab, uint8x8_t idx)
+{
+ poly8x8_t result = r;
+ __asm__ ("ld1 {v16.16b, v17.16b}, %1\n\t"
+ "tbx %0.8b, {v16.16b, v17.16b}, %2.8b\n\t"
+ :"+w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17");
+ return result;
+}
+
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vqtbx2q_s8 (int8x16_t r, int8x16x2_t tab, uint8x16_t idx)
+{
+ int8x16_t result = r;
+ __asm__ ("ld1 {v16.16b, v17.16b}, %1\n\t"
+ "tbx %0.16b, {v16.16b, v17.16b}, %2.16b\n\t"
+ :"+w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17");
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vqtbx2q_u8 (uint8x16_t r, uint8x16x2_t tab, uint8x16_t idx)
+{
+ uint8x16_t result = r;
+ __asm__ ("ld1 {v16.16b, v17.16b}, %1\n\t"
+ "tbx %0.16b, {v16.16b, v17.16b}, %2.16b\n\t"
+ :"+w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17");
+ return result;
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vqtbx2q_p8 (poly8x16_t r, poly8x16x2_t tab, uint8x16_t idx)
+{
+ poly8x16_t result = r;
+ __asm__ ("ld1 {v16.16b, v17.16b}, %1\n\t"
+ "tbx %0.16b, {v16.16b, v17.16b}, %2.16b\n\t"
+ :"+w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17");
+ return result;
+}
+
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqtbx3_s8 (int8x8_t r, int8x16x3_t tab, uint8x8_t idx)
+{
+ int8x8_t result = r;
+ __asm__ ("ld1 {v16.16b - v18.16b}, %1\n\t"
+ "tbx %0.8b, {v16.16b - v18.16b}, %2.8b\n\t"
+ :"+w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17", "v18");
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqtbx3_u8 (uint8x8_t r, uint8x16x3_t tab, uint8x8_t idx)
+{
+ uint8x8_t result = r;
+ __asm__ ("ld1 {v16.16b - v18.16b}, %1\n\t"
+ "tbx %0.8b, {v16.16b - v18.16b}, %2.8b\n\t"
+ :"+w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17", "v18");
+ return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vqtbx3_p8 (poly8x8_t r, poly8x16x3_t tab, uint8x8_t idx)
+{
+ poly8x8_t result = r;
+ __asm__ ("ld1 {v16.16b - v18.16b}, %1\n\t"
+ "tbx %0.8b, {v16.16b - v18.16b}, %2.8b\n\t"
+ :"+w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17", "v18");
+ return result;
+}
+
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vqtbx3q_s8 (int8x16_t r, int8x16x3_t tab, uint8x16_t idx)
+{
+ int8x16_t result = r;
+ __asm__ ("ld1 {v16.16b - v18.16b}, %1\n\t"
+ "tbx %0.16b, {v16.16b - v18.16b}, %2.16b\n\t"
+ :"+w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17", "v18");
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vqtbx3q_u8 (uint8x16_t r, uint8x16x3_t tab, uint8x16_t idx)
+{
+ uint8x16_t result = r;
+ __asm__ ("ld1 {v16.16b - v18.16b}, %1\n\t"
+ "tbx %0.16b, {v16.16b - v18.16b}, %2.16b\n\t"
+ :"+w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17", "v18");
+ return result;
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vqtbx3q_p8 (poly8x16_t r, poly8x16x3_t tab, uint8x16_t idx)
+{
+ poly8x16_t result = r;
+ __asm__ ("ld1 {v16.16b - v18.16b}, %1\n\t"
+ "tbx %0.16b, {v16.16b - v18.16b}, %2.16b\n\t"
+ :"+w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17", "v18");
+ return result;
+}
+
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqtbx4_s8 (int8x8_t r, int8x16x4_t tab, uint8x8_t idx)
+{
+ int8x8_t result = r;
+ __asm__ ("ld1 {v16.16b - v19.16b}, %1\n\t"
+ "tbx %0.8b, {v16.16b - v19.16b}, %2.8b\n\t"
+ :"+w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17", "v18", "v19");
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqtbx4_u8 (uint8x8_t r, uint8x16x4_t tab, uint8x8_t idx)
+{
+ uint8x8_t result = r;
+ __asm__ ("ld1 {v16.16b - v19.16b}, %1\n\t"
+ "tbx %0.8b, {v16.16b - v19.16b}, %2.8b\n\t"
+ :"+w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17", "v18", "v19");
+ return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vqtbx4_p8 (poly8x8_t r, poly8x16x4_t tab, uint8x8_t idx)
+{
+ poly8x8_t result = r;
+ __asm__ ("ld1 {v16.16b - v19.16b}, %1\n\t"
+ "tbx %0.8b, {v16.16b - v19.16b}, %2.8b\n\t"
+ :"+w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17", "v18", "v19");
+ return result;
+}
+
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vqtbx4q_s8 (int8x16_t r, int8x16x4_t tab, uint8x16_t idx)
+{
+ int8x16_t result = r;
+ __asm__ ("ld1 {v16.16b - v19.16b}, %1\n\t"
+ "tbx %0.16b, {v16.16b - v19.16b}, %2.16b\n\t"
+ :"+w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17", "v18", "v19");
+ return result;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vqtbx4q_u8 (uint8x16_t r, uint8x16x4_t tab, uint8x16_t idx)
+{
+ uint8x16_t result = r;
+ __asm__ ("ld1 {v16.16b - v19.16b}, %1\n\t"
+ "tbx %0.16b, {v16.16b - v19.16b}, %2.16b\n\t"
+ :"+w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17", "v18", "v19");
+ return result;
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vqtbx4q_p8 (poly8x16_t r, poly8x16x4_t tab, uint8x16_t idx)
+{
+ poly8x16_t result = r;
+ __asm__ ("ld1 {v16.16b - v19.16b}, %1\n\t"
+ "tbx %0.16b, {v16.16b - v19.16b}, %2.16b\n\t"
+ :"+w"(result)
+ :"Q"(tab),"w"(idx)
+ :"memory", "v16", "v17", "v18", "v19");
+ return result;
+}
+
+/* V7 legacy table intrinsics. */
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vtbl1_s8 (int8x8_t tab, int8x8_t idx)
+{
+ int8x8_t result;
+ int8x16_t temp = vcombine_s8 (tab, vcreate_s8 (__AARCH64_UINT64_C (0x0)));
+ __asm__ ("tbl %0.8b, {%1.16b}, %2.8b"
+ : "=w"(result)
+ : "w"(temp), "w"(idx)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtbl1_u8 (uint8x8_t tab, uint8x8_t idx)
+{
+ uint8x8_t result;
+ uint8x16_t temp = vcombine_u8 (tab, vcreate_u8 (__AARCH64_UINT64_C (0x0)));
+ __asm__ ("tbl %0.8b, {%1.16b}, %2.8b"
+ : "=w"(result)
+ : "w"(temp), "w"(idx)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vtbl1_p8 (poly8x8_t tab, uint8x8_t idx)
+{
+ poly8x8_t result;
+ poly8x16_t temp = vcombine_p8 (tab, vcreate_p8 (__AARCH64_UINT64_C (0x0)));
+ __asm__ ("tbl %0.8b, {%1.16b}, %2.8b"
+ : "=w"(result)
+ : "w"(temp), "w"(idx)
+ : /* No clobbers */);
+ return result;
+}
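+
+/* Illustrative note (not part of the original header): the ARMv7-style
+   vtbl1 wrappers above emulate the old 8-byte table by widening it to
+   16 bytes with a zero upper half, so an index of 8 or more still returns
+   0, just as it did on ARMv7.  For example, vtbl1_u8 (tab, idx) with
+   idx = {0, 7, 8, 15, 1, 2, 3, 4} gives {tab[0], tab[7], 0, 0, tab[1],
+   tab[2], tab[3], tab[4]}.  */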
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vtbl2_s8 (int8x8x2_t tab, int8x8_t idx)
+{
+ int8x8_t result;
+ int8x16_t temp = vcombine_s8 (tab.val[0], tab.val[1]);
+ __asm__ ("tbl %0.8b, {%1.16b}, %2.8b"
+ : "=w"(result)
+ : "w"(temp), "w"(idx)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtbl2_u8 (uint8x8x2_t tab, uint8x8_t idx)
+{
+ uint8x8_t result;
+ uint8x16_t temp = vcombine_u8 (tab.val[0], tab.val[1]);
+ __asm__ ("tbl %0.8b, {%1.16b}, %2.8b"
+ : "=w"(result)
+ : "w"(temp), "w"(idx)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vtbl2_p8 (poly8x8x2_t tab, uint8x8_t idx)
+{
+ poly8x8_t result;
+ poly8x16_t temp = vcombine_p8 (tab.val[0], tab.val[1]);
+ __asm__ ("tbl %0.8b, {%1.16b}, %2.8b"
+ : "=w"(result)
+ : "w"(temp), "w"(idx)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vtbl3_s8 (int8x8x3_t tab, int8x8_t idx)
+{
+ int8x8_t result;
+ int8x16x2_t temp;
+ temp.val[0] = vcombine_s8 (tab.val[0], tab.val[1]);
+ temp.val[1] = vcombine_s8 (tab.val[2], vcreate_s8 (__AARCH64_UINT64_C (0x0)));
+ __asm__ ("ld1 {v16.16b - v17.16b }, %1\n\t"
+ "tbl %0.8b, {v16.16b - v17.16b}, %2.8b\n\t"
+ : "=w"(result)
+ : "Q"(temp), "w"(idx)
+ : "v16", "v17", "memory");
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtbl3_u8 (uint8x8x3_t tab, uint8x8_t idx)
+{
+ uint8x8_t result;
+ uint8x16x2_t temp;
+ temp.val[0] = vcombine_u8 (tab.val[0], tab.val[1]);
+ temp.val[1] = vcombine_u8 (tab.val[2], vcreate_u8 (__AARCH64_UINT64_C (0x0)));
+ __asm__ ("ld1 {v16.16b - v17.16b }, %1\n\t"
+ "tbl %0.8b, {v16.16b - v17.16b}, %2.8b\n\t"
+ : "=w"(result)
+ : "Q"(temp), "w"(idx)
+ : "v16", "v17", "memory");
+ return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vtbl3_p8 (poly8x8x3_t tab, uint8x8_t idx)
+{
+ poly8x8_t result;
+ poly8x16x2_t temp;
+ temp.val[0] = vcombine_p8 (tab.val[0], tab.val[1]);
+ temp.val[1] = vcombine_p8 (tab.val[2], vcreate_p8 (__AARCH64_UINT64_C (0x0)));
+ __asm__ ("ld1 {v16.16b - v17.16b }, %1\n\t"
+ "tbl %0.8b, {v16.16b - v17.16b}, %2.8b\n\t"
+ : "=w"(result)
+ : "Q"(temp), "w"(idx)
+ : "v16", "v17", "memory");
+ return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vtbl4_s8 (int8x8x4_t tab, int8x8_t idx)
+{
+ int8x8_t result;
+ int8x16x2_t temp;
+ temp.val[0] = vcombine_s8 (tab.val[0], tab.val[1]);
+ temp.val[1] = vcombine_s8 (tab.val[2], tab.val[3]);
+ __asm__ ("ld1 {v16.16b - v17.16b }, %1\n\t"
+ "tbl %0.8b, {v16.16b - v17.16b}, %2.8b\n\t"
+ : "=w"(result)
+ : "Q"(temp), "w"(idx)
+ : "v16", "v17", "memory");
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtbl4_u8 (uint8x8x4_t tab, uint8x8_t idx)
+{
+ uint8x8_t result;
+ uint8x16x2_t temp;
+ temp.val[0] = vcombine_u8 (tab.val[0], tab.val[1]);
+ temp.val[1] = vcombine_u8 (tab.val[2], tab.val[3]);
+ __asm__ ("ld1 {v16.16b - v17.16b }, %1\n\t"
+ "tbl %0.8b, {v16.16b - v17.16b}, %2.8b\n\t"
+ : "=w"(result)
+ : "Q"(temp), "w"(idx)
+ : "v16", "v17", "memory");
+ return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vtbl4_p8 (poly8x8x4_t tab, uint8x8_t idx)
+{
+ poly8x8_t result;
+ poly8x16x2_t temp;
+ temp.val[0] = vcombine_p8 (tab.val[0], tab.val[1]);
+ temp.val[1] = vcombine_p8 (tab.val[2], tab.val[3]);
+ __asm__ ("ld1 {v16.16b - v17.16b }, %1\n\t"
+ "tbl %0.8b, {v16.16b - v17.16b}, %2.8b\n\t"
+ : "=w"(result)
+ : "Q"(temp), "w"(idx)
+ : "v16", "v17", "memory");
+ return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vtbx2_s8 (int8x8_t r, int8x8x2_t tab, int8x8_t idx)
+{
+ int8x8_t result = r;
+ int8x16_t temp = vcombine_s8 (tab.val[0], tab.val[1]);
+ __asm__ ("tbx %0.8b, {%1.16b}, %2.8b"
+ : "+w"(result)
+ : "w"(temp), "w"(idx)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtbx2_u8 (uint8x8_t r, uint8x8x2_t tab, uint8x8_t idx)
+{
+ uint8x8_t result = r;
+ uint8x16_t temp = vcombine_u8 (tab.val[0], tab.val[1]);
+ __asm__ ("tbx %0.8b, {%1.16b}, %2.8b"
+ : "+w"(result)
+ : "w"(temp), "w"(idx)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vtbx2_p8 (poly8x8_t r, poly8x8x2_t tab, uint8x8_t idx)
+{
+ poly8x8_t result = r;
+ poly8x16_t temp = vcombine_p8 (tab.val[0], tab.val[1]);
+ __asm__ ("tbx %0.8b, {%1.16b}, %2.8b"
+ : "+w"(result)
+ : "w"(temp), "w"(idx)
+ : /* No clobbers */);
+ return result;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vtbx4_s8 (int8x8_t r, int8x8x4_t tab, int8x8_t idx)
+{
+ int8x8_t result = r;
+ int8x16x2_t temp;
+ temp.val[0] = vcombine_s8 (tab.val[0], tab.val[1]);
+ temp.val[1] = vcombine_s8 (tab.val[2], tab.val[3]);
+ __asm__ ("ld1 {v16.16b - v17.16b }, %1\n\t"
+ "tbx %0.8b, {v16.16b - v17.16b}, %2.8b\n\t"
+ : "+w"(result)
+ : "Q"(temp), "w"(idx)
+ : "v16", "v17", "memory");
+ return result;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtbx4_u8 (uint8x8_t r, uint8x8x4_t tab, uint8x8_t idx)
+{
+ uint8x8_t result = r;
+ uint8x16x2_t temp;
+ temp.val[0] = vcombine_u8 (tab.val[0], tab.val[1]);
+ temp.val[1] = vcombine_u8 (tab.val[2], tab.val[3]);
+ __asm__ ("ld1 {v16.16b - v17.16b }, %1\n\t"
+ "tbx %0.8b, {v16.16b - v17.16b}, %2.8b\n\t"
+ : "+w"(result)
+ : "Q"(temp), "w"(idx)
+ : "v16", "v17", "memory");
+ return result;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vtbx4_p8 (poly8x8_t r, poly8x8x4_t tab, uint8x8_t idx)
+{
+ poly8x8_t result = r;
+ poly8x16x2_t temp;
+ temp.val[0] = vcombine_p8 (tab.val[0], tab.val[1]);
+ temp.val[1] = vcombine_p8 (tab.val[2], tab.val[3]);
+ __asm__ ("ld1 {v16.16b - v17.16b }, %1\n\t"
+ "tbx %0.8b, {v16.16b - v17.16b}, %2.8b\n\t"
+ : "+w"(result)
+ : "Q"(temp), "w"(idx)
+ : "v16", "v17", "memory");
+ return result;
+}
+
+/* End of temporary inline asm. */
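
As a usage note for the V7 legacy table intrinsics above: on AArch64 they are emulated by fusing the 64-bit table registers into one or two 128-bit tables (via vcombine_*, zero-padded where a half is missing) so a single TBL/TBX covers the whole lookup. A minimal sketch under that reading follows; the remap16 helper is an illustrative name, not part of this header, and vld1_u8 is assumed from the load/store section of arm_neon.h.

#include <arm_neon.h>

/* Illustrative only -- not part of arm_neon.h.  Remap each byte of
   'bytes' (each assumed to be < 16) through a 16-entry table.
   vtbl2_u8 above combines the two 64-bit halves into one 128-bit
   table register and issues a single TBL; out-of-range indices read
   as 0, the V7 rule that the zero padding in vtbl1_*/vtbl3_* keeps.  */
static uint8x8_t
remap16 (const uint8_t table[16], uint8x8_t bytes)
{
  uint8x8x2_t tab;
  tab.val[0] = vld1_u8 (table);
  tab.val[1] = vld1_u8 (table + 8);
  return vtbl2_u8 (tab, bytes);
}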
+
+/* Start of optimal implementations in approved order. */
+
+/* vabs */
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vabs_f32 (float32x2_t __a)
+{
+ return __builtin_aarch64_absv2sf (__a);
+}
+
+__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+vabs_f64 (float64x1_t __a)
+{
+ return __builtin_fabs (__a);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vabs_s8 (int8x8_t __a)
+{
+ return __builtin_aarch64_absv8qi (__a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vabs_s16 (int16x4_t __a)
+{
+ return __builtin_aarch64_absv4hi (__a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vabs_s32 (int32x2_t __a)
+{
+ return __builtin_aarch64_absv2si (__a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vabs_s64 (int64x1_t __a)
+{
+ return __builtin_llabs (__a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vabsq_f32 (float32x4_t __a)
+{
+ return __builtin_aarch64_absv4sf (__a);
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vabsq_f64 (float64x2_t __a)
+{
+ return __builtin_aarch64_absv2df (__a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vabsq_s8 (int8x16_t __a)
+{
+ return __builtin_aarch64_absv16qi (__a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vabsq_s16 (int16x8_t __a)
+{
+ return __builtin_aarch64_absv8hi (__a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vabsq_s32 (int32x4_t __a)
+{
+ return __builtin_aarch64_absv4si (__a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vabsq_s64 (int64x2_t __a)
+{
+ return __builtin_aarch64_absv2di (__a);
+}
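
The vabs* family above is lane-wise; a hedged sketch of the typical streaming use follows. The abs_buffer name is illustrative, n is assumed to be a multiple of 4, and vld1q_s32/vst1q_s32 are the standard load/store intrinsics defined elsewhere in this header.

#include <arm_neon.h>

/* Illustrative only.  Absolute value of n ints, four lanes at a
   time, using vabsq_s32 from above.  */
static void
abs_buffer (int32_t *dst, const int32_t *src, int n)
{
  int i;
  for (i = 0; i < n; i += 4)
    vst1q_s32 (dst + i, vabsq_s32 (vld1q_s32 (src + i)));
}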
+
+/* vadd */
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vaddd_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return __a + __b;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vaddd_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return __a + __b;
+}
+
+/* vaddv */
+
+__extension__ static __inline int8_t __attribute__ ((__always_inline__))
+vaddv_s8 (int8x8_t __a)
+{
+ return vget_lane_s8 (__builtin_aarch64_reduc_splus_v8qi (__a), 0);
+}
+
+__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+vaddv_s16 (int16x4_t __a)
+{
+ return vget_lane_s16 (__builtin_aarch64_reduc_splus_v4hi (__a), 0);
+}
+
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vaddv_s32 (int32x2_t __a)
+{
+ return vget_lane_s32 (__builtin_aarch64_reduc_splus_v2si (__a), 0);
+}
+
+__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
+vaddv_u8 (uint8x8_t __a)
+{
+ return vget_lane_u8 ((uint8x8_t)
+ __builtin_aarch64_reduc_uplus_v8qi ((int8x8_t) __a),
+ 0);
+}
+
+__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+vaddv_u16 (uint16x4_t __a)
+{
+ return vget_lane_u16 ((uint16x4_t)
+ __builtin_aarch64_reduc_uplus_v4hi ((int16x4_t) __a),
+ 0);
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vaddv_u32 (uint32x2_t __a)
+{
+ return vget_lane_u32 ((uint32x2_t)
+ __builtin_aarch64_reduc_uplus_v2si ((int32x2_t) __a),
+ 0);
+}
+
+__extension__ static __inline int8_t __attribute__ ((__always_inline__))
+vaddvq_s8 (int8x16_t __a)
+{
+ return vgetq_lane_s8 (__builtin_aarch64_reduc_splus_v16qi (__a),
+ 0);
+}
+
+__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+vaddvq_s16 (int16x8_t __a)
+{
+ return vgetq_lane_s16 (__builtin_aarch64_reduc_splus_v8hi (__a), 0);
+}
+
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vaddvq_s32 (int32x4_t __a)
+{
+ return vgetq_lane_s32 (__builtin_aarch64_reduc_splus_v4si (__a), 0);
+}
+
+__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+vaddvq_s64 (int64x2_t __a)
+{
+ return vgetq_lane_s64 (__builtin_aarch64_reduc_splus_v2di (__a), 0);
+}
+
+__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
+vaddvq_u8 (uint8x16_t __a)
+{
+ return vgetq_lane_u8 ((uint8x16_t)
+ __builtin_aarch64_reduc_uplus_v16qi ((int8x16_t) __a),
+ 0);
+}
+
+__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+vaddvq_u16 (uint16x8_t __a)
+{
+ return vgetq_lane_u16 ((uint16x8_t)
+ __builtin_aarch64_reduc_uplus_v8hi ((int16x8_t) __a),
+ 0);
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vaddvq_u32 (uint32x4_t __a)
+{
+ return vgetq_lane_u32 ((uint32x4_t)
+ __builtin_aarch64_reduc_uplus_v4si ((int32x4_t) __a),
+ 0);
+}
+
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+vaddvq_u64 (uint64x2_t __a)
+{
+ return vgetq_lane_u64 ((uint64x2_t)
+ __builtin_aarch64_reduc_uplus_v2di ((int64x2_t) __a),
+ 0);
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vaddv_f32 (float32x2_t __a)
+{
+ float32x2_t __t = __builtin_aarch64_reduc_splus_v2sf (__a);
+ return vget_lane_f32 (__t, 0);
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vaddvq_f32 (float32x4_t __a)
+{
+ float32x4_t __t = __builtin_aarch64_reduc_splus_v4sf (__a);
+ return vgetq_lane_f32 (__t, 0);
+}
+
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vaddvq_f64 (float64x2_t __a)
+{
+ float64x2_t __t = __builtin_aarch64_reduc_splus_v2df (__a);
+ return vgetq_lane_f64 (__t, 0);
+}
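
vaddv/vaddvq reduce across all lanes and hand back a scalar, so a horizontal sum needs no shuffle sequence. A hedged sketch follows; sum_u32 is an illustrative name, the length is assumed to be a multiple of 4, and vld1q_u32/vaddq_u32/vdupq_n_u32 come from elsewhere in this header.

#include <arm_neon.h>

/* Illustrative only.  Sum n unsigned 32-bit values: accumulate
   vertically in a vector, then collapse once at the end with the
   across-vector vaddvq_u32 defined above.  */
static uint32_t
sum_u32 (const uint32_t *p, int n)
{
  uint32x4_t acc = vdupq_n_u32 (0);
  int i;
  for (i = 0; i < n; i += 4)
    acc = vaddq_u32 (acc, vld1q_u32 (p + i));
  return vaddvq_u32 (acc);
}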
+
+/* vbsl */
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vbsl_f32 (uint32x2_t __a, float32x2_t __b, float32x2_t __c)
+{
+ return __builtin_aarch64_simd_bslv2sf_suss (__a, __b, __c);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vbsl_p8 (uint8x8_t __a, poly8x8_t __b, poly8x8_t __c)
+{
+ return __builtin_aarch64_simd_bslv8qi_pupp (__a, __b, __c);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vbsl_p16 (uint16x4_t __a, poly16x4_t __b, poly16x4_t __c)
+{
+ return __builtin_aarch64_simd_bslv4hi_pupp (__a, __b, __c);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vbsl_s8 (uint8x8_t __a, int8x8_t __b, int8x8_t __c)
+{
+ return __builtin_aarch64_simd_bslv8qi_suss (__a, __b, __c);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vbsl_s16 (uint16x4_t __a, int16x4_t __b, int16x4_t __c)
+{
+ return __builtin_aarch64_simd_bslv4hi_suss (__a, __b, __c);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vbsl_s32 (uint32x2_t __a, int32x2_t __b, int32x2_t __c)
+{
+ return __builtin_aarch64_simd_bslv2si_suss (__a, __b, __c);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vbsl_s64 (uint64x1_t __a, int64x1_t __b, int64x1_t __c)
+{
+ return __builtin_aarch64_simd_bsldi_suss (__a, __b, __c);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vbsl_u8 (uint8x8_t __a, uint8x8_t __b, uint8x8_t __c)
+{
+ return __builtin_aarch64_simd_bslv8qi_uuuu (__a, __b, __c);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vbsl_u16 (uint16x4_t __a, uint16x4_t __b, uint16x4_t __c)
+{
+ return __builtin_aarch64_simd_bslv4hi_uuuu (__a, __b, __c);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vbsl_u32 (uint32x2_t __a, uint32x2_t __b, uint32x2_t __c)
+{
+ return __builtin_aarch64_simd_bslv2si_uuuu (__a, __b, __c);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vbsl_u64 (uint64x1_t __a, uint64x1_t __b, uint64x1_t __c)
+{
+ return __builtin_aarch64_simd_bsldi_uuuu (__a, __b, __c);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vbslq_f32 (uint32x4_t __a, float32x4_t __b, float32x4_t __c)
+{
+ return __builtin_aarch64_simd_bslv4sf_suss (__a, __b, __c);
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vbslq_f64 (uint64x2_t __a, float64x2_t __b, float64x2_t __c)
+{
+ return __builtin_aarch64_simd_bslv2df_suss (__a, __b, __c);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vbslq_p8 (uint8x16_t __a, poly8x16_t __b, poly8x16_t __c)
+{
+ return __builtin_aarch64_simd_bslv16qi_pupp (__a, __b, __c);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vbslq_p16 (uint16x8_t __a, poly16x8_t __b, poly16x8_t __c)
+{
+ return __builtin_aarch64_simd_bslv8hi_pupp (__a, __b, __c);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vbslq_s8 (uint8x16_t __a, int8x16_t __b, int8x16_t __c)
+{
+ return __builtin_aarch64_simd_bslv16qi_suss (__a, __b, __c);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vbslq_s16 (uint16x8_t __a, int16x8_t __b, int16x8_t __c)
+{
+ return __builtin_aarch64_simd_bslv8hi_suss (__a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vbslq_s32 (uint32x4_t __a, int32x4_t __b, int32x4_t __c)
+{
+ return __builtin_aarch64_simd_bslv4si_suss (__a, __b, __c);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vbslq_s64 (uint64x2_t __a, int64x2_t __b, int64x2_t __c)
+{
+ return __builtin_aarch64_simd_bslv2di_suss (__a, __b, __c);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vbslq_u8 (uint8x16_t __a, uint8x16_t __b, uint8x16_t __c)
+{
+ return __builtin_aarch64_simd_bslv16qi_uuuu (__a, __b, __c);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vbslq_u16 (uint16x8_t __a, uint16x8_t __b, uint16x8_t __c)
+{
+ return __builtin_aarch64_simd_bslv8hi_uuuu (__a, __b, __c);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vbslq_u32 (uint32x4_t __a, uint32x4_t __b, uint32x4_t __c)
+{
+ return __builtin_aarch64_simd_bslv4si_uuuu (__a, __b, __c);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vbslq_u64 (uint64x2_t __a, uint64x2_t __b, uint64x2_t __c)
+{
+ return __builtin_aarch64_simd_bslv2di_uuuu (__a, __b, __c);
+}
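
vbsl* selects individual bits, not whole lanes: each result bit comes from the second operand where the mask bit is 1 and from the third where it is 0. The sketch below uses that to transfer only the sign bit between float vectors; copysign_f32x4 is an illustrative name and vdupq_n_u32 is assumed from elsewhere in this header.

#include <arm_neon.h>

/* Illustrative only.  Per-lane copysign: keep the magnitude bits
   from 'mag' and take just the sign bit from 'sign', by selecting
   with a 0x80000000 mask through vbslq_f32 above.  */
static float32x4_t
copysign_f32x4 (float32x4_t mag, float32x4_t sign)
{
  uint32x4_t sign_bit = vdupq_n_u32 (0x80000000u);
  return vbslq_f32 (sign_bit, sign, mag);
}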
+
+#ifdef __ARM_FEATURE_CRYPTO
+
+/* vaes */
+
+static __inline uint8x16_t
+vaeseq_u8 (uint8x16_t data, uint8x16_t key)
+{
+ return __builtin_aarch64_crypto_aesev16qi_uuu (data, key);
+}
+
+static __inline uint8x16_t
+vaesdq_u8 (uint8x16_t data, uint8x16_t key)
+{
+ return __builtin_aarch64_crypto_aesdv16qi_uuu (data, key);
+}
+
+static __inline uint8x16_t
+vaesmcq_u8 (uint8x16_t data)
+{
+ return __builtin_aarch64_crypto_aesmcv16qi_uu (data);
+}
+
+static __inline uint8x16_t
+vaesimcq_u8 (uint8x16_t data)
+{
+ return __builtin_aarch64_crypto_aesimcv16qi_uu (data);
+}
+
+#endif
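
The four crypto intrinsics above map onto the AESE/AESD/AESMC/AESIMC instructions and are only available when the crypto extension is enabled (e.g. -march=armv8-a+crypto). A hedged sketch of the usual pairing for one non-final AES encryption round follows; aes_middle_round is an illustrative name and key scheduling is out of scope here.

#include <arm_neon.h>

#ifdef __ARM_FEATURE_CRYPTO
/* Illustrative only.  One middle AES-128 encryption round:
   vaeseq_u8 performs AddRoundKey + SubBytes + ShiftRows, and
   vaesmcq_u8 applies MixColumns.  The final round would omit the
   MixColumns step.  */
static uint8x16_t
aes_middle_round (uint8x16_t state, uint8x16_t round_key)
{
  return vaesmcq_u8 (vaeseq_u8 (state, round_key));
}
#endif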
+
+/* vcage */
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vcages_f32 (float32_t __a, float32_t __b)
+{
+ return __builtin_fabsf (__a) >= __builtin_fabsf (__b) ? -1 : 0;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcage_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return vabs_f32 (__a) >= vabs_f32 (__b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcageq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return vabsq_f32 (__a) >= vabsq_f32 (__b);
+}
+
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+vcaged_f64 (float64_t __a, float64_t __b)
+{
+ return __builtin_fabs (__a) >= __builtin_fabs (__b) ? -1 : 0;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcageq_f64 (float64x2_t __a, float64x2_t __b)
+{
+ return vabsq_f64 (__a) >= vabsq_f64 (__b);
+}
+
+/* vcagt */
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vcagts_f32 (float32_t __a, float32_t __b)
+{
+ return __builtin_fabsf (__a) > __builtin_fabsf (__b) ? -1 : 0;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcagt_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return vabs_f32 (__a) > vabs_f32 (__b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcagtq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return vabsq_f32 (__a) > vabsq_f32 (__b);
+}
+
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+vcagtd_f64 (float64_t __a, float64_t __b)
+{
+ return __builtin_fabs (__a) > __builtin_fabs (__b) ? -1 : 0;
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcagtq_f64 (float64x2_t __a, float64x2_t __b)
+{
+ return vabsq_f64 (__a) > vabsq_f64 (__b);
+}
+
+/* vcale */
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcale_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return vabs_f32 (__a) <= vabs_f32 (__b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcaleq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return vabsq_f32 (__a) <= vabsq_f32 (__b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcaleq_f64 (float64x2_t __a, float64x2_t __b)
+{
+ return vabsq_f64 (__a) <= vabsq_f64 (__b);
+}
+
+/* vcalt */
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcalt_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return vabs_f32 (__a) < vabs_f32 (__b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcaltq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return vabsq_f32 (__a) < vabsq_f32 (__b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcaltq_f64 (float64x2_t __a, float64x2_t __b)
+{
+ return vabsq_f64 (__a) < vabsq_f64 (__b);
+}
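
The vca* comparisons work on magnitudes (|a| against |b|), so a symmetric range check needs no separate vabs step. A hedged sketch follows; zero_out_of_range is an illustrative name, and vdupq_n_f32 plus the vbslq_f32 defined earlier are assumed.

#include <arm_neon.h>

/* Illustrative only.  Keep lanes whose magnitude is within 'bound'
   (compared per lane as |x| <= |bound| via vcaleq_f32 above) and
   zero the rest.  */
static float32x4_t
zero_out_of_range (float32x4_t x, float32x4_t bound)
{
  uint32x4_t ok = vcaleq_f32 (x, bound);
  return vbslq_f32 (ok, x, vdupq_n_f32 (0.0f));
}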
+
+/* vceq - vector. */
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vceq_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (uint32x2_t) __builtin_aarch64_cmeqv2sf (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vceq_f64 (float64x1_t __a, float64x1_t __b)
+{
+ return __a == __b ? -1ll : 0ll;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vceq_p8 (poly8x8_t __a, poly8x8_t __b)
+{
+ return (uint8x8_t) __builtin_aarch64_cmeqv8qi ((int8x8_t) __a,
+ (int8x8_t) __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vceq_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (uint8x8_t) __builtin_aarch64_cmeqv8qi (__a, __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vceq_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (uint16x4_t) __builtin_aarch64_cmeqv4hi (__a, __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vceq_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (uint32x2_t) __builtin_aarch64_cmeqv2si (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vceq_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return __a == __b ? -1ll : 0ll;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vceq_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t) __builtin_aarch64_cmeqv8qi ((int8x8_t) __a,
+ (int8x8_t) __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vceq_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t) __builtin_aarch64_cmeqv4hi ((int16x4_t) __a,
+ (int16x4_t) __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vceq_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t) __builtin_aarch64_cmeqv2si ((int32x2_t) __a,
+ (int32x2_t) __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vceq_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return __a == __b ? -1ll : 0ll;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vceqq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return (uint32x4_t) __builtin_aarch64_cmeqv4sf (__a, __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vceqq_f64 (float64x2_t __a, float64x2_t __b)
+{
+ return (uint64x2_t) __builtin_aarch64_cmeqv2df (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vceqq_p8 (poly8x16_t __a, poly8x16_t __b)
+{
+ return (uint8x16_t) __builtin_aarch64_cmeqv16qi ((int8x16_t) __a,
+ (int8x16_t) __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vceqq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (uint8x16_t) __builtin_aarch64_cmeqv16qi (__a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vceqq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (uint16x8_t) __builtin_aarch64_cmeqv8hi (__a, __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vceqq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (uint32x4_t) __builtin_aarch64_cmeqv4si (__a, __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vceqq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (uint64x2_t) __builtin_aarch64_cmeqv2di (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vceqq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t) __builtin_aarch64_cmeqv16qi ((int8x16_t) __a,
+ (int8x16_t) __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vceqq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t) __builtin_aarch64_cmeqv8hi ((int16x8_t) __a,
+ (int16x8_t) __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vceqq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t) __builtin_aarch64_cmeqv4si ((int32x4_t) __a,
+ (int32x4_t) __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vceqq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return (uint64x2_t) __builtin_aarch64_cmeqv2di ((int64x2_t) __a,
+ (int64x2_t) __b);
+}
+
+/* vceq - scalar. */
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vceqs_f32 (float32_t __a, float32_t __b)
+{
+ return __a == __b ? -1 : 0;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vceqd_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return __a == __b ? -1ll : 0ll;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vceqd_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return __a == __b ? -1ll : 0ll;
+}
+
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+vceqd_f64 (float64_t __a, float64_t __b)
+{
+ return __a == __b ? -1ll : 0ll;
+}
+
+/* vceqz - vector. */
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vceqz_f32 (float32x2_t __a)
+{
+ float32x2_t __b = {0.0f, 0.0f};
+ return (uint32x2_t) __builtin_aarch64_cmeqv2sf (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vceqz_f64 (float64x1_t __a)
+{
+ return __a == 0.0 ? -1ll : 0ll;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vceqz_p8 (poly8x8_t __a)
+{
+ poly8x8_t __b = {0, 0, 0, 0, 0, 0, 0, 0};
+ return (uint8x8_t) __builtin_aarch64_cmeqv8qi ((int8x8_t) __a,
+ (int8x8_t) __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vceqz_s8 (int8x8_t __a)
+{
+ int8x8_t __b = {0, 0, 0, 0, 0, 0, 0, 0};
+ return (uint8x8_t) __builtin_aarch64_cmeqv8qi (__a, __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vceqz_s16 (int16x4_t __a)
+{
+ int16x4_t __b = {0, 0, 0, 0};
+ return (uint16x4_t) __builtin_aarch64_cmeqv4hi (__a, __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vceqz_s32 (int32x2_t __a)
+{
+ int32x2_t __b = {0, 0};
+ return (uint32x2_t) __builtin_aarch64_cmeqv2si (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vceqz_s64 (int64x1_t __a)
+{
+ return __a == 0ll ? -1ll : 0ll;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vceqz_u8 (uint8x8_t __a)
+{
+ uint8x8_t __b = {0, 0, 0, 0, 0, 0, 0, 0};
+ return (uint8x8_t) __builtin_aarch64_cmeqv8qi ((int8x8_t) __a,
+ (int8x8_t) __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vceqz_u16 (uint16x4_t __a)
+{
+ uint16x4_t __b = {0, 0, 0, 0};
+ return (uint16x4_t) __builtin_aarch64_cmeqv4hi ((int16x4_t) __a,
+ (int16x4_t) __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vceqz_u32 (uint32x2_t __a)
+{
+ uint32x2_t __b = {0, 0};
+ return (uint32x2_t) __builtin_aarch64_cmeqv2si ((int32x2_t) __a,
+ (int32x2_t) __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vceqz_u64 (uint64x1_t __a)
+{
+ return __a == 0ll ? -1ll : 0ll;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vceqzq_f32 (float32x4_t __a)
+{
+ float32x4_t __b = {0.0f, 0.0f, 0.0f, 0.0f};
+ return (uint32x4_t) __builtin_aarch64_cmeqv4sf (__a, __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vceqzq_f64 (float64x2_t __a)
+{
+ float64x2_t __b = {0.0, 0.0};
+ return (uint64x2_t) __builtin_aarch64_cmeqv2df (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vceqzq_p8 (poly8x16_t __a)
+{
+ poly8x16_t __b = {0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0};
+ return (uint8x16_t) __builtin_aarch64_cmeqv16qi ((int8x16_t) __a,
+ (int8x16_t) __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vceqzq_s8 (int8x16_t __a)
+{
+ int8x16_t __b = {0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0};
+ return (uint8x16_t) __builtin_aarch64_cmeqv16qi (__a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vceqzq_s16 (int16x8_t __a)
+{
+ int16x8_t __b = {0, 0, 0, 0, 0, 0, 0, 0};
+ return (uint16x8_t) __builtin_aarch64_cmeqv8hi (__a, __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vceqzq_s32 (int32x4_t __a)
+{
+ int32x4_t __b = {0, 0, 0, 0};
+ return (uint32x4_t) __builtin_aarch64_cmeqv4si (__a, __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vceqzq_s64 (int64x2_t __a)
+{
+ int64x2_t __b = {0, 0};
+ return (uint64x2_t) __builtin_aarch64_cmeqv2di (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vceqzq_u8 (uint8x16_t __a)
+{
+ uint8x16_t __b = {0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0};
+ return (uint8x16_t) __builtin_aarch64_cmeqv16qi ((int8x16_t) __a,
+ (int8x16_t) __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vceqzq_u16 (uint16x8_t __a)
+{
+ uint16x8_t __b = {0, 0, 0, 0, 0, 0, 0, 0};
+ return (uint16x8_t) __builtin_aarch64_cmeqv8hi ((int16x8_t) __a,
+ (int16x8_t) __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vceqzq_u32 (uint32x4_t __a)
+{
+ uint32x4_t __b = {0, 0, 0, 0};
+ return (uint32x4_t) __builtin_aarch64_cmeqv4si ((int32x4_t) __a,
+ (int32x4_t) __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vceqzq_u64 (uint64x2_t __a)
+{
+ uint64x2_t __b = {0, 0};
+ return (uint64x2_t) __builtin_aarch64_cmeqv2di ((int64x2_t) __a,
+ (int64x2_t) __b);
+}
+
+/* vceqz - scalar. */
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vceqzs_f32 (float32_t __a)
+{
+ return __a == 0.0f ? -1 : 0;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vceqzd_s64 (int64x1_t __a)
+{
+ return __a == 0 ? -1ll : 0ll;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vceqzd_u64 (int64x1_t __a)
+{
+ return __a == 0 ? -1ll : 0ll;
+}
+
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+vceqzd_f64 (float64_t __a)
+{
+ return __a == 0.0 ? -1ll : 0ll;
+}
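
All the vceq/vceqz forms return all-ones or all-zeros per lane rather than a 0/1 boolean, so a lane mask usually needs one more step before it can be counted. A hedged sketch: count matching bytes by masking each lane down to 1 and reducing with vaddvq_u8; count_equal_bytes is an illustrative name, and vandq_u8/vdupq_n_u8 come from elsewhere in this header.

#include <arm_neon.h>

/* Illustrative only.  Number of byte lanes where a and b match
   (0..16).  The vceqq_u8 mask holds 0xFF per match; AND it with 1
   before the across-vector add so the uint8_t result cannot wrap.  */
static int
count_equal_bytes (uint8x16_t a, uint8x16_t b)
{
  uint8x16_t eq = vceqq_u8 (a, b);
  uint8x16_t ones = vandq_u8 (eq, vdupq_n_u8 (1));
  return (int) vaddvq_u8 (ones);
}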
+
+/* vcge - vector. */
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcge_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (uint32x2_t) __builtin_aarch64_cmgev2sf (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcge_f64 (float64x1_t __a, float64x1_t __b)
+{
+ return __a >= __b ? -1ll : 0ll;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vcge_p8 (poly8x8_t __a, poly8x8_t __b)
+{
+ return (uint8x8_t) __builtin_aarch64_cmgev8qi ((int8x8_t) __a,
+ (int8x8_t) __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vcge_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (uint8x8_t) __builtin_aarch64_cmgev8qi (__a, __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vcge_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (uint16x4_t) __builtin_aarch64_cmgev4hi (__a, __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcge_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (uint32x2_t) __builtin_aarch64_cmgev2si (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcge_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return __a >= __b ? -1ll : 0ll;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vcge_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t) __builtin_aarch64_cmgeuv8qi ((int8x8_t) __a,
+ (int8x8_t) __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vcge_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t) __builtin_aarch64_cmgeuv4hi ((int16x4_t) __a,
+ (int16x4_t) __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcge_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t) __builtin_aarch64_cmgeuv2si ((int32x2_t) __a,
+ (int32x2_t) __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcge_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return __a >= __b ? -1ll : 0ll;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcgeq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return (uint32x4_t) __builtin_aarch64_cmgev4sf (__a, __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcgeq_f64 (float64x2_t __a, float64x2_t __b)
+{
+ return (uint64x2_t) __builtin_aarch64_cmgev2df (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcgeq_p8 (poly8x16_t __a, poly8x16_t __b)
+{
+ return (uint8x16_t) __builtin_aarch64_cmgev16qi ((int8x16_t) __a,
+ (int8x16_t) __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcgeq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (uint8x16_t) __builtin_aarch64_cmgev16qi (__a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vcgeq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (uint16x8_t) __builtin_aarch64_cmgev8hi (__a, __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcgeq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (uint32x4_t) __builtin_aarch64_cmgev4si (__a, __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcgeq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (uint64x2_t) __builtin_aarch64_cmgev2di (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcgeq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t) __builtin_aarch64_cmgeuv16qi ((int8x16_t) __a,
+ (int8x16_t) __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vcgeq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t) __builtin_aarch64_cmgeuv8hi ((int16x8_t) __a,
+ (int16x8_t) __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcgeq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t) __builtin_aarch64_cmgeuv4si ((int32x4_t) __a,
+ (int32x4_t) __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcgeq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return (uint64x2_t) __builtin_aarch64_cmgeuv2di ((int64x2_t) __a,
+ (int64x2_t) __b);
+}
+
+/* vcge - scalar. */
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vcges_f32 (float32_t __a, float32_t __b)
+{
+ return __a >= __b ? -1 : 0;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcged_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return __a >= __b ? -1ll : 0ll;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcged_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return __a >= __b ? -1ll : 0ll;
+}
+
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+vcged_f64 (float64_t __a, float64_t __b)
+{
+ return __a >= __b ? -1ll : 0ll;
+}
+
+/* vcgez - vector. */
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcgez_f32 (float32x2_t __a)
+{
+ float32x2_t __b = {0.0f, 0.0f};
+ return (uint32x2_t) __builtin_aarch64_cmgev2sf (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcgez_f64 (float64x1_t __a)
+{
+ return __a >= 0.0 ? -1ll : 0ll;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vcgez_p8 (poly8x8_t __a)
+{
+ poly8x8_t __b = {0, 0, 0, 0, 0, 0, 0, 0};
+ return (uint8x8_t) __builtin_aarch64_cmgev8qi ((int8x8_t) __a,
+ (int8x8_t) __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vcgez_s8 (int8x8_t __a)
+{
+ int8x8_t __b = {0, 0, 0, 0, 0, 0, 0, 0};
+ return (uint8x8_t) __builtin_aarch64_cmgev8qi (__a, __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vcgez_s16 (int16x4_t __a)
+{
+ int16x4_t __b = {0, 0, 0, 0};
+ return (uint16x4_t) __builtin_aarch64_cmgev4hi (__a, __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcgez_s32 (int32x2_t __a)
+{
+ int32x2_t __b = {0, 0};
+ return (uint32x2_t) __builtin_aarch64_cmgev2si (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcgez_s64 (int64x1_t __a)
+{
+ return __a >= 0ll ? -1ll : 0ll;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vcgez_u8 (uint8x8_t __a)
+{
+ uint8x8_t __b = {0, 0, 0, 0, 0, 0, 0, 0};
+ return (uint8x8_t) __builtin_aarch64_cmgeuv8qi ((int8x8_t) __a,
+ (int8x8_t) __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vcgez_u16 (uint16x4_t __a)
+{
+ uint16x4_t __b = {0, 0, 0, 0};
+ return (uint16x4_t) __builtin_aarch64_cmgeuv4hi ((int16x4_t) __a,
+ (int16x4_t) __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcgez_u32 (uint32x2_t __a)
+{
+ uint32x2_t __b = {0, 0};
+ return (uint32x2_t) __builtin_aarch64_cmgeuv2si ((int32x2_t) __a,
+ (int32x2_t) __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcgez_u64 (uint64x1_t __a)
+{
+ return __a >= 0ll ? -1ll : 0ll;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcgezq_f32 (float32x4_t __a)
+{
+ float32x4_t __b = {0.0f, 0.0f, 0.0f, 0.0f};
+ return (uint32x4_t) __builtin_aarch64_cmgev4sf (__a, __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcgezq_f64 (float64x2_t __a)
+{
+ float64x2_t __b = {0.0, 0.0};
+ return (uint64x2_t) __builtin_aarch64_cmgev2df (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcgezq_p8 (poly8x16_t __a)
+{
+ poly8x16_t __b = {0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0};
+ return (uint8x16_t) __builtin_aarch64_cmgev16qi ((int8x16_t) __a,
+ (int8x16_t) __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcgezq_s8 (int8x16_t __a)
+{
+ int8x16_t __b = {0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0};
+ return (uint8x16_t) __builtin_aarch64_cmgev16qi (__a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vcgezq_s16 (int16x8_t __a)
+{
+ int16x8_t __b = {0, 0, 0, 0, 0, 0, 0, 0};
+ return (uint16x8_t) __builtin_aarch64_cmgev8hi (__a, __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcgezq_s32 (int32x4_t __a)
+{
+ int32x4_t __b = {0, 0, 0, 0};
+ return (uint32x4_t) __builtin_aarch64_cmgev4si (__a, __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcgezq_s64 (int64x2_t __a)
+{
+ int64x2_t __b = {0, 0};
+ return (uint64x2_t) __builtin_aarch64_cmgev2di (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcgezq_u8 (uint8x16_t __a)
+{
+ uint8x16_t __b = {0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0};
+ return (uint8x16_t) __builtin_aarch64_cmgeuv16qi ((int8x16_t) __a,
+ (int8x16_t) __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vcgezq_u16 (uint16x8_t __a)
+{
+ uint16x8_t __b = {0, 0, 0, 0, 0, 0, 0, 0};
+ return (uint16x8_t) __builtin_aarch64_cmgeuv8hi ((int16x8_t) __a,
+ (int16x8_t) __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcgezq_u32 (uint32x4_t __a)
+{
+ uint32x4_t __b = {0, 0, 0, 0};
+ return (uint32x4_t) __builtin_aarch64_cmgeuv4si ((int32x4_t) __a,
+ (int32x4_t) __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcgezq_u64 (uint64x2_t __a)
+{
+ uint64x2_t __b = {0, 0};
+ return (uint64x2_t) __builtin_aarch64_cmgeuv2di ((int64x2_t) __a,
+ (int64x2_t) __b);
+}
+
+/* vcgez - scalar. */
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vcgezs_f32 (float32_t __a)
+{
+ return __a >= 0.0f ? -1 : 0;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcgezd_s64 (int64x1_t __a)
+{
+ return __a >= 0 ? -1ll : 0ll;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcgezd_u64 (int64x1_t __a)
+{
+ return __a >= 0 ? -1ll : 0ll;
+}
+
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+vcgezd_f64 (float64_t __a)
+{
+ return __a >= 0.0 ? -1ll : 0ll;
+}
+
+/* vcgt - vector. */
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcgt_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (uint32x2_t) __builtin_aarch64_cmgtv2sf (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcgt_f64 (float64x1_t __a, float64x1_t __b)
+{
+ return __a > __b ? -1ll : 0ll;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vcgt_p8 (poly8x8_t __a, poly8x8_t __b)
+{
+ return (uint8x8_t) __builtin_aarch64_cmgtv8qi ((int8x8_t) __a,
+ (int8x8_t) __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vcgt_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (uint8x8_t) __builtin_aarch64_cmgtv8qi (__a, __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vcgt_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (uint16x4_t) __builtin_aarch64_cmgtv4hi (__a, __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcgt_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (uint32x2_t) __builtin_aarch64_cmgtv2si (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcgt_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return __a > __b ? -1ll : 0ll;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vcgt_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t) __builtin_aarch64_cmgtuv8qi ((int8x8_t) __a,
+ (int8x8_t) __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vcgt_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t) __builtin_aarch64_cmgtuv4hi ((int16x4_t) __a,
+ (int16x4_t) __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcgt_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t) __builtin_aarch64_cmgtuv2si ((int32x2_t) __a,
+ (int32x2_t) __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcgt_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return __a > __b ? -1ll : 0ll;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcgtq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return (uint32x4_t) __builtin_aarch64_cmgtv4sf (__a, __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcgtq_f64 (float64x2_t __a, float64x2_t __b)
+{
+ return (uint64x2_t) __builtin_aarch64_cmgtv2df (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcgtq_p8 (poly8x16_t __a, poly8x16_t __b)
+{
+ return (uint8x16_t) __builtin_aarch64_cmgtv16qi ((int8x16_t) __a,
+ (int8x16_t) __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcgtq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (uint8x16_t) __builtin_aarch64_cmgtv16qi (__a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vcgtq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (uint16x8_t) __builtin_aarch64_cmgtv8hi (__a, __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcgtq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (uint32x4_t) __builtin_aarch64_cmgtv4si (__a, __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcgtq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (uint64x2_t) __builtin_aarch64_cmgtv2di (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcgtq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t) __builtin_aarch64_cmgtuv16qi ((int8x16_t) __a,
+ (int8x16_t) __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vcgtq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t) __builtin_aarch64_cmgtuv8hi ((int16x8_t) __a,
+ (int16x8_t) __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcgtq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t) __builtin_aarch64_cmgtuv4si ((int32x4_t) __a,
+ (int32x4_t) __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcgtq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return (uint64x2_t) __builtin_aarch64_cmgtuv2di ((int64x2_t) __a,
+ (int64x2_t) __b);
+}
+
+/* vcgt - scalar. */
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vcgts_f32 (float32_t __a, float32_t __b)
+{
+ return __a > __b ? -1 : 0;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcgtd_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return __a > __b ? -1ll : 0ll;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcgtd_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return __a > __b ? -1ll : 0ll;
+}
+
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+vcgtd_f64 (float64_t __a, float64_t __b)
+{
+ return __a > __b ? -1ll : 0ll;
+}
+
+/* vcgtz - vector. */
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcgtz_f32 (float32x2_t __a)
+{
+ float32x2_t __b = {0.0f, 0.0f};
+ return (uint32x2_t) __builtin_aarch64_cmgtv2sf (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcgtz_f64 (float64x1_t __a)
+{
+ return __a > 0.0 ? -1ll : 0ll;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vcgtz_p8 (poly8x8_t __a)
+{
+ poly8x8_t __b = {0, 0, 0, 0, 0, 0, 0, 0};
+ return (uint8x8_t) __builtin_aarch64_cmgtv8qi ((int8x8_t) __a,
+ (int8x8_t) __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vcgtz_s8 (int8x8_t __a)
+{
+ int8x8_t __b = {0, 0, 0, 0, 0, 0, 0, 0};
+ return (uint8x8_t) __builtin_aarch64_cmgtv8qi (__a, __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vcgtz_s16 (int16x4_t __a)
+{
+ int16x4_t __b = {0, 0, 0, 0};
+ return (uint16x4_t) __builtin_aarch64_cmgtv4hi (__a, __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcgtz_s32 (int32x2_t __a)
+{
+ int32x2_t __b = {0, 0};
+ return (uint32x2_t) __builtin_aarch64_cmgtv2si (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcgtz_s64 (int64x1_t __a)
+{
+ return __a > 0ll ? -1ll : 0ll;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vcgtz_u8 (uint8x8_t __a)
+{
+ uint8x8_t __b = {0, 0, 0, 0, 0, 0, 0, 0};
+ return (uint8x8_t) __builtin_aarch64_cmgtuv8qi ((int8x8_t) __a,
+ (int8x8_t) __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vcgtz_u16 (uint16x4_t __a)
+{
+ uint16x4_t __b = {0, 0, 0, 0};
+ return (uint16x4_t) __builtin_aarch64_cmgtuv4hi ((int16x4_t) __a,
+ (int16x4_t) __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcgtz_u32 (uint32x2_t __a)
+{
+ uint32x2_t __b = {0, 0};
+ return (uint32x2_t) __builtin_aarch64_cmgtuv2si ((int32x2_t) __a,
+ (int32x2_t) __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcgtz_u64 (uint64x1_t __a)
+{
+ return __a > 0ll ? -1ll : 0ll;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcgtzq_f32 (float32x4_t __a)
+{
+ float32x4_t __b = {0.0f, 0.0f, 0.0f, 0.0f};
+ return (uint32x4_t) __builtin_aarch64_cmgtv4sf (__a, __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcgtzq_f64 (float64x2_t __a)
+{
+ float64x2_t __b = {0.0, 0.0};
+ return (uint64x2_t) __builtin_aarch64_cmgtv2df (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcgtzq_p8 (poly8x16_t __a)
+{
+ poly8x16_t __b = {0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0};
+ return (uint8x16_t) __builtin_aarch64_cmgtv16qi ((int8x16_t) __a,
+ (int8x16_t) __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcgtzq_s8 (int8x16_t __a)
+{
+ int8x16_t __b = {0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0};
+ return (uint8x16_t) __builtin_aarch64_cmgtv16qi (__a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vcgtzq_s16 (int16x8_t __a)
+{
+ int16x8_t __b = {0, 0, 0, 0, 0, 0, 0, 0};
+ return (uint16x8_t) __builtin_aarch64_cmgtv8hi (__a, __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcgtzq_s32 (int32x4_t __a)
+{
+ int32x4_t __b = {0, 0, 0, 0};
+ return (uint32x4_t) __builtin_aarch64_cmgtv4si (__a, __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcgtzq_s64 (int64x2_t __a)
+{
+ int64x2_t __b = {0, 0};
+ return (uint64x2_t) __builtin_aarch64_cmgtv2di (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcgtzq_u8 (uint8x16_t __a)
+{
+ uint8x16_t __b = {0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0};
+ return (uint8x16_t) __builtin_aarch64_cmgtuv16qi ((int8x16_t) __a,
+ (int8x16_t) __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vcgtzq_u16 (uint16x8_t __a)
+{
+ uint16x8_t __b = {0, 0, 0, 0, 0, 0, 0, 0};
+ return (uint16x8_t) __builtin_aarch64_cmgtuv8hi ((int16x8_t) __a,
+ (int16x8_t) __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcgtzq_u32 (uint32x4_t __a)
+{
+ uint32x4_t __b = {0, 0, 0, 0};
+ return (uint32x4_t) __builtin_aarch64_cmgtuv4si ((int32x4_t) __a,
+ (int32x4_t) __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcgtzq_u64 (uint64x2_t __a)
+{
+ uint64x2_t __b = {0, 0};
+ return (uint64x2_t) __builtin_aarch64_cmgtuv2di ((int64x2_t) __a,
+ (int64x2_t) __b);
+}
+
+/* vcgtz - scalar. */
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vcgtzs_f32 (float32_t __a)
+{
+ return __a > 0.0f ? -1 : 0;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcgtzd_s64 (int64x1_t __a)
+{
+ return __a > 0 ? -1ll : 0ll;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcgtzd_u64 (int64x1_t __a)
+{
+ return __a > 0 ? -1ll : 0ll;
+}
+
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+vcgtzd_f64 (float64_t __a)
+{
+ return __a > 0.0 ? -1ll : 0ll;
+}
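
Combining a compare-against-zero mask with vbsl gives branch-free per-lane selection. A hedged sketch of the common max(x, 0) pattern built on vcgtzq_f32 above follows; relu_f32x4 is an illustrative name and vdupq_n_f32 is assumed from elsewhere in this header.

#include <arm_neon.h>

/* Illustrative only.  Per-lane max(x, 0): vcgtzq_f32 above yields an
   all-ones mask where x > 0, and vbslq_f32 keeps x there and inserts
   0.0f elsewhere.  */
static float32x4_t
relu_f32x4 (float32x4_t x)
{
  uint32x4_t pos = vcgtzq_f32 (x);
  return vbslq_f32 (pos, x, vdupq_n_f32 (0.0f));
}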
+
+/* vcle - vector. */
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcle_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (uint32x2_t) __builtin_aarch64_cmgev2sf (__b, __a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcle_f64 (float64x1_t __a, float64x1_t __b)
+{
+ return __a <= __b ? -1ll : 0ll;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vcle_p8 (poly8x8_t __a, poly8x8_t __b)
+{
+ return (uint8x8_t) __builtin_aarch64_cmgev8qi ((int8x8_t) __b,
+ (int8x8_t) __a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vcle_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (uint8x8_t) __builtin_aarch64_cmgev8qi (__b, __a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vcle_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (uint16x4_t) __builtin_aarch64_cmgev4hi (__b, __a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcle_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (uint32x2_t) __builtin_aarch64_cmgev2si (__b, __a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcle_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return __a <= __b ? -1ll : 0ll;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vcle_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t) __builtin_aarch64_cmgeuv8qi ((int8x8_t) __b,
+ (int8x8_t) __a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vcle_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t) __builtin_aarch64_cmgeuv4hi ((int16x4_t) __b,
+ (int16x4_t) __a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcle_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t) __builtin_aarch64_cmgeuv2si ((int32x2_t) __b,
+ (int32x2_t) __a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcle_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return __a <= __b ? -1ll : 0ll;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcleq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return (uint32x4_t) __builtin_aarch64_cmgev4sf (__b, __a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcleq_f64 (float64x2_t __a, float64x2_t __b)
+{
+ return (uint64x2_t) __builtin_aarch64_cmgev2df (__b, __a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcleq_p8 (poly8x16_t __a, poly8x16_t __b)
+{
+ return (uint8x16_t) __builtin_aarch64_cmgev16qi ((int8x16_t) __b,
+ (int8x16_t) __a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcleq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (uint8x16_t) __builtin_aarch64_cmgev16qi (__b, __a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vcleq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (uint16x8_t) __builtin_aarch64_cmgev8hi (__b, __a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcleq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (uint32x4_t) __builtin_aarch64_cmgev4si (__b, __a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcleq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (uint64x2_t) __builtin_aarch64_cmgev2di (__b, __a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcleq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t) __builtin_aarch64_cmgeuv16qi ((int8x16_t) __b,
+ (int8x16_t) __a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vcleq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t) __builtin_aarch64_cmgeuv8hi ((int16x8_t) __b,
+ (int16x8_t) __a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcleq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t) __builtin_aarch64_cmgeuv4si ((int32x4_t) __b,
+ (int32x4_t) __a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcleq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return (uint64x2_t) __builtin_aarch64_cmgeuv2di ((int64x2_t) __b,
+ (int64x2_t) __a);
+}
+
+/* vcle - scalar. */
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vcles_f32 (float32_t __a, float32_t __b)
+{
+ return __a <= __b ? -1 : 0;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcled_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return __a <= __b ? -1ll : 0ll;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcled_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return __a <= __b ? -1ll : 0ll;
+}
+
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+vcled_f64 (float64_t __a, float64_t __b)
+{
+ return __a <= __b ? -1ll : 0ll;
+}
+
+/* vclez - vector. */
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vclez_f32 (float32x2_t __a)
+{
+ float32x2_t __b = {0.0f, 0.0f};
+ return (uint32x2_t) __builtin_aarch64_cmlev2sf (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vclez_f64 (float64x1_t __a)
+{
+ return __a <= 0.0 ? -1ll : 0ll;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vclez_p8 (poly8x8_t __a)
+{
+ poly8x8_t __b = {0, 0, 0, 0, 0, 0, 0, 0};
+ return (uint8x8_t) __builtin_aarch64_cmlev8qi ((int8x8_t) __a,
+ (int8x8_t) __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vclez_s8 (int8x8_t __a)
+{
+ int8x8_t __b = {0, 0, 0, 0, 0, 0, 0, 0};
+ return (uint8x8_t) __builtin_aarch64_cmlev8qi (__a, __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vclez_s16 (int16x4_t __a)
+{
+ int16x4_t __b = {0, 0, 0, 0};
+ return (uint16x4_t) __builtin_aarch64_cmlev4hi (__a, __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vclez_s32 (int32x2_t __a)
+{
+ int32x2_t __b = {0, 0};
+ return (uint32x2_t) __builtin_aarch64_cmlev2si (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vclez_s64 (int64x1_t __a)
+{
+ return __a <= 0ll ? -1ll : 0ll;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vclez_u64 (uint64x1_t __a)
+{
+ return __a <= 0ll ? -1ll : 0ll;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vclezq_f32 (float32x4_t __a)
+{
+ float32x4_t __b = {0.0f, 0.0f, 0.0f, 0.0f};
+ return (uint32x4_t) __builtin_aarch64_cmlev4sf (__a, __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vclezq_f64 (float64x2_t __a)
+{
+ float64x2_t __b = {0.0, 0.0};
+ return (uint64x2_t) __builtin_aarch64_cmlev2df (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vclezq_p8 (poly8x16_t __a)
+{
+ poly8x16_t __b = {0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0};
+ return (uint8x16_t) __builtin_aarch64_cmlev16qi ((int8x16_t) __a,
+ (int8x16_t) __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vclezq_s8 (int8x16_t __a)
+{
+ int8x16_t __b = {0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0};
+ return (uint8x16_t) __builtin_aarch64_cmlev16qi (__a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vclezq_s16 (int16x8_t __a)
+{
+ int16x8_t __b = {0, 0, 0, 0, 0, 0, 0, 0};
+ return (uint16x8_t) __builtin_aarch64_cmlev8hi (__a, __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vclezq_s32 (int32x4_t __a)
+{
+ int32x4_t __b = {0, 0, 0, 0};
+ return (uint32x4_t) __builtin_aarch64_cmlev4si (__a, __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vclezq_s64 (int64x2_t __a)
+{
+ int64x2_t __b = {0, 0};
+ return (uint64x2_t) __builtin_aarch64_cmlev2di (__a, __b);
+}
+
+/* vclez - scalar. */
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vclezs_f32 (float32_t __a)
+{
+ return __a <= 0.0f ? -1 : 0;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vclezd_s64 (int64x1_t __a)
+{
+ return __a <= 0 ? -1ll : 0ll;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vclezd_u64 (uint64x1_t __a)
+{
+ return __a <= 0 ? -1ll : 0ll;
+}
+
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+vclezd_f64 (float64_t __a)
+{
+ return __a <= 0.0 ? -1ll : 0ll;
+}
+
+/* vclt - vector. */
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vclt_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (uint32x2_t) __builtin_aarch64_cmgtv2sf (__b, __a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vclt_f64 (float64x1_t __a, float64x1_t __b)
+{
+ return __a < __b ? -1ll : 0ll;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vclt_p8 (poly8x8_t __a, poly8x8_t __b)
+{
+ return (uint8x8_t) __builtin_aarch64_cmgtv8qi ((int8x8_t) __b,
+ (int8x8_t) __a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vclt_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (uint8x8_t) __builtin_aarch64_cmgtv8qi (__b, __a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vclt_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (uint16x4_t) __builtin_aarch64_cmgtv4hi (__b, __a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vclt_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (uint32x2_t) __builtin_aarch64_cmgtv2si (__b, __a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vclt_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return __a < __b ? -1ll : 0ll;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vclt_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t) __builtin_aarch64_cmgtuv8qi ((int8x8_t) __b,
+ (int8x8_t) __a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vclt_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t) __builtin_aarch64_cmgtuv4hi ((int16x4_t) __b,
+ (int16x4_t) __a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vclt_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t) __builtin_aarch64_cmgtuv2si ((int32x2_t) __b,
+ (int32x2_t) __a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vclt_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return __a < __b ? -1ll : 0ll;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcltq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return (uint32x4_t) __builtin_aarch64_cmgtv4sf (__b, __a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcltq_f64 (float64x2_t __a, float64x2_t __b)
+{
+ return (uint64x2_t) __builtin_aarch64_cmgtv2df (__b, __a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcltq_p8 (poly8x16_t __a, poly8x16_t __b)
+{
+ return (uint8x16_t) __builtin_aarch64_cmgtv16qi ((int8x16_t) __b,
+ (int8x16_t) __a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcltq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (uint8x16_t) __builtin_aarch64_cmgtv16qi (__b, __a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vcltq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (uint16x8_t) __builtin_aarch64_cmgtv8hi (__b, __a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcltq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (uint32x4_t) __builtin_aarch64_cmgtv4si (__b, __a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcltq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (uint64x2_t) __builtin_aarch64_cmgtv2di (__b, __a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcltq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t) __builtin_aarch64_cmgtuv16qi ((int8x16_t) __b,
+ (int8x16_t) __a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vcltq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t) __builtin_aarch64_cmgtuv8hi ((int16x8_t) __b,
+ (int16x8_t) __a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcltq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t) __builtin_aarch64_cmgtuv4si ((int32x4_t) __b,
+ (int32x4_t) __a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcltq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return (uint64x2_t) __builtin_aarch64_cmgtuv2di ((int64x2_t) __b,
+ (int64x2_t) __a);
+}
+
+/* vclt - scalar. */
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vclts_f32 (float32_t __a, float32_t __b)
+{
+ return __a < __b ? -1 : 0;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcltd_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return __a < __b ? -1ll : 0ll;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcltd_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return __a < __b ? -1ll : 0ll;
+}
+
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+vcltd_f64 (float64_t __a, float64_t __b)
+{
+ return __a < __b ? -1ll : 0ll;
+}
+
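+/* The vclt/vcle family (vector and scalar) returns per-lane masks that
+   are all ones when the comparison holds and all zeros otherwise, so a
+   common pattern is to feed the result into a bitwise select.  A minimal
+   sketch, assuming vbsl_s32 as defined elsewhere in this header; the
+   helper name `example_min_s32' is illustrative only:
+
+     static inline int32x2_t
+     example_min_s32 (int32x2_t a, int32x2_t b)
+     {
+       uint32x2_t lt = vclt_s32 (a, b);   // all ones where a < b
+       return vbsl_s32 (lt, a, b);        // pick a where mask set, else b
+     }
+*/
+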
+/* vcltz - vector. */
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcltz_f32 (float32x2_t __a)
+{
+ float32x2_t __b = {0.0f, 0.0f};
+ return (uint32x2_t) __builtin_aarch64_cmltv2sf (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcltz_f64 (float64x1_t __a)
+{
+ return __a < 0.0 ? -1ll : 0ll;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vcltz_p8 (poly8x8_t __a)
+{
+ poly8x8_t __b = {0, 0, 0, 0, 0, 0, 0, 0};
+ return (uint8x8_t) __builtin_aarch64_cmltv8qi ((int8x8_t) __a,
+ (int8x8_t) __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vcltz_s8 (int8x8_t __a)
+{
+ int8x8_t __b = {0, 0, 0, 0, 0, 0, 0, 0};
+ return (uint8x8_t) __builtin_aarch64_cmltv8qi (__a, __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vcltz_s16 (int16x4_t __a)
+{
+ int16x4_t __b = {0, 0, 0, 0};
+ return (uint16x4_t) __builtin_aarch64_cmltv4hi (__a, __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcltz_s32 (int32x2_t __a)
+{
+ int32x2_t __b = {0, 0};
+ return (uint32x2_t) __builtin_aarch64_cmltv2si (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcltz_s64 (int64x1_t __a)
+{
+ return __a < 0ll ? -1ll : 0ll;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcltzq_f32 (float32x4_t __a)
+{
+ float32x4_t __b = {0.0f, 0.0f, 0.0f, 0.0f};
+ return (uint32x4_t) __builtin_aarch64_cmltv4sf (__a, __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcltzq_f64 (float64x2_t __a)
+{
+ float64x2_t __b = {0.0, 0.0};
+ return (uint64x2_t) __builtin_aarch64_cmltv2df (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcltzq_p8 (poly8x16_t __a)
+{
+ poly8x16_t __b = {0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0};
+ return (uint8x16_t) __builtin_aarch64_cmltv16qi ((int8x16_t) __a,
+ (int8x16_t) __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcltzq_s8 (int8x16_t __a)
+{
+ int8x16_t __b = {0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0};
+ return (uint8x16_t) __builtin_aarch64_cmltv16qi (__a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vcltzq_s16 (int16x8_t __a)
+{
+ int16x8_t __b = {0, 0, 0, 0, 0, 0, 0, 0};
+ return (uint16x8_t) __builtin_aarch64_cmltv8hi (__a, __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcltzq_s32 (int32x4_t __a)
+{
+ int32x4_t __b = {0, 0, 0, 0};
+ return (uint32x4_t) __builtin_aarch64_cmltv4si (__a, __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcltzq_s64 (int64x2_t __a)
+{
+ int64x2_t __b = {0, 0};
+ return (uint64x2_t) __builtin_aarch64_cmltv2di (__a, __b);
+}
+
+/* vcltz - scalar. */
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vcltzs_f32 (float32_t __a)
+{
+ return __a < 0.0f ? -1 : 0;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcltzd_s64 (int64x1_t __a)
+{
+ return __a < 0 ? -1ll : 0ll;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcltzd_u64 (uint64x1_t __a)
+{
+ return __a < 0 ? -1ll : 0ll;
+}
+
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+vcltzd_f64 (float64_t __a)
+{
+ return __a < 0.0 ? -1ll : 0ll;
+}
+
+/* vclz. */
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vclz_s8 (int8x8_t __a)
+{
+ return __builtin_aarch64_clzv8qi (__a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vclz_s16 (int16x4_t __a)
+{
+ return __builtin_aarch64_clzv4hi (__a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vclz_s32 (int32x2_t __a)
+{
+ return __builtin_aarch64_clzv2si (__a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vclz_u8 (uint8x8_t __a)
+{
+ return (uint8x8_t)__builtin_aarch64_clzv8qi ((int8x8_t)__a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vclz_u16 (uint16x4_t __a)
+{
+ return (uint16x4_t)__builtin_aarch64_clzv4hi ((int16x4_t)__a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vclz_u32 (uint32x2_t __a)
+{
+ return (uint32x2_t)__builtin_aarch64_clzv2si ((int32x2_t)__a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vclzq_s8 (int8x16_t __a)
+{
+ return __builtin_aarch64_clzv16qi (__a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vclzq_s16 (int16x8_t __a)
+{
+ return __builtin_aarch64_clzv8hi (__a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vclzq_s32 (int32x4_t __a)
+{
+ return __builtin_aarch64_clzv4si (__a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vclzq_u8 (uint8x16_t __a)
+{
+ return (uint8x16_t)__builtin_aarch64_clzv16qi ((int8x16_t)__a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vclzq_u16 (uint16x8_t __a)
+{
+ return (uint16x8_t)__builtin_aarch64_clzv8hi ((int16x8_t)__a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vclzq_u32 (uint32x4_t __a)
+{
+ return (uint32x4_t)__builtin_aarch64_clzv4si ((int32x4_t)__a);
+}
+
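+/* vclz counts the leading zero bits of each lane.  Combined with a
+   broadcast constant it gives, for example, the position of the highest
+   set bit.  A minimal sketch, assuming vsub_s32 and vdup_n_s32 as
+   defined elsewhere in this header; `example_log2_s32' is an
+   illustrative name only and is valid for strictly positive inputs:
+
+     static inline int32x2_t
+     example_log2_s32 (int32x2_t v)
+     {
+       return vsub_s32 (vdup_n_s32 (31), vclz_s32 (v));  // 31 - clz == floor(log2)
+     }
+*/
+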
+/* vcvt (double -> float). */
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vcvt_f32_f64 (float64x2_t __a)
+{
+ return __builtin_aarch64_float_truncate_lo_v2sf (__a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vcvt_high_f32_f64 (float32x2_t __a, float64x2_t __b)
+{
+ return __builtin_aarch64_float_truncate_hi_v4sf (__a, __b);
+}
+
+/* vcvt (float -> double). */
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vcvt_f64_f32 (float32x2_t __a)
+{
+ return __builtin_aarch64_float_extend_lo_v2df (__a);
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vcvt_high_f64_f32 (float32x4_t __a)
+{
+ return __builtin_aarch64_vec_unpacks_hi_v4sf (__a);
+}
+
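+/* The _high forms above pair with the non-high ones: vcvt_high_f32_f64
+   (lo, v) narrows v to two floats and places them in the upper half of
+   the 128-bit result, keeping lo in the lower half, while
+   vcvt_high_f64_f32 (q) widens the upper two floats of q.  For example,
+   a full float64x2_t-to-float32x4_t narrowing is
+   vcvt_high_f32_f64 (vcvt_f32_f64 (low_pair), high_pair).  */
+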
+/* vcvt (<u>int -> float) */
+
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vcvtd_f64_s64 (int64_t __a)
+{
+ return (float64_t) __a;
+}
+
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vcvtd_f64_u64 (uint64_t __a)
+{
+ return (float64_t) __a;
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vcvts_f32_s32 (int32_t __a)
+{
+ return (float32_t) __a;
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vcvts_f32_u32 (uint32_t __a)
+{
+ return (float32_t) __a;
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vcvt_f32_s32 (int32x2_t __a)
+{
+ return __builtin_aarch64_floatv2siv2sf (__a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vcvt_f32_u32 (uint32x2_t __a)
+{
+ return __builtin_aarch64_floatunsv2siv2sf ((int32x2_t) __a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vcvtq_f32_s32 (int32x4_t __a)
+{
+ return __builtin_aarch64_floatv4siv4sf (__a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vcvtq_f32_u32 (uint32x4_t __a)
+{
+ return __builtin_aarch64_floatunsv4siv4sf ((int32x4_t) __a);
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vcvtq_f64_s64 (int64x2_t __a)
+{
+ return __builtin_aarch64_floatv2div2df (__a);
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vcvtq_f64_u64 (uint64x2_t __a)
+{
+ return __builtin_aarch64_floatunsv2div2df ((int64x2_t) __a);
+}
+
+/* vcvt (float -> <u>int) */
+
+__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+vcvtd_s64_f64 (float64_t __a)
+{
+ return (int64_t) __a;
+}
+
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+vcvtd_u64_f64 (float64_t __a)
+{
+ return (uint64_t) __a;
+}
+
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vcvts_s32_f32 (float32_t __a)
+{
+ return (int32_t) __a;
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vcvts_u32_f32 (float32_t __a)
+{
+ return (uint32_t) __a;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vcvt_s32_f32 (float32x2_t __a)
+{
+ return __builtin_aarch64_lbtruncv2sfv2si (__a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcvt_u32_f32 (float32x2_t __a)
+{
+ /* TODO: This cast should go away when builtins have
+ their correct types. */
+ return (uint32x2_t) __builtin_aarch64_lbtruncuv2sfv2si (__a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vcvtq_s32_f32 (float32x4_t __a)
+{
+ return __builtin_aarch64_lbtruncv4sfv4si (__a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcvtq_u32_f32 (float32x4_t __a)
+{
+ /* TODO: This cast should go away when builtins have
+ their correct types. */
+ return (uint32x4_t) __builtin_aarch64_lbtruncuv4sfv4si (__a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vcvtq_s64_f64 (float64x2_t __a)
+{
+ return __builtin_aarch64_lbtruncv2dfv2di (__a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcvtq_u64_f64 (float64x2_t __a)
+{
+ /* TODO: This cast should go away when builtins have
+ their correct types. */
+ return (uint64x2_t) __builtin_aarch64_lbtruncuv2dfv2di (__a);
+}
+
+/* vcvta */
+
+__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+vcvtad_s64_f64 (float64_t __a)
+{
+ return __builtin_aarch64_lrounddfdi (__a);
+}
+
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+vcvtad_u64_f64 (float64_t __a)
+{
+ return __builtin_aarch64_lroundudfdi (__a);
+}
+
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vcvtas_s32_f32 (float32_t __a)
+{
+ return __builtin_aarch64_lroundsfsi (__a);
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vcvtas_u32_f32 (float32_t __a)
+{
+ return __builtin_aarch64_lroundusfsi (__a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vcvta_s32_f32 (float32x2_t __a)
+{
+ return __builtin_aarch64_lroundv2sfv2si (__a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcvta_u32_f32 (float32x2_t __a)
+{
+ /* TODO: This cast should go away when builtins have
+ their correct types. */
+ return (uint32x2_t) __builtin_aarch64_lrounduv2sfv2si (__a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vcvtaq_s32_f32 (float32x4_t __a)
+{
+ return __builtin_aarch64_lroundv4sfv4si (__a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcvtaq_u32_f32 (float32x4_t __a)
+{
+ /* TODO: This cast should go away when builtins have
+ their correct types. */
+ return (uint32x4_t) __builtin_aarch64_lrounduv4sfv4si (__a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vcvtaq_s64_f64 (float64x2_t __a)
+{
+ return __builtin_aarch64_lroundv2dfv2di (__a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcvtaq_u64_f64 (float64x2_t __a)
+{
+ /* TODO: This cast should go away when builtins have
+ their correct types. */
+ return (uint64x2_t) __builtin_aarch64_lrounduv2dfv2di (__a);
+}
+
+/* vcvtm */
+
+__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+vcvtmd_s64_f64 (float64_t __a)
+{
+ return __builtin_llfloor (__a);
+}
+
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+vcvtmd_u64_f64 (float64_t __a)
+{
+ return __builtin_aarch64_lfloorudfdi (__a);
+}
+
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vcvtms_s32_f32 (float32_t __a)
+{
+ return __builtin_ifloorf (__a);
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vcvtms_u32_f32 (float32_t __a)
+{
+ return __builtin_aarch64_lfloorusfsi (__a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vcvtm_s32_f32 (float32x2_t __a)
+{
+ return __builtin_aarch64_lfloorv2sfv2si (__a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcvtm_u32_f32 (float32x2_t __a)
+{
+ /* TODO: This cast should go away when builtins have
+ their correct types. */
+ return (uint32x2_t) __builtin_aarch64_lflooruv2sfv2si (__a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vcvtmq_s32_f32 (float32x4_t __a)
+{
+ return __builtin_aarch64_lfloorv4sfv4si (__a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcvtmq_u32_f32 (float32x4_t __a)
+{
+ /* TODO: This cast should go away when builtins have
+ their correct types. */
+ return (uint32x4_t) __builtin_aarch64_lflooruv4sfv4si (__a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vcvtmq_s64_f64 (float64x2_t __a)
+{
+ return __builtin_aarch64_lfloorv2dfv2di (__a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcvtmq_u64_f64 (float64x2_t __a)
+{
+ /* TODO: This cast should go away when builtins have
+ their correct types. */
+ return (uint64x2_t) __builtin_aarch64_lflooruv2dfv2di (__a);
+}
+
+/* vcvtn */
+
+__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+vcvtnd_s64_f64 (float64_t __a)
+{
+ return __builtin_aarch64_lfrintndfdi (__a);
+}
+
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+vcvtnd_u64_f64 (float64_t __a)
+{
+ return __builtin_aarch64_lfrintnudfdi (__a);
+}
+
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vcvtns_s32_f32 (float32_t __a)
+{
+ return __builtin_aarch64_lfrintnsfsi (__a);
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vcvtns_u32_f32 (float32_t __a)
+{
+ return __builtin_aarch64_lfrintnusfsi (__a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vcvtn_s32_f32 (float32x2_t __a)
+{
+ return __builtin_aarch64_lfrintnv2sfv2si (__a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcvtn_u32_f32 (float32x2_t __a)
+{
+ /* TODO: This cast should go away when builtins have
+ their correct types. */
+ return (uint32x2_t) __builtin_aarch64_lfrintnuv2sfv2si (__a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vcvtnq_s32_f32 (float32x4_t __a)
+{
+ return __builtin_aarch64_lfrintnv4sfv4si (__a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcvtnq_u32_f32 (float32x4_t __a)
+{
+ /* TODO: This cast should go away when builtins have
+ their correct types. */
+ return (uint32x4_t) __builtin_aarch64_lfrintnuv4sfv4si (__a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vcvtnq_s64_f64 (float64x2_t __a)
+{
+ return __builtin_aarch64_lfrintnv2dfv2di (__a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcvtnq_u64_f64 (float64x2_t __a)
+{
+ /* TODO: This cast should go away when builtins have
+ their correct types. */
+ return (uint64x2_t) __builtin_aarch64_lfrintnuv2dfv2di (__a);
+}
+
+/* vcvtp */
+
+__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+vcvtpd_s64_f64 (float64_t __a)
+{
+ return __builtin_llceil (__a);
+}
+
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+vcvtpd_u64_f64 (float64_t __a)
+{
+ return __builtin_aarch64_lceiludfdi (__a);
+}
+
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vcvtps_s32_f32 (float32_t __a)
+{
+ return __builtin_iceilf (__a);
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vcvtps_u32_f32 (float32_t __a)
+{
+ return __builtin_aarch64_lceilusfsi (__a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vcvtp_s32_f32 (float32x2_t __a)
+{
+ return __builtin_aarch64_lceilv2sfv2si (__a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcvtp_u32_f32 (float32x2_t __a)
+{
+ /* TODO: This cast should go away when builtins have
+ their correct types. */
+ return (uint32x2_t) __builtin_aarch64_lceiluv2sfv2si (__a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vcvtpq_s32_f32 (float32x4_t __a)
+{
+ return __builtin_aarch64_lceilv4sfv4si (__a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcvtpq_u32_f32 (float32x4_t __a)
+{
+ /* TODO: This cast should go away when builtins have
+ their correct types. */
+ return (uint32x4_t) __builtin_aarch64_lceiluv4sfv4si (__a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vcvtpq_s64_f64 (float64x2_t __a)
+{
+ return __builtin_aarch64_lceilv2dfv2di (__a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcvtpq_u64_f64 (float64x2_t __a)
+{
+ /* TODO: This cast should go away when builtins have
+ their correct types. */
+ return (uint64x2_t) __builtin_aarch64_lceiluv2dfv2di (__a);
+}
+
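+/* The vcvt{a,m,n,p} conversions above differ only in rounding mode:
+   'a' rounds to nearest with ties away from zero (lround), 'm' rounds
+   toward minus infinity (floor), 'n' rounds to nearest with ties to
+   even (frintn), and 'p' rounds toward plus infinity (ceil).  For
+   example, with an input of 2.5f:
+
+     vcvtas_s32_f32 (2.5f)  ->  3
+     vcvtms_s32_f32 (2.5f)  ->  2
+     vcvtns_s32_f32 (2.5f)  ->  2
+     vcvtps_s32_f32 (2.5f)  ->  3
+
+   whereas the plain vcvt conversions truncate toward zero.  */
+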
+/* vdup_n */
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vdup_n_f32 (float32_t __a)
+{
+ return (float32x2_t) {__a, __a};
+}
+
+__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+vdup_n_f64 (float64_t __a)
+{
+ return __a;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vdup_n_p8 (poly8_t __a)
+{
+ return (poly8x8_t) {__a, __a, __a, __a, __a, __a, __a, __a};
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vdup_n_p16 (poly16_t __a)
+{
+ return (poly16x4_t) {__a, __a, __a, __a};
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vdup_n_s8 (int8_t __a)
+{
+ return (int8x8_t) {__a, __a, __a, __a, __a, __a, __a, __a};
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vdup_n_s16 (int16_t __a)
+{
+ return (int16x4_t) {__a, __a, __a, __a};
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vdup_n_s32 (int32_t __a)
+{
+ return (int32x2_t) {__a, __a};
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vdup_n_s64 (int64_t __a)
+{
+ return __a;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vdup_n_u8 (uint8_t __a)
+{
+ return (uint8x8_t) {__a, __a, __a, __a, __a, __a, __a, __a};
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vdup_n_u16 (uint16_t __a)
+{
+ return (uint16x4_t) {__a, __a, __a, __a};
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vdup_n_u32 (uint32_t __a)
+{
+ return (uint32x2_t) {__a, __a};
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vdup_n_u64 (uint64_t __a)
+{
+ return __a;
+}
+
+/* vdupq_n */
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vdupq_n_f32 (float32_t __a)
+{
+ return (float32x4_t) {__a, __a, __a, __a};
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vdupq_n_f64 (float64_t __a)
+{
+ return (float64x2_t) {__a, __a};
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vdupq_n_p8 (uint32_t __a)
+{
+ return (poly8x16_t) {__a, __a, __a, __a, __a, __a, __a, __a,
+ __a, __a, __a, __a, __a, __a, __a, __a};
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vdupq_n_p16 (uint32_t __a)
+{
+ return (poly16x8_t) {__a, __a, __a, __a, __a, __a, __a, __a};
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vdupq_n_s8 (int32_t __a)
+{
+ return (int8x16_t) {__a, __a, __a, __a, __a, __a, __a, __a,
+ __a, __a, __a, __a, __a, __a, __a, __a};
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vdupq_n_s16 (int32_t __a)
+{
+ return (int16x8_t) {__a, __a, __a, __a, __a, __a, __a, __a};
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vdupq_n_s32 (int32_t __a)
+{
+ return (int32x4_t) {__a, __a, __a, __a};
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vdupq_n_s64 (int64_t __a)
+{
+ return (int64x2_t) {__a, __a};
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vdupq_n_u8 (uint32_t __a)
+{
+ return (uint8x16_t) {__a, __a, __a, __a, __a, __a, __a, __a,
+ __a, __a, __a, __a, __a, __a, __a, __a};
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vdupq_n_u16 (uint32_t __a)
+{
+ return (uint16x8_t) {__a, __a, __a, __a, __a, __a, __a, __a};
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vdupq_n_u32 (uint32_t __a)
+{
+ return (uint32x4_t) {__a, __a, __a, __a};
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vdupq_n_u64 (uint64_t __a)
+{
+ return (uint64x2_t) {__a, __a};
+}
+
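+/* vdup_n/vdupq_n broadcast one scalar into every lane, which is the
+   usual way to apply a single scalar to a whole vector.  A minimal
+   sketch, assuming vmulq_f32 as defined elsewhere in this header;
+   `example_scale_f32' is an illustrative name only:
+
+     static inline float32x4_t
+     example_scale_f32 (float32x4_t v, float32_t s)
+     {
+       return vmulq_f32 (v, vdupq_n_f32 (s));   // multiply every lane by s
+     }
+*/
+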
+/* vdup_lane */
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vdup_lane_f32 (float32x2_t __a, const int __b)
+{
+ return __aarch64_vdup_lane_f32 (__a, __b);
+}
+
+__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+vdup_lane_f64 (float64x1_t __a, const int __b)
+{
+ return __aarch64_vdup_lane_f64 (__a, __b);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vdup_lane_p8 (poly8x8_t __a, const int __b)
+{
+ return __aarch64_vdup_lane_p8 (__a, __b);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vdup_lane_p16 (poly16x4_t __a, const int __b)
+{
+ return __aarch64_vdup_lane_p16 (__a, __b);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vdup_lane_s8 (int8x8_t __a, const int __b)
+{
+ return __aarch64_vdup_lane_s8 (__a, __b);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vdup_lane_s16 (int16x4_t __a, const int __b)
+{
+ return __aarch64_vdup_lane_s16 (__a, __b);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vdup_lane_s32 (int32x2_t __a, const int __b)
+{
+ return __aarch64_vdup_lane_s32 (__a, __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vdup_lane_s64 (int64x1_t __a, const int __b)
+{
+ return __aarch64_vdup_lane_s64 (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vdup_lane_u8 (uint8x8_t __a, const int __b)
+{
+ return __aarch64_vdup_lane_u8 (__a, __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vdup_lane_u16 (uint16x4_t __a, const int __b)
+{
+ return __aarch64_vdup_lane_u16 (__a, __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vdup_lane_u32 (uint32x2_t __a, const int __b)
+{
+ return __aarch64_vdup_lane_u32 (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vdup_lane_u64 (uint64x1_t __a, const int __b)
+{
+ return __aarch64_vdup_lane_u64 (__a, __b);
+}
+
+/* vdup_laneq */
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vdup_laneq_f32 (float32x4_t __a, const int __b)
+{
+ return __aarch64_vdup_laneq_f32 (__a, __b);
+}
+
+__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+vdup_laneq_f64 (float64x2_t __a, const int __b)
+{
+ return __aarch64_vdup_laneq_f64 (__a, __b);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vdup_laneq_p8 (poly8x16_t __a, const int __b)
+{
+ return __aarch64_vdup_laneq_p8 (__a, __b);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vdup_laneq_p16 (poly16x8_t __a, const int __b)
+{
+ return __aarch64_vdup_laneq_p16 (__a, __b);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vdup_laneq_s8 (int8x16_t __a, const int __b)
+{
+ return __aarch64_vdup_laneq_s8 (__a, __b);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vdup_laneq_s16 (int16x8_t __a, const int __b)
+{
+ return __aarch64_vdup_laneq_s16 (__a, __b);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vdup_laneq_s32 (int32x4_t __a, const int __b)
+{
+ return __aarch64_vdup_laneq_s32 (__a, __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vdup_laneq_s64 (int64x2_t __a, const int __b)
+{
+ return __aarch64_vdup_laneq_s64 (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vdup_laneq_u8 (uint8x16_t __a, const int __b)
+{
+ return __aarch64_vdup_laneq_u8 (__a, __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vdup_laneq_u16 (uint16x8_t __a, const int __b)
+{
+ return __aarch64_vdup_laneq_u16 (__a, __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vdup_laneq_u32 (uint32x4_t __a, const int __b)
+{
+ return __aarch64_vdup_laneq_u32 (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vdup_laneq_u64 (uint64x2_t __a, const int __b)
+{
+ return __aarch64_vdup_laneq_u64 (__a, __b);
+}
+
+/* vdupq_lane */
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vdupq_lane_f32 (float32x2_t __a, const int __b)
+{
+ return __aarch64_vdupq_lane_f32 (__a, __b);
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vdupq_lane_f64 (float64x1_t __a, const int __b)
+{
+ return __aarch64_vdupq_lane_f64 (__a, __b);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vdupq_lane_p8 (poly8x8_t __a, const int __b)
+{
+ return __aarch64_vdupq_lane_p8 (__a, __b);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vdupq_lane_p16 (poly16x4_t __a, const int __b)
+{
+ return __aarch64_vdupq_lane_p16 (__a, __b);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vdupq_lane_s8 (int8x8_t __a, const int __b)
+{
+ return __aarch64_vdupq_lane_s8 (__a, __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vdupq_lane_s16 (int16x4_t __a, const int __b)
+{
+ return __aarch64_vdupq_lane_s16 (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vdupq_lane_s32 (int32x2_t __a, const int __b)
+{
+ return __aarch64_vdupq_lane_s32 (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vdupq_lane_s64 (int64x1_t __a, const int __b)
+{
+ return __aarch64_vdupq_lane_s64 (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vdupq_lane_u8 (uint8x8_t __a, const int __b)
+{
+ return __aarch64_vdupq_lane_u8 (__a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vdupq_lane_u16 (uint16x4_t __a, const int __b)
+{
+ return __aarch64_vdupq_lane_u16 (__a, __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vdupq_lane_u32 (uint32x2_t __a, const int __b)
+{
+ return __aarch64_vdupq_lane_u32 (__a, __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vdupq_lane_u64 (uint64x1_t __a, const int __b)
+{
+ return __aarch64_vdupq_lane_u64 (__a, __b);
+}
+
+/* vdupq_laneq */
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vdupq_laneq_f32 (float32x4_t __a, const int __b)
+{
+ return __aarch64_vdupq_laneq_f32 (__a, __b);
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vdupq_laneq_f64 (float64x2_t __a, const int __b)
+{
+ return __aarch64_vdupq_laneq_f64 (__a, __b);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vdupq_laneq_p8 (poly8x16_t __a, const int __b)
+{
+ return __aarch64_vdupq_laneq_p8 (__a, __b);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vdupq_laneq_p16 (poly16x8_t __a, const int __b)
+{
+ return __aarch64_vdupq_laneq_p16 (__a, __b);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vdupq_laneq_s8 (int8x16_t __a, const int __b)
+{
+ return __aarch64_vdupq_laneq_s8 (__a, __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vdupq_laneq_s16 (int16x8_t __a, const int __b)
+{
+ return __aarch64_vdupq_laneq_s16 (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vdupq_laneq_s32 (int32x4_t __a, const int __b)
+{
+ return __aarch64_vdupq_laneq_s32 (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vdupq_laneq_s64 (int64x2_t __a, const int __b)
+{
+ return __aarch64_vdupq_laneq_s64 (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vdupq_laneq_u8 (uint8x16_t __a, const int __b)
+{
+ return __aarch64_vdupq_laneq_u8 (__a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vdupq_laneq_u16 (uint16x8_t __a, const int __b)
+{
+ return __aarch64_vdupq_laneq_u16 (__a, __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vdupq_laneq_u32 (uint32x4_t __a, const int __b)
+{
+ return __aarch64_vdupq_laneq_u32 (__a, __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vdupq_laneq_u64 (uint64x2_t __a, const int __b)
+{
+ return __aarch64_vdupq_laneq_u64 (__a, __b);
+}
+
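+/* In the lane-broadcast forms above, _lane takes its lane from a 64-bit
+   source vector and _laneq from a 128-bit source, while the q prefix on
+   the intrinsic name gives the width of the result.  For example,
+   vdup_laneq_f32 (q, 3) broadcasts lane 3 of a float32x4_t into a
+   float32x2_t, whereas vdupq_lane_f32 (d, 1) broadcasts lane 1 of a
+   float32x2_t into a float32x4_t.  */
+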
+/* vdupb_lane */
+__extension__ static __inline poly8_t __attribute__ ((__always_inline__))
+vdupb_lane_p8 (poly8x8_t __a, const int __b)
+{
+ return __aarch64_vget_lane_p8 (__a, __b);
+}
+
+__extension__ static __inline int8_t __attribute__ ((__always_inline__))
+vdupb_lane_s8 (int8x8_t __a, const int __b)
+{
+ return __aarch64_vget_lane_s8 (__a, __b);
+}
+
+__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
+vdupb_lane_u8 (uint8x8_t __a, const int __b)
+{
+ return __aarch64_vget_lane_u8 (__a, __b);
+}
+
+/* vduph_lane */
+__extension__ static __inline poly16_t __attribute__ ((__always_inline__))
+vduph_lane_p16 (poly16x4_t __a, const int __b)
+{
+ return __aarch64_vget_lane_p16 (__a, __b);
+}
+
+__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+vduph_lane_s16 (int16x4_t __a, const int __b)
+{
+ return __aarch64_vget_lane_s16 (__a, __b);
+}
+
+__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+vduph_lane_u16 (uint16x4_t __a, const int __b)
+{
+ return __aarch64_vget_lane_u16 (__a, __b);
+}
+
+/* vdups_lane */
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vdups_lane_f32 (float32x2_t __a, const int __b)
+{
+ return __aarch64_vget_lane_f32 (__a, __b);
+}
+
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vdups_lane_s32 (int32x2_t __a, const int __b)
+{
+ return __aarch64_vget_lane_s32 (__a, __b);
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vdups_lane_u32 (uint32x2_t __a, const int __b)
+{
+ return __aarch64_vget_lane_u32 (__a, __b);
+}
+
+/* vdupd_lane */
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vdupd_lane_f64 (float64x1_t __a, const int __attribute__ ((unused)) __b)
+{
+ return __a;
+}
+
+__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+vdupd_lane_s64 (int64x1_t __a, const int __attribute__ ((unused)) __b)
+{
+ return __a;
+}
+
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+vdupd_lane_u64 (uint64x1_t __a, const int __attribute__ ((unused)) __b)
+{
+ return __a;
+}
+
+/* vdupb_laneq */
+__extension__ static __inline poly8_t __attribute__ ((__always_inline__))
+vdupb_laneq_p8 (poly8x16_t __a, const int __b)
+{
+ return __aarch64_vgetq_lane_p8 (__a, __b);
+}
+
+__extension__ static __inline int8_t __attribute__ ((__always_inline__))
+vdupb_laneq_s8 (int8x16_t __a, const int __b)
+{
+ return __aarch64_vgetq_lane_s8 (__a, __b);
+}
+
+__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
+vdupb_laneq_u8 (uint8x16_t __a, const int __b)
+{
+ return __aarch64_vgetq_lane_u8 (__a, __b);
+}
+
+/* vduph_laneq */
+__extension__ static __inline poly16_t __attribute__ ((__always_inline__))
+vduph_laneq_p16 (poly16x8_t __a, const int __b)
+{
+ return __aarch64_vgetq_lane_p16 (__a, __b);
+}
+
+__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+vduph_laneq_s16 (int16x8_t __a, const int __b)
+{
+ return __aarch64_vgetq_lane_s16 (__a, __b);
+}
+
+__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+vduph_laneq_u16 (uint16x8_t __a, const int __b)
+{
+ return __aarch64_vgetq_lane_u16 (__a, __b);
+}
+
+/* vdups_laneq */
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vdups_laneq_f32 (float32x4_t __a, const int __b)
+{
+ return __aarch64_vgetq_lane_f32 (__a, __b);
+}
+
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vdups_laneq_s32 (int32x4_t __a, const int __b)
+{
+ return __aarch64_vgetq_lane_s32 (__a, __b);
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vdups_laneq_u32 (uint32x4_t __a, const int __b)
+{
+ return __aarch64_vgetq_lane_u32 (__a, __b);
+}
+
+/* vdupd_laneq */
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vdupd_laneq_f64 (float64x2_t __a, const int __b)
+{
+ return __aarch64_vgetq_lane_f64 (__a, __b);
+}
+
+__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+vdupd_laneq_s64 (int64x2_t __a, const int __b)
+{
+ return __aarch64_vgetq_lane_s64 (__a, __b);
+}
+
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+vdupd_laneq_u64 (uint64x2_t __a, const int __b)
+{
+ return __aarch64_vgetq_lane_u64 (__a, __b);
+}
+
+/* vfma_lane */
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vfma_lane_f32 (float32x2_t __a, float32x2_t __b,
+ float32x2_t __c, const int __lane)
+{
+ return __builtin_aarch64_fmav2sf (__b,
+ __aarch64_vdup_lane_f32 (__c, __lane),
+ __a);
+}
+
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vfma_lane_f64 (float64_t __a, float64_t __b,
+ float64_t __c, const int __lane)
+{
+ return __builtin_fma (__b, __c, __a);
+}
+
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vfmad_lane_f64 (float64_t __a, float64_t __b,
+ float64_t __c, const int __lane)
+{
+ return __builtin_fma (__b, __c, __a);
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vfmas_lane_f32 (float32_t __a, float32_t __b,
+ float32x2_t __c, const int __lane)
+{
+ return __builtin_fmaf (__b, __aarch64_vget_lane_f32 (__c, __lane), __a);
+}
+
+/* vfma_laneq */
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vfma_laneq_f32 (float32x2_t __a, float32x2_t __b,
+ float32x4_t __c, const int __lane)
+{
+ return __builtin_aarch64_fmav2sf (__b,
+ __aarch64_vdup_laneq_f32 (__c, __lane),
+ __a);
+}
+
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vfma_laneq_f64 (float64_t __a, float64_t __b,
+ float64x2_t __c, const int __lane)
+{
+ return __builtin_fma (__b, __aarch64_vgetq_lane_f64 (__c, __lane), __a);
+}
+
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vfmad_laneq_f64 (float64_t __a, float64_t __b,
+ float64x2_t __c, const int __lane)
+{
+ return __builtin_fma (__b, __aarch64_vgetq_lane_f64 (__c, __lane), __a);
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vfmas_laneq_f32 (float32_t __a, float32_t __b,
+ float32x4_t __c, const int __lane)
+{
+ return __builtin_fmaf (__b, __aarch64_vgetq_lane_f32 (__c, __lane), __a);
+}
+
+/* vfmaq_lane */
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vfmaq_lane_f32 (float32x4_t __a, float32x4_t __b,
+ float32x2_t __c, const int __lane)
+{
+ return __builtin_aarch64_fmav4sf (__b,
+ __aarch64_vdupq_lane_f32 (__c, __lane),
+ __a);
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vfmaq_lane_f64 (float64x2_t __a, float64x2_t __b,
+ float64_t __c, const int __lane)
+{
+ return __builtin_aarch64_fmav2df (__b, vdupq_n_f64 (__c), __a);
+}
+
+/* vfmaq_laneq */
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vfmaq_laneq_f32 (float32x4_t __a, float32x4_t __b,
+ float32x4_t __c, const int __lane)
+{
+ return __builtin_aarch64_fmav4sf (__b,
+ __aarch64_vdupq_laneq_f32 (__c, __lane),
+ __a);
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vfmaq_laneq_f64 (float64x2_t __a, float64x2_t __b,
+ float64x2_t __c, const int __lane)
+{
+ return __builtin_aarch64_fmav2df (__b,
+ __aarch64_vdupq_laneq_f64 (__c, __lane),
+ __a);
+}
+
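+/* The vfma*_lane* forms compute a fused multiply-accumulate with one
+   multiplicand broadcast from a single lane; note that the accumulator
+   is the first intrinsic argument but the last argument of the
+   underlying fma builtin.  A minimal usage sketch; `example_axpy_f32'
+   is an illustrative name only:
+
+     static inline float32x2_t
+     example_axpy_f32 (float32x2_t acc, float32x2_t x, float32x2_t coef)
+     {
+       return vfma_lane_f32 (acc, x, coef, 0);   // acc + x * coef[0]
+     }
+*/
+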
+/* vfms_lane */
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vfms_lane_f32 (float32x2_t __a, float32x2_t __b,
+ float32x2_t __c, const int __lane)
+{
+ return __builtin_aarch64_fmav2sf (-__b,
+ __aarch64_vdup_lane_f32 (__c, __lane),
+ __a);
+}
+
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vfms_lane_f64 (float64_t __a, float64_t __b,
+ float64_t __c, const int __lane)
+{
+ return __builtin_fma (-__b, __c, __a);
+}
+
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vfmsd_lane_f64 (float64_t __a, float64_t __b,
+ float64_t __c, const int __lane)
+{
+ return __builtin_fma (-__b, __c, __a);
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vfmss_lane_f32 (float32_t __a, float32_t __b,
+ float32x2_t __c, const int __lane)
+{
+ return __builtin_fmaf (-__b, __aarch64_vget_lane_f32 (__c, __lane), __a);
+}
+
+/* vfms_laneq */
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vfms_laneq_f32 (float32x2_t __a, float32x2_t __b,
+ float32x4_t __c, const int __lane)
+{
+ return __builtin_aarch64_fmav2sf (-__b,
+ __aarch64_vdup_laneq_f32 (__c, __lane),
+ __a);
+}
+
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vfms_laneq_f64 (float64_t __a, float64_t __b,
+ float64x2_t __c, const int __lane)
+{
+ return __builtin_fma (-__b, __aarch64_vgetq_lane_f64 (__c, __lane), __a);
+}
+
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vfmsd_laneq_f64 (float64_t __a, float64_t __b,
+ float64x2_t __c, const int __lane)
+{
+ return __builtin_fma (-__b, __aarch64_vgetq_lane_f64 (__c, __lane), __a);
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vfmss_laneq_f32 (float32_t __a, float32_t __b,
+ float32x4_t __c, const int __lane)
+{
+ return __builtin_fmaf (-__b, __aarch64_vgetq_lane_f32 (__c, __lane), __a);
+}
+
+/* vfmsq_lane */
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vfmsq_lane_f32 (float32x4_t __a, float32x4_t __b,
+ float32x2_t __c, const int __lane)
+{
+ return __builtin_aarch64_fmav4sf (-__b,
+ __aarch64_vdupq_lane_f32 (__c, __lane),
+ __a);
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vfmsq_lane_f64 (float64x2_t __a, float64x2_t __b,
+ float64_t __c, const int __lane)
+{
+ return __builtin_aarch64_fmav2df (-__b, vdupq_n_f64 (__c), __a);
+}
+
+/* vfmsq_laneq */
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vfmsq_laneq_f32 (float32x4_t __a, float32x4_t __b,
+ float32x4_t __c, const int __lane)
+{
+ return __builtin_aarch64_fmav4sf (-__b,
+ __aarch64_vdupq_laneq_f32 (__c, __lane),
+ __a);
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vfmsq_laneq_f64 (float64x2_t __a, float64x2_t __b,
+ float64x2_t __c, const int __lane)
+{
+ return __builtin_aarch64_fmav2df (-__b,
+ __aarch64_vdupq_laneq_f64 (__c, __lane),
+ __a);
+}
+
+/* vld1 */
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vld1_f32 (const float32_t *a)
+{
+ return __builtin_aarch64_ld1v2sf ((const __builtin_aarch64_simd_sf *) a);
+}
+
+__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+vld1_f64 (const float64_t *a)
+{
+ return *a;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vld1_p8 (const poly8_t *a)
+{
+ return (poly8x8_t)
+ __builtin_aarch64_ld1v8qi ((const __builtin_aarch64_simd_qi *) a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vld1_p16 (const poly16_t *a)
+{
+ return (poly16x4_t)
+ __builtin_aarch64_ld1v4hi ((const __builtin_aarch64_simd_hi *) a);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vld1_s8 (const int8_t *a)
+{
+ return __builtin_aarch64_ld1v8qi ((const __builtin_aarch64_simd_qi *) a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vld1_s16 (const int16_t *a)
+{
+ return __builtin_aarch64_ld1v4hi ((const __builtin_aarch64_simd_hi *) a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vld1_s32 (const int32_t *a)
+{
+ return __builtin_aarch64_ld1v2si ((const __builtin_aarch64_simd_si *) a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vld1_s64 (const int64_t *a)
+{
+ return *a;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vld1_u8 (const uint8_t *a)
+{
+ return (uint8x8_t)
+ __builtin_aarch64_ld1v8qi ((const __builtin_aarch64_simd_qi *) a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vld1_u16 (const uint16_t *a)
+{
+ return (uint16x4_t)
+ __builtin_aarch64_ld1v4hi ((const __builtin_aarch64_simd_hi *) a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vld1_u32 (const uint32_t *a)
+{
+ return (uint32x2_t)
+ __builtin_aarch64_ld1v2si ((const __builtin_aarch64_simd_si *) a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vld1_u64 (const uint64_t *a)
+{
+ return *a;
+}
+
+/* vld1q */
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vld1q_f32 (const float32_t *a)
+{
+ return __builtin_aarch64_ld1v4sf ((const __builtin_aarch64_simd_sf *) a);
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vld1q_f64 (const float64_t *a)
+{
+ return __builtin_aarch64_ld1v2df ((const __builtin_aarch64_simd_df *) a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vld1q_p8 (const poly8_t *a)
+{
+ return (poly8x16_t)
+ __builtin_aarch64_ld1v16qi ((const __builtin_aarch64_simd_qi *) a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vld1q_p16 (const poly16_t *a)
+{
+ return (poly16x8_t)
+ __builtin_aarch64_ld1v8hi ((const __builtin_aarch64_simd_hi *) a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vld1q_s8 (const int8_t *a)
+{
+ return __builtin_aarch64_ld1v16qi ((const __builtin_aarch64_simd_qi *) a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vld1q_s16 (const int16_t *a)
+{
+ return __builtin_aarch64_ld1v8hi ((const __builtin_aarch64_simd_hi *) a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vld1q_s32 (const int32_t *a)
+{
+ return __builtin_aarch64_ld1v4si ((const __builtin_aarch64_simd_si *) a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vld1q_s64 (const int64_t *a)
+{
+ return __builtin_aarch64_ld1v2di ((const __builtin_aarch64_simd_di *) a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vld1q_u8 (const uint8_t *a)
+{
+ return (uint8x16_t)
+ __builtin_aarch64_ld1v16qi ((const __builtin_aarch64_simd_qi *) a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vld1q_u16 (const uint16_t *a)
+{
+ return (uint16x8_t)
+ __builtin_aarch64_ld1v8hi ((const __builtin_aarch64_simd_hi *) a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vld1q_u32 (const uint32_t *a)
+{
+ return (uint32x4_t)
+ __builtin_aarch64_ld1v4si ((const __builtin_aarch64_simd_si *) a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vld1q_u64 (const uint64_t *a)
+{
+ return (uint64x2_t)
+ __builtin_aarch64_ld1v2di ((const __builtin_aarch64_simd_di *) a);
+}
+
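+/* vld1/vld1q load one vector's worth of elements from consecutive
+   memory.  A minimal sketch, assuming vaddq_f32 and vst1q_f32 as
+   defined elsewhere in this header; `example_add_arrays' is an
+   illustrative name only and assumes at least four valid elements per
+   pointer:
+
+     static inline void
+     example_add_arrays (float32_t *dst, const float32_t *a, const float32_t *b)
+     {
+       float32x4_t va = vld1q_f32 (a);
+       float32x4_t vb = vld1q_f32 (b);
+       vst1q_f32 (dst, vaddq_f32 (va, vb));   // dst[i] = a[i] + b[i], i = 0..3
+     }
+*/
+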
+/* vldn */
+
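+/* The vld2 family loads 2 * N elements and de-interleaves them, so
+   val[0] receives elements 0, 2, 4, ... and val[1] elements 1, 3, 5, ...
+   of the source array.  For instance, with int32_t buf[4] = {0, 1, 2, 3},
+   vld2_s32 (buf) yields val[0] = {0, 2} and val[1] = {1, 3}.  */
+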
+__extension__ static __inline int64x1x2_t __attribute__ ((__always_inline__))
+vld2_s64 (const int64_t * __a)
+{
+ int64x1x2_t ret;
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_ld2di ((const __builtin_aarch64_simd_di *) __a);
+ ret.val[0] = (int64x1_t) __builtin_aarch64_get_dregoidi (__o, 0);
+ ret.val[1] = (int64x1_t) __builtin_aarch64_get_dregoidi (__o, 1);
+ return ret;
+}
+
+__extension__ static __inline uint64x1x2_t __attribute__ ((__always_inline__))
+vld2_u64 (const uint64_t * __a)
+{
+ uint64x1x2_t ret;
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_ld2di ((const __builtin_aarch64_simd_di *) __a);
+ ret.val[0] = (uint64x1_t) __builtin_aarch64_get_dregoidi (__o, 0);
+ ret.val[1] = (uint64x1_t) __builtin_aarch64_get_dregoidi (__o, 1);
+ return ret;
+}
+
+__extension__ static __inline float64x1x2_t __attribute__ ((__always_inline__))
+vld2_f64 (const float64_t * __a)
+{
+ float64x1x2_t ret;
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_ld2df ((const __builtin_aarch64_simd_df *) __a);
+ ret.val[0] = (float64x1_t) __builtin_aarch64_get_dregoidf (__o, 0);
+ ret.val[1] = (float64x1_t) __builtin_aarch64_get_dregoidf (__o, 1);
+ return ret;
+}
+
+__extension__ static __inline int8x8x2_t __attribute__ ((__always_inline__))
+vld2_s8 (const int8_t * __a)
+{
+ int8x8x2_t ret;
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_ld2v8qi ((const __builtin_aarch64_simd_qi *) __a);
+ ret.val[0] = (int8x8_t) __builtin_aarch64_get_dregoiv8qi (__o, 0);
+ ret.val[1] = (int8x8_t) __builtin_aarch64_get_dregoiv8qi (__o, 1);
+ return ret;
+}
+
+__extension__ static __inline poly8x8x2_t __attribute__ ((__always_inline__))
+vld2_p8 (const poly8_t * __a)
+{
+ poly8x8x2_t ret;
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_ld2v8qi ((const __builtin_aarch64_simd_qi *) __a);
+ ret.val[0] = (poly8x8_t) __builtin_aarch64_get_dregoiv8qi (__o, 0);
+ ret.val[1] = (poly8x8_t) __builtin_aarch64_get_dregoiv8qi (__o, 1);
+ return ret;
+}
+
+__extension__ static __inline int16x4x2_t __attribute__ ((__always_inline__))
+vld2_s16 (const int16_t * __a)
+{
+ int16x4x2_t ret;
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_ld2v4hi ((const __builtin_aarch64_simd_hi *) __a);
+ ret.val[0] = (int16x4_t) __builtin_aarch64_get_dregoiv4hi (__o, 0);
+ ret.val[1] = (int16x4_t) __builtin_aarch64_get_dregoiv4hi (__o, 1);
+ return ret;
+}
+
+__extension__ static __inline poly16x4x2_t __attribute__ ((__always_inline__))
+vld2_p16 (const poly16_t * __a)
+{
+ poly16x4x2_t ret;
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_ld2v4hi ((const __builtin_aarch64_simd_hi *) __a);
+ ret.val[0] = (poly16x4_t) __builtin_aarch64_get_dregoiv4hi (__o, 0);
+ ret.val[1] = (poly16x4_t) __builtin_aarch64_get_dregoiv4hi (__o, 1);
+ return ret;
+}
+
+__extension__ static __inline int32x2x2_t __attribute__ ((__always_inline__))
+vld2_s32 (const int32_t * __a)
+{
+ int32x2x2_t ret;
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_ld2v2si ((const __builtin_aarch64_simd_si *) __a);
+ ret.val[0] = (int32x2_t) __builtin_aarch64_get_dregoiv2si (__o, 0);
+ ret.val[1] = (int32x2_t) __builtin_aarch64_get_dregoiv2si (__o, 1);
+ return ret;
+}
+
+__extension__ static __inline uint8x8x2_t __attribute__ ((__always_inline__))
+vld2_u8 (const uint8_t * __a)
+{
+ uint8x8x2_t ret;
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_ld2v8qi ((const __builtin_aarch64_simd_qi *) __a);
+ ret.val[0] = (uint8x8_t) __builtin_aarch64_get_dregoiv8qi (__o, 0);
+ ret.val[1] = (uint8x8_t) __builtin_aarch64_get_dregoiv8qi (__o, 1);
+ return ret;
+}
+
+__extension__ static __inline uint16x4x2_t __attribute__ ((__always_inline__))
+vld2_u16 (const uint16_t * __a)
+{
+ uint16x4x2_t ret;
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_ld2v4hi ((const __builtin_aarch64_simd_hi *) __a);
+ ret.val[0] = (uint16x4_t) __builtin_aarch64_get_dregoiv4hi (__o, 0);
+ ret.val[1] = (uint16x4_t) __builtin_aarch64_get_dregoiv4hi (__o, 1);
+ return ret;
+}
+
+__extension__ static __inline uint32x2x2_t __attribute__ ((__always_inline__))
+vld2_u32 (const uint32_t * __a)
+{
+ uint32x2x2_t ret;
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_ld2v2si ((const __builtin_aarch64_simd_si *) __a);
+ ret.val[0] = (uint32x2_t) __builtin_aarch64_get_dregoiv2si (__o, 0);
+ ret.val[1] = (uint32x2_t) __builtin_aarch64_get_dregoiv2si (__o, 1);
+ return ret;
+}
+
+__extension__ static __inline float32x2x2_t __attribute__ ((__always_inline__))
+vld2_f32 (const float32_t * __a)
+{
+ float32x2x2_t ret;
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_ld2v2sf ((const __builtin_aarch64_simd_sf *) __a);
+ ret.val[0] = (float32x2_t) __builtin_aarch64_get_dregoiv2sf (__o, 0);
+ ret.val[1] = (float32x2_t) __builtin_aarch64_get_dregoiv2sf (__o, 1);
+ return ret;
+}
+
+__extension__ static __inline int8x16x2_t __attribute__ ((__always_inline__))
+vld2q_s8 (const int8_t * __a)
+{
+ int8x16x2_t ret;
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_ld2v16qi ((const __builtin_aarch64_simd_qi *) __a);
+ ret.val[0] = (int8x16_t) __builtin_aarch64_get_qregoiv16qi (__o, 0);
+ ret.val[1] = (int8x16_t) __builtin_aarch64_get_qregoiv16qi (__o, 1);
+ return ret;
+}
+
+__extension__ static __inline poly8x16x2_t __attribute__ ((__always_inline__))
+vld2q_p8 (const poly8_t * __a)
+{
+ poly8x16x2_t ret;
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_ld2v16qi ((const __builtin_aarch64_simd_qi *) __a);
+ ret.val[0] = (poly8x16_t) __builtin_aarch64_get_qregoiv16qi (__o, 0);
+ ret.val[1] = (poly8x16_t) __builtin_aarch64_get_qregoiv16qi (__o, 1);
+ return ret;
+}
+
+__extension__ static __inline int16x8x2_t __attribute__ ((__always_inline__))
+vld2q_s16 (const int16_t * __a)
+{
+ int16x8x2_t ret;
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_ld2v8hi ((const __builtin_aarch64_simd_hi *) __a);
+ ret.val[0] = (int16x8_t) __builtin_aarch64_get_qregoiv8hi (__o, 0);
+ ret.val[1] = (int16x8_t) __builtin_aarch64_get_qregoiv8hi (__o, 1);
+ return ret;
+}
+
+__extension__ static __inline poly16x8x2_t __attribute__ ((__always_inline__))
+vld2q_p16 (const poly16_t * __a)
+{
+ poly16x8x2_t ret;
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_ld2v8hi ((const __builtin_aarch64_simd_hi *) __a);
+ ret.val[0] = (poly16x8_t) __builtin_aarch64_get_qregoiv8hi (__o, 0);
+ ret.val[1] = (poly16x8_t) __builtin_aarch64_get_qregoiv8hi (__o, 1);
+ return ret;
+}
+
+__extension__ static __inline int32x4x2_t __attribute__ ((__always_inline__))
+vld2q_s32 (const int32_t * __a)
+{
+ int32x4x2_t ret;
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_ld2v4si ((const __builtin_aarch64_simd_si *) __a);
+ ret.val[0] = (int32x4_t) __builtin_aarch64_get_qregoiv4si (__o, 0);
+ ret.val[1] = (int32x4_t) __builtin_aarch64_get_qregoiv4si (__o, 1);
+ return ret;
+}
+
+__extension__ static __inline int64x2x2_t __attribute__ ((__always_inline__))
+vld2q_s64 (const int64_t * __a)
+{
+ int64x2x2_t ret;
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_ld2v2di ((const __builtin_aarch64_simd_di *) __a);
+ ret.val[0] = (int64x2_t) __builtin_aarch64_get_qregoiv2di (__o, 0);
+ ret.val[1] = (int64x2_t) __builtin_aarch64_get_qregoiv2di (__o, 1);
+ return ret;
+}
+
+__extension__ static __inline uint8x16x2_t __attribute__ ((__always_inline__))
+vld2q_u8 (const uint8_t * __a)
+{
+ uint8x16x2_t ret;
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_ld2v16qi ((const __builtin_aarch64_simd_qi *) __a);
+ ret.val[0] = (uint8x16_t) __builtin_aarch64_get_qregoiv16qi (__o, 0);
+ ret.val[1] = (uint8x16_t) __builtin_aarch64_get_qregoiv16qi (__o, 1);
+ return ret;
+}
+
+__extension__ static __inline uint16x8x2_t __attribute__ ((__always_inline__))
+vld2q_u16 (const uint16_t * __a)
+{
+ uint16x8x2_t ret;
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_ld2v8hi ((const __builtin_aarch64_simd_hi *) __a);
+ ret.val[0] = (uint16x8_t) __builtin_aarch64_get_qregoiv8hi (__o, 0);
+ ret.val[1] = (uint16x8_t) __builtin_aarch64_get_qregoiv8hi (__o, 1);
+ return ret;
+}
+
+__extension__ static __inline uint32x4x2_t __attribute__ ((__always_inline__))
+vld2q_u32 (const uint32_t * __a)
+{
+ uint32x4x2_t ret;
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_ld2v4si ((const __builtin_aarch64_simd_si *) __a);
+ ret.val[0] = (uint32x4_t) __builtin_aarch64_get_qregoiv4si (__o, 0);
+ ret.val[1] = (uint32x4_t) __builtin_aarch64_get_qregoiv4si (__o, 1);
+ return ret;
+}
+
+__extension__ static __inline uint64x2x2_t __attribute__ ((__always_inline__))
+vld2q_u64 (const uint64_t * __a)
+{
+ uint64x2x2_t ret;
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_ld2v2di ((const __builtin_aarch64_simd_di *) __a);
+ ret.val[0] = (uint64x2_t) __builtin_aarch64_get_qregoiv2di (__o, 0);
+ ret.val[1] = (uint64x2_t) __builtin_aarch64_get_qregoiv2di (__o, 1);
+ return ret;
+}
+
+__extension__ static __inline float32x4x2_t __attribute__ ((__always_inline__))
+vld2q_f32 (const float32_t * __a)
+{
+ float32x4x2_t ret;
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_ld2v4sf ((const __builtin_aarch64_simd_sf *) __a);
+ ret.val[0] = (float32x4_t) __builtin_aarch64_get_qregoiv4sf (__o, 0);
+ ret.val[1] = (float32x4_t) __builtin_aarch64_get_qregoiv4sf (__o, 1);
+ return ret;
+}
+
+__extension__ static __inline float64x2x2_t __attribute__ ((__always_inline__))
+vld2q_f64 (const float64_t * __a)
+{
+ float64x2x2_t ret;
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_ld2v2df ((const __builtin_aarch64_simd_df *) __a);
+ ret.val[0] = (float64x2_t) __builtin_aarch64_get_qregoiv2df (__o, 0);
+ ret.val[1] = (float64x2_t) __builtin_aarch64_get_qregoiv2df (__o, 1);
+ return ret;
+}
+
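+/* Editorial usage sketch (not part of the original patch): the vld2
+   intrinsics above perform de-interleaving structure loads.  Assuming
+   this header is included as <arm_neon.h>, one might split an
+   interleaved {re, im, re, im, ...} float buffer like so:
+
+     void
+     split_complex (const float *src, float *re, float *im)
+     {
+       float32x4x2_t v = vld2q_f32 (src);  // val[0] = re lanes, val[1] = im lanes
+       vst1q_f32 (re, v.val[0]);
+       vst1q_f32 (im, v.val[1]);
+     }
+*/
+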
+__extension__ static __inline int64x1x3_t __attribute__ ((__always_inline__))
+vld3_s64 (const int64_t * __a)
+{
+ int64x1x3_t ret;
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_ld3di ((const __builtin_aarch64_simd_di *) __a);
+ ret.val[0] = (int64x1_t) __builtin_aarch64_get_dregcidi (__o, 0);
+ ret.val[1] = (int64x1_t) __builtin_aarch64_get_dregcidi (__o, 1);
+ ret.val[2] = (int64x1_t) __builtin_aarch64_get_dregcidi (__o, 2);
+ return ret;
+}
+
+__extension__ static __inline uint64x1x3_t __attribute__ ((__always_inline__))
+vld3_u64 (const uint64_t * __a)
+{
+ uint64x1x3_t ret;
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_ld3di ((const __builtin_aarch64_simd_di *) __a);
+ ret.val[0] = (uint64x1_t) __builtin_aarch64_get_dregcidi (__o, 0);
+ ret.val[1] = (uint64x1_t) __builtin_aarch64_get_dregcidi (__o, 1);
+ ret.val[2] = (uint64x1_t) __builtin_aarch64_get_dregcidi (__o, 2);
+ return ret;
+}
+
+__extension__ static __inline float64x1x3_t __attribute__ ((__always_inline__))
+vld3_f64 (const float64_t * __a)
+{
+ float64x1x3_t ret;
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_ld3df ((const __builtin_aarch64_simd_df *) __a);
+ ret.val[0] = (float64x1_t) __builtin_aarch64_get_dregcidf (__o, 0);
+ ret.val[1] = (float64x1_t) __builtin_aarch64_get_dregcidf (__o, 1);
+ ret.val[2] = (float64x1_t) __builtin_aarch64_get_dregcidf (__o, 2);
+ return ret;
+}
+
+__extension__ static __inline int8x8x3_t __attribute__ ((__always_inline__))
+vld3_s8 (const int8_t * __a)
+{
+ int8x8x3_t ret;
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_ld3v8qi ((const __builtin_aarch64_simd_qi *) __a);
+ ret.val[0] = (int8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 0);
+ ret.val[1] = (int8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 1);
+ ret.val[2] = (int8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 2);
+ return ret;
+}
+
+__extension__ static __inline poly8x8x3_t __attribute__ ((__always_inline__))
+vld3_p8 (const poly8_t * __a)
+{
+ poly8x8x3_t ret;
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_ld3v8qi ((const __builtin_aarch64_simd_qi *) __a);
+ ret.val[0] = (poly8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 0);
+ ret.val[1] = (poly8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 1);
+ ret.val[2] = (poly8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 2);
+ return ret;
+}
+
+__extension__ static __inline int16x4x3_t __attribute__ ((__always_inline__))
+vld3_s16 (const int16_t * __a)
+{
+ int16x4x3_t ret;
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_ld3v4hi ((const __builtin_aarch64_simd_hi *) __a);
+ ret.val[0] = (int16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 0);
+ ret.val[1] = (int16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 1);
+ ret.val[2] = (int16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 2);
+ return ret;
+}
+
+__extension__ static __inline poly16x4x3_t __attribute__ ((__always_inline__))
+vld3_p16 (const poly16_t * __a)
+{
+ poly16x4x3_t ret;
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_ld3v4hi ((const __builtin_aarch64_simd_hi *) __a);
+ ret.val[0] = (poly16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 0);
+ ret.val[1] = (poly16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 1);
+ ret.val[2] = (poly16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 2);
+ return ret;
+}
+
+__extension__ static __inline int32x2x3_t __attribute__ ((__always_inline__))
+vld3_s32 (const int32_t * __a)
+{
+ int32x2x3_t ret;
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_ld3v2si ((const __builtin_aarch64_simd_si *) __a);
+ ret.val[0] = (int32x2_t) __builtin_aarch64_get_dregciv2si (__o, 0);
+ ret.val[1] = (int32x2_t) __builtin_aarch64_get_dregciv2si (__o, 1);
+ ret.val[2] = (int32x2_t) __builtin_aarch64_get_dregciv2si (__o, 2);
+ return ret;
+}
+
+__extension__ static __inline uint8x8x3_t __attribute__ ((__always_inline__))
+vld3_u8 (const uint8_t * __a)
+{
+ uint8x8x3_t ret;
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_ld3v8qi ((const __builtin_aarch64_simd_qi *) __a);
+ ret.val[0] = (uint8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 0);
+ ret.val[1] = (uint8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 1);
+ ret.val[2] = (uint8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 2);
+ return ret;
+}
+
+__extension__ static __inline uint16x4x3_t __attribute__ ((__always_inline__))
+vld3_u16 (const uint16_t * __a)
+{
+ uint16x4x3_t ret;
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_ld3v4hi ((const __builtin_aarch64_simd_hi *) __a);
+ ret.val[0] = (uint16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 0);
+ ret.val[1] = (uint16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 1);
+ ret.val[2] = (uint16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 2);
+ return ret;
+}
+
+__extension__ static __inline uint32x2x3_t __attribute__ ((__always_inline__))
+vld3_u32 (const uint32_t * __a)
+{
+ uint32x2x3_t ret;
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_ld3v2si ((const __builtin_aarch64_simd_si *) __a);
+ ret.val[0] = (uint32x2_t) __builtin_aarch64_get_dregciv2si (__o, 0);
+ ret.val[1] = (uint32x2_t) __builtin_aarch64_get_dregciv2si (__o, 1);
+ ret.val[2] = (uint32x2_t) __builtin_aarch64_get_dregciv2si (__o, 2);
+ return ret;
+}
+
+__extension__ static __inline float32x2x3_t __attribute__ ((__always_inline__))
+vld3_f32 (const float32_t * __a)
+{
+ float32x2x3_t ret;
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_ld3v2sf ((const __builtin_aarch64_simd_sf *) __a);
+ ret.val[0] = (float32x2_t) __builtin_aarch64_get_dregciv2sf (__o, 0);
+ ret.val[1] = (float32x2_t) __builtin_aarch64_get_dregciv2sf (__o, 1);
+ ret.val[2] = (float32x2_t) __builtin_aarch64_get_dregciv2sf (__o, 2);
+ return ret;
+}
+
+__extension__ static __inline int8x16x3_t __attribute__ ((__always_inline__))
+vld3q_s8 (const int8_t * __a)
+{
+ int8x16x3_t ret;
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_ld3v16qi ((const __builtin_aarch64_simd_qi *) __a);
+ ret.val[0] = (int8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 0);
+ ret.val[1] = (int8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 1);
+ ret.val[2] = (int8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 2);
+ return ret;
+}
+
+__extension__ static __inline poly8x16x3_t __attribute__ ((__always_inline__))
+vld3q_p8 (const poly8_t * __a)
+{
+ poly8x16x3_t ret;
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_ld3v16qi ((const __builtin_aarch64_simd_qi *) __a);
+ ret.val[0] = (poly8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 0);
+ ret.val[1] = (poly8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 1);
+ ret.val[2] = (poly8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 2);
+ return ret;
+}
+
+__extension__ static __inline int16x8x3_t __attribute__ ((__always_inline__))
+vld3q_s16 (const int16_t * __a)
+{
+ int16x8x3_t ret;
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_ld3v8hi ((const __builtin_aarch64_simd_hi *) __a);
+ ret.val[0] = (int16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 0);
+ ret.val[1] = (int16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 1);
+ ret.val[2] = (int16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 2);
+ return ret;
+}
+
+__extension__ static __inline poly16x8x3_t __attribute__ ((__always_inline__))
+vld3q_p16 (const poly16_t * __a)
+{
+ poly16x8x3_t ret;
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_ld3v8hi ((const __builtin_aarch64_simd_hi *) __a);
+ ret.val[0] = (poly16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 0);
+ ret.val[1] = (poly16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 1);
+ ret.val[2] = (poly16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 2);
+ return ret;
+}
+
+__extension__ static __inline int32x4x3_t __attribute__ ((__always_inline__))
+vld3q_s32 (const int32_t * __a)
+{
+ int32x4x3_t ret;
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_ld3v4si ((const __builtin_aarch64_simd_si *) __a);
+ ret.val[0] = (int32x4_t) __builtin_aarch64_get_qregciv4si (__o, 0);
+ ret.val[1] = (int32x4_t) __builtin_aarch64_get_qregciv4si (__o, 1);
+ ret.val[2] = (int32x4_t) __builtin_aarch64_get_qregciv4si (__o, 2);
+ return ret;
+}
+
+__extension__ static __inline int64x2x3_t __attribute__ ((__always_inline__))
+vld3q_s64 (const int64_t * __a)
+{
+ int64x2x3_t ret;
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_ld3v2di ((const __builtin_aarch64_simd_di *) __a);
+ ret.val[0] = (int64x2_t) __builtin_aarch64_get_qregciv2di (__o, 0);
+ ret.val[1] = (int64x2_t) __builtin_aarch64_get_qregciv2di (__o, 1);
+ ret.val[2] = (int64x2_t) __builtin_aarch64_get_qregciv2di (__o, 2);
+ return ret;
+}
+
+__extension__ static __inline uint8x16x3_t __attribute__ ((__always_inline__))
+vld3q_u8 (const uint8_t * __a)
+{
+ uint8x16x3_t ret;
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_ld3v16qi ((const __builtin_aarch64_simd_qi *) __a);
+ ret.val[0] = (uint8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 0);
+ ret.val[1] = (uint8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 1);
+ ret.val[2] = (uint8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 2);
+ return ret;
+}
+
+__extension__ static __inline uint16x8x3_t __attribute__ ((__always_inline__))
+vld3q_u16 (const uint16_t * __a)
+{
+ uint16x8x3_t ret;
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_ld3v8hi ((const __builtin_aarch64_simd_hi *) __a);
+ ret.val[0] = (uint16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 0);
+ ret.val[1] = (uint16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 1);
+ ret.val[2] = (uint16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 2);
+ return ret;
+}
+
+__extension__ static __inline uint32x4x3_t __attribute__ ((__always_inline__))
+vld3q_u32 (const uint32_t * __a)
+{
+ uint32x4x3_t ret;
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_ld3v4si ((const __builtin_aarch64_simd_si *) __a);
+ ret.val[0] = (uint32x4_t) __builtin_aarch64_get_qregciv4si (__o, 0);
+ ret.val[1] = (uint32x4_t) __builtin_aarch64_get_qregciv4si (__o, 1);
+ ret.val[2] = (uint32x4_t) __builtin_aarch64_get_qregciv4si (__o, 2);
+ return ret;
+}
+
+__extension__ static __inline uint64x2x3_t __attribute__ ((__always_inline__))
+vld3q_u64 (const uint64_t * __a)
+{
+ uint64x2x3_t ret;
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_ld3v2di ((const __builtin_aarch64_simd_di *) __a);
+ ret.val[0] = (uint64x2_t) __builtin_aarch64_get_qregciv2di (__o, 0);
+ ret.val[1] = (uint64x2_t) __builtin_aarch64_get_qregciv2di (__o, 1);
+ ret.val[2] = (uint64x2_t) __builtin_aarch64_get_qregciv2di (__o, 2);
+ return ret;
+}
+
+__extension__ static __inline float32x4x3_t __attribute__ ((__always_inline__))
+vld3q_f32 (const float32_t * __a)
+{
+ float32x4x3_t ret;
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_ld3v4sf ((const __builtin_aarch64_simd_sf *) __a);
+ ret.val[0] = (float32x4_t) __builtin_aarch64_get_qregciv4sf (__o, 0);
+ ret.val[1] = (float32x4_t) __builtin_aarch64_get_qregciv4sf (__o, 1);
+ ret.val[2] = (float32x4_t) __builtin_aarch64_get_qregciv4sf (__o, 2);
+ return ret;
+}
+
+__extension__ static __inline float64x2x3_t __attribute__ ((__always_inline__))
+vld3q_f64 (const float64_t * __a)
+{
+ float64x2x3_t ret;
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_ld3v2df ((const __builtin_aarch64_simd_df *) __a);
+ ret.val[0] = (float64x2_t) __builtin_aarch64_get_qregciv2df (__o, 0);
+ ret.val[1] = (float64x2_t) __builtin_aarch64_get_qregciv2df (__o, 1);
+ ret.val[2] = (float64x2_t) __builtin_aarch64_get_qregciv2df (__o, 2);
+ return ret;
+}
+
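+/* Editorial usage sketch (not part of the original patch): the vld3
+   forms de-interleave three-element structures, e.g. packed RGB pixels:
+
+     uint8x16x3_t px = vld3q_u8 (rgb);   // px.val[0]=R, val[1]=G, val[2]=B
+
+   Each val[] vector then holds 16 samples of a single channel.  */
+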
+__extension__ static __inline int64x1x4_t __attribute__ ((__always_inline__))
+vld4_s64 (const int64_t * __a)
+{
+ int64x1x4_t ret;
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_ld4di ((const __builtin_aarch64_simd_di *) __a);
+ ret.val[0] = (int64x1_t) __builtin_aarch64_get_dregxidi (__o, 0);
+ ret.val[1] = (int64x1_t) __builtin_aarch64_get_dregxidi (__o, 1);
+ ret.val[2] = (int64x1_t) __builtin_aarch64_get_dregxidi (__o, 2);
+ ret.val[3] = (int64x1_t) __builtin_aarch64_get_dregxidi (__o, 3);
+ return ret;
+}
+
+__extension__ static __inline uint64x1x4_t __attribute__ ((__always_inline__))
+vld4_u64 (const uint64_t * __a)
+{
+ uint64x1x4_t ret;
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_ld4di ((const __builtin_aarch64_simd_di *) __a);
+ ret.val[0] = (uint64x1_t) __builtin_aarch64_get_dregxidi (__o, 0);
+ ret.val[1] = (uint64x1_t) __builtin_aarch64_get_dregxidi (__o, 1);
+ ret.val[2] = (uint64x1_t) __builtin_aarch64_get_dregxidi (__o, 2);
+ ret.val[3] = (uint64x1_t) __builtin_aarch64_get_dregxidi (__o, 3);
+ return ret;
+}
+
+__extension__ static __inline float64x1x4_t __attribute__ ((__always_inline__))
+vld4_f64 (const float64_t * __a)
+{
+ float64x1x4_t ret;
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_ld4df ((const __builtin_aarch64_simd_df *) __a);
+ ret.val[0] = (float64x1_t) __builtin_aarch64_get_dregxidf (__o, 0);
+ ret.val[1] = (float64x1_t) __builtin_aarch64_get_dregxidf (__o, 1);
+ ret.val[2] = (float64x1_t) __builtin_aarch64_get_dregxidf (__o, 2);
+ ret.val[3] = (float64x1_t) __builtin_aarch64_get_dregxidf (__o, 3);
+ return ret;
+}
+
+__extension__ static __inline int8x8x4_t __attribute__ ((__always_inline__))
+vld4_s8 (const int8_t * __a)
+{
+ int8x8x4_t ret;
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_ld4v8qi ((const __builtin_aarch64_simd_qi *) __a);
+ ret.val[0] = (int8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 0);
+ ret.val[1] = (int8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 1);
+ ret.val[2] = (int8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 2);
+ ret.val[3] = (int8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 3);
+ return ret;
+}
+
+__extension__ static __inline poly8x8x4_t __attribute__ ((__always_inline__))
+vld4_p8 (const poly8_t * __a)
+{
+ poly8x8x4_t ret;
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_ld4v8qi ((const __builtin_aarch64_simd_qi *) __a);
+ ret.val[0] = (poly8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 0);
+ ret.val[1] = (poly8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 1);
+ ret.val[2] = (poly8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 2);
+ ret.val[3] = (poly8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 3);
+ return ret;
+}
+
+__extension__ static __inline int16x4x4_t __attribute__ ((__always_inline__))
+vld4_s16 (const int16_t * __a)
+{
+ int16x4x4_t ret;
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_ld4v4hi ((const __builtin_aarch64_simd_hi *) __a);
+ ret.val[0] = (int16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 0);
+ ret.val[1] = (int16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 1);
+ ret.val[2] = (int16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 2);
+ ret.val[3] = (int16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 3);
+ return ret;
+}
+
+__extension__ static __inline poly16x4x4_t __attribute__ ((__always_inline__))
+vld4_p16 (const poly16_t * __a)
+{
+ poly16x4x4_t ret;
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_ld4v4hi ((const __builtin_aarch64_simd_hi *) __a);
+ ret.val[0] = (poly16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 0);
+ ret.val[1] = (poly16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 1);
+ ret.val[2] = (poly16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 2);
+ ret.val[3] = (poly16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 3);
+ return ret;
+}
+
+__extension__ static __inline int32x2x4_t __attribute__ ((__always_inline__))
+vld4_s32 (const int32_t * __a)
+{
+ int32x2x4_t ret;
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_ld4v2si ((const __builtin_aarch64_simd_si *) __a);
+ ret.val[0] = (int32x2_t) __builtin_aarch64_get_dregxiv2si (__o, 0);
+ ret.val[1] = (int32x2_t) __builtin_aarch64_get_dregxiv2si (__o, 1);
+ ret.val[2] = (int32x2_t) __builtin_aarch64_get_dregxiv2si (__o, 2);
+ ret.val[3] = (int32x2_t) __builtin_aarch64_get_dregxiv2si (__o, 3);
+ return ret;
+}
+
+__extension__ static __inline uint8x8x4_t __attribute__ ((__always_inline__))
+vld4_u8 (const uint8_t * __a)
+{
+ uint8x8x4_t ret;
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_ld4v8qi ((const __builtin_aarch64_simd_qi *) __a);
+ ret.val[0] = (uint8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 0);
+ ret.val[1] = (uint8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 1);
+ ret.val[2] = (uint8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 2);
+ ret.val[3] = (uint8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 3);
+ return ret;
+}
+
+__extension__ static __inline uint16x4x4_t __attribute__ ((__always_inline__))
+vld4_u16 (const uint16_t * __a)
+{
+ uint16x4x4_t ret;
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_ld4v4hi ((const __builtin_aarch64_simd_hi *) __a);
+ ret.val[0] = (uint16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 0);
+ ret.val[1] = (uint16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 1);
+ ret.val[2] = (uint16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 2);
+ ret.val[3] = (uint16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 3);
+ return ret;
+}
+
+__extension__ static __inline uint32x2x4_t __attribute__ ((__always_inline__))
+vld4_u32 (const uint32_t * __a)
+{
+ uint32x2x4_t ret;
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_ld4v2si ((const __builtin_aarch64_simd_si *) __a);
+ ret.val[0] = (uint32x2_t) __builtin_aarch64_get_dregxiv2si (__o, 0);
+ ret.val[1] = (uint32x2_t) __builtin_aarch64_get_dregxiv2si (__o, 1);
+ ret.val[2] = (uint32x2_t) __builtin_aarch64_get_dregxiv2si (__o, 2);
+ ret.val[3] = (uint32x2_t) __builtin_aarch64_get_dregxiv2si (__o, 3);
+ return ret;
+}
+
+__extension__ static __inline float32x2x4_t __attribute__ ((__always_inline__))
+vld4_f32 (const float32_t * __a)
+{
+ float32x2x4_t ret;
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_ld4v2sf ((const __builtin_aarch64_simd_sf *) __a);
+ ret.val[0] = (float32x2_t) __builtin_aarch64_get_dregxiv2sf (__o, 0);
+ ret.val[1] = (float32x2_t) __builtin_aarch64_get_dregxiv2sf (__o, 1);
+ ret.val[2] = (float32x2_t) __builtin_aarch64_get_dregxiv2sf (__o, 2);
+ ret.val[3] = (float32x2_t) __builtin_aarch64_get_dregxiv2sf (__o, 3);
+ return ret;
+}
+
+__extension__ static __inline int8x16x4_t __attribute__ ((__always_inline__))
+vld4q_s8 (const int8_t * __a)
+{
+ int8x16x4_t ret;
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_ld4v16qi ((const __builtin_aarch64_simd_qi *) __a);
+ ret.val[0] = (int8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 0);
+ ret.val[1] = (int8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 1);
+ ret.val[2] = (int8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 2);
+ ret.val[3] = (int8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 3);
+ return ret;
+}
+
+__extension__ static __inline poly8x16x4_t __attribute__ ((__always_inline__))
+vld4q_p8 (const poly8_t * __a)
+{
+ poly8x16x4_t ret;
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_ld4v16qi ((const __builtin_aarch64_simd_qi *) __a);
+ ret.val[0] = (poly8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 0);
+ ret.val[1] = (poly8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 1);
+ ret.val[2] = (poly8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 2);
+ ret.val[3] = (poly8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 3);
+ return ret;
+}
+
+__extension__ static __inline int16x8x4_t __attribute__ ((__always_inline__))
+vld4q_s16 (const int16_t * __a)
+{
+ int16x8x4_t ret;
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_ld4v8hi ((const __builtin_aarch64_simd_hi *) __a);
+ ret.val[0] = (int16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 0);
+ ret.val[1] = (int16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 1);
+ ret.val[2] = (int16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 2);
+ ret.val[3] = (int16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 3);
+ return ret;
+}
+
+__extension__ static __inline poly16x8x4_t __attribute__ ((__always_inline__))
+vld4q_p16 (const poly16_t * __a)
+{
+ poly16x8x4_t ret;
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_ld4v8hi ((const __builtin_aarch64_simd_hi *) __a);
+ ret.val[0] = (poly16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 0);
+ ret.val[1] = (poly16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 1);
+ ret.val[2] = (poly16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 2);
+ ret.val[3] = (poly16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 3);
+ return ret;
+}
+
+__extension__ static __inline int32x4x4_t __attribute__ ((__always_inline__))
+vld4q_s32 (const int32_t * __a)
+{
+ int32x4x4_t ret;
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_ld4v4si ((const __builtin_aarch64_simd_si *) __a);
+ ret.val[0] = (int32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 0);
+ ret.val[1] = (int32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 1);
+ ret.val[2] = (int32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 2);
+ ret.val[3] = (int32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 3);
+ return ret;
+}
+
+__extension__ static __inline int64x2x4_t __attribute__ ((__always_inline__))
+vld4q_s64 (const int64_t * __a)
+{
+ int64x2x4_t ret;
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_ld4v2di ((const __builtin_aarch64_simd_di *) __a);
+ ret.val[0] = (int64x2_t) __builtin_aarch64_get_qregxiv2di (__o, 0);
+ ret.val[1] = (int64x2_t) __builtin_aarch64_get_qregxiv2di (__o, 1);
+ ret.val[2] = (int64x2_t) __builtin_aarch64_get_qregxiv2di (__o, 2);
+ ret.val[3] = (int64x2_t) __builtin_aarch64_get_qregxiv2di (__o, 3);
+ return ret;
+}
+
+__extension__ static __inline uint8x16x4_t __attribute__ ((__always_inline__))
+vld4q_u8 (const uint8_t * __a)
+{
+ uint8x16x4_t ret;
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_ld4v16qi ((const __builtin_aarch64_simd_qi *) __a);
+ ret.val[0] = (uint8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 0);
+ ret.val[1] = (uint8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 1);
+ ret.val[2] = (uint8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 2);
+ ret.val[3] = (uint8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 3);
+ return ret;
+}
+
+__extension__ static __inline uint16x8x4_t __attribute__ ((__always_inline__))
+vld4q_u16 (const uint16_t * __a)
+{
+ uint16x8x4_t ret;
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_ld4v8hi ((const __builtin_aarch64_simd_hi *) __a);
+ ret.val[0] = (uint16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 0);
+ ret.val[1] = (uint16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 1);
+ ret.val[2] = (uint16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 2);
+ ret.val[3] = (uint16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 3);
+ return ret;
+}
+
+__extension__ static __inline uint32x4x4_t __attribute__ ((__always_inline__))
+vld4q_u32 (const uint32_t * __a)
+{
+ uint32x4x4_t ret;
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_ld4v4si ((const __builtin_aarch64_simd_si *) __a);
+ ret.val[0] = (uint32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 0);
+ ret.val[1] = (uint32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 1);
+ ret.val[2] = (uint32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 2);
+ ret.val[3] = (uint32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 3);
+ return ret;
+}
+
+__extension__ static __inline uint64x2x4_t __attribute__ ((__always_inline__))
+vld4q_u64 (const uint64_t * __a)
+{
+ uint64x2x4_t ret;
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_ld4v2di ((const __builtin_aarch64_simd_di *) __a);
+ ret.val[0] = (uint64x2_t) __builtin_aarch64_get_qregxiv2di (__o, 0);
+ ret.val[1] = (uint64x2_t) __builtin_aarch64_get_qregxiv2di (__o, 1);
+ ret.val[2] = (uint64x2_t) __builtin_aarch64_get_qregxiv2di (__o, 2);
+ ret.val[3] = (uint64x2_t) __builtin_aarch64_get_qregxiv2di (__o, 3);
+ return ret;
+}
+
+__extension__ static __inline float32x4x4_t __attribute__ ((__always_inline__))
+vld4q_f32 (const float32_t * __a)
+{
+ float32x4x4_t ret;
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_ld4v4sf ((const __builtin_aarch64_simd_sf *) __a);
+ ret.val[0] = (float32x4_t) __builtin_aarch64_get_qregxiv4sf (__o, 0);
+ ret.val[1] = (float32x4_t) __builtin_aarch64_get_qregxiv4sf (__o, 1);
+ ret.val[2] = (float32x4_t) __builtin_aarch64_get_qregxiv4sf (__o, 2);
+ ret.val[3] = (float32x4_t) __builtin_aarch64_get_qregxiv4sf (__o, 3);
+ return ret;
+}
+
+__extension__ static __inline float64x2x4_t __attribute__ ((__always_inline__))
+vld4q_f64 (const float64_t * __a)
+{
+ float64x2x4_t ret;
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_ld4v2df ((const __builtin_aarch64_simd_df *) __a);
+ ret.val[0] = (float64x2_t) __builtin_aarch64_get_qregxiv2df (__o, 0);
+ ret.val[1] = (float64x2_t) __builtin_aarch64_get_qregxiv2df (__o, 1);
+ ret.val[2] = (float64x2_t) __builtin_aarch64_get_qregxiv2df (__o, 2);
+ ret.val[3] = (float64x2_t) __builtin_aarch64_get_qregxiv2df (__o, 3);
+ return ret;
+}
+
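+/* Editorial usage sketch (not part of the original patch): the vld4
+   forms extend the same pattern to four-element structures, such as
+   RGBA pixels:
+
+     uint8x16x4_t px = vld4q_u8 (rgba);  // val[0]=R, val[1]=G, val[2]=B, val[3]=A
+
+   i.e. 16 pixels (64 bytes) are loaded and split into four channel
+   vectors in a single operation.  */
+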
+/* vmax */
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmax_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return __builtin_aarch64_smax_nanv2sf (__a, __b);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vmax_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return __builtin_aarch64_smaxv8qi (__a, __b);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmax_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return __builtin_aarch64_smaxv4hi (__a, __b);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmax_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return __builtin_aarch64_smaxv2si (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vmax_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t) __builtin_aarch64_umaxv8qi ((int8x8_t) __a,
+ (int8x8_t) __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmax_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t) __builtin_aarch64_umaxv4hi ((int16x4_t) __a,
+ (int16x4_t) __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmax_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t) __builtin_aarch64_umaxv2si ((int32x2_t) __a,
+ (int32x2_t) __b);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vmaxq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return __builtin_aarch64_smax_nanv4sf (__a, __b);
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vmaxq_f64 (float64x2_t __a, float64x2_t __b)
+{
+ return __builtin_aarch64_smax_nanv2df (__a, __b);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vmaxq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_aarch64_smaxv16qi (__a, __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmaxq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_aarch64_smaxv8hi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmaxq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_aarch64_smaxv4si (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vmaxq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t) __builtin_aarch64_umaxv16qi ((int8x16_t) __a,
+ (int8x16_t) __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmaxq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t) __builtin_aarch64_umaxv8hi ((int16x8_t) __a,
+ (int16x8_t) __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmaxq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t) __builtin_aarch64_umaxv4si ((int32x4_t) __a,
+ (int32x4_t) __b);
+}
+
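+/* Editorial note (not part of the original patch): vmax/vmaxq map to
+   the smax_nan builtins, which return NaN when either input lane is
+   NaN (FMAX on AArch64), whereas the vmaxnm forms below use the plain
+   smax builtins (FMAXNM), which prefer the numeric operand.  A minimal
+   sketch:
+
+     float32x4_t hi = vmaxq_f32 (x, y);    // NaN lanes propagate
+     float32x4_t nm = vmaxnmq_f32 (x, y);  // IEEE maxNum semantics
+*/
+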
+/* vmaxnm */
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmaxnm_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return __builtin_aarch64_smaxv2sf (__a, __b);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vmaxnmq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return __builtin_aarch64_smaxv4sf (__a, __b);
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vmaxnmq_f64 (float64x2_t __a, float64x2_t __b)
+{
+ return __builtin_aarch64_smaxv2df (__a, __b);
+}
+
+/* vmaxv */
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vmaxv_f32 (float32x2_t __a)
+{
+ return vget_lane_f32 (__builtin_aarch64_reduc_smax_nan_v2sf (__a),
+ 0);
+}
+
+__extension__ static __inline int8_t __attribute__ ((__always_inline__))
+vmaxv_s8 (int8x8_t __a)
+{
+ return vget_lane_s8 (__builtin_aarch64_reduc_smax_v8qi (__a), 0);
+}
+
+__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+vmaxv_s16 (int16x4_t __a)
+{
+ return vget_lane_s16 (__builtin_aarch64_reduc_smax_v4hi (__a), 0);
+}
+
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vmaxv_s32 (int32x2_t __a)
+{
+ return vget_lane_s32 (__builtin_aarch64_reduc_smax_v2si (__a), 0);
+}
+
+__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
+vmaxv_u8 (uint8x8_t __a)
+{
+ return vget_lane_u8 ((uint8x8_t)
+ __builtin_aarch64_reduc_umax_v8qi ((int8x8_t) __a),
+ 0);
+}
+
+__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+vmaxv_u16 (uint16x4_t __a)
+{
+ return vget_lane_u16 ((uint16x4_t)
+ __builtin_aarch64_reduc_umax_v4hi ((int16x4_t) __a),
+ 0);
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vmaxv_u32 (uint32x2_t __a)
+{
+ return vget_lane_u32 ((uint32x2_t)
+ __builtin_aarch64_reduc_umax_v2si ((int32x2_t) __a),
+ 0);
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vmaxvq_f32 (float32x4_t __a)
+{
+ return vgetq_lane_f32 (__builtin_aarch64_reduc_smax_nan_v4sf (__a),
+ 0);
+}
+
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vmaxvq_f64 (float64x2_t __a)
+{
+ return vgetq_lane_f64 (__builtin_aarch64_reduc_smax_nan_v2df (__a),
+ 0);
+}
+
+__extension__ static __inline int8_t __attribute__ ((__always_inline__))
+vmaxvq_s8 (int8x16_t __a)
+{
+ return vgetq_lane_s8 (__builtin_aarch64_reduc_smax_v16qi (__a), 0);
+}
+
+__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+vmaxvq_s16 (int16x8_t __a)
+{
+ return vgetq_lane_s16 (__builtin_aarch64_reduc_smax_v8hi (__a), 0);
+}
+
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vmaxvq_s32 (int32x4_t __a)
+{
+ return vgetq_lane_s32 (__builtin_aarch64_reduc_smax_v4si (__a), 0);
+}
+
+__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
+vmaxvq_u8 (uint8x16_t __a)
+{
+ return vgetq_lane_u8 ((uint8x16_t)
+ __builtin_aarch64_reduc_umax_v16qi ((int8x16_t) __a),
+ 0);
+}
+
+__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+vmaxvq_u16 (uint16x8_t __a)
+{
+ return vgetq_lane_u16 ((uint16x8_t)
+ __builtin_aarch64_reduc_umax_v8hi ((int16x8_t) __a),
+ 0);
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vmaxvq_u32 (uint32x4_t __a)
+{
+ return vgetq_lane_u32 ((uint32x4_t)
+ __builtin_aarch64_reduc_umax_v4si ((int32x4_t) __a),
+ 0);
+}
+
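+/* Editorial usage sketch (not part of the original patch): the vmaxv
+   forms above reduce a whole vector to its largest lane, e.g. the
+   maximum of 16 bytes loaded from memory:
+
+     uint8_t peak = vmaxvq_u8 (vld1q_u8 (buf));
+*/
+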
+/* vmaxnmv */
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vmaxnmv_f32 (float32x2_t __a)
+{
+ return vget_lane_f32 (__builtin_aarch64_reduc_smax_v2sf (__a), 0);
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vmaxnmvq_f32 (float32x4_t __a)
+{
+ return vgetq_lane_f32 (__builtin_aarch64_reduc_smax_v4sf (__a), 0);
+}
+
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vmaxnmvq_f64 (float64x2_t __a)
+{
+ return vgetq_lane_f64 (__builtin_aarch64_reduc_smax_v2df (__a), 0);
+}
+
+/* vmin */
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmin_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return __builtin_aarch64_smin_nanv2sf (__a, __b);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vmin_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return __builtin_aarch64_sminv8qi (__a, __b);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmin_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return __builtin_aarch64_sminv4hi (__a, __b);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmin_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return __builtin_aarch64_sminv2si (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vmin_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t) __builtin_aarch64_uminv8qi ((int8x8_t) __a,
+ (int8x8_t) __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmin_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t) __builtin_aarch64_uminv4hi ((int16x4_t) __a,
+ (int16x4_t) __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmin_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t) __builtin_aarch64_uminv2si ((int32x2_t) __a,
+ (int32x2_t) __b);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vminq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return __builtin_aarch64_smin_nanv4sf (__a, __b);
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vminq_f64 (float64x2_t __a, float64x2_t __b)
+{
+ return __builtin_aarch64_smin_nanv2df (__a, __b);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vminq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_aarch64_sminv16qi (__a, __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vminq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_aarch64_sminv8hi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vminq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_aarch64_sminv4si (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vminq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t) __builtin_aarch64_uminv16qi ((int8x16_t) __a,
+ (int8x16_t) __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vminq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t) __builtin_aarch64_uminv8hi ((int16x8_t) __a,
+ (int16x8_t) __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vminq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t) __builtin_aarch64_uminv4si ((int32x4_t) __a,
+ (int32x4_t) __b);
+}
+
+/* vminnm */
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vminnm_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return __builtin_aarch64_sminv2sf (__a, __b);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vminnmq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return __builtin_aarch64_sminv4sf (__a, __b);
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vminnmq_f64 (float64x2_t __a, float64x2_t __b)
+{
+ return __builtin_aarch64_sminv2df (__a, __b);
+}
+
+/* vminv */
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vminv_f32 (float32x2_t __a)
+{
+ return vget_lane_f32 (__builtin_aarch64_reduc_smin_nan_v2sf (__a),
+ 0);
+}
+
+__extension__ static __inline int8_t __attribute__ ((__always_inline__))
+vminv_s8 (int8x8_t __a)
+{
+ return vget_lane_s8 (__builtin_aarch64_reduc_smin_v8qi (__a), 0);
+}
+
+__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+vminv_s16 (int16x4_t __a)
+{
+ return vget_lane_s16 (__builtin_aarch64_reduc_smin_v4hi (__a), 0);
+}
+
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vminv_s32 (int32x2_t __a)
+{
+ return vget_lane_s32 (__builtin_aarch64_reduc_smin_v2si (__a), 0);
+}
+
+__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
+vminv_u8 (uint8x8_t __a)
+{
+ return vget_lane_u8 ((uint8x8_t)
+ __builtin_aarch64_reduc_umin_v8qi ((int8x8_t) __a),
+ 0);
+}
+
+__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+vminv_u16 (uint16x4_t __a)
+{
+ return vget_lane_u16 ((uint16x4_t)
+ __builtin_aarch64_reduc_umin_v4hi ((int16x4_t) __a),
+ 0);
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vminv_u32 (uint32x2_t __a)
+{
+ return vget_lane_u32 ((uint32x2_t)
+ __builtin_aarch64_reduc_umin_v2si ((int32x2_t) __a),
+ 0);
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vminvq_f32 (float32x4_t __a)
+{
+ return vgetq_lane_f32 (__builtin_aarch64_reduc_smin_nan_v4sf (__a),
+ 0);
+}
+
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vminvq_f64 (float64x2_t __a)
+{
+ return vgetq_lane_f64 (__builtin_aarch64_reduc_smin_nan_v2df (__a),
+ 0);
+}
+
+__extension__ static __inline int8_t __attribute__ ((__always_inline__))
+vminvq_s8 (int8x16_t __a)
+{
+ return vgetq_lane_s8 (__builtin_aarch64_reduc_smin_v16qi (__a), 0);
+}
+
+__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+vminvq_s16 (int16x8_t __a)
+{
+ return vgetq_lane_s16 (__builtin_aarch64_reduc_smin_v8hi (__a), 0);
+}
+
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vminvq_s32 (int32x4_t __a)
+{
+ return vgetq_lane_s32 (__builtin_aarch64_reduc_smin_v4si (__a), 0);
+}
+
+__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
+vminvq_u8 (uint8x16_t __a)
+{
+ return vgetq_lane_u8 ((uint8x16_t)
+ __builtin_aarch64_reduc_umin_v16qi ((int8x16_t) __a),
+ 0);
+}
+
+__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+vminvq_u16 (uint16x8_t __a)
+{
+ return vgetq_lane_u16 ((uint16x8_t)
+ __builtin_aarch64_reduc_umin_v8hi ((int16x8_t) __a),
+ 0);
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vminvq_u32 (uint32x4_t __a)
+{
+ return vgetq_lane_u32 ((uint32x4_t)
+ __builtin_aarch64_reduc_umin_v4si ((int32x4_t) __a),
+ 0);
+}
+
+/* vminnmv */
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vminnmv_f32 (float32x2_t __a)
+{
+ return vget_lane_f32 (__builtin_aarch64_reduc_smin_v2sf (__a), 0);
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vminnmvq_f32 (float32x4_t __a)
+{
+ return vgetq_lane_f32 (__builtin_aarch64_reduc_smin_v4sf (__a), 0);
+}
+
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vminnmvq_f64 (float64x2_t __a)
+{
+ return vgetq_lane_f64 (__builtin_aarch64_reduc_smin_v2df (__a), 0);
+}
+
+/* vmla */
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmla_f32 (float32x2_t a, float32x2_t b, float32x2_t c)
+{
+ return a + b * c;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vmlaq_f32 (float32x4_t a, float32x4_t b, float32x4_t c)
+{
+ return a + b * c;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vmlaq_f64 (float64x2_t a, float64x2_t b, float64x2_t c)
+{
+ return a + b * c;
+}
+
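+/* Editorial note (not part of the original patch): the floating-point
+   vmla forms above are written as plain a + b * c, so whether they
+   contract to a fused FMLA is left to the compiler (-ffp-contract);
+   the vfma intrinsics elsewhere in this header request fusion
+   explicitly.  A minimal sketch:
+
+     float32x4_t acc = vmlaq_f32 (sum, coeff, sample);  // sum + coeff * sample
+*/
+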
+/* vmla_lane */
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmla_lane_f32 (float32x2_t __a, float32x2_t __b,
+ float32x2_t __c, const int __lane)
+{
+ return (__a + (__b * __aarch64_vget_lane_f32 (__c, __lane)));
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmla_lane_s16 (int16x4_t __a, int16x4_t __b,
+ int16x4_t __c, const int __lane)
+{
+ return (__a + (__b * __aarch64_vget_lane_s16 (__c, __lane)));
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmla_lane_s32 (int32x2_t __a, int32x2_t __b,
+ int32x2_t __c, const int __lane)
+{
+ return (__a + (__b * __aarch64_vget_lane_s32 (__c, __lane)));
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmla_lane_u16 (uint16x4_t __a, uint16x4_t __b,
+ uint16x4_t __c, const int __lane)
+{
+ return (__a + (__b * __aarch64_vget_lane_u16 (__c, __lane)));
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmla_lane_u32 (uint32x2_t __a, uint32x2_t __b,
+ uint32x2_t __c, const int __lane)
+{
+ return (__a + (__b * __aarch64_vget_lane_u32 (__c, __lane)));
+}
+
+/* vmla_laneq */
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmla_laneq_f32 (float32x2_t __a, float32x2_t __b,
+ float32x4_t __c, const int __lane)
+{
+ return (__a + (__b * __aarch64_vgetq_lane_f32 (__c, __lane)));
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmla_laneq_s16 (int16x4_t __a, int16x4_t __b,
+ int16x8_t __c, const int __lane)
+{
+ return (__a + (__b * __aarch64_vgetq_lane_s16 (__c, __lane)));
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmla_laneq_s32 (int32x2_t __a, int32x2_t __b,
+ int32x4_t __c, const int __lane)
+{
+ return (__a + (__b * __aarch64_vgetq_lane_s32 (__c, __lane)));
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmla_laneq_u16 (uint16x4_t __a, uint16x4_t __b,
+ uint16x8_t __c, const int __lane)
+{
+ return (__a + (__b * __aarch64_vgetq_lane_u16 (__c, __lane)));
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmla_laneq_u32 (uint32x2_t __a, uint32x2_t __b,
+ uint32x4_t __c, const int __lane)
+{
+ return (__a + (__b * __aarch64_vgetq_lane_u32 (__c, __lane)));
+}
+
+/* vmlaq_lane */
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vmlaq_lane_f32 (float32x4_t __a, float32x4_t __b,
+ float32x2_t __c, const int __lane)
+{
+ return (__a + (__b * __aarch64_vget_lane_f32 (__c, __lane)));
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmlaq_lane_s16 (int16x8_t __a, int16x8_t __b,
+ int16x4_t __c, const int __lane)
+{
+ return (__a + (__b * __aarch64_vget_lane_s16 (__c, __lane)));
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlaq_lane_s32 (int32x4_t __a, int32x4_t __b,
+ int32x2_t __c, const int __lane)
+{
+ return (__a + (__b * __aarch64_vget_lane_s32 (__c, __lane)));
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmlaq_lane_u16 (uint16x8_t __a, uint16x8_t __b,
+ uint16x4_t __c, const int __lane)
+{
+ return (__a + (__b * __aarch64_vget_lane_u16 (__c, __lane)));
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlaq_lane_u32 (uint32x4_t __a, uint32x4_t __b,
+ uint32x2_t __c, const int __lane)
+{
+ return (__a + (__b * __aarch64_vget_lane_u32 (__c, __lane)));
+}
+
+/* vmlaq_laneq */
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vmlaq_laneq_f32 (float32x4_t __a, float32x4_t __b,
+ float32x4_t __c, const int __lane)
+{
+ return (__a + (__b * __aarch64_vgetq_lane_f32 (__c, __lane)));
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmlaq_laneq_s16 (int16x8_t __a, int16x8_t __b,
+ int16x8_t __c, const int __lane)
+{
+ return (__a + (__b * __aarch64_vgetq_lane_s16 (__c, __lane)));
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlaq_laneq_s32 (int32x4_t __a, int32x4_t __b,
+ int32x4_t __c, const int __lane)
+{
+ return (__a + (__b * __aarch64_vgetq_lane_s32 (__c, __lane)));
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmlaq_laneq_u16 (uint16x8_t __a, uint16x8_t __b,
+ uint16x8_t __c, const int __lane)
+{
+ return (__a + (__b * __aarch64_vgetq_lane_u16 (__c, __lane)));
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlaq_laneq_u32 (uint32x4_t __a, uint32x4_t __b,
+ uint32x4_t __c, const int __lane)
+{
+ return (__a + (__b * __aarch64_vgetq_lane_u32 (__c, __lane)));
+}
+
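+/* Editorial usage sketch (not part of the original patch): the lane
+   variants multiply by a single element selected at compile time, which
+   is handy when several coefficients live in one vector, e.g. a small
+   FIR accumulation:
+
+     float32x4_t
+     fir4 (float32x4_t acc, float32x4_t x0, float32x4_t x1,
+           float32x4_t x2, float32x4_t x3, float32x4_t coef)
+     {
+       acc = vmlaq_laneq_f32 (acc, x0, coef, 0);
+       acc = vmlaq_laneq_f32 (acc, x1, coef, 1);
+       acc = vmlaq_laneq_f32 (acc, x2, coef, 2);
+       acc = vmlaq_laneq_f32 (acc, x3, coef, 3);
+       return acc;
+     }
+*/
+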
+/* vmls */
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmls_f32 (float32x2_t a, float32x2_t b, float32x2_t c)
+{
+ return a - b * c;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vmlsq_f32 (float32x4_t a, float32x4_t b, float32x4_t c)
+{
+ return a - b * c;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vmlsq_f64 (float64x2_t a, float64x2_t b, float64x2_t c)
+{
+ return a - b * c;
+}
+
+/* vmls_lane */
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmls_lane_f32 (float32x2_t __a, float32x2_t __b,
+ float32x2_t __c, const int __lane)
+{
+ return (__a - (__b * __aarch64_vget_lane_f32 (__c, __lane)));
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmls_lane_s16 (int16x4_t __a, int16x4_t __b,
+ int16x4_t __c, const int __lane)
+{
+ return (__a - (__b * __aarch64_vget_lane_s16 (__c, __lane)));
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmls_lane_s32 (int32x2_t __a, int32x2_t __b,
+ int32x2_t __c, const int __lane)
+{
+ return (__a - (__b * __aarch64_vget_lane_s32 (__c, __lane)));
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmls_lane_u16 (uint16x4_t __a, uint16x4_t __b,
+ uint16x4_t __c, const int __lane)
+{
+ return (__a - (__b * __aarch64_vget_lane_u16 (__c, __lane)));
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmls_lane_u32 (uint32x2_t __a, uint32x2_t __b,
+ uint32x2_t __c, const int __lane)
+{
+ return (__a - (__b * __aarch64_vget_lane_u32 (__c, __lane)));
+}
+
+/* vmls_laneq */
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmls_laneq_f32 (float32x2_t __a, float32x2_t __b,
+ float32x4_t __c, const int __lane)
+{
+ return (__a - (__b * __aarch64_vgetq_lane_f32 (__c, __lane)));
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmls_laneq_s16 (int16x4_t __a, int16x4_t __b,
+ int16x8_t __c, const int __lane)
+{
+ return (__a - (__b * __aarch64_vgetq_lane_s16 (__c, __lane)));
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmls_laneq_s32 (int32x2_t __a, int32x2_t __b,
+ int32x4_t __c, const int __lane)
+{
+ return (__a - (__b * __aarch64_vgetq_lane_s32 (__c, __lane)));
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmls_laneq_u16 (uint16x4_t __a, uint16x4_t __b,
+ uint16x8_t __c, const int __lane)
+{
+ return (__a - (__b * __aarch64_vgetq_lane_u16 (__c, __lane)));
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmls_laneq_u32 (uint32x2_t __a, uint32x2_t __b,
+ uint32x4_t __c, const int __lane)
+{
+ return (__a - (__b * __aarch64_vgetq_lane_u32 (__c, __lane)));
+}
+
+/* vmlsq_lane */
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vmlsq_lane_f32 (float32x4_t __a, float32x4_t __b,
+ float32x2_t __c, const int __lane)
+{
+ return (__a - (__b * __aarch64_vget_lane_f32 (__c, __lane)));
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmlsq_lane_s16 (int16x8_t __a, int16x8_t __b,
+ int16x4_t __c, const int __lane)
+{
+ return (__a - (__b * __aarch64_vget_lane_s16 (__c, __lane)));
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlsq_lane_s32 (int32x4_t __a, int32x4_t __b,
+ int32x2_t __c, const int __lane)
+{
+ return (__a - (__b * __aarch64_vget_lane_s32 (__c, __lane)));
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmlsq_lane_u16 (uint16x8_t __a, uint16x8_t __b,
+ uint16x4_t __c, const int __lane)
+{
+ return (__a - (__b * __aarch64_vget_lane_u16 (__c, __lane)));
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlsq_lane_u32 (uint32x4_t __a, uint32x4_t __b,
+ uint32x2_t __c, const int __lane)
+{
+ return (__a - (__b * __aarch64_vget_lane_u32 (__c, __lane)));
+}
+
+/* vmlsq_laneq */
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vmlsq_laneq_f32 (float32x4_t __a, float32x4_t __b,
+ float32x4_t __c, const int __lane)
+{
+ return (__a - (__b * __aarch64_vgetq_lane_f32 (__c, __lane)));
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmlsq_laneq_s16 (int16x8_t __a, int16x8_t __b,
+ int16x8_t __c, const int __lane)
+{
+ return (__a - (__b * __aarch64_vgetq_lane_s16 (__c, __lane)));
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlsq_laneq_s32 (int32x4_t __a, int32x4_t __b,
+ int32x4_t __c, const int __lane)
+{
+ return (__a - (__b * __aarch64_vgetq_lane_s32 (__c, __lane)));
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmlsq_laneq_u16 (uint16x8_t __a, uint16x8_t __b,
+ uint16x8_t __c, const int __lane)
+{
+ return (__a - (__b * __aarch64_vgetq_lane_u16 (__c, __lane)));
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlsq_laneq_u32 (uint32x4_t __a, uint32x4_t __b,
+ uint32x4_t __c, const int __lane)
+{
+ return (__a - (__b * __aarch64_vgetq_lane_u32 (__c, __lane)));
+}
+
+/* vmov_n_ */
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmov_n_f32 (float32_t __a)
+{
+ return vdup_n_f32 (__a);
+}
+
+__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+vmov_n_f64 (float64_t __a)
+{
+ return __a;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vmov_n_p8 (poly8_t __a)
+{
+ return vdup_n_p8 (__a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vmov_n_p16 (poly16_t __a)
+{
+ return vdup_n_p16 (__a);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vmov_n_s8 (int8_t __a)
+{
+ return vdup_n_s8 (__a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmov_n_s16 (int16_t __a)
+{
+ return vdup_n_s16 (__a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmov_n_s32 (int32_t __a)
+{
+ return vdup_n_s32 (__a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vmov_n_s64 (int64_t __a)
+{
+ return __a;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vmov_n_u8 (uint8_t __a)
+{
+ return vdup_n_u8 (__a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmov_n_u16 (uint16_t __a)
+{
+ return vdup_n_u16 (__a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmov_n_u32 (uint32_t __a)
+{
+ return vdup_n_u32 (__a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vmov_n_u64 (uint64_t __a)
+{
+ return __a;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vmovq_n_f32 (float32_t __a)
+{
+ return vdupq_n_f32 (__a);
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vmovq_n_f64 (float64_t __a)
+{
+ return vdupq_n_f64 (__a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vmovq_n_p8 (poly8_t __a)
+{
+ return vdupq_n_p8 (__a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vmovq_n_p16 (poly16_t __a)
+{
+ return vdupq_n_p16 (__a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vmovq_n_s8 (int8_t __a)
+{
+ return vdupq_n_s8 (__a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmovq_n_s16 (int16_t __a)
+{
+ return vdupq_n_s16 (__a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmovq_n_s32 (int32_t __a)
+{
+ return vdupq_n_s32 (__a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmovq_n_s64 (int64_t __a)
+{
+ return vdupq_n_s64 (__a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vmovq_n_u8 (uint8_t __a)
+{
+ return vdupq_n_u8 (__a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmovq_n_u16 (uint16_t __a)
+{
+ return vdupq_n_u16 (__a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmovq_n_u32 (uint32_t __a)
+{
+ return vdupq_n_u32 (__a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmovq_n_u64 (uint64_t __a)
+{
+ return vdupq_n_u64 (__a);
+}
+
+/* vmul_lane */
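+/* Multiply each lane of __a by the lane of __b selected by __lane.  */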
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmul_lane_f32 (float32x2_t __a, float32x2_t __b, const int __lane)
+{
+ return __a * __aarch64_vget_lane_f32 (__b, __lane);
+}
+
+__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+vmul_lane_f64 (float64x1_t __a, float64x1_t __b, const int __lane)
+{
+ return __a * __b;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmul_lane_s16 (int16x4_t __a, int16x4_t __b, const int __lane)
+{
+ return __a * __aarch64_vget_lane_s16 (__b, __lane);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmul_lane_s32 (int32x2_t __a, int32x2_t __b, const int __lane)
+{
+ return __a * __aarch64_vget_lane_s32 (__b, __lane);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmul_lane_u16 (uint16x4_t __a, uint16x4_t __b, const int __lane)
+{
+ return __a * __aarch64_vget_lane_u16 (__b, __lane);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmul_lane_u32 (uint32x2_t __a, uint32x2_t __b, const int __lane)
+{
+ return __a * __aarch64_vget_lane_u32 (__b, __lane);
+}
+
+/* vmul_laneq */
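+/* As vmul_lane, but the multiplier lane is taken from a 128-bit vector.  */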
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmul_laneq_f32 (float32x2_t __a, float32x4_t __b, const int __lane)
+{
+ return __a * __aarch64_vgetq_lane_f32 (__b, __lane);
+}
+
+__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+vmul_laneq_f64 (float64x1_t __a, float64x2_t __b, const int __lane)
+{
+ return __a * __aarch64_vgetq_lane_f64 (__b, __lane);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmul_laneq_s16 (int16x4_t __a, int16x8_t __b, const int __lane)
+{
+ return __a * __aarch64_vgetq_lane_s16 (__b, __lane);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmul_laneq_s32 (int32x2_t __a, int32x4_t __b, const int __lane)
+{
+ return __a * __aarch64_vgetq_lane_s32 (__b, __lane);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmul_laneq_u16 (uint16x4_t __a, uint16x8_t __b, const int __lane)
+{
+ return __a * __aarch64_vgetq_lane_u16 (__b, __lane);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmul_laneq_u32 (uint32x2_t __a, uint32x4_t __b, const int __lane)
+{
+ return __a * __aarch64_vgetq_lane_u32 (__b, __lane);
+}
+
+/* vmulq_lane */
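+/* Quadword variant: multiply each lane of __a by the selected lane of the
+   64-bit vector __b.  */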
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vmulq_lane_f32 (float32x4_t __a, float32x2_t __b, const int __lane)
+{
+ return __a * __aarch64_vget_lane_f32 (__b, __lane);
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vmulq_lane_f64 (float64x2_t __a, float64x1_t __b, const int __lane)
+{
+ return __a * __b;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmulq_lane_s16 (int16x8_t __a, int16x4_t __b, const int __lane)
+{
+ return __a * __aarch64_vget_lane_s16 (__b, __lane);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmulq_lane_s32 (int32x4_t __a, int32x2_t __b, const int __lane)
+{
+ return __a * __aarch64_vget_lane_s32 (__b, __lane);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmulq_lane_u16 (uint16x8_t __a, uint16x4_t __b, const int __lane)
+{
+ return __a * __aarch64_vget_lane_u16 (__b, __lane);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmulq_lane_u32 (uint32x4_t __a, uint32x2_t __b, const int __lane)
+{
+ return __a * __aarch64_vget_lane_u32 (__b, __lane);
+}
+
+/* vmulq_laneq */
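+/* Quadword variant with the multiplier lane taken from a 128-bit vector.  */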
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vmulq_laneq_f32 (float32x4_t __a, float32x4_t __b, const int __lane)
+{
+ return __a * __aarch64_vgetq_lane_f32 (__b, __lane);
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vmulq_laneq_f64 (float64x2_t __a, float64x2_t __b, const int __lane)
+{
+ return __a * __aarch64_vgetq_lane_f64 (__b, __lane);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmulq_laneq_s16 (int16x8_t __a, int16x8_t __b, const int __lane)
+{
+ return __a * __aarch64_vgetq_lane_s16 (__b, __lane);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmulq_laneq_s32 (int32x4_t __a, int32x4_t __b, const int __lane)
+{
+ return __a * __aarch64_vgetq_lane_s32 (__b, __lane);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmulq_laneq_u16 (uint16x8_t __a, uint16x8_t __b, const int __lane)
+{
+ return __a * __aarch64_vgetq_lane_u16 (__b, __lane);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmulq_laneq_u32 (uint32x4_t __a, uint32x4_t __b, const int __lane)
+{
+ return __a * __aarch64_vgetq_lane_u32 (__b, __lane);
+}
+
+/* vneg */
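+/* Negate each lane.  */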
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vneg_f32 (float32x2_t __a)
+{
+ return -__a;
+}
+
+__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+vneg_f64 (float64x1_t __a)
+{
+ return -__a;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vneg_s8 (int8x8_t __a)
+{
+ return -__a;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vneg_s16 (int16x4_t __a)
+{
+ return -__a;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vneg_s32 (int32x2_t __a)
+{
+ return -__a;
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vneg_s64 (int64x1_t __a)
+{
+ return -__a;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vnegq_f32 (float32x4_t __a)
+{
+ return -__a;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vnegq_f64 (float64x2_t __a)
+{
+ return -__a;
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vnegq_s8 (int8x16_t __a)
+{
+ return -__a;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vnegq_s16 (int16x8_t __a)
+{
+ return -__a;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vnegq_s32 (int32x4_t __a)
+{
+ return -__a;
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vnegq_s64 (int64x2_t __a)
+{
+ return -__a;
+}
+
+/* vqabs */
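+/* Signed saturating absolute value (INT_MIN saturates to INT_MAX).  */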
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqabsq_s64 (int64x2_t __a)
+{
+ return (int64x2_t) __builtin_aarch64_sqabsv2di (__a);
+}
+
+__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
+vqabsb_s8 (int8x1_t __a)
+{
+ return (int8x1_t) __builtin_aarch64_sqabsqi (__a);
+}
+
+__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
+vqabsh_s16 (int16x1_t __a)
+{
+ return (int16x1_t) __builtin_aarch64_sqabshi (__a);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqabss_s32 (int32x1_t __a)
+{
+ return (int32x1_t) __builtin_aarch64_sqabssi (__a);
+}
+
+/* vqadd */
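+/* Saturating addition on single-lane (scalar) operands; out-of-range
+   results are clamped rather than wrapped.  */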
+
+__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
+vqaddb_s8 (int8x1_t __a, int8x1_t __b)
+{
+ return (int8x1_t) __builtin_aarch64_sqaddqi (__a, __b);
+}
+
+__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
+vqaddh_s16 (int16x1_t __a, int16x1_t __b)
+{
+ return (int16x1_t) __builtin_aarch64_sqaddhi (__a, __b);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqadds_s32 (int32x1_t __a, int32x1_t __b)
+{
+ return (int32x1_t) __builtin_aarch64_sqaddsi (__a, __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vqaddd_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (int64x1_t) __builtin_aarch64_sqadddi (__a, __b);
+}
+
+__extension__ static __inline uint8x1_t __attribute__ ((__always_inline__))
+vqaddb_u8 (uint8x1_t __a, uint8x1_t __b)
+{
+ return (uint8x1_t) __builtin_aarch64_uqaddqi (__a, __b);
+}
+
+__extension__ static __inline uint16x1_t __attribute__ ((__always_inline__))
+vqaddh_u16 (uint16x1_t __a, uint16x1_t __b)
+{
+ return (uint16x1_t) __builtin_aarch64_uqaddhi (__a, __b);
+}
+
+__extension__ static __inline uint32x1_t __attribute__ ((__always_inline__))
+vqadds_u32 (uint32x1_t __a, uint32x1_t __b)
+{
+ return (uint32x1_t) __builtin_aarch64_uqaddsi (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vqaddd_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return (uint64x1_t) __builtin_aarch64_uqadddi (__a, __b);
+}
+
+/* vqdmlal */
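+/* Signed saturating doubling multiply-accumulate long: double the widened
+   product of the multiplier operands, saturate, and add it to __a.  */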
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmlal_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c)
+{
+ return __builtin_aarch64_sqdmlalv4hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmlal_high_s16 (int32x4_t __a, int16x8_t __b, int16x8_t __c)
+{
+ return __builtin_aarch64_sqdmlal2v8hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmlal_high_lane_s16 (int32x4_t __a, int16x8_t __b, int16x8_t __c,
+ int const __d)
+{
+ return __builtin_aarch64_sqdmlal2_lanev8hi (__a, __b, __c, __d);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmlal_high_laneq_s16 (int32x4_t __a, int16x8_t __b, int16x8_t __c,
+ int const __d)
+{
+ return __builtin_aarch64_sqdmlal2_laneqv8hi (__a, __b, __c, __d);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmlal_high_n_s16 (int32x4_t __a, int16x8_t __b, int16_t __c)
+{
+ return __builtin_aarch64_sqdmlal2_nv8hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmlal_lane_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c, int const __d)
+{
+ int16x8_t __tmp = vcombine_s16 (__c, vcreate_s16 (__AARCH64_INT64_C (0)));
+ return __builtin_aarch64_sqdmlal_lanev4hi (__a, __b, __tmp, __d);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmlal_laneq_s16 (int32x4_t __a, int16x4_t __b, int16x8_t __c, int const __d)
+{
+ return __builtin_aarch64_sqdmlal_laneqv4hi (__a, __b, __c, __d);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmlal_n_s16 (int32x4_t __a, int16x4_t __b, int16_t __c)
+{
+ return __builtin_aarch64_sqdmlal_nv4hi (__a, __b, __c);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmlal_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c)
+{
+ return __builtin_aarch64_sqdmlalv2si (__a, __b, __c);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmlal_high_s32 (int64x2_t __a, int32x4_t __b, int32x4_t __c)
+{
+ return __builtin_aarch64_sqdmlal2v4si (__a, __b, __c);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmlal_high_lane_s32 (int64x2_t __a, int32x4_t __b, int32x4_t __c,
+ int const __d)
+{
+ return __builtin_aarch64_sqdmlal2_lanev4si (__a, __b, __c, __d);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmlal_high_laneq_s32 (int64x2_t __a, int32x4_t __b, int32x4_t __c,
+ int const __d)
+{
+ return __builtin_aarch64_sqdmlal2_laneqv4si (__a, __b, __c, __d);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmlal_high_n_s32 (int64x2_t __a, int32x4_t __b, int32_t __c)
+{
+ return __builtin_aarch64_sqdmlal2_nv4si (__a, __b, __c);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmlal_lane_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c, int const __d)
+{
+ int32x4_t __tmp = vcombine_s32 (__c, vcreate_s32 (__AARCH64_INT64_C (0)));
+ return __builtin_aarch64_sqdmlal_lanev2si (__a, __b, __tmp, __d);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmlal_laneq_s32 (int64x2_t __a, int32x2_t __b, int32x4_t __c, int const __d)
+{
+ return __builtin_aarch64_sqdmlal_laneqv2si (__a, __b, __c, __d);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmlal_n_s32 (int64x2_t __a, int32x2_t __b, int32_t __c)
+{
+ return __builtin_aarch64_sqdmlal_nv2si (__a, __b, __c);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqdmlalh_s16 (int32x1_t __a, int16x1_t __b, int16x1_t __c)
+{
+ return __builtin_aarch64_sqdmlalhi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqdmlalh_lane_s16 (int32x1_t __a, int16x1_t __b, int16x8_t __c, const int __d)
+{
+ return __builtin_aarch64_sqdmlal_lanehi (__a, __b, __c, __d);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vqdmlals_s32 (int64x1_t __a, int32x1_t __b, int32x1_t __c)
+{
+ return __builtin_aarch64_sqdmlalsi (__a, __b, __c);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vqdmlals_lane_s32 (int64x1_t __a, int32x1_t __b, int32x4_t __c, const int __d)
+{
+ return __builtin_aarch64_sqdmlal_lanesi (__a, __b, __c, __d);
+}
+
+/* vqdmlsl */
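+/* Signed saturating doubling multiply-subtract long: double the widened
+   product, saturate, and subtract it from __a.  */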
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmlsl_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c)
+{
+ return __builtin_aarch64_sqdmlslv4hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmlsl_high_s16 (int32x4_t __a, int16x8_t __b, int16x8_t __c)
+{
+ return __builtin_aarch64_sqdmlsl2v8hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmlsl_high_lane_s16 (int32x4_t __a, int16x8_t __b, int16x8_t __c,
+ int const __d)
+{
+ return __builtin_aarch64_sqdmlsl2_lanev8hi (__a, __b, __c, __d);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmlsl_high_laneq_s16 (int32x4_t __a, int16x8_t __b, int16x8_t __c,
+ int const __d)
+{
+ return __builtin_aarch64_sqdmlsl2_laneqv8hi (__a, __b, __c, __d);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmlsl_high_n_s16 (int32x4_t __a, int16x8_t __b, int16_t __c)
+{
+ return __builtin_aarch64_sqdmlsl2_nv8hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmlsl_lane_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c, int const __d)
+{
+ int16x8_t __tmp = vcombine_s16 (__c, vcreate_s16 (__AARCH64_INT64_C (0)));
+ return __builtin_aarch64_sqdmlsl_lanev4hi (__a, __b, __tmp, __d);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmlsl_laneq_s16 (int32x4_t __a, int16x4_t __b, int16x8_t __c, int const __d)
+{
+ return __builtin_aarch64_sqdmlsl_laneqv4hi (__a, __b, __c, __d);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmlsl_n_s16 (int32x4_t __a, int16x4_t __b, int16_t __c)
+{
+ return __builtin_aarch64_sqdmlsl_nv4hi (__a, __b, __c);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmlsl_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c)
+{
+ return __builtin_aarch64_sqdmlslv2si (__a, __b, __c);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmlsl_high_s32 (int64x2_t __a, int32x4_t __b, int32x4_t __c)
+{
+ return __builtin_aarch64_sqdmlsl2v4si (__a, __b, __c);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmlsl_high_lane_s32 (int64x2_t __a, int32x4_t __b, int32x4_t __c,
+ int const __d)
+{
+ return __builtin_aarch64_sqdmlsl2_lanev4si (__a, __b, __c, __d);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmlsl_high_laneq_s32 (int64x2_t __a, int32x4_t __b, int32x4_t __c,
+ int const __d)
+{
+ return __builtin_aarch64_sqdmlsl2_laneqv4si (__a, __b, __c, __d);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmlsl_high_n_s32 (int64x2_t __a, int32x4_t __b, int32_t __c)
+{
+ return __builtin_aarch64_sqdmlsl2_nv4si (__a, __b, __c);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmlsl_lane_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c, int const __d)
+{
+ int32x4_t __tmp = vcombine_s32 (__c, vcreate_s32 (__AARCH64_INT64_C (0)));
+ return __builtin_aarch64_sqdmlsl_lanev2si (__a, __b, __tmp, __d);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmlsl_laneq_s32 (int64x2_t __a, int32x2_t __b, int32x4_t __c, int const __d)
+{
+ return __builtin_aarch64_sqdmlsl_laneqv2si (__a, __b, __c, __d);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmlsl_n_s32 (int64x2_t __a, int32x2_t __b, int32_t __c)
+{
+ return __builtin_aarch64_sqdmlsl_nv2si (__a, __b, __c);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqdmlslh_s16 (int32x1_t __a, int16x1_t __b, int16x1_t __c)
+{
+ return __builtin_aarch64_sqdmlslhi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqdmlslh_lane_s16 (int32x1_t __a, int16x1_t __b, int16x8_t __c, const int __d)
+{
+ return __builtin_aarch64_sqdmlsl_lanehi (__a, __b, __c, __d);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vqdmlsls_s32 (int64x1_t __a, int32x1_t __b, int32x1_t __c)
+{
+ return __builtin_aarch64_sqdmlslsi (__a, __b, __c);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vqdmlsls_lane_s32 (int64x1_t __a, int32x1_t __b, int32x4_t __c, const int __d)
+{
+ return __builtin_aarch64_sqdmlsl_lanesi (__a, __b, __c, __d);
+}
+
+/* vqdmulh */
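+/* Signed saturating doubling multiply, returning the high half of the
+   result.  */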
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqdmulh_lane_s16 (int16x4_t __a, int16x4_t __b, const int __c)
+{
+ return __builtin_aarch64_sqdmulh_lanev4hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqdmulh_lane_s32 (int32x2_t __a, int32x2_t __b, const int __c)
+{
+ return __builtin_aarch64_sqdmulh_lanev2si (__a, __b, __c);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqdmulhq_lane_s16 (int16x8_t __a, int16x4_t __b, const int __c)
+{
+ return __builtin_aarch64_sqdmulh_lanev8hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmulhq_lane_s32 (int32x4_t __a, int32x2_t __b, const int __c)
+{
+ return __builtin_aarch64_sqdmulh_lanev4si (__a, __b, __c);
+}
+
+__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
+vqdmulhh_s16 (int16x1_t __a, int16x1_t __b)
+{
+ return (int16x1_t) __builtin_aarch64_sqdmulhhi (__a, __b);
+}
+
+__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
+vqdmulhh_lane_s16 (int16x1_t __a, int16x8_t __b, const int __c)
+{
+ return __builtin_aarch64_sqdmulh_lanehi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqdmulhs_s32 (int32x1_t __a, int32x1_t __b)
+{
+ return (int32x1_t) __builtin_aarch64_sqdmulhsi (__a, __b);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqdmulhs_lane_s32 (int32x1_t __a, int32x4_t __b, const int __c)
+{
+ return __builtin_aarch64_sqdmulh_lanesi (__a, __b, __c);
+}
+
+/* vqdmull */
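+/* Signed saturating doubling multiply long (widening).  */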
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmull_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return __builtin_aarch64_sqdmullv4hi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmull_high_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_aarch64_sqdmull2v8hi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmull_high_lane_s16 (int16x8_t __a, int16x8_t __b, int const __c)
+{
+ return __builtin_aarch64_sqdmull2_lanev8hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmull_high_laneq_s16 (int16x8_t __a, int16x8_t __b, int const __c)
+{
+ return __builtin_aarch64_sqdmull2_laneqv8hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmull_high_n_s16 (int16x8_t __a, int16_t __b)
+{
+ return __builtin_aarch64_sqdmull2_nv8hi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmull_lane_s16 (int16x4_t __a, int16x4_t __b, int const __c)
+{
+ int16x8_t __tmp = vcombine_s16 (__b, vcreate_s16 (__AARCH64_INT64_C (0)));
+ return __builtin_aarch64_sqdmull_lanev4hi (__a, __tmp, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmull_laneq_s16 (int16x4_t __a, int16x8_t __b, int const __c)
+{
+ return __builtin_aarch64_sqdmull_laneqv4hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmull_n_s16 (int16x4_t __a, int16_t __b)
+{
+ return __builtin_aarch64_sqdmull_nv4hi (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmull_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return __builtin_aarch64_sqdmullv2si (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmull_high_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_aarch64_sqdmull2v4si (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmull_high_lane_s32 (int32x4_t __a, int32x4_t __b, int const __c)
+{
+ return __builtin_aarch64_sqdmull2_lanev4si (__a, __b, __c);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmull_high_laneq_s32 (int32x4_t __a, int32x4_t __b, int const __c)
+{
+ return __builtin_aarch64_sqdmull2_laneqv4si (__a, __b, __c);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmull_high_n_s32 (int32x4_t __a, int32_t __b)
+{
+ return __builtin_aarch64_sqdmull2_nv4si (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmull_lane_s32 (int32x2_t __a, int32x2_t __b, int const __c)
+{
+ int32x4_t __tmp = vcombine_s32 (__b, vcreate_s32 (__AARCH64_INT64_C (0)));
+ return __builtin_aarch64_sqdmull_lanev2si (__a, __tmp, __c);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmull_laneq_s32 (int32x2_t __a, int32x4_t __b, int const __c)
+{
+ return __builtin_aarch64_sqdmull_laneqv2si (__a, __b, __c);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmull_n_s32 (int32x2_t __a, int32_t __b)
+{
+ return __builtin_aarch64_sqdmull_nv2si (__a, __b);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqdmullh_s16 (int16x1_t __a, int16x1_t __b)
+{
+ return (int32x1_t) __builtin_aarch64_sqdmullhi (__a, __b);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqdmullh_lane_s16 (int16x1_t __a, int16x8_t __b, const int __c)
+{
+ return __builtin_aarch64_sqdmull_lanehi (__a, __b, __c);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vqdmulls_s32 (int32x1_t __a, int32x1_t __b)
+{
+ return (int64x1_t) __builtin_aarch64_sqdmullsi (__a, __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vqdmulls_lane_s32 (int32x1_t __a, int32x4_t __b, const int __c)
+{
+ return __builtin_aarch64_sqdmull_lanesi (__a, __b, __c);
+}
+
+/* vqmovn */
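+/* Saturating narrow to half the element width, clamping out-of-range
+   values.  */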
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqmovn_s16 (int16x8_t __a)
+{
+ return (int8x8_t) __builtin_aarch64_sqmovnv8hi (__a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqmovn_s32 (int32x4_t __a)
+{
+ return (int16x4_t) __builtin_aarch64_sqmovnv4si (__a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqmovn_s64 (int64x2_t __a)
+{
+ return (int32x2_t) __builtin_aarch64_sqmovnv2di (__a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqmovn_u16 (uint16x8_t __a)
+{
+ return (uint8x8_t) __builtin_aarch64_uqmovnv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqmovn_u32 (uint32x4_t __a)
+{
+ return (uint16x4_t) __builtin_aarch64_uqmovnv4si ((int32x4_t) __a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqmovn_u64 (uint64x2_t __a)
+{
+ return (uint32x2_t) __builtin_aarch64_uqmovnv2di ((int64x2_t) __a);
+}
+
+__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
+vqmovnh_s16 (int16x1_t __a)
+{
+ return (int8x1_t) __builtin_aarch64_sqmovnhi (__a);
+}
+
+__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
+vqmovns_s32 (int32x1_t __a)
+{
+ return (int16x1_t) __builtin_aarch64_sqmovnsi (__a);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqmovnd_s64 (int64x1_t __a)
+{
+ return (int32x1_t) __builtin_aarch64_sqmovndi (__a);
+}
+
+__extension__ static __inline uint8x1_t __attribute__ ((__always_inline__))
+vqmovnh_u16 (uint16x1_t __a)
+{
+ return (uint8x1_t) __builtin_aarch64_uqmovnhi (__a);
+}
+
+__extension__ static __inline uint16x1_t __attribute__ ((__always_inline__))
+vqmovns_u32 (uint32x1_t __a)
+{
+ return (uint16x1_t) __builtin_aarch64_uqmovnsi (__a);
+}
+
+__extension__ static __inline uint32x1_t __attribute__ ((__always_inline__))
+vqmovnd_u64 (uint64x1_t __a)
+{
+ return (uint32x1_t) __builtin_aarch64_uqmovndi (__a);
+}
+
+/* vqmovun */
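+/* Saturating narrow of signed input to an unsigned result.  */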
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqmovun_s16 (int16x8_t __a)
+{
+ return (uint8x8_t) __builtin_aarch64_sqmovunv8hi (__a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqmovun_s32 (int32x4_t __a)
+{
+ return (uint16x4_t) __builtin_aarch64_sqmovunv4si (__a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqmovun_s64 (int64x2_t __a)
+{
+ return (uint32x2_t) __builtin_aarch64_sqmovunv2di (__a);
+}
+
+__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
+vqmovunh_s16 (int16x1_t __a)
+{
+ return (int8x1_t) __builtin_aarch64_sqmovunhi (__a);
+}
+
+__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
+vqmovuns_s32 (int32x1_t __a)
+{
+ return (int16x1_t) __builtin_aarch64_sqmovunsi (__a);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqmovund_s64 (int64x1_t __a)
+{
+ return (int32x1_t) __builtin_aarch64_sqmovundi (__a);
+}
+
+/* vqneg */
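+/* Signed saturating negate (INT_MIN saturates to INT_MAX).  */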
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqnegq_s64 (int64x2_t __a)
+{
+ return (int64x2_t) __builtin_aarch64_sqnegv2di (__a);
+}
+
+__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
+vqnegb_s8 (int8x1_t __a)
+{
+ return (int8x1_t) __builtin_aarch64_sqnegqi (__a);
+}
+
+__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
+vqnegh_s16 (int16x1_t __a)
+{
+ return (int16x1_t) __builtin_aarch64_sqneghi (__a);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqnegs_s32 (int32x1_t __a)
+{
+ return (int32x1_t) __builtin_aarch64_sqnegsi (__a);
+}
+
+/* vqrdmulh */
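+/* Signed saturating rounding doubling multiply, returning the high half.  */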
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqrdmulh_lane_s16 (int16x4_t __a, int16x4_t __b, const int __c)
+{
+ return __builtin_aarch64_sqrdmulh_lanev4hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqrdmulh_lane_s32 (int32x2_t __a, int32x2_t __b, const int __c)
+{
+ return __builtin_aarch64_sqrdmulh_lanev2si (__a, __b, __c);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqrdmulhq_lane_s16 (int16x8_t __a, int16x4_t __b, const int __c)
+{
+ return __builtin_aarch64_sqrdmulh_lanev8hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqrdmulhq_lane_s32 (int32x4_t __a, int32x2_t __b, const int __c)
+{
+ return __builtin_aarch64_sqrdmulh_lanev4si (__a, __b, __c);
+}
+
+__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
+vqrdmulhh_s16 (int16x1_t __a, int16x1_t __b)
+{
+ return (int16x1_t) __builtin_aarch64_sqrdmulhhi (__a, __b);
+}
+
+__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
+vqrdmulhh_lane_s16 (int16x1_t __a, int16x8_t __b, const int __c)
+{
+ return __builtin_aarch64_sqrdmulh_lanehi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqrdmulhs_s32 (int32x1_t __a, int32x1_t __b)
+{
+ return (int32x1_t) __builtin_aarch64_sqrdmulhsi (__a, __b);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqrdmulhs_lane_s32 (int32x1_t __a, int32x4_t __b, const int __c)
+{
+ return __builtin_aarch64_sqrdmulh_lanesi (__a, __b, __c);
+}
+
+/* vqrshl */
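+/* Saturating rounding shift left by a signed, per-lane amount in __b;
+   negative amounts shift right.  */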
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqrshl_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return __builtin_aarch64_sqrshlv8qi (__a, __b);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqrshl_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return __builtin_aarch64_sqrshlv4hi (__a, __b);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqrshl_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return __builtin_aarch64_sqrshlv2si (__a, __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vqrshl_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return __builtin_aarch64_sqrshldi (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqrshl_u8 (uint8x8_t __a, int8x8_t __b)
+{
+ return (uint8x8_t) __builtin_aarch64_uqrshlv8qi ((int8x8_t) __a, __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqrshl_u16 (uint16x4_t __a, int16x4_t __b)
+{
+ return (uint16x4_t) __builtin_aarch64_uqrshlv4hi ((int16x4_t) __a, __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqrshl_u32 (uint32x2_t __a, int32x2_t __b)
+{
+ return (uint32x2_t) __builtin_aarch64_uqrshlv2si ((int32x2_t) __a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vqrshl_u64 (uint64x1_t __a, int64x1_t __b)
+{
+ return (uint64x1_t) __builtin_aarch64_uqrshldi ((int64x1_t) __a, __b);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vqrshlq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_aarch64_sqrshlv16qi (__a, __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqrshlq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_aarch64_sqrshlv8hi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqrshlq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_aarch64_sqrshlv4si (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqrshlq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return __builtin_aarch64_sqrshlv2di (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vqrshlq_u8 (uint8x16_t __a, int8x16_t __b)
+{
+ return (uint8x16_t) __builtin_aarch64_uqrshlv16qi ((int8x16_t) __a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vqrshlq_u16 (uint16x8_t __a, int16x8_t __b)
+{
+ return (uint16x8_t) __builtin_aarch64_uqrshlv8hi ((int16x8_t) __a, __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vqrshlq_u32 (uint32x4_t __a, int32x4_t __b)
+{
+ return (uint32x4_t) __builtin_aarch64_uqrshlv4si ((int32x4_t) __a, __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vqrshlq_u64 (uint64x2_t __a, int64x2_t __b)
+{
+ return (uint64x2_t) __builtin_aarch64_uqrshlv2di ((int64x2_t) __a, __b);
+}
+
+__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
+vqrshlb_s8 (int8x1_t __a, int8x1_t __b)
+{
+ return __builtin_aarch64_sqrshlqi (__a, __b);
+}
+
+__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
+vqrshlh_s16 (int16x1_t __a, int16x1_t __b)
+{
+ return __builtin_aarch64_sqrshlhi (__a, __b);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqrshls_s32 (int32x1_t __a, int32x1_t __b)
+{
+ return __builtin_aarch64_sqrshlsi (__a, __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vqrshld_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return __builtin_aarch64_sqrshldi (__a, __b);
+}
+
+__extension__ static __inline uint8x1_t __attribute__ ((__always_inline__))
+vqrshlb_u8 (uint8x1_t __a, uint8x1_t __b)
+{
+ return (uint8x1_t) __builtin_aarch64_uqrshlqi (__a, __b);
+}
+
+__extension__ static __inline uint16x1_t __attribute__ ((__always_inline__))
+vqrshlh_u16 (uint16x1_t __a, uint16x1_t __b)
+{
+ return (uint16x1_t) __builtin_aarch64_uqrshlhi (__a, __b);
+}
+
+__extension__ static __inline uint32x1_t __attribute__ ((__always_inline__))
+vqrshls_u32 (uint32x1_t __a, uint32x1_t __b)
+{
+ return (uint32x1_t) __builtin_aarch64_uqrshlsi (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vqrshld_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return (uint64x1_t) __builtin_aarch64_uqrshldi (__a, __b);
+}
+
+/* vqrshrn */
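+/* Saturating rounding shift right by immediate and narrow.  */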
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqrshrn_n_s16 (int16x8_t __a, const int __b)
+{
+ return (int8x8_t) __builtin_aarch64_sqrshrn_nv8hi (__a, __b);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqrshrn_n_s32 (int32x4_t __a, const int __b)
+{
+ return (int16x4_t) __builtin_aarch64_sqrshrn_nv4si (__a, __b);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqrshrn_n_s64 (int64x2_t __a, const int __b)
+{
+ return (int32x2_t) __builtin_aarch64_sqrshrn_nv2di (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqrshrn_n_u16 (uint16x8_t __a, const int __b)
+{
+ return (uint8x8_t) __builtin_aarch64_uqrshrn_nv8hi ((int16x8_t) __a, __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqrshrn_n_u32 (uint32x4_t __a, const int __b)
+{
+ return (uint16x4_t) __builtin_aarch64_uqrshrn_nv4si ((int32x4_t) __a, __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqrshrn_n_u64 (uint64x2_t __a, const int __b)
+{
+ return (uint32x2_t) __builtin_aarch64_uqrshrn_nv2di ((int64x2_t) __a, __b);
+}
+
+__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
+vqrshrnh_n_s16 (int16x1_t __a, const int __b)
+{
+ return (int8x1_t) __builtin_aarch64_sqrshrn_nhi (__a, __b);
+}
+
+__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
+vqrshrns_n_s32 (int32x1_t __a, const int __b)
+{
+ return (int16x1_t) __builtin_aarch64_sqrshrn_nsi (__a, __b);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqrshrnd_n_s64 (int64x1_t __a, const int __b)
+{
+ return (int32x1_t) __builtin_aarch64_sqrshrn_ndi (__a, __b);
+}
+
+__extension__ static __inline uint8x1_t __attribute__ ((__always_inline__))
+vqrshrnh_n_u16 (uint16x1_t __a, const int __b)
+{
+ return (uint8x1_t) __builtin_aarch64_uqrshrn_nhi (__a, __b);
+}
+
+__extension__ static __inline uint16x1_t __attribute__ ((__always_inline__))
+vqrshrns_n_u32 (uint32x1_t __a, const int __b)
+{
+ return (uint16x1_t) __builtin_aarch64_uqrshrn_nsi (__a, __b);
+}
+
+__extension__ static __inline uint32x1_t __attribute__ ((__always_inline__))
+vqrshrnd_n_u64 (uint64x1_t __a, const int __b)
+{
+ return (uint32x1_t) __builtin_aarch64_uqrshrn_ndi (__a, __b);
+}
+
+/* vqrshrun */
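+/* Saturating rounding shift right by immediate and narrow, with signed
+   input and unsigned result.  */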
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqrshrun_n_s16 (int16x8_t __a, const int __b)
+{
+ return (uint8x8_t) __builtin_aarch64_sqrshrun_nv8hi (__a, __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqrshrun_n_s32 (int32x4_t __a, const int __b)
+{
+ return (uint16x4_t) __builtin_aarch64_sqrshrun_nv4si (__a, __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqrshrun_n_s64 (int64x2_t __a, const int __b)
+{
+ return (uint32x2_t) __builtin_aarch64_sqrshrun_nv2di (__a, __b);
+}
+
+__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
+vqrshrunh_n_s16 (int16x1_t __a, const int __b)
+{
+ return (int8x1_t) __builtin_aarch64_sqrshrun_nhi (__a, __b);
+}
+
+__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
+vqrshruns_n_s32 (int32x1_t __a, const int __b)
+{
+ return (int16x1_t) __builtin_aarch64_sqrshrun_nsi (__a, __b);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqrshrund_n_s64 (int64x1_t __a, const int __b)
+{
+ return (int32x1_t) __builtin_aarch64_sqrshrun_ndi (__a, __b);
+}
+
+/* vqshl */
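+/* Saturating shift left, either by a signed per-lane amount or, in the
+   _n forms, by an immediate.  */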
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqshl_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return __builtin_aarch64_sqshlv8qi (__a, __b);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqshl_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return __builtin_aarch64_sqshlv4hi (__a, __b);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqshl_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return __builtin_aarch64_sqshlv2si (__a, __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vqshl_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return __builtin_aarch64_sqshldi (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqshl_u8 (uint8x8_t __a, int8x8_t __b)
+{
+ return (uint8x8_t) __builtin_aarch64_uqshlv8qi ((int8x8_t) __a, __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqshl_u16 (uint16x4_t __a, int16x4_t __b)
+{
+ return (uint16x4_t) __builtin_aarch64_uqshlv4hi ((int16x4_t) __a, __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqshl_u32 (uint32x2_t __a, int32x2_t __b)
+{
+ return (uint32x2_t) __builtin_aarch64_uqshlv2si ((int32x2_t) __a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vqshl_u64 (uint64x1_t __a, int64x1_t __b)
+{
+ return (uint64x1_t) __builtin_aarch64_uqshldi ((int64x1_t) __a, __b);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vqshlq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_aarch64_sqshlv16qi (__a, __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqshlq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_aarch64_sqshlv8hi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqshlq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_aarch64_sqshlv4si (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqshlq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return __builtin_aarch64_sqshlv2di (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vqshlq_u8 (uint8x16_t __a, int8x16_t __b)
+{
+ return (uint8x16_t) __builtin_aarch64_uqshlv16qi ((int8x16_t) __a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vqshlq_u16 (uint16x8_t __a, int16x8_t __b)
+{
+ return (uint16x8_t) __builtin_aarch64_uqshlv8hi ((int16x8_t) __a, __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vqshlq_u32 (uint32x4_t __a, int32x4_t __b)
+{
+ return (uint32x4_t) __builtin_aarch64_uqshlv4si ((int32x4_t) __a, __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vqshlq_u64 (uint64x2_t __a, int64x2_t __b)
+{
+ return (uint64x2_t) __builtin_aarch64_uqshlv2di ((int64x2_t) __a, __b);
+}
+
+__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
+vqshlb_s8 (int8x1_t __a, int8x1_t __b)
+{
+ return __builtin_aarch64_sqshlqi (__a, __b);
+}
+
+__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
+vqshlh_s16 (int16x1_t __a, int16x1_t __b)
+{
+ return __builtin_aarch64_sqshlhi (__a, __b);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqshls_s32 (int32x1_t __a, int32x1_t __b)
+{
+ return __builtin_aarch64_sqshlsi (__a, __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vqshld_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return __builtin_aarch64_sqshldi (__a, __b);
+}
+
+__extension__ static __inline uint8x1_t __attribute__ ((__always_inline__))
+vqshlb_u8 (uint8x1_t __a, uint8x1_t __b)
+{
+ return (uint8x1_t) __builtin_aarch64_uqshlqi (__a, __b);
+}
+
+__extension__ static __inline uint16x1_t __attribute__ ((__always_inline__))
+vqshlh_u16 (uint16x1_t __a, uint16x1_t __b)
+{
+ return (uint16x1_t) __builtin_aarch64_uqshlhi (__a, __b);
+}
+
+__extension__ static __inline uint32x1_t __attribute__ ((__always_inline__))
+vqshls_u32 (uint32x1_t __a, uint32x1_t __b)
+{
+ return (uint32x1_t) __builtin_aarch64_uqshlsi (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vqshld_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return (uint64x1_t) __builtin_aarch64_uqshldi (__a, __b);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqshl_n_s8 (int8x8_t __a, const int __b)
+{
+ return (int8x8_t) __builtin_aarch64_sqshl_nv8qi (__a, __b);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqshl_n_s16 (int16x4_t __a, const int __b)
+{
+ return (int16x4_t) __builtin_aarch64_sqshl_nv4hi (__a, __b);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqshl_n_s32 (int32x2_t __a, const int __b)
+{
+ return (int32x2_t) __builtin_aarch64_sqshl_nv2si (__a, __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vqshl_n_s64 (int64x1_t __a, const int __b)
+{
+ return (int64x1_t) __builtin_aarch64_sqshl_ndi (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqshl_n_u8 (uint8x8_t __a, const int __b)
+{
+ return (uint8x8_t) __builtin_aarch64_uqshl_nv8qi ((int8x8_t) __a, __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqshl_n_u16 (uint16x4_t __a, const int __b)
+{
+ return (uint16x4_t) __builtin_aarch64_uqshl_nv4hi ((int16x4_t) __a, __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqshl_n_u32 (uint32x2_t __a, const int __b)
+{
+ return (uint32x2_t) __builtin_aarch64_uqshl_nv2si ((int32x2_t) __a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vqshl_n_u64 (uint64x1_t __a, const int __b)
+{
+ return (uint64x1_t) __builtin_aarch64_uqshl_ndi ((int64x1_t) __a, __b);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vqshlq_n_s8 (int8x16_t __a, const int __b)
+{
+ return (int8x16_t) __builtin_aarch64_sqshl_nv16qi (__a, __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqshlq_n_s16 (int16x8_t __a, const int __b)
+{
+ return (int16x8_t) __builtin_aarch64_sqshl_nv8hi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqshlq_n_s32 (int32x4_t __a, const int __b)
+{
+ return (int32x4_t) __builtin_aarch64_sqshl_nv4si (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqshlq_n_s64 (int64x2_t __a, const int __b)
+{
+ return (int64x2_t) __builtin_aarch64_sqshl_nv2di (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vqshlq_n_u8 (uint8x16_t __a, const int __b)
+{
+ return (uint8x16_t) __builtin_aarch64_uqshl_nv16qi ((int8x16_t) __a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vqshlq_n_u16 (uint16x8_t __a, const int __b)
+{
+ return (uint16x8_t) __builtin_aarch64_uqshl_nv8hi ((int16x8_t) __a, __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vqshlq_n_u32 (uint32x4_t __a, const int __b)
+{
+ return (uint32x4_t) __builtin_aarch64_uqshl_nv4si ((int32x4_t) __a, __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vqshlq_n_u64 (uint64x2_t __a, const int __b)
+{
+ return (uint64x2_t) __builtin_aarch64_uqshl_nv2di ((int64x2_t) __a, __b);
+}
+
+__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
+vqshlb_n_s8 (int8x1_t __a, const int __b)
+{
+ return (int8x1_t) __builtin_aarch64_sqshl_nqi (__a, __b);
+}
+
+__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
+vqshlh_n_s16 (int16x1_t __a, const int __b)
+{
+ return (int16x1_t) __builtin_aarch64_sqshl_nhi (__a, __b);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqshls_n_s32 (int32x1_t __a, const int __b)
+{
+ return (int32x1_t) __builtin_aarch64_sqshl_nsi (__a, __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vqshld_n_s64 (int64x1_t __a, const int __b)
+{
+ return (int64x1_t) __builtin_aarch64_sqshl_ndi (__a, __b);
+}
+
+__extension__ static __inline uint8x1_t __attribute__ ((__always_inline__))
+vqshlb_n_u8 (uint8x1_t __a, const int __b)
+{
+ return (uint8x1_t) __builtin_aarch64_uqshl_nqi (__a, __b);
+}
+
+__extension__ static __inline uint16x1_t __attribute__ ((__always_inline__))
+vqshlh_n_u16 (uint16x1_t __a, const int __b)
+{
+ return (uint16x1_t) __builtin_aarch64_uqshl_nhi (__a, __b);
+}
+
+__extension__ static __inline uint32x1_t __attribute__ ((__always_inline__))
+vqshls_n_u32 (uint32x1_t __a, const int __b)
+{
+ return (uint32x1_t) __builtin_aarch64_uqshl_nsi (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vqshld_n_u64 (uint64x1_t __a, const int __b)
+{
+ return (uint64x1_t) __builtin_aarch64_uqshl_ndi (__a, __b);
+}
+
+/* vqshlu */
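+/* Signed saturating shift left by immediate, producing an unsigned
+   result.  */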
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqshlu_n_s8 (int8x8_t __a, const int __b)
+{
+ return (uint8x8_t) __builtin_aarch64_sqshlu_nv8qi (__a, __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqshlu_n_s16 (int16x4_t __a, const int __b)
+{
+ return (uint16x4_t) __builtin_aarch64_sqshlu_nv4hi (__a, __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqshlu_n_s32 (int32x2_t __a, const int __b)
+{
+ return (uint32x2_t) __builtin_aarch64_sqshlu_nv2si (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vqshlu_n_s64 (int64x1_t __a, const int __b)
+{
+ return (uint64x1_t) __builtin_aarch64_sqshlu_ndi (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vqshluq_n_s8 (int8x16_t __a, const int __b)
+{
+ return (uint8x16_t) __builtin_aarch64_sqshlu_nv16qi (__a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vqshluq_n_s16 (int16x8_t __a, const int __b)
+{
+ return (uint16x8_t) __builtin_aarch64_sqshlu_nv8hi (__a, __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vqshluq_n_s32 (int32x4_t __a, const int __b)
+{
+ return (uint32x4_t) __builtin_aarch64_sqshlu_nv4si (__a, __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vqshluq_n_s64 (int64x2_t __a, const int __b)
+{
+ return (uint64x2_t) __builtin_aarch64_sqshlu_nv2di (__a, __b);
+}
+
+__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
+vqshlub_n_s8 (int8x1_t __a, const int __b)
+{
+ return (int8x1_t) __builtin_aarch64_sqshlu_nqi (__a, __b);
+}
+
+__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
+vqshluh_n_s16 (int16x1_t __a, const int __b)
+{
+ return (int16x1_t) __builtin_aarch64_sqshlu_nhi (__a, __b);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqshlus_n_s32 (int32x1_t __a, const int __b)
+{
+ return (int32x1_t) __builtin_aarch64_sqshlu_nsi (__a, __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vqshlud_n_s64 (int64x1_t __a, const int __b)
+{
+ return (int64x1_t) __builtin_aarch64_sqshlu_ndi (__a, __b);
+}
+
+/* vqshrn */
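+/* Saturating shift right by immediate and narrow.  */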
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqshrn_n_s16 (int16x8_t __a, const int __b)
+{
+ return (int8x8_t) __builtin_aarch64_sqshrn_nv8hi (__a, __b);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqshrn_n_s32 (int32x4_t __a, const int __b)
+{
+ return (int16x4_t) __builtin_aarch64_sqshrn_nv4si (__a, __b);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqshrn_n_s64 (int64x2_t __a, const int __b)
+{
+ return (int32x2_t) __builtin_aarch64_sqshrn_nv2di (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqshrn_n_u16 (uint16x8_t __a, const int __b)
+{
+ return (uint8x8_t) __builtin_aarch64_uqshrn_nv8hi ((int16x8_t) __a, __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqshrn_n_u32 (uint32x4_t __a, const int __b)
+{
+ return (uint16x4_t) __builtin_aarch64_uqshrn_nv4si ((int32x4_t) __a, __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqshrn_n_u64 (uint64x2_t __a, const int __b)
+{
+ return (uint32x2_t) __builtin_aarch64_uqshrn_nv2di ((int64x2_t) __a, __b);
+}
+
+__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
+vqshrnh_n_s16 (int16x1_t __a, const int __b)
+{
+ return (int8x1_t) __builtin_aarch64_sqshrn_nhi (__a, __b);
+}
+
+__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
+vqshrns_n_s32 (int32x1_t __a, const int __b)
+{
+ return (int16x1_t) __builtin_aarch64_sqshrn_nsi (__a, __b);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqshrnd_n_s64 (int64x1_t __a, const int __b)
+{
+ return (int32x1_t) __builtin_aarch64_sqshrn_ndi (__a, __b);
+}
+
+__extension__ static __inline uint8x1_t __attribute__ ((__always_inline__))
+vqshrnh_n_u16 (uint16x1_t __a, const int __b)
+{
+ return (uint8x1_t) __builtin_aarch64_uqshrn_nhi (__a, __b);
+}
+
+__extension__ static __inline uint16x1_t __attribute__ ((__always_inline__))
+vqshrns_n_u32 (uint32x1_t __a, const int __b)
+{
+ return (uint16x1_t) __builtin_aarch64_uqshrn_nsi (__a, __b);
+}
+
+__extension__ static __inline uint32x1_t __attribute__ ((__always_inline__))
+vqshrnd_n_u64 (uint64x1_t __a, const int __b)
+{
+ return (uint32x1_t) __builtin_aarch64_uqshrn_ndi (__a, __b);
+}
+
+/* vqshrun */
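+/* Saturating shift right by immediate and narrow, with signed input and
+   unsigned result.  */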
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqshrun_n_s16 (int16x8_t __a, const int __b)
+{
+ return (uint8x8_t) __builtin_aarch64_sqshrun_nv8hi (__a, __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqshrun_n_s32 (int32x4_t __a, const int __b)
+{
+ return (uint16x4_t) __builtin_aarch64_sqshrun_nv4si (__a, __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqshrun_n_s64 (int64x2_t __a, const int __b)
+{
+ return (uint32x2_t) __builtin_aarch64_sqshrun_nv2di (__a, __b);
+}
+
+__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
+vqshrunh_n_s16 (int16x1_t __a, const int __b)
+{
+ return (int8x1_t) __builtin_aarch64_sqshrun_nhi (__a, __b);
+}
+
+__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
+vqshruns_n_s32 (int32x1_t __a, const int __b)
+{
+ return (int16x1_t) __builtin_aarch64_sqshrun_nsi (__a, __b);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqshrund_n_s64 (int64x1_t __a, const int __b)
+{
+ return (int32x1_t) __builtin_aarch64_sqshrun_ndi (__a, __b);
+}
+
+/* vqsub */
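+/* Saturating subtraction on single-lane (scalar) operands.  */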
+
+__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
+vqsubb_s8 (int8x1_t __a, int8x1_t __b)
+{
+ return (int8x1_t) __builtin_aarch64_sqsubqi (__a, __b);
+}
+
+__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
+vqsubh_s16 (int16x1_t __a, int16x1_t __b)
+{
+ return (int16x1_t) __builtin_aarch64_sqsubhi (__a, __b);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vqsubs_s32 (int32x1_t __a, int32x1_t __b)
+{
+ return (int32x1_t) __builtin_aarch64_sqsubsi (__a, __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vqsubd_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (int64x1_t) __builtin_aarch64_sqsubdi (__a, __b);
+}
+
+__extension__ static __inline uint8x1_t __attribute__ ((__always_inline__))
+vqsubb_u8 (uint8x1_t __a, uint8x1_t __b)
+{
+ return (uint8x1_t) __builtin_aarch64_uqsubqi (__a, __b);
+}
+
+__extension__ static __inline uint16x1_t __attribute__ ((__always_inline__))
+vqsubh_u16 (uint16x1_t __a, uint16x1_t __b)
+{
+ return (uint16x1_t) __builtin_aarch64_uqsubhi (__a, __b);
+}
+
+__extension__ static __inline uint32x1_t __attribute__ ((__always_inline__))
+vqsubs_u32 (uint32x1_t __a, uint32x1_t __b)
+{
+ return (uint32x1_t) __builtin_aarch64_uqsubsi (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vqsubd_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return (uint64x1_t) __builtin_aarch64_uqsubdi (__a, __b);
+}
+
+/* vrecpe */
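+/* Floating-point reciprocal estimate.  */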
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vrecpes_f32 (float32_t __a)
+{
+ return __builtin_aarch64_frecpesf (__a);
+}
+
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vrecped_f64 (float64_t __a)
+{
+ return __builtin_aarch64_frecpedf (__a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vrecpe_f32 (float32x2_t __a)
+{
+ return __builtin_aarch64_frecpev2sf (__a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vrecpeq_f32 (float32x4_t __a)
+{
+ return __builtin_aarch64_frecpev4sf (__a);
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vrecpeq_f64 (float64x2_t __a)
+{
+ return __builtin_aarch64_frecpev2df (__a);
+}
+
+/* vrecps */
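+/* Floating-point reciprocal step, 2.0 - (__a * __b), used to refine a
+   vrecpe estimate by Newton-Raphson iteration.  */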
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vrecpss_f32 (float32_t __a, float32_t __b)
+{
+ return __builtin_aarch64_frecpssf (__a, __b);
+}
+
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vrecpsd_f64 (float64_t __a, float64_t __b)
+{
+ return __builtin_aarch64_frecpsdf (__a, __b);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vrecps_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return __builtin_aarch64_frecpsv2sf (__a, __b);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vrecpsq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return __builtin_aarch64_frecpsv4sf (__a, __b);
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vrecpsq_f64 (float64x2_t __a, float64x2_t __b)
+{
+ return __builtin_aarch64_frecpsv2df (__a, __b);
+}
+
+/* vrecpx */
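+/* Floating-point reciprocal exponent.  */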
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vrecpxs_f32 (float32_t __a)
+{
+ return __builtin_aarch64_frecpxsf (__a);
+}
+
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vrecpxd_f64 (float64_t __a)
+{
+ return __builtin_aarch64_frecpxdf (__a);
+}
+
+/* vrnd */
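+/* Round toward zero (truncate).  */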
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vrnd_f32 (float32x2_t __a)
+{
+ return __builtin_aarch64_btruncv2sf (__a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vrndq_f32 (float32x4_t __a)
+{
+ return __builtin_aarch64_btruncv4sf (__a);
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vrndq_f64 (float64x2_t __a)
+{
+ return __builtin_aarch64_btruncv2df (__a);
+}
+
+/* vrnda */
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vrnda_f32 (float32x2_t __a)
+{
+ return __builtin_aarch64_roundv2sf (__a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vrndaq_f32 (float32x4_t __a)
+{
+ return __builtin_aarch64_roundv4sf (__a);
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vrndaq_f64 (float64x2_t __a)
+{
+ return __builtin_aarch64_roundv2df (__a);
+}
+
+/* vrndi */
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vrndi_f32 (float32x2_t __a)
+{
+ return __builtin_aarch64_nearbyintv2sf (__a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vrndiq_f32 (float32x4_t __a)
+{
+ return __builtin_aarch64_nearbyintv4sf (__a);
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vrndiq_f64 (float64x2_t __a)
+{
+ return __builtin_aarch64_nearbyintv2df (__a);
+}
+
+/* vrndm */
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vrndm_f32 (float32x2_t __a)
+{
+ return __builtin_aarch64_floorv2sf (__a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vrndmq_f32 (float32x4_t __a)
+{
+ return __builtin_aarch64_floorv4sf (__a);
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vrndmq_f64 (float64x2_t __a)
+{
+ return __builtin_aarch64_floorv2df (__a);
+}
+
+/* vrndn */
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vrndn_f32 (float32x2_t __a)
+{
+ return __builtin_aarch64_frintnv2sf (__a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vrndnq_f32 (float32x4_t __a)
+{
+ return __builtin_aarch64_frintnv4sf (__a);
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vrndnq_f64 (float64x2_t __a)
+{
+ return __builtin_aarch64_frintnv2df (__a);
+}
+
+/* vrndp */
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vrndp_f32 (float32x2_t __a)
+{
+ return __builtin_aarch64_ceilv2sf (__a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vrndpq_f32 (float32x4_t __a)
+{
+ return __builtin_aarch64_ceilv4sf (__a);
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vrndpq_f64 (float64x2_t __a)
+{
+ return __builtin_aarch64_ceilv2df (__a);
+}
+
+/* vrndx */
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vrndx_f32 (float32x2_t __a)
+{
+ return __builtin_aarch64_rintv2sf (__a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vrndxq_f32 (float32x4_t __a)
+{
+ return __builtin_aarch64_rintv4sf (__a);
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vrndxq_f64 (float64x2_t __a)
+{
+ return __builtin_aarch64_rintv2df (__a);
+}
+
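+/* Note: the vrnd* family above maps onto the FRINT rounding modes:
+   vrnd  -> trunc      (toward zero)
+   vrnda -> round      (nearest, ties away from zero)
+   vrndi -> nearbyint  (current rounding mode, no inexact exception)
+   vrndm -> floor      (toward minus infinity)
+   vrndn -> FRINTN     (nearest, ties to even)
+   vrndp -> ceil       (toward plus infinity)
+   vrndx -> rint       (current rounding mode, may raise inexact).  */
+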
+/* vrshl */
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vrshl_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t) __builtin_aarch64_srshlv8qi (__a, __b);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vrshl_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t) __builtin_aarch64_srshlv4hi (__a, __b);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vrshl_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t) __builtin_aarch64_srshlv2si (__a, __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vrshl_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (int64x1_t) __builtin_aarch64_srshldi (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vrshl_u8 (uint8x8_t __a, int8x8_t __b)
+{
+ return (uint8x8_t) __builtin_aarch64_urshlv8qi ((int8x8_t) __a, __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vrshl_u16 (uint16x4_t __a, int16x4_t __b)
+{
+ return (uint16x4_t) __builtin_aarch64_urshlv4hi ((int16x4_t) __a, __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vrshl_u32 (uint32x2_t __a, int32x2_t __b)
+{
+ return (uint32x2_t) __builtin_aarch64_urshlv2si ((int32x2_t) __a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vrshl_u64 (uint64x1_t __a, int64x1_t __b)
+{
+ return (uint64x1_t) __builtin_aarch64_urshldi ((int64x1_t) __a, __b);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vrshlq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t) __builtin_aarch64_srshlv16qi (__a, __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vrshlq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t) __builtin_aarch64_srshlv8hi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vrshlq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t) __builtin_aarch64_srshlv4si (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vrshlq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int64x2_t) __builtin_aarch64_srshlv2di (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vrshlq_u8 (uint8x16_t __a, int8x16_t __b)
+{
+ return (uint8x16_t) __builtin_aarch64_urshlv16qi ((int8x16_t) __a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vrshlq_u16 (uint16x8_t __a, int16x8_t __b)
+{
+ return (uint16x8_t) __builtin_aarch64_urshlv8hi ((int16x8_t) __a, __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vrshlq_u32 (uint32x4_t __a, int32x4_t __b)
+{
+ return (uint32x4_t) __builtin_aarch64_urshlv4si ((int32x4_t) __a, __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vrshlq_u64 (uint64x2_t __a, int64x2_t __b)
+{
+ return (uint64x2_t) __builtin_aarch64_urshlv2di ((int64x2_t) __a, __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vrshld_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (int64x1_t) __builtin_aarch64_srshldi (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vrshld_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return (uint64x1_t) __builtin_aarch64_urshldi (__a, __b);
+}
+
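+/* Note: vrshl shifts each element of __a left by the corresponding
+   (signed) element of __b with rounding; a negative count gives a
+   rounding right shift, e.g. (rough sketch, illustration only):
+
+     int16x4_t halved = vrshl_s16 (x, vdup_n_s16 (-1));  */
+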
+/* vrshr */
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vrshr_n_s8 (int8x8_t __a, const int __b)
+{
+ return (int8x8_t) __builtin_aarch64_srshr_nv8qi (__a, __b);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vrshr_n_s16 (int16x4_t __a, const int __b)
+{
+ return (int16x4_t) __builtin_aarch64_srshr_nv4hi (__a, __b);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vrshr_n_s32 (int32x2_t __a, const int __b)
+{
+ return (int32x2_t) __builtin_aarch64_srshr_nv2si (__a, __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vrshr_n_s64 (int64x1_t __a, const int __b)
+{
+ return (int64x1_t) __builtin_aarch64_srshr_ndi (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vrshr_n_u8 (uint8x8_t __a, const int __b)
+{
+ return (uint8x8_t) __builtin_aarch64_urshr_nv8qi ((int8x8_t) __a, __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vrshr_n_u16 (uint16x4_t __a, const int __b)
+{
+ return (uint16x4_t) __builtin_aarch64_urshr_nv4hi ((int16x4_t) __a, __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vrshr_n_u32 (uint32x2_t __a, const int __b)
+{
+ return (uint32x2_t) __builtin_aarch64_urshr_nv2si ((int32x2_t) __a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vrshr_n_u64 (uint64x1_t __a, const int __b)
+{
+ return (uint64x1_t) __builtin_aarch64_urshr_ndi ((int64x1_t) __a, __b);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vrshrq_n_s8 (int8x16_t __a, const int __b)
+{
+ return (int8x16_t) __builtin_aarch64_srshr_nv16qi (__a, __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vrshrq_n_s16 (int16x8_t __a, const int __b)
+{
+ return (int16x8_t) __builtin_aarch64_srshr_nv8hi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vrshrq_n_s32 (int32x4_t __a, const int __b)
+{
+ return (int32x4_t) __builtin_aarch64_srshr_nv4si (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vrshrq_n_s64 (int64x2_t __a, const int __b)
+{
+ return (int64x2_t) __builtin_aarch64_srshr_nv2di (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vrshrq_n_u8 (uint8x16_t __a, const int __b)
+{
+ return (uint8x16_t) __builtin_aarch64_urshr_nv16qi ((int8x16_t) __a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vrshrq_n_u16 (uint16x8_t __a, const int __b)
+{
+ return (uint16x8_t) __builtin_aarch64_urshr_nv8hi ((int16x8_t) __a, __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vrshrq_n_u32 (uint32x4_t __a, const int __b)
+{
+ return (uint32x4_t) __builtin_aarch64_urshr_nv4si ((int32x4_t) __a, __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vrshrq_n_u64 (uint64x2_t __a, const int __b)
+{
+ return (uint64x2_t) __builtin_aarch64_urshr_nv2di ((int64x2_t) __a, __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vrshrd_n_s64 (int64x1_t __a, const int __b)
+{
+ return (int64x1_t) __builtin_aarch64_srshr_ndi (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vrshrd_n_u64 (uint64x1_t __a, const int __b)
+{
+ return (uint64x1_t) __builtin_aarch64_urshr_ndi (__a, __b);
+}
+
+/* vrsra */
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vrsra_n_s8 (int8x8_t __a, int8x8_t __b, const int __c)
+{
+ return (int8x8_t) __builtin_aarch64_srsra_nv8qi (__a, __b, __c);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vrsra_n_s16 (int16x4_t __a, int16x4_t __b, const int __c)
+{
+ return (int16x4_t) __builtin_aarch64_srsra_nv4hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vrsra_n_s32 (int32x2_t __a, int32x2_t __b, const int __c)
+{
+ return (int32x2_t) __builtin_aarch64_srsra_nv2si (__a, __b, __c);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vrsra_n_s64 (int64x1_t __a, int64x1_t __b, const int __c)
+{
+ return (int64x1_t) __builtin_aarch64_srsra_ndi (__a, __b, __c);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vrsra_n_u8 (uint8x8_t __a, uint8x8_t __b, const int __c)
+{
+ return (uint8x8_t) __builtin_aarch64_ursra_nv8qi ((int8x8_t) __a,
+ (int8x8_t) __b, __c);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vrsra_n_u16 (uint16x4_t __a, uint16x4_t __b, const int __c)
+{
+ return (uint16x4_t) __builtin_aarch64_ursra_nv4hi ((int16x4_t) __a,
+ (int16x4_t) __b, __c);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vrsra_n_u32 (uint32x2_t __a, uint32x2_t __b, const int __c)
+{
+ return (uint32x2_t) __builtin_aarch64_ursra_nv2si ((int32x2_t) __a,
+ (int32x2_t) __b, __c);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vrsra_n_u64 (uint64x1_t __a, uint64x1_t __b, const int __c)
+{
+ return (uint64x1_t) __builtin_aarch64_ursra_ndi ((int64x1_t) __a,
+ (int64x1_t) __b, __c);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vrsraq_n_s8 (int8x16_t __a, int8x16_t __b, const int __c)
+{
+ return (int8x16_t) __builtin_aarch64_srsra_nv16qi (__a, __b, __c);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vrsraq_n_s16 (int16x8_t __a, int16x8_t __b, const int __c)
+{
+ return (int16x8_t) __builtin_aarch64_srsra_nv8hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vrsraq_n_s32 (int32x4_t __a, int32x4_t __b, const int __c)
+{
+ return (int32x4_t) __builtin_aarch64_srsra_nv4si (__a, __b, __c);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vrsraq_n_s64 (int64x2_t __a, int64x2_t __b, const int __c)
+{
+ return (int64x2_t) __builtin_aarch64_srsra_nv2di (__a, __b, __c);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vrsraq_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __c)
+{
+ return (uint8x16_t) __builtin_aarch64_ursra_nv16qi ((int8x16_t) __a,
+ (int8x16_t) __b, __c);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vrsraq_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __c)
+{
+ return (uint16x8_t) __builtin_aarch64_ursra_nv8hi ((int16x8_t) __a,
+ (int16x8_t) __b, __c);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vrsraq_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __c)
+{
+ return (uint32x4_t) __builtin_aarch64_ursra_nv4si ((int32x4_t) __a,
+ (int32x4_t) __b, __c);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vrsraq_n_u64 (uint64x2_t __a, uint64x2_t __b, const int __c)
+{
+ return (uint64x2_t) __builtin_aarch64_ursra_nv2di ((int64x2_t) __a,
+ (int64x2_t) __b, __c);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vrsrad_n_s64 (int64x1_t __a, int64x1_t __b, const int __c)
+{
+ return (int64x1_t) __builtin_aarch64_srsra_ndi (__a, __b, __c);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vrsrad_n_u64 (uint64x1_t __a, uint64x1_t __b, const int __c)
+{
+ return (uint64x1_t) __builtin_aarch64_ursra_ndi (__a, __b, __c);
+}
+
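+/* Note: vrsra_n shifts each element of __b right by __c with rounding
+   and accumulates the result into the corresponding element of __a,
+   i.e. roughly __a + ((__b + (1 << (__c - 1))) >> __c) per element.  */
+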
+#ifdef __ARM_FEATURE_CRYPTO
+
+/* vsha1 */
+
+static __inline uint32x4_t
+vsha1cq_u32 (uint32x4_t hash_abcd, uint32_t hash_e, uint32x4_t wk)
+{
+ return __builtin_aarch64_crypto_sha1cv4si_uuuu (hash_abcd, hash_e, wk);
+}
+static __inline uint32x4_t
+vsha1mq_u32 (uint32x4_t hash_abcd, uint32_t hash_e, uint32x4_t wk)
+{
+ return __builtin_aarch64_crypto_sha1mv4si_uuuu (hash_abcd, hash_e, wk);
+}
+static __inline uint32x4_t
+vsha1pq_u32 (uint32x4_t hash_abcd, uint32_t hash_e, uint32x4_t wk)
+{
+ return __builtin_aarch64_crypto_sha1pv4si_uuuu (hash_abcd, hash_e, wk);
+}
+
+static __inline uint32_t
+vsha1h_u32 (uint32_t hash_e)
+{
+ return __builtin_aarch64_crypto_sha1hsi_uu (hash_e);
+}
+
+static __inline uint32x4_t
+vsha1su0q_u32 (uint32x4_t w0_3, uint32x4_t w4_7, uint32x4_t w8_11)
+{
+ return __builtin_aarch64_crypto_sha1su0v4si_uuuu (w0_3, w4_7, w8_11);
+}
+
+static __inline uint32x4_t
+vsha1su1q_u32 (uint32x4_t tw0_3, uint32x4_t w12_15)
+{
+ return __builtin_aarch64_crypto_sha1su1v4si_uuu (tw0_3, w12_15);
+}
+
+static __inline uint32x4_t
+vsha256hq_u32 (uint32x4_t hash_abcd, uint32x4_t hash_efgh, uint32x4_t wk)
+{
+ return __builtin_aarch64_crypto_sha256hv4si_uuuu (hash_abcd, hash_efgh, wk);
+}
+
+static __inline uint32x4_t
+vsha256h2q_u32 (uint32x4_t hash_efgh, uint32x4_t hash_abcd, uint32x4_t wk)
+{
+ return __builtin_aarch64_crypto_sha256h2v4si_uuuu (hash_efgh, hash_abcd, wk);
+}
+
+static __inline uint32x4_t
+vsha256su0q_u32 (uint32x4_t w0_3, uint32x4_t w4_7)
+{
+ return __builtin_aarch64_crypto_sha256su0v4si_uuu (w0_3, w4_7);
+}
+
+static __inline uint32x4_t
+vsha256su1q_u32 (uint32x4_t tw0_3, uint32x4_t w8_11, uint32x4_t w12_15)
+{
+ return __builtin_aarch64_crypto_sha256su1v4si_uuuu (tw0_3, w8_11, w12_15);
+}
+
+static __inline poly128_t
+vmull_p64 (poly64_t a, poly64_t b)
+{
+  return __builtin_aarch64_crypto_pmulldi_ppp (a, b);
+}
+
+static __inline poly128_t
+vmull_high_p64 (poly64x2_t a, poly64x2_t b)
+{
+ return __builtin_aarch64_crypto_pmullv2di_ppp (a, b);
+}
+
+#endif
+
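+/* Note: the SHA-1/SHA-256 and vmull_p64 intrinsics above are only
+   available when __ARM_FEATURE_CRYPTO is defined (e.g. when compiling
+   with -march=armv8-a+crypto).  A rough sketch of one SHA-256 step,
+   for illustration only, with state0/state1 holding ABCD/EFGH and wk
+   holding schedule words already added to the round constants:
+
+     uint32x4_t tmp = state0;
+     state0 = vsha256hq_u32 (state0, state1, wk);
+     state1 = vsha256h2q_u32 (state1, tmp, wk);  */
+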
+/* vshl */
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vshl_n_s8 (int8x8_t __a, const int __b)
+{
+ return (int8x8_t) __builtin_aarch64_ashlv8qi (__a, __b);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vshl_n_s16 (int16x4_t __a, const int __b)
+{
+ return (int16x4_t) __builtin_aarch64_ashlv4hi (__a, __b);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vshl_n_s32 (int32x2_t __a, const int __b)
+{
+ return (int32x2_t) __builtin_aarch64_ashlv2si (__a, __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vshl_n_s64 (int64x1_t __a, const int __b)
+{
+ return (int64x1_t) __builtin_aarch64_ashldi (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vshl_n_u8 (uint8x8_t __a, const int __b)
+{
+ return (uint8x8_t) __builtin_aarch64_ashlv8qi ((int8x8_t) __a, __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vshl_n_u16 (uint16x4_t __a, const int __b)
+{
+ return (uint16x4_t) __builtin_aarch64_ashlv4hi ((int16x4_t) __a, __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vshl_n_u32 (uint32x2_t __a, const int __b)
+{
+ return (uint32x2_t) __builtin_aarch64_ashlv2si ((int32x2_t) __a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vshl_n_u64 (uint64x1_t __a, const int __b)
+{
+ return (uint64x1_t) __builtin_aarch64_ashldi ((int64x1_t) __a, __b);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vshlq_n_s8 (int8x16_t __a, const int __b)
+{
+ return (int8x16_t) __builtin_aarch64_ashlv16qi (__a, __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vshlq_n_s16 (int16x8_t __a, const int __b)
+{
+ return (int16x8_t) __builtin_aarch64_ashlv8hi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vshlq_n_s32 (int32x4_t __a, const int __b)
+{
+ return (int32x4_t) __builtin_aarch64_ashlv4si (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vshlq_n_s64 (int64x2_t __a, const int __b)
+{
+ return (int64x2_t) __builtin_aarch64_ashlv2di (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vshlq_n_u8 (uint8x16_t __a, const int __b)
+{
+ return (uint8x16_t) __builtin_aarch64_ashlv16qi ((int8x16_t) __a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vshlq_n_u16 (uint16x8_t __a, const int __b)
+{
+ return (uint16x8_t) __builtin_aarch64_ashlv8hi ((int16x8_t) __a, __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vshlq_n_u32 (uint32x4_t __a, const int __b)
+{
+ return (uint32x4_t) __builtin_aarch64_ashlv4si ((int32x4_t) __a, __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vshlq_n_u64 (uint64x2_t __a, const int __b)
+{
+ return (uint64x2_t) __builtin_aarch64_ashlv2di ((int64x2_t) __a, __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vshld_n_s64 (int64x1_t __a, const int __b)
+{
+ return (int64x1_t) __builtin_aarch64_ashldi (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vshld_n_u64 (uint64x1_t __a, const int __b)
+{
+ return (uint64x1_t) __builtin_aarch64_ashldi (__a, __b);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vshl_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t) __builtin_aarch64_sshlv8qi (__a, __b);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vshl_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t) __builtin_aarch64_sshlv4hi (__a, __b);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vshl_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t) __builtin_aarch64_sshlv2si (__a, __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vshl_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (int64x1_t) __builtin_aarch64_sshldi (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vshl_u8 (uint8x8_t __a, int8x8_t __b)
+{
+ return (uint8x8_t) __builtin_aarch64_ushlv8qi ((int8x8_t) __a, __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vshl_u16 (uint16x4_t __a, int16x4_t __b)
+{
+ return (uint16x4_t) __builtin_aarch64_ushlv4hi ((int16x4_t) __a, __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vshl_u32 (uint32x2_t __a, int32x2_t __b)
+{
+ return (uint32x2_t) __builtin_aarch64_ushlv2si ((int32x2_t) __a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vshl_u64 (uint64x1_t __a, int64x1_t __b)
+{
+ return (uint64x1_t) __builtin_aarch64_ushldi ((int64x1_t) __a, __b);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vshlq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t) __builtin_aarch64_sshlv16qi (__a, __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vshlq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t) __builtin_aarch64_sshlv8hi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vshlq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t) __builtin_aarch64_sshlv4si (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vshlq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int64x2_t) __builtin_aarch64_sshlv2di (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vshlq_u8 (uint8x16_t __a, int8x16_t __b)
+{
+ return (uint8x16_t) __builtin_aarch64_ushlv16qi ((int8x16_t) __a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vshlq_u16 (uint16x8_t __a, int16x8_t __b)
+{
+ return (uint16x8_t) __builtin_aarch64_ushlv8hi ((int16x8_t) __a, __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vshlq_u32 (uint32x4_t __a, int32x4_t __b)
+{
+ return (uint32x4_t) __builtin_aarch64_ushlv4si ((int32x4_t) __a, __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vshlq_u64 (uint64x2_t __a, int64x2_t __b)
+{
+ return (uint64x2_t) __builtin_aarch64_ushlv2di ((int64x2_t) __a, __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vshld_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (int64x1_t) __builtin_aarch64_sshldi (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vshld_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return (uint64x1_t) __builtin_aarch64_ushldi (__a, __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vshll_high_n_s8 (int8x16_t __a, const int __b)
+{
+ return __builtin_aarch64_sshll2_nv16qi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vshll_high_n_s16 (int16x8_t __a, const int __b)
+{
+ return __builtin_aarch64_sshll2_nv8hi (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vshll_high_n_s32 (int32x4_t __a, const int __b)
+{
+ return __builtin_aarch64_sshll2_nv4si (__a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vshll_high_n_u8 (uint8x16_t __a, const int __b)
+{
+ return (uint16x8_t) __builtin_aarch64_ushll2_nv16qi ((int8x16_t) __a, __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vshll_high_n_u16 (uint16x8_t __a, const int __b)
+{
+ return (uint32x4_t) __builtin_aarch64_ushll2_nv8hi ((int16x8_t) __a, __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vshll_high_n_u32 (uint32x4_t __a, const int __b)
+{
+ return (uint64x2_t) __builtin_aarch64_ushll2_nv4si ((int32x4_t) __a, __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vshll_n_s8 (int8x8_t __a, const int __b)
+{
+ return __builtin_aarch64_sshll_nv8qi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vshll_n_s16 (int16x4_t __a, const int __b)
+{
+ return __builtin_aarch64_sshll_nv4hi (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vshll_n_s32 (int32x2_t __a, const int __b)
+{
+ return __builtin_aarch64_sshll_nv2si (__a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vshll_n_u8 (uint8x8_t __a, const int __b)
+{
+ return (uint16x8_t) __builtin_aarch64_ushll_nv8qi ((int8x8_t) __a, __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vshll_n_u16 (uint16x4_t __a, const int __b)
+{
+ return (uint32x4_t) __builtin_aarch64_ushll_nv4hi ((int16x4_t) __a, __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vshll_n_u32 (uint32x2_t __a, const int __b)
+{
+ return (uint64x2_t) __builtin_aarch64_ushll_nv2si ((int32x2_t) __a, __b);
+}
+
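+/* Note: vshll_n widens each element to twice its width and shifts it
+   left by the immediate; the vshll_high_n forms do the same for the
+   upper half of a 128-bit input (SSHLL2/USHLL2).  */
+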
+/* vshr */
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vshr_n_s8 (int8x8_t __a, const int __b)
+{
+ return (int8x8_t) __builtin_aarch64_ashrv8qi (__a, __b);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vshr_n_s16 (int16x4_t __a, const int __b)
+{
+ return (int16x4_t) __builtin_aarch64_ashrv4hi (__a, __b);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vshr_n_s32 (int32x2_t __a, const int __b)
+{
+ return (int32x2_t) __builtin_aarch64_ashrv2si (__a, __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vshr_n_s64 (int64x1_t __a, const int __b)
+{
+ return (int64x1_t) __builtin_aarch64_ashr_simddi (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vshr_n_u8 (uint8x8_t __a, const int __b)
+{
+ return (uint8x8_t) __builtin_aarch64_lshrv8qi ((int8x8_t) __a, __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vshr_n_u16 (uint16x4_t __a, const int __b)
+{
+ return (uint16x4_t) __builtin_aarch64_lshrv4hi ((int16x4_t) __a, __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vshr_n_u32 (uint32x2_t __a, const int __b)
+{
+ return (uint32x2_t) __builtin_aarch64_lshrv2si ((int32x2_t) __a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vshr_n_u64 (uint64x1_t __a, const int __b)
+{
+ return __builtin_aarch64_lshr_simddi_uus ( __a, __b);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vshrq_n_s8 (int8x16_t __a, const int __b)
+{
+ return (int8x16_t) __builtin_aarch64_ashrv16qi (__a, __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vshrq_n_s16 (int16x8_t __a, const int __b)
+{
+ return (int16x8_t) __builtin_aarch64_ashrv8hi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vshrq_n_s32 (int32x4_t __a, const int __b)
+{
+ return (int32x4_t) __builtin_aarch64_ashrv4si (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vshrq_n_s64 (int64x2_t __a, const int __b)
+{
+ return (int64x2_t) __builtin_aarch64_ashrv2di (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vshrq_n_u8 (uint8x16_t __a, const int __b)
+{
+ return (uint8x16_t) __builtin_aarch64_lshrv16qi ((int8x16_t) __a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vshrq_n_u16 (uint16x8_t __a, const int __b)
+{
+ return (uint16x8_t) __builtin_aarch64_lshrv8hi ((int16x8_t) __a, __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vshrq_n_u32 (uint32x4_t __a, const int __b)
+{
+ return (uint32x4_t) __builtin_aarch64_lshrv4si ((int32x4_t) __a, __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vshrq_n_u64 (uint64x2_t __a, const int __b)
+{
+ return (uint64x2_t) __builtin_aarch64_lshrv2di ((int64x2_t) __a, __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vshrd_n_s64 (int64x1_t __a, const int __b)
+{
+ return (int64x1_t) __builtin_aarch64_ashr_simddi (__a, __b);
+}
+
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+vshrd_n_u64 (uint64_t __a, const int __b)
+{
+ return __builtin_aarch64_lshr_simddi_uus (__a, __b);
+}
+
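+/* Note: vshr_n is an arithmetic shift right for the signed variants and
+   a logical shift right for the unsigned ones; the vshrd_n forms operate
+   on a single 64-bit scalar in the same way.  */
+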
+/* vsli */
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vsli_n_s8 (int8x8_t __a, int8x8_t __b, const int __c)
+{
+ return (int8x8_t) __builtin_aarch64_ssli_nv8qi (__a, __b, __c);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vsli_n_s16 (int16x4_t __a, int16x4_t __b, const int __c)
+{
+ return (int16x4_t) __builtin_aarch64_ssli_nv4hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vsli_n_s32 (int32x2_t __a, int32x2_t __b, const int __c)
+{
+ return (int32x2_t) __builtin_aarch64_ssli_nv2si (__a, __b, __c);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vsli_n_s64 (int64x1_t __a, int64x1_t __b, const int __c)
+{
+ return (int64x1_t) __builtin_aarch64_ssli_ndi (__a, __b, __c);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vsli_n_u8 (uint8x8_t __a, uint8x8_t __b, const int __c)
+{
+ return (uint8x8_t) __builtin_aarch64_usli_nv8qi ((int8x8_t) __a,
+ (int8x8_t) __b, __c);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vsli_n_u16 (uint16x4_t __a, uint16x4_t __b, const int __c)
+{
+ return (uint16x4_t) __builtin_aarch64_usli_nv4hi ((int16x4_t) __a,
+ (int16x4_t) __b, __c);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vsli_n_u32 (uint32x2_t __a, uint32x2_t __b, const int __c)
+{
+ return (uint32x2_t) __builtin_aarch64_usli_nv2si ((int32x2_t) __a,
+ (int32x2_t) __b, __c);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vsli_n_u64 (uint64x1_t __a, uint64x1_t __b, const int __c)
+{
+ return (uint64x1_t) __builtin_aarch64_usli_ndi ((int64x1_t) __a,
+ (int64x1_t) __b, __c);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vsliq_n_s8 (int8x16_t __a, int8x16_t __b, const int __c)
+{
+ return (int8x16_t) __builtin_aarch64_ssli_nv16qi (__a, __b, __c);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vsliq_n_s16 (int16x8_t __a, int16x8_t __b, const int __c)
+{
+ return (int16x8_t) __builtin_aarch64_ssli_nv8hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vsliq_n_s32 (int32x4_t __a, int32x4_t __b, const int __c)
+{
+ return (int32x4_t) __builtin_aarch64_ssli_nv4si (__a, __b, __c);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vsliq_n_s64 (int64x2_t __a, int64x2_t __b, const int __c)
+{
+ return (int64x2_t) __builtin_aarch64_ssli_nv2di (__a, __b, __c);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vsliq_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __c)
+{
+ return (uint8x16_t) __builtin_aarch64_usli_nv16qi ((int8x16_t) __a,
+ (int8x16_t) __b, __c);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vsliq_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __c)
+{
+ return (uint16x8_t) __builtin_aarch64_usli_nv8hi ((int16x8_t) __a,
+ (int16x8_t) __b, __c);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsliq_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __c)
+{
+ return (uint32x4_t) __builtin_aarch64_usli_nv4si ((int32x4_t) __a,
+ (int32x4_t) __b, __c);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vsliq_n_u64 (uint64x2_t __a, uint64x2_t __b, const int __c)
+{
+ return (uint64x2_t) __builtin_aarch64_usli_nv2di ((int64x2_t) __a,
+ (int64x2_t) __b, __c);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vslid_n_s64 (int64x1_t __a, int64x1_t __b, const int __c)
+{
+ return (int64x1_t) __builtin_aarch64_ssli_ndi (__a, __b, __c);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vslid_n_u64 (uint64x1_t __a, uint64x1_t __b, const int __c)
+{
+ return (uint64x1_t) __builtin_aarch64_usli_ndi (__a, __b, __c);
+}
+
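+/* Note: vsli_n (shift left and insert) shifts each element of __b left
+   by __c and inserts it into __a, leaving the low __c bits of each
+   destination element unchanged, which is handy for packing bit fields,
+   e.g. (rough sketch, illustration only):
+
+     uint8x8_t packed = vsli_n_u8 (low_nibbles, high_nibbles, 4);  */
+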
+/* vsqadd */
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vsqadd_u8 (uint8x8_t __a, int8x8_t __b)
+{
+ return (uint8x8_t) __builtin_aarch64_usqaddv8qi ((int8x8_t) __a,
+ (int8x8_t) __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vsqadd_u16 (uint16x4_t __a, int16x4_t __b)
+{
+ return (uint16x4_t) __builtin_aarch64_usqaddv4hi ((int16x4_t) __a,
+ (int16x4_t) __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vsqadd_u32 (uint32x2_t __a, int32x2_t __b)
+{
+ return (uint32x2_t) __builtin_aarch64_usqaddv2si ((int32x2_t) __a,
+ (int32x2_t) __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vsqadd_u64 (uint64x1_t __a, int64x1_t __b)
+{
+ return (uint64x1_t) __builtin_aarch64_usqadddi ((int64x1_t) __a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vsqaddq_u8 (uint8x16_t __a, int8x16_t __b)
+{
+ return (uint8x16_t) __builtin_aarch64_usqaddv16qi ((int8x16_t) __a,
+ (int8x16_t) __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vsqaddq_u16 (uint16x8_t __a, int16x8_t __b)
+{
+ return (uint16x8_t) __builtin_aarch64_usqaddv8hi ((int16x8_t) __a,
+ (int16x8_t) __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsqaddq_u32 (uint32x4_t __a, int32x4_t __b)
+{
+ return (uint32x4_t) __builtin_aarch64_usqaddv4si ((int32x4_t) __a,
+ (int32x4_t) __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vsqaddq_u64 (uint64x2_t __a, int64x2_t __b)
+{
+ return (uint64x2_t) __builtin_aarch64_usqaddv2di ((int64x2_t) __a,
+ (int64x2_t) __b);
+}
+
+__extension__ static __inline uint8x1_t __attribute__ ((__always_inline__))
+vsqaddb_u8 (uint8x1_t __a, int8x1_t __b)
+{
+ return (uint8x1_t) __builtin_aarch64_usqaddqi ((int8x1_t) __a, __b);
+}
+
+__extension__ static __inline uint16x1_t __attribute__ ((__always_inline__))
+vsqaddh_u16 (uint16x1_t __a, int16x1_t __b)
+{
+ return (uint16x1_t) __builtin_aarch64_usqaddhi ((int16x1_t) __a, __b);
+}
+
+__extension__ static __inline uint32x1_t __attribute__ ((__always_inline__))
+vsqadds_u32 (uint32x1_t __a, int32x1_t __b)
+{
+ return (uint32x1_t) __builtin_aarch64_usqaddsi ((int32x1_t) __a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vsqaddd_u64 (uint64x1_t __a, int64x1_t __b)
+{
+ return (uint64x1_t) __builtin_aarch64_usqadddi ((int64x1_t) __a, __b);
+}
+
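+/* Note: vsqadd (USQADD) adds a signed value to an unsigned accumulator
+   and saturates the result to the range of the unsigned element type.  */
+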
+/* vsqrt */
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vsqrt_f32 (float32x2_t a)
+{
+ return __builtin_aarch64_sqrtv2sf (a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vsqrtq_f32 (float32x4_t a)
+{
+ return __builtin_aarch64_sqrtv4sf (a);
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vsqrtq_f64 (float64x2_t a)
+{
+ return __builtin_aarch64_sqrtv2df (a);
+}
+
+/* vsra */
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vsra_n_s8 (int8x8_t __a, int8x8_t __b, const int __c)
+{
+ return (int8x8_t) __builtin_aarch64_ssra_nv8qi (__a, __b, __c);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vsra_n_s16 (int16x4_t __a, int16x4_t __b, const int __c)
+{
+ return (int16x4_t) __builtin_aarch64_ssra_nv4hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vsra_n_s32 (int32x2_t __a, int32x2_t __b, const int __c)
+{
+ return (int32x2_t) __builtin_aarch64_ssra_nv2si (__a, __b, __c);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vsra_n_s64 (int64x1_t __a, int64x1_t __b, const int __c)
+{
+ return (int64x1_t) __builtin_aarch64_ssra_ndi (__a, __b, __c);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vsra_n_u8 (uint8x8_t __a, uint8x8_t __b, const int __c)
+{
+ return (uint8x8_t) __builtin_aarch64_usra_nv8qi ((int8x8_t) __a,
+ (int8x8_t) __b, __c);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vsra_n_u16 (uint16x4_t __a, uint16x4_t __b, const int __c)
+{
+ return (uint16x4_t) __builtin_aarch64_usra_nv4hi ((int16x4_t) __a,
+ (int16x4_t) __b, __c);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vsra_n_u32 (uint32x2_t __a, uint32x2_t __b, const int __c)
+{
+ return (uint32x2_t) __builtin_aarch64_usra_nv2si ((int32x2_t) __a,
+ (int32x2_t) __b, __c);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vsra_n_u64 (uint64x1_t __a, uint64x1_t __b, const int __c)
+{
+ return (uint64x1_t) __builtin_aarch64_usra_ndi ((int64x1_t) __a,
+ (int64x1_t) __b, __c);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vsraq_n_s8 (int8x16_t __a, int8x16_t __b, const int __c)
+{
+ return (int8x16_t) __builtin_aarch64_ssra_nv16qi (__a, __b, __c);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vsraq_n_s16 (int16x8_t __a, int16x8_t __b, const int __c)
+{
+ return (int16x8_t) __builtin_aarch64_ssra_nv8hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vsraq_n_s32 (int32x4_t __a, int32x4_t __b, const int __c)
+{
+ return (int32x4_t) __builtin_aarch64_ssra_nv4si (__a, __b, __c);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vsraq_n_s64 (int64x2_t __a, int64x2_t __b, const int __c)
+{
+ return (int64x2_t) __builtin_aarch64_ssra_nv2di (__a, __b, __c);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vsraq_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __c)
+{
+ return (uint8x16_t) __builtin_aarch64_usra_nv16qi ((int8x16_t) __a,
+ (int8x16_t) __b, __c);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vsraq_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __c)
+{
+ return (uint16x8_t) __builtin_aarch64_usra_nv8hi ((int16x8_t) __a,
+ (int16x8_t) __b, __c);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsraq_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __c)
+{
+ return (uint32x4_t) __builtin_aarch64_usra_nv4si ((int32x4_t) __a,
+ (int32x4_t) __b, __c);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vsraq_n_u64 (uint64x2_t __a, uint64x2_t __b, const int __c)
+{
+ return (uint64x2_t) __builtin_aarch64_usra_nv2di ((int64x2_t) __a,
+ (int64x2_t) __b, __c);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vsrad_n_s64 (int64x1_t __a, int64x1_t __b, const int __c)
+{
+ return (int64x1_t) __builtin_aarch64_ssra_ndi (__a, __b, __c);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vsrad_n_u64 (uint64x1_t __a, uint64x1_t __b, const int __c)
+{
+ return (uint64x1_t) __builtin_aarch64_usra_ndi (__a, __b, __c);
+}
+
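+/* Note: vsra_n shifts each element of __b right by the immediate and
+   adds it to the corresponding element of __a, e.g. (rough sketch,
+   illustration only):
+
+     acc = vsra_n_s16 (acc, sample, 4);
+
+   which adds sample >> 4 to each lane of acc.  */
+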
+/* vsri */
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vsri_n_s8 (int8x8_t __a, int8x8_t __b, const int __c)
+{
+ return (int8x8_t) __builtin_aarch64_ssri_nv8qi (__a, __b, __c);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vsri_n_s16 (int16x4_t __a, int16x4_t __b, const int __c)
+{
+ return (int16x4_t) __builtin_aarch64_ssri_nv4hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vsri_n_s32 (int32x2_t __a, int32x2_t __b, const int __c)
+{
+ return (int32x2_t) __builtin_aarch64_ssri_nv2si (__a, __b, __c);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vsri_n_s64 (int64x1_t __a, int64x1_t __b, const int __c)
+{
+ return (int64x1_t) __builtin_aarch64_ssri_ndi (__a, __b, __c);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vsri_n_u8 (uint8x8_t __a, uint8x8_t __b, const int __c)
+{
+ return (uint8x8_t) __builtin_aarch64_usri_nv8qi ((int8x8_t) __a,
+ (int8x8_t) __b, __c);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vsri_n_u16 (uint16x4_t __a, uint16x4_t __b, const int __c)
+{
+ return (uint16x4_t) __builtin_aarch64_usri_nv4hi ((int16x4_t) __a,
+ (int16x4_t) __b, __c);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vsri_n_u32 (uint32x2_t __a, uint32x2_t __b, const int __c)
+{
+ return (uint32x2_t) __builtin_aarch64_usri_nv2si ((int32x2_t) __a,
+ (int32x2_t) __b, __c);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vsri_n_u64 (uint64x1_t __a, uint64x1_t __b, const int __c)
+{
+ return (uint64x1_t) __builtin_aarch64_usri_ndi ((int64x1_t) __a,
+ (int64x1_t) __b, __c);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vsriq_n_s8 (int8x16_t __a, int8x16_t __b, const int __c)
+{
+ return (int8x16_t) __builtin_aarch64_ssri_nv16qi (__a, __b, __c);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vsriq_n_s16 (int16x8_t __a, int16x8_t __b, const int __c)
+{
+ return (int16x8_t) __builtin_aarch64_ssri_nv8hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vsriq_n_s32 (int32x4_t __a, int32x4_t __b, const int __c)
+{
+ return (int32x4_t) __builtin_aarch64_ssri_nv4si (__a, __b, __c);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vsriq_n_s64 (int64x2_t __a, int64x2_t __b, const int __c)
+{
+ return (int64x2_t) __builtin_aarch64_ssri_nv2di (__a, __b, __c);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vsriq_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __c)
+{
+ return (uint8x16_t) __builtin_aarch64_usri_nv16qi ((int8x16_t) __a,
+ (int8x16_t) __b, __c);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vsriq_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __c)
+{
+ return (uint16x8_t) __builtin_aarch64_usri_nv8hi ((int16x8_t) __a,
+ (int16x8_t) __b, __c);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsriq_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __c)
+{
+ return (uint32x4_t) __builtin_aarch64_usri_nv4si ((int32x4_t) __a,
+ (int32x4_t) __b, __c);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vsriq_n_u64 (uint64x2_t __a, uint64x2_t __b, const int __c)
+{
+ return (uint64x2_t) __builtin_aarch64_usri_nv2di ((int64x2_t) __a,
+ (int64x2_t) __b, __c);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vsrid_n_s64 (int64x1_t __a, int64x1_t __b, const int __c)
+{
+ return (int64x1_t) __builtin_aarch64_ssri_ndi (__a, __b, __c);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vsrid_n_u64 (uint64x1_t __a, uint64x1_t __b, const int __c)
+{
+ return (uint64x1_t) __builtin_aarch64_usri_ndi (__a, __b, __c);
+}
+
+/* vst1 */
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_f32 (float32_t *a, float32x2_t b)
+{
+ __builtin_aarch64_st1v2sf ((__builtin_aarch64_simd_sf *) a, b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_f64 (float64_t *a, float64x1_t b)
+{
+ *a = b;
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_p8 (poly8_t *a, poly8x8_t b)
+{
+ __builtin_aarch64_st1v8qi ((__builtin_aarch64_simd_qi *) a,
+ (int8x8_t) b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_p16 (poly16_t *a, poly16x4_t b)
+{
+ __builtin_aarch64_st1v4hi ((__builtin_aarch64_simd_hi *) a,
+ (int16x4_t) b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_s8 (int8_t *a, int8x8_t b)
+{
+ __builtin_aarch64_st1v8qi ((__builtin_aarch64_simd_qi *) a, b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_s16 (int16_t *a, int16x4_t b)
+{
+ __builtin_aarch64_st1v4hi ((__builtin_aarch64_simd_hi *) a, b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_s32 (int32_t *a, int32x2_t b)
+{
+ __builtin_aarch64_st1v2si ((__builtin_aarch64_simd_si *) a, b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_s64 (int64_t *a, int64x1_t b)
+{
+ *a = b;
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_u8 (uint8_t *a, uint8x8_t b)
+{
+ __builtin_aarch64_st1v8qi ((__builtin_aarch64_simd_qi *) a,
+ (int8x8_t) b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_u16 (uint16_t *a, uint16x4_t b)
+{
+ __builtin_aarch64_st1v4hi ((__builtin_aarch64_simd_hi *) a,
+ (int16x4_t) b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_u32 (uint32_t *a, uint32x2_t b)
+{
+ __builtin_aarch64_st1v2si ((__builtin_aarch64_simd_si *) a,
+ (int32x2_t) b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_u64 (uint64_t *a, uint64x1_t b)
+{
+ *a = b;
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_f32 (float32_t *a, float32x4_t b)
+{
+ __builtin_aarch64_st1v4sf ((__builtin_aarch64_simd_sf *) a, b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_f64 (float64_t *a, float64x2_t b)
+{
+ __builtin_aarch64_st1v2df ((__builtin_aarch64_simd_df *) a, b);
+}
+
+/* vst1q */
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_p8 (poly8_t *a, poly8x16_t b)
+{
+ __builtin_aarch64_st1v16qi ((__builtin_aarch64_simd_qi *) a,
+ (int8x16_t) b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_p16 (poly16_t *a, poly16x8_t b)
+{
+ __builtin_aarch64_st1v8hi ((__builtin_aarch64_simd_hi *) a,
+ (int16x8_t) b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_s8 (int8_t *a, int8x16_t b)
+{
+ __builtin_aarch64_st1v16qi ((__builtin_aarch64_simd_qi *) a, b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_s16 (int16_t *a, int16x8_t b)
+{
+ __builtin_aarch64_st1v8hi ((__builtin_aarch64_simd_hi *) a, b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_s32 (int32_t *a, int32x4_t b)
+{
+ __builtin_aarch64_st1v4si ((__builtin_aarch64_simd_si *) a, b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_s64 (int64_t *a, int64x2_t b)
+{
+ __builtin_aarch64_st1v2di ((__builtin_aarch64_simd_di *) a, b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_u8 (uint8_t *a, uint8x16_t b)
+{
+ __builtin_aarch64_st1v16qi ((__builtin_aarch64_simd_qi *) a,
+ (int8x16_t) b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_u16 (uint16_t *a, uint16x8_t b)
+{
+ __builtin_aarch64_st1v8hi ((__builtin_aarch64_simd_hi *) a,
+ (int16x8_t) b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_u32 (uint32_t *a, uint32x4_t b)
+{
+ __builtin_aarch64_st1v4si ((__builtin_aarch64_simd_si *) a,
+ (int32x4_t) b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_u64 (uint64_t *a, uint64x2_t b)
+{
+ __builtin_aarch64_st1v2di ((__builtin_aarch64_simd_di *) a,
+ (int64x2_t) b);
+}
+
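+/* Note: the vst1/vst1q forms store every lane of the vector to
+   consecutive elements starting at the given pointer; the casts to the
+   __builtin_aarch64_simd_* pointer types only exist to match the
+   builtin prototypes.  Rough usage sketch, illustration only:
+
+     float buf[4];
+     vst1q_f32 (buf, v);  */
+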
+/* vstn */
+
+__extension__ static __inline void
+vst2_s64 (int64_t * __a, int64x1x2_t val)
+{
+ __builtin_aarch64_simd_oi __o;
+ int64x2x2_t temp;
+ temp.val[0] = vcombine_s64 (val.val[0], vcreate_s64 (__AARCH64_INT64_C (0)));
+ temp.val[1] = vcombine_s64 (val.val[1], vcreate_s64 (__AARCH64_INT64_C (0)));
+ __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) temp.val[1], 1);
+ __builtin_aarch64_st2di ((__builtin_aarch64_simd_di *) __a, __o);
+}
+
+__extension__ static __inline void
+vst2_u64 (uint64_t * __a, uint64x1x2_t val)
+{
+ __builtin_aarch64_simd_oi __o;
+ uint64x2x2_t temp;
+ temp.val[0] = vcombine_u64 (val.val[0], vcreate_u64 (__AARCH64_UINT64_C (0)));
+ temp.val[1] = vcombine_u64 (val.val[1], vcreate_u64 (__AARCH64_UINT64_C (0)));
+ __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) temp.val[1], 1);
+ __builtin_aarch64_st2di ((__builtin_aarch64_simd_di *) __a, __o);
+}
+
+__extension__ static __inline void
+vst2_f64 (float64_t * __a, float64x1x2_t val)
+{
+ __builtin_aarch64_simd_oi __o;
+ float64x2x2_t temp;
+ temp.val[0] = vcombine_f64 (val.val[0], vcreate_f64 (__AARCH64_UINT64_C (0)));
+ temp.val[1] = vcombine_f64 (val.val[1], vcreate_f64 (__AARCH64_UINT64_C (0)));
+ __o = __builtin_aarch64_set_qregoiv2df (__o, (float64x2_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv2df (__o, (float64x2_t) temp.val[1], 1);
+ __builtin_aarch64_st2df ((__builtin_aarch64_simd_df *) __a, __o);
+}
+
+__extension__ static __inline void
+vst2_s8 (int8_t * __a, int8x8x2_t val)
+{
+ __builtin_aarch64_simd_oi __o;
+ int8x16x2_t temp;
+ temp.val[0] = vcombine_s8 (val.val[0], vcreate_s8 (__AARCH64_INT64_C (0)));
+ temp.val[1] = vcombine_s8 (val.val[1], vcreate_s8 (__AARCH64_INT64_C (0)));
+ __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) temp.val[1], 1);
+ __builtin_aarch64_st2v8qi ((__builtin_aarch64_simd_qi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_p8 (poly8_t * __a, poly8x8x2_t val)
+{
+ __builtin_aarch64_simd_oi __o;
+ poly8x16x2_t temp;
+ temp.val[0] = vcombine_p8 (val.val[0], vcreate_p8 (__AARCH64_UINT64_C (0)));
+ temp.val[1] = vcombine_p8 (val.val[1], vcreate_p8 (__AARCH64_UINT64_C (0)));
+ __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) temp.val[1], 1);
+ __builtin_aarch64_st2v8qi ((__builtin_aarch64_simd_qi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_s16 (int16_t * __a, int16x4x2_t val)
+{
+ __builtin_aarch64_simd_oi __o;
+ int16x8x2_t temp;
+ temp.val[0] = vcombine_s16 (val.val[0], vcreate_s16 (__AARCH64_INT64_C (0)));
+ temp.val[1] = vcombine_s16 (val.val[1], vcreate_s16 (__AARCH64_INT64_C (0)));
+ __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) temp.val[1], 1);
+ __builtin_aarch64_st2v4hi ((__builtin_aarch64_simd_hi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_p16 (poly16_t * __a, poly16x4x2_t val)
+{
+ __builtin_aarch64_simd_oi __o;
+ poly16x8x2_t temp;
+ temp.val[0] = vcombine_p16 (val.val[0], vcreate_p16 (__AARCH64_UINT64_C (0)));
+ temp.val[1] = vcombine_p16 (val.val[1], vcreate_p16 (__AARCH64_UINT64_C (0)));
+ __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) temp.val[1], 1);
+ __builtin_aarch64_st2v4hi ((__builtin_aarch64_simd_hi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_s32 (int32_t * __a, int32x2x2_t val)
+{
+ __builtin_aarch64_simd_oi __o;
+ int32x4x2_t temp;
+ temp.val[0] = vcombine_s32 (val.val[0], vcreate_s32 (__AARCH64_INT64_C (0)));
+ temp.val[1] = vcombine_s32 (val.val[1], vcreate_s32 (__AARCH64_INT64_C (0)));
+ __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) temp.val[1], 1);
+ __builtin_aarch64_st2v2si ((__builtin_aarch64_simd_si *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_u8 (uint8_t * __a, uint8x8x2_t val)
+{
+ __builtin_aarch64_simd_oi __o;
+ uint8x16x2_t temp;
+ temp.val[0] = vcombine_u8 (val.val[0], vcreate_u8 (__AARCH64_UINT64_C (0)));
+ temp.val[1] = vcombine_u8 (val.val[1], vcreate_u8 (__AARCH64_UINT64_C (0)));
+ __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) temp.val[1], 1);
+ __builtin_aarch64_st2v8qi ((__builtin_aarch64_simd_qi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_u16 (uint16_t * __a, uint16x4x2_t val)
+{
+ __builtin_aarch64_simd_oi __o;
+ uint16x8x2_t temp;
+ temp.val[0] = vcombine_u16 (val.val[0], vcreate_u16 (__AARCH64_UINT64_C (0)));
+ temp.val[1] = vcombine_u16 (val.val[1], vcreate_u16 (__AARCH64_UINT64_C (0)));
+ __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) temp.val[1], 1);
+ __builtin_aarch64_st2v4hi ((__builtin_aarch64_simd_hi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_u32 (uint32_t * __a, uint32x2x2_t val)
+{
+ __builtin_aarch64_simd_oi __o;
+ uint32x4x2_t temp;
+ temp.val[0] = vcombine_u32 (val.val[0], vcreate_u32 (__AARCH64_UINT64_C (0)));
+ temp.val[1] = vcombine_u32 (val.val[1], vcreate_u32 (__AARCH64_UINT64_C (0)));
+ __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) temp.val[1], 1);
+ __builtin_aarch64_st2v2si ((__builtin_aarch64_simd_si *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_f32 (float32_t * __a, float32x2x2_t val)
+{
+ __builtin_aarch64_simd_oi __o;
+ float32x4x2_t temp;
+ temp.val[0] = vcombine_f32 (val.val[0], vcreate_f32 (__AARCH64_UINT64_C (0)));
+ temp.val[1] = vcombine_f32 (val.val[1], vcreate_f32 (__AARCH64_UINT64_C (0)));
+ __o = __builtin_aarch64_set_qregoiv4sf (__o, (float32x4_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv4sf (__o, (float32x4_t) temp.val[1], 1);
+ __builtin_aarch64_st2v2sf ((__builtin_aarch64_simd_sf *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_s8 (int8_t * __a, int8x16x2_t val)
+{
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) val.val[1], 1);
+ __builtin_aarch64_st2v16qi ((__builtin_aarch64_simd_qi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_p8 (poly8_t * __a, poly8x16x2_t val)
+{
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) val.val[1], 1);
+ __builtin_aarch64_st2v16qi ((__builtin_aarch64_simd_qi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_s16 (int16_t * __a, int16x8x2_t val)
+{
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) val.val[1], 1);
+ __builtin_aarch64_st2v8hi ((__builtin_aarch64_simd_hi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_p16 (poly16_t * __a, poly16x8x2_t val)
+{
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) val.val[1], 1);
+ __builtin_aarch64_st2v8hi ((__builtin_aarch64_simd_hi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_s32 (int32_t * __a, int32x4x2_t val)
+{
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) val.val[1], 1);
+ __builtin_aarch64_st2v4si ((__builtin_aarch64_simd_si *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_s64 (int64_t * __a, int64x2x2_t val)
+{
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) val.val[1], 1);
+ __builtin_aarch64_st2v2di ((__builtin_aarch64_simd_di *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_u8 (uint8_t * __a, uint8x16x2_t val)
+{
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) val.val[1], 1);
+ __builtin_aarch64_st2v16qi ((__builtin_aarch64_simd_qi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_u16 (uint16_t * __a, uint16x8x2_t val)
+{
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) val.val[1], 1);
+ __builtin_aarch64_st2v8hi ((__builtin_aarch64_simd_hi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_u32 (uint32_t * __a, uint32x4x2_t val)
+{
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) val.val[1], 1);
+ __builtin_aarch64_st2v4si ((__builtin_aarch64_simd_si *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_u64 (uint64_t * __a, uint64x2x2_t val)
+{
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) val.val[1], 1);
+ __builtin_aarch64_st2v2di ((__builtin_aarch64_simd_di *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_f32 (float32_t * __a, float32x4x2_t val)
+{
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_set_qregoiv4sf (__o, (float32x4_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv4sf (__o, (float32x4_t) val.val[1], 1);
+ __builtin_aarch64_st2v4sf ((__builtin_aarch64_simd_sf *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_f64 (float64_t * __a, float64x2x2_t val)
+{
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_set_qregoiv2df (__o, (float64x2_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv2df (__o, (float64x2_t) val.val[1], 1);
+ __builtin_aarch64_st2v2df ((__builtin_aarch64_simd_df *) __a, __o);
+}
+
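A minimal usage sketch of the vst2 family above (illustrative only; interleave_store_f32 is a hypothetical helper, not part of this header). ST2 writes its two source vectors element-interleaved, so the destination receives val[0] lane 0, val[1] lane 0, val[0] lane 1, val[1] lane 1, and so on.

#include <arm_neon.h>

/* Hypothetical helper: store re and im interleaved as re0, im0, re1, im1, ...
   dst must have room for 8 floats.  */
static void
interleave_store_f32 (float32_t *dst, float32x4_t re, float32x4_t im)
{
  float32x4x2_t pair;
  pair.val[0] = re;
  pair.val[1] = im;
  vst2q_f32 (dst, pair);
}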
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_s64 (int64_t * __a, int64x1x3_t val)
+{
+ __builtin_aarch64_simd_ci __o;
+ int64x2x3_t temp;
+ temp.val[0] = vcombine_s64 (val.val[0], vcreate_s64 (__AARCH64_INT64_C (0)));
+ temp.val[1] = vcombine_s64 (val.val[1], vcreate_s64 (__AARCH64_INT64_C (0)));
+ temp.val[2] = vcombine_s64 (val.val[2], vcreate_s64 (__AARCH64_INT64_C (0)));
+ __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) temp.val[1], 1);
+ __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) temp.val[2], 2);
+ __builtin_aarch64_st3di ((__builtin_aarch64_simd_di *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_u64 (uint64_t * __a, uint64x1x3_t val)
+{
+ __builtin_aarch64_simd_ci __o;
+ uint64x2x3_t temp;
+ temp.val[0] = vcombine_u64 (val.val[0], vcreate_u64 (__AARCH64_UINT64_C (0)));
+ temp.val[1] = vcombine_u64 (val.val[1], vcreate_u64 (__AARCH64_UINT64_C (0)));
+ temp.val[2] = vcombine_u64 (val.val[2], vcreate_u64 (__AARCH64_UINT64_C (0)));
+ __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) temp.val[1], 1);
+ __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) temp.val[2], 2);
+ __builtin_aarch64_st3di ((__builtin_aarch64_simd_di *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_f64 (float64_t * __a, float64x1x3_t val)
+{
+ __builtin_aarch64_simd_ci __o;
+ float64x2x3_t temp;
+ temp.val[0] = vcombine_f64 (val.val[0], vcreate_f64 (__AARCH64_UINT64_C (0)));
+ temp.val[1] = vcombine_f64 (val.val[1], vcreate_f64 (__AARCH64_UINT64_C (0)));
+ temp.val[2] = vcombine_f64 (val.val[2], vcreate_f64 (__AARCH64_UINT64_C (0)));
+ __o = __builtin_aarch64_set_qregciv2df (__o, (float64x2_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregciv2df (__o, (float64x2_t) temp.val[1], 1);
+ __o = __builtin_aarch64_set_qregciv2df (__o, (float64x2_t) temp.val[2], 2);
+ __builtin_aarch64_st3df ((__builtin_aarch64_simd_df *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_s8 (int8_t * __a, int8x8x3_t val)
+{
+ __builtin_aarch64_simd_ci __o;
+ int8x16x3_t temp;
+ temp.val[0] = vcombine_s8 (val.val[0], vcreate_s8 (__AARCH64_INT64_C (0)));
+ temp.val[1] = vcombine_s8 (val.val[1], vcreate_s8 (__AARCH64_INT64_C (0)));
+ temp.val[2] = vcombine_s8 (val.val[2], vcreate_s8 (__AARCH64_INT64_C (0)));
+ __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) temp.val[1], 1);
+ __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) temp.val[2], 2);
+ __builtin_aarch64_st3v8qi ((__builtin_aarch64_simd_qi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_p8 (poly8_t * __a, poly8x8x3_t val)
+{
+ __builtin_aarch64_simd_ci __o;
+ poly8x16x3_t temp;
+ temp.val[0] = vcombine_p8 (val.val[0], vcreate_p8 (__AARCH64_UINT64_C (0)));
+ temp.val[1] = vcombine_p8 (val.val[1], vcreate_p8 (__AARCH64_UINT64_C (0)));
+ temp.val[2] = vcombine_p8 (val.val[2], vcreate_p8 (__AARCH64_UINT64_C (0)));
+ __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) temp.val[1], 1);
+ __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) temp.val[2], 2);
+ __builtin_aarch64_st3v8qi ((__builtin_aarch64_simd_qi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_s16 (int16_t * __a, int16x4x3_t val)
+{
+ __builtin_aarch64_simd_ci __o;
+ int16x8x3_t temp;
+ temp.val[0] = vcombine_s16 (val.val[0], vcreate_s16 (__AARCH64_INT64_C (0)));
+ temp.val[1] = vcombine_s16 (val.val[1], vcreate_s16 (__AARCH64_INT64_C (0)));
+ temp.val[2] = vcombine_s16 (val.val[2], vcreate_s16 (__AARCH64_INT64_C (0)));
+ __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) temp.val[1], 1);
+ __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) temp.val[2], 2);
+ __builtin_aarch64_st3v4hi ((__builtin_aarch64_simd_hi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_p16 (poly16_t * __a, poly16x4x3_t val)
+{
+ __builtin_aarch64_simd_ci __o;
+ poly16x8x3_t temp;
+ temp.val[0] = vcombine_p16 (val.val[0], vcreate_p16 (__AARCH64_UINT64_C (0)));
+ temp.val[1] = vcombine_p16 (val.val[1], vcreate_p16 (__AARCH64_UINT64_C (0)));
+ temp.val[2] = vcombine_p16 (val.val[2], vcreate_p16 (__AARCH64_UINT64_C (0)));
+ __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) temp.val[1], 1);
+ __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) temp.val[2], 2);
+ __builtin_aarch64_st3v4hi ((__builtin_aarch64_simd_hi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_s32 (int32_t * __a, int32x2x3_t val)
+{
+ __builtin_aarch64_simd_ci __o;
+ int32x4x3_t temp;
+ temp.val[0] = vcombine_s32 (val.val[0], vcreate_s32 (__AARCH64_INT64_C (0)));
+ temp.val[1] = vcombine_s32 (val.val[1], vcreate_s32 (__AARCH64_INT64_C (0)));
+ temp.val[2] = vcombine_s32 (val.val[2], vcreate_s32 (__AARCH64_INT64_C (0)));
+ __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) temp.val[1], 1);
+ __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) temp.val[2], 2);
+ __builtin_aarch64_st3v2si ((__builtin_aarch64_simd_si *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_u8 (uint8_t * __a, uint8x8x3_t val)
+{
+ __builtin_aarch64_simd_ci __o;
+ uint8x16x3_t temp;
+ temp.val[0] = vcombine_u8 (val.val[0], vcreate_u8 (__AARCH64_UINT64_C (0)));
+ temp.val[1] = vcombine_u8 (val.val[1], vcreate_u8 (__AARCH64_UINT64_C (0)));
+ temp.val[2] = vcombine_u8 (val.val[2], vcreate_u8 (__AARCH64_UINT64_C (0)));
+ __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) temp.val[1], 1);
+ __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) temp.val[2], 2);
+ __builtin_aarch64_st3v8qi ((__builtin_aarch64_simd_qi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_u16 (uint16_t * __a, uint16x4x3_t val)
+{
+ __builtin_aarch64_simd_ci __o;
+ uint16x8x3_t temp;
+ temp.val[0] = vcombine_u16 (val.val[0], vcreate_u16 (__AARCH64_UINT64_C (0)));
+ temp.val[1] = vcombine_u16 (val.val[1], vcreate_u16 (__AARCH64_UINT64_C (0)));
+ temp.val[2] = vcombine_u16 (val.val[2], vcreate_u16 (__AARCH64_UINT64_C (0)));
+ __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) temp.val[1], 1);
+ __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) temp.val[2], 2);
+ __builtin_aarch64_st3v4hi ((__builtin_aarch64_simd_hi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_u32 (uint32_t * __a, uint32x2x3_t val)
+{
+ __builtin_aarch64_simd_ci __o;
+ uint32x4x3_t temp;
+ temp.val[0] = vcombine_u32 (val.val[0], vcreate_u32 (__AARCH64_UINT64_C (0)));
+ temp.val[1] = vcombine_u32 (val.val[1], vcreate_u32 (__AARCH64_UINT64_C (0)));
+ temp.val[2] = vcombine_u32 (val.val[2], vcreate_u32 (__AARCH64_UINT64_C (0)));
+ __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) temp.val[1], 1);
+ __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) temp.val[2], 2);
+ __builtin_aarch64_st3v2si ((__builtin_aarch64_simd_si *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_f32 (float32_t * __a, float32x2x3_t val)
+{
+ __builtin_aarch64_simd_ci __o;
+ float32x4x3_t temp;
+ temp.val[0] = vcombine_f32 (val.val[0], vcreate_f32 (__AARCH64_UINT64_C (0)));
+ temp.val[1] = vcombine_f32 (val.val[1], vcreate_f32 (__AARCH64_UINT64_C (0)));
+ temp.val[2] = vcombine_f32 (val.val[2], vcreate_f32 (__AARCH64_UINT64_C (0)));
+ __o = __builtin_aarch64_set_qregciv4sf (__o, (float32x4_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregciv4sf (__o, (float32x4_t) temp.val[1], 1);
+ __o = __builtin_aarch64_set_qregciv4sf (__o, (float32x4_t) temp.val[2], 2);
+ __builtin_aarch64_st3v2sf ((__builtin_aarch64_simd_sf *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_s8 (int8_t * __a, int8x16x3_t val)
+{
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) val.val[1], 1);
+ __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) val.val[2], 2);
+ __builtin_aarch64_st3v16qi ((__builtin_aarch64_simd_qi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_p8 (poly8_t * __a, poly8x16x3_t val)
+{
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) val.val[1], 1);
+ __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) val.val[2], 2);
+ __builtin_aarch64_st3v16qi ((__builtin_aarch64_simd_qi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_s16 (int16_t * __a, int16x8x3_t val)
+{
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) val.val[1], 1);
+ __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) val.val[2], 2);
+ __builtin_aarch64_st3v8hi ((__builtin_aarch64_simd_hi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_p16 (poly16_t * __a, poly16x8x3_t val)
+{
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) val.val[1], 1);
+ __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) val.val[2], 2);
+ __builtin_aarch64_st3v8hi ((__builtin_aarch64_simd_hi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_s32 (int32_t * __a, int32x4x3_t val)
+{
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) val.val[1], 1);
+ __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) val.val[2], 2);
+ __builtin_aarch64_st3v4si ((__builtin_aarch64_simd_si *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_s64 (int64_t * __a, int64x2x3_t val)
+{
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) val.val[1], 1);
+ __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) val.val[2], 2);
+ __builtin_aarch64_st3v2di ((__builtin_aarch64_simd_di *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_u8 (uint8_t * __a, uint8x16x3_t val)
+{
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) val.val[1], 1);
+ __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) val.val[2], 2);
+ __builtin_aarch64_st3v16qi ((__builtin_aarch64_simd_qi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_u16 (uint16_t * __a, uint16x8x3_t val)
+{
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) val.val[1], 1);
+ __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) val.val[2], 2);
+ __builtin_aarch64_st3v8hi ((__builtin_aarch64_simd_hi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_u32 (uint32_t * __a, uint32x4x3_t val)
+{
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) val.val[1], 1);
+ __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) val.val[2], 2);
+ __builtin_aarch64_st3v4si ((__builtin_aarch64_simd_si *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_u64 (uint64_t * __a, uint64x2x3_t val)
+{
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) val.val[1], 1);
+ __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) val.val[2], 2);
+ __builtin_aarch64_st3v2di ((__builtin_aarch64_simd_di *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_f32 (float32_t * __a, float32x4x3_t val)
+{
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_set_qregciv4sf (__o, (float32x4_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregciv4sf (__o, (float32x4_t) val.val[1], 1);
+ __o = __builtin_aarch64_set_qregciv4sf (__o, (float32x4_t) val.val[2], 2);
+ __builtin_aarch64_st3v4sf ((__builtin_aarch64_simd_sf *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_f64 (float64_t * __a, float64x2x3_t val)
+{
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_set_qregciv2df (__o, (float64x2_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregciv2df (__o, (float64x2_t) val.val[1], 1);
+ __o = __builtin_aarch64_set_qregciv2df (__o, (float64x2_t) val.val[2], 2);
+ __builtin_aarch64_st3v2df ((__builtin_aarch64_simd_df *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_s64 (int64_t * __a, int64x1x4_t val)
+{
+ __builtin_aarch64_simd_xi __o;
+ int64x2x4_t temp;
+ temp.val[0] = vcombine_s64 (val.val[0], vcreate_s64 (__AARCH64_INT64_C (0)));
+ temp.val[1] = vcombine_s64 (val.val[1], vcreate_s64 (__AARCH64_INT64_C (0)));
+ temp.val[2] = vcombine_s64 (val.val[2], vcreate_s64 (__AARCH64_INT64_C (0)));
+ temp.val[3] = vcombine_s64 (val.val[3], vcreate_s64 (__AARCH64_INT64_C (0)));
+ __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) temp.val[1], 1);
+ __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) temp.val[2], 2);
+ __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) temp.val[3], 3);
+ __builtin_aarch64_st4di ((__builtin_aarch64_simd_di *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_u64 (uint64_t * __a, uint64x1x4_t val)
+{
+ __builtin_aarch64_simd_xi __o;
+ uint64x2x4_t temp;
+ temp.val[0] = vcombine_u64 (val.val[0], vcreate_u64 (__AARCH64_UINT64_C (0)));
+ temp.val[1] = vcombine_u64 (val.val[1], vcreate_u64 (__AARCH64_UINT64_C (0)));
+ temp.val[2] = vcombine_u64 (val.val[2], vcreate_u64 (__AARCH64_UINT64_C (0)));
+ temp.val[3] = vcombine_u64 (val.val[3], vcreate_u64 (__AARCH64_UINT64_C (0)));
+ __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) temp.val[1], 1);
+ __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) temp.val[2], 2);
+ __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) temp.val[3], 3);
+ __builtin_aarch64_st4di ((__builtin_aarch64_simd_di *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_f64 (float64_t * __a, float64x1x4_t val)
+{
+ __builtin_aarch64_simd_xi __o;
+ float64x2x4_t temp;
+ temp.val[0] = vcombine_f64 (val.val[0], vcreate_f64 (__AARCH64_UINT64_C (0)));
+ temp.val[1] = vcombine_f64 (val.val[1], vcreate_f64 (__AARCH64_UINT64_C (0)));
+ temp.val[2] = vcombine_f64 (val.val[2], vcreate_f64 (__AARCH64_UINT64_C (0)));
+ temp.val[3] = vcombine_f64 (val.val[3], vcreate_f64 (__AARCH64_UINT64_C (0)));
+ __o = __builtin_aarch64_set_qregxiv2df (__o, (float64x2_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregxiv2df (__o, (float64x2_t) temp.val[1], 1);
+ __o = __builtin_aarch64_set_qregxiv2df (__o, (float64x2_t) temp.val[2], 2);
+ __o = __builtin_aarch64_set_qregxiv2df (__o, (float64x2_t) temp.val[3], 3);
+ __builtin_aarch64_st4df ((__builtin_aarch64_simd_df *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_s8 (int8_t * __a, int8x8x4_t val)
+{
+ __builtin_aarch64_simd_xi __o;
+ int8x16x4_t temp;
+ temp.val[0] = vcombine_s8 (val.val[0], vcreate_s8 (__AARCH64_INT64_C (0)));
+ temp.val[1] = vcombine_s8 (val.val[1], vcreate_s8 (__AARCH64_INT64_C (0)));
+ temp.val[2] = vcombine_s8 (val.val[2], vcreate_s8 (__AARCH64_INT64_C (0)));
+ temp.val[3] = vcombine_s8 (val.val[3], vcreate_s8 (__AARCH64_INT64_C (0)));
+ __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[1], 1);
+ __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[2], 2);
+ __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[3], 3);
+ __builtin_aarch64_st4v8qi ((__builtin_aarch64_simd_qi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_p8 (poly8_t * __a, poly8x8x4_t val)
+{
+ __builtin_aarch64_simd_xi __o;
+ poly8x16x4_t temp;
+ temp.val[0] = vcombine_p8 (val.val[0], vcreate_p8 (__AARCH64_UINT64_C (0)));
+ temp.val[1] = vcombine_p8 (val.val[1], vcreate_p8 (__AARCH64_UINT64_C (0)));
+ temp.val[2] = vcombine_p8 (val.val[2], vcreate_p8 (__AARCH64_UINT64_C (0)));
+ temp.val[3] = vcombine_p8 (val.val[3], vcreate_p8 (__AARCH64_UINT64_C (0)));
+ __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[1], 1);
+ __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[2], 2);
+ __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[3], 3);
+ __builtin_aarch64_st4v8qi ((__builtin_aarch64_simd_qi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_s16 (int16_t * __a, int16x4x4_t val)
+{
+ __builtin_aarch64_simd_xi __o;
+ int16x8x4_t temp;
+ temp.val[0] = vcombine_s16 (val.val[0], vcreate_s16 (__AARCH64_INT64_C (0)));
+ temp.val[1] = vcombine_s16 (val.val[1], vcreate_s16 (__AARCH64_INT64_C (0)));
+ temp.val[2] = vcombine_s16 (val.val[2], vcreate_s16 (__AARCH64_INT64_C (0)));
+ temp.val[3] = vcombine_s16 (val.val[3], vcreate_s16 (__AARCH64_INT64_C (0)));
+ __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[1], 1);
+ __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[2], 2);
+ __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[3], 3);
+ __builtin_aarch64_st4v4hi ((__builtin_aarch64_simd_hi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_p16 (poly16_t * __a, poly16x4x4_t val)
+{
+ __builtin_aarch64_simd_xi __o;
+ poly16x8x4_t temp;
+ temp.val[0] = vcombine_p16 (val.val[0], vcreate_p16 (__AARCH64_UINT64_C (0)));
+ temp.val[1] = vcombine_p16 (val.val[1], vcreate_p16 (__AARCH64_UINT64_C (0)));
+ temp.val[2] = vcombine_p16 (val.val[2], vcreate_p16 (__AARCH64_UINT64_C (0)));
+ temp.val[3] = vcombine_p16 (val.val[3], vcreate_p16 (__AARCH64_UINT64_C (0)));
+ __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[1], 1);
+ __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[2], 2);
+ __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[3], 3);
+ __builtin_aarch64_st4v4hi ((__builtin_aarch64_simd_hi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_s32 (int32_t * __a, int32x2x4_t val)
+{
+ __builtin_aarch64_simd_xi __o;
+ int32x4x4_t temp;
+ temp.val[0] = vcombine_s32 (val.val[0], vcreate_s32 (__AARCH64_INT64_C (0)));
+ temp.val[1] = vcombine_s32 (val.val[1], vcreate_s32 (__AARCH64_INT64_C (0)));
+ temp.val[2] = vcombine_s32 (val.val[2], vcreate_s32 (__AARCH64_INT64_C (0)));
+ temp.val[3] = vcombine_s32 (val.val[3], vcreate_s32 (__AARCH64_INT64_C (0)));
+ __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) temp.val[1], 1);
+ __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) temp.val[2], 2);
+ __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) temp.val[3], 3);
+ __builtin_aarch64_st4v2si ((__builtin_aarch64_simd_si *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_u8 (uint8_t * __a, uint8x8x4_t val)
+{
+ __builtin_aarch64_simd_xi __o;
+ uint8x16x4_t temp;
+ temp.val[0] = vcombine_u8 (val.val[0], vcreate_u8 (__AARCH64_UINT64_C (0)));
+ temp.val[1] = vcombine_u8 (val.val[1], vcreate_u8 (__AARCH64_UINT64_C (0)));
+ temp.val[2] = vcombine_u8 (val.val[2], vcreate_u8 (__AARCH64_UINT64_C (0)));
+ temp.val[3] = vcombine_u8 (val.val[3], vcreate_u8 (__AARCH64_UINT64_C (0)));
+ __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[1], 1);
+ __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[2], 2);
+ __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[3], 3);
+ __builtin_aarch64_st4v8qi ((__builtin_aarch64_simd_qi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_u16 (uint16_t * __a, uint16x4x4_t val)
+{
+ __builtin_aarch64_simd_xi __o;
+ uint16x8x4_t temp;
+ temp.val[0] = vcombine_u16 (val.val[0], vcreate_u16 (__AARCH64_UINT64_C (0)));
+ temp.val[1] = vcombine_u16 (val.val[1], vcreate_u16 (__AARCH64_UINT64_C (0)));
+ temp.val[2] = vcombine_u16 (val.val[2], vcreate_u16 (__AARCH64_UINT64_C (0)));
+ temp.val[3] = vcombine_u16 (val.val[3], vcreate_u16 (__AARCH64_UINT64_C (0)));
+ __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[1], 1);
+ __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[2], 2);
+ __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[3], 3);
+ __builtin_aarch64_st4v4hi ((__builtin_aarch64_simd_hi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_u32 (uint32_t * __a, uint32x2x4_t val)
+{
+ __builtin_aarch64_simd_xi __o;
+ uint32x4x4_t temp;
+ temp.val[0] = vcombine_u32 (val.val[0], vcreate_u32 (__AARCH64_UINT64_C (0)));
+ temp.val[1] = vcombine_u32 (val.val[1], vcreate_u32 (__AARCH64_UINT64_C (0)));
+ temp.val[2] = vcombine_u32 (val.val[2], vcreate_u32 (__AARCH64_UINT64_C (0)));
+ temp.val[3] = vcombine_u32 (val.val[3], vcreate_u32 (__AARCH64_UINT64_C (0)));
+ __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) temp.val[1], 1);
+ __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) temp.val[2], 2);
+ __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) temp.val[3], 3);
+ __builtin_aarch64_st4v2si ((__builtin_aarch64_simd_si *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_f32 (float32_t * __a, float32x2x4_t val)
+{
+ __builtin_aarch64_simd_xi __o;
+ float32x4x4_t temp;
+ temp.val[0] = vcombine_f32 (val.val[0], vcreate_f32 (__AARCH64_UINT64_C (0)));
+ temp.val[1] = vcombine_f32 (val.val[1], vcreate_f32 (__AARCH64_UINT64_C (0)));
+ temp.val[2] = vcombine_f32 (val.val[2], vcreate_f32 (__AARCH64_UINT64_C (0)));
+ temp.val[3] = vcombine_f32 (val.val[3], vcreate_f32 (__AARCH64_UINT64_C (0)));
+ __o = __builtin_aarch64_set_qregxiv4sf (__o, (float32x4_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregxiv4sf (__o, (float32x4_t) temp.val[1], 1);
+ __o = __builtin_aarch64_set_qregxiv4sf (__o, (float32x4_t) temp.val[2], 2);
+ __o = __builtin_aarch64_set_qregxiv4sf (__o, (float32x4_t) temp.val[3], 3);
+ __builtin_aarch64_st4v2sf ((__builtin_aarch64_simd_sf *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_s8 (int8_t * __a, int8x16x4_t val)
+{
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) val.val[1], 1);
+ __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) val.val[2], 2);
+ __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) val.val[3], 3);
+ __builtin_aarch64_st4v16qi ((__builtin_aarch64_simd_qi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_p8 (poly8_t * __a, poly8x16x4_t val)
+{
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) val.val[1], 1);
+ __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) val.val[2], 2);
+ __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) val.val[3], 3);
+ __builtin_aarch64_st4v16qi ((__builtin_aarch64_simd_qi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_s16 (int16_t * __a, int16x8x4_t val)
+{
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) val.val[1], 1);
+ __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) val.val[2], 2);
+ __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) val.val[3], 3);
+ __builtin_aarch64_st4v8hi ((__builtin_aarch64_simd_hi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_p16 (poly16_t * __a, poly16x8x4_t val)
+{
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) val.val[1], 1);
+ __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) val.val[2], 2);
+ __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) val.val[3], 3);
+ __builtin_aarch64_st4v8hi ((__builtin_aarch64_simd_hi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_s32 (int32_t * __a, int32x4x4_t val)
+{
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) val.val[1], 1);
+ __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) val.val[2], 2);
+ __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) val.val[3], 3);
+ __builtin_aarch64_st4v4si ((__builtin_aarch64_simd_si *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_s64 (int64_t * __a, int64x2x4_t val)
+{
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) val.val[1], 1);
+ __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) val.val[2], 2);
+ __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) val.val[3], 3);
+ __builtin_aarch64_st4v2di ((__builtin_aarch64_simd_di *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_u8 (uint8_t * __a, uint8x16x4_t val)
+{
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) val.val[1], 1);
+ __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) val.val[2], 2);
+ __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) val.val[3], 3);
+ __builtin_aarch64_st4v16qi ((__builtin_aarch64_simd_qi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_u16 (uint16_t * __a, uint16x8x4_t val)
+{
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) val.val[1], 1);
+ __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) val.val[2], 2);
+ __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) val.val[3], 3);
+ __builtin_aarch64_st4v8hi ((__builtin_aarch64_simd_hi *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_u32 (uint32_t * __a, uint32x4x4_t val)
+{
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) val.val[1], 1);
+ __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) val.val[2], 2);
+ __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) val.val[3], 3);
+ __builtin_aarch64_st4v4si ((__builtin_aarch64_simd_si *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_u64 (uint64_t * __a, uint64x2x4_t val)
+{
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) val.val[1], 1);
+ __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) val.val[2], 2);
+ __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) val.val[3], 3);
+ __builtin_aarch64_st4v2di ((__builtin_aarch64_simd_di *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_f32 (float32_t * __a, float32x4x4_t val)
+{
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_set_qregxiv4sf (__o, (float32x4_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregxiv4sf (__o, (float32x4_t) val.val[1], 1);
+ __o = __builtin_aarch64_set_qregxiv4sf (__o, (float32x4_t) val.val[2], 2);
+ __o = __builtin_aarch64_set_qregxiv4sf (__o, (float32x4_t) val.val[3], 3);
+ __builtin_aarch64_st4v4sf ((__builtin_aarch64_simd_sf *) __a, __o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_f64 (float64_t * __a, float64x2x4_t val)
+{
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_set_qregxiv2df (__o, (float64x2_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregxiv2df (__o, (float64x2_t) val.val[1], 1);
+ __o = __builtin_aarch64_set_qregxiv2df (__o, (float64x2_t) val.val[2], 2);
+ __o = __builtin_aarch64_set_qregxiv2df (__o, (float64x2_t) val.val[3], 3);
+ __builtin_aarch64_st4v2df ((__builtin_aarch64_simd_df *) __a, __o);
+}
+
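In the 64-bit (D-register) vst3/vst4 variants above, each half-width vector is first widened with vcombine_* and a zero upper half; this appears to be only packaging for the full-width register tuple that the builtin expects, and the store still writes just the low halves. A short usage sketch of vst4_u8 (store_rgba is a hypothetical helper, not part of this header):

#include <arm_neon.h>

/* Write 8 RGBA pixels from four separate planes; ST4 interleaves the
   lanes, so memory receives R0 G0 B0 A0 R1 G1 B1 A1 ...  */
static void
store_rgba (uint8_t *dst, uint8x8_t r, uint8x8_t g, uint8x8_t b, uint8x8_t a)
{
  uint8x8x4_t px;
  px.val[0] = r;
  px.val[1] = g;
  px.val[2] = b;
  px.val[3] = a;
  vst4_u8 (dst, px);
}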
+/* vsub */
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vsubd_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return __a - __b;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vsubd_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return __a - __b;
+}
+
+/* vtbx1 */
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vtbx1_s8 (int8x8_t __r, int8x8_t __tab, int8x8_t __idx)
+{
+ uint8x8_t __mask = vclt_u8 (vreinterpret_u8_s8 (__idx),
+ vmov_n_u8 (8));
+ int8x8_t __tbl = vtbl1_s8 (__tab, __idx);
+
+ return vbsl_s8 (__mask, __tbl, __r);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtbx1_u8 (uint8x8_t __r, uint8x8_t __tab, uint8x8_t __idx)
+{
+ uint8x8_t __mask = vclt_u8 (__idx, vmov_n_u8 (8));
+ uint8x8_t __tbl = vtbl1_u8 (__tab, __idx);
+
+ return vbsl_u8 (__mask, __tbl, __r);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vtbx1_p8 (poly8x8_t __r, poly8x8_t __tab, uint8x8_t __idx)
+{
+ uint8x8_t __mask = vclt_u8 (__idx, vmov_n_u8 (8));
+ poly8x8_t __tbl = vtbl1_p8 (__tab, __idx);
+
+ return vbsl_p8 (__mask, __tbl, __r);
+}
+
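The vtbx1 wrappers above recover the 32-bit NEON VTBX behaviour on top of a plain table lookup: indices of 8 or more keep the corresponding lane of the fallback vector __r rather than producing zero, which is why the result is selected through a vclt mask and vbsl. A scalar model of the intended semantics (an illustrative sketch, not part of this header):

#include <stdint.h>

/* Per-lane behaviour of vtbx1_u8: out-of-range indices fall back to r.  */
static void
vtbx1_u8_model (uint8_t d[8], const uint8_t r[8],
                const uint8_t tab[8], const uint8_t idx[8])
{
  for (int i = 0; i < 8; i++)
    d[i] = (idx[i] < 8) ? tab[idx[i]] : r[i];
}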
+/* vtbx3 */
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vtbx3_s8 (int8x8_t __r, int8x8x3_t __tab, int8x8_t __idx)
+{
+ uint8x8_t __mask = vclt_u8 (vreinterpret_u8_s8 (__idx),
+ vmov_n_u8 (24));
+ int8x8_t __tbl = vtbl3_s8 (__tab, __idx);
+
+ return vbsl_s8 (__mask, __tbl, __r);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtbx3_u8 (uint8x8_t __r, uint8x8x3_t __tab, uint8x8_t __idx)
+{
+ uint8x8_t __mask = vclt_u8 (__idx, vmov_n_u8 (24));
+ uint8x8_t __tbl = vtbl3_u8 (__tab, __idx);
+
+ return vbsl_u8 (__mask, __tbl, __r);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vtbx3_p8 (poly8x8_t __r, poly8x8x3_t __tab, uint8x8_t __idx)
+{
+ uint8x8_t __mask = vclt_u8 (__idx, vmov_n_u8 (24));
+ poly8x8_t __tbl = vtbl3_p8 (__tab, __idx);
+
+ return vbsl_p8 (__mask, __tbl, __r);
+}
+
+/* vtrn */
+
+__extension__ static __inline float32x2x2_t __attribute__ ((__always_inline__))
+vtrn_f32 (float32x2_t a, float32x2_t b)
+{
+ return (float32x2x2_t) {vtrn1_f32 (a, b), vtrn2_f32 (a, b)};
+}
+
+__extension__ static __inline poly8x8x2_t __attribute__ ((__always_inline__))
+vtrn_p8 (poly8x8_t a, poly8x8_t b)
+{
+ return (poly8x8x2_t) {vtrn1_p8 (a, b), vtrn2_p8 (a, b)};
+}
+
+__extension__ static __inline poly16x4x2_t __attribute__ ((__always_inline__))
+vtrn_p16 (poly16x4_t a, poly16x4_t b)
+{
+ return (poly16x4x2_t) {vtrn1_p16 (a, b), vtrn2_p16 (a, b)};
+}
+
+__extension__ static __inline int8x8x2_t __attribute__ ((__always_inline__))
+vtrn_s8 (int8x8_t a, int8x8_t b)
+{
+ return (int8x8x2_t) {vtrn1_s8 (a, b), vtrn2_s8 (a, b)};
+}
+
+__extension__ static __inline int16x4x2_t __attribute__ ((__always_inline__))
+vtrn_s16 (int16x4_t a, int16x4_t b)
+{
+ return (int16x4x2_t) {vtrn1_s16 (a, b), vtrn2_s16 (a, b)};
+}
+
+__extension__ static __inline int32x2x2_t __attribute__ ((__always_inline__))
+vtrn_s32 (int32x2_t a, int32x2_t b)
+{
+ return (int32x2x2_t) {vtrn1_s32 (a, b), vtrn2_s32 (a, b)};
+}
+
+__extension__ static __inline uint8x8x2_t __attribute__ ((__always_inline__))
+vtrn_u8 (uint8x8_t a, uint8x8_t b)
+{
+ return (uint8x8x2_t) {vtrn1_u8 (a, b), vtrn2_u8 (a, b)};
+}
+
+__extension__ static __inline uint16x4x2_t __attribute__ ((__always_inline__))
+vtrn_u16 (uint16x4_t a, uint16x4_t b)
+{
+ return (uint16x4x2_t) {vtrn1_u16 (a, b), vtrn2_u16 (a, b)};
+}
+
+__extension__ static __inline uint32x2x2_t __attribute__ ((__always_inline__))
+vtrn_u32 (uint32x2_t a, uint32x2_t b)
+{
+ return (uint32x2x2_t) {vtrn1_u32 (a, b), vtrn2_u32 (a, b)};
+}
+
+__extension__ static __inline float32x4x2_t __attribute__ ((__always_inline__))
+vtrnq_f32 (float32x4_t a, float32x4_t b)
+{
+ return (float32x4x2_t) {vtrn1q_f32 (a, b), vtrn2q_f32 (a, b)};
+}
+
+__extension__ static __inline poly8x16x2_t __attribute__ ((__always_inline__))
+vtrnq_p8 (poly8x16_t a, poly8x16_t b)
+{
+ return (poly8x16x2_t) {vtrn1q_p8 (a, b), vtrn2q_p8 (a, b)};
+}
+
+__extension__ static __inline poly16x8x2_t __attribute__ ((__always_inline__))
+vtrnq_p16 (poly16x8_t a, poly16x8_t b)
+{
+ return (poly16x8x2_t) {vtrn1q_p16 (a, b), vtrn2q_p16 (a, b)};
+}
+
+__extension__ static __inline int8x16x2_t __attribute__ ((__always_inline__))
+vtrnq_s8 (int8x16_t a, int8x16_t b)
+{
+ return (int8x16x2_t) {vtrn1q_s8 (a, b), vtrn2q_s8 (a, b)};
+}
+
+__extension__ static __inline int16x8x2_t __attribute__ ((__always_inline__))
+vtrnq_s16 (int16x8_t a, int16x8_t b)
+{
+ return (int16x8x2_t) {vtrn1q_s16 (a, b), vtrn2q_s16 (a, b)};
+}
+
+__extension__ static __inline int32x4x2_t __attribute__ ((__always_inline__))
+vtrnq_s32 (int32x4_t a, int32x4_t b)
+{
+ return (int32x4x2_t) {vtrn1q_s32 (a, b), vtrn2q_s32 (a, b)};
+}
+
+__extension__ static __inline uint8x16x2_t __attribute__ ((__always_inline__))
+vtrnq_u8 (uint8x16_t a, uint8x16_t b)
+{
+ return (uint8x16x2_t) {vtrn1q_u8 (a, b), vtrn2q_u8 (a, b)};
+}
+
+__extension__ static __inline uint16x8x2_t __attribute__ ((__always_inline__))
+vtrnq_u16 (uint16x8_t a, uint16x8_t b)
+{
+ return (uint16x8x2_t) {vtrn1q_u16 (a, b), vtrn2q_u16 (a, b)};
+}
+
+__extension__ static __inline uint32x4x2_t __attribute__ ((__always_inline__))
+vtrnq_u32 (uint32x4_t a, uint32x4_t b)
+{
+ return (uint32x4x2_t) {vtrn1q_u32 (a, b), vtrn2q_u32 (a, b)};
+}
+
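Each vtrn* wrapper above simply pairs the TRN1 and TRN2 results, which interleave the even-numbered and odd-numbered lanes of the two inputs. A small sketch of the expected lane layout (illustrative only):

#include <arm_neon.h>

/* With a = { a0, a1, a2, a3 } and b = { b0, b1, b2, b3 },
   vtrn_s16 (a, b) is expected to give
     val[0] = { a0, b0, a2, b2 }   (TRN1)
     val[1] = { a1, b1, a3, b3 }   (TRN2)  */
static int16x4x2_t
trn_example (int16x4_t a, int16x4_t b)
{
  return vtrn_s16 (a, b);
}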
+/* vtst */
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtst_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (uint8x8_t) __builtin_aarch64_cmtstv8qi (__a, __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vtst_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (uint16x4_t) __builtin_aarch64_cmtstv4hi (__a, __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vtst_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (uint32x2_t) __builtin_aarch64_cmtstv2si (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vtst_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (__a & __b) ? -1ll : 0ll;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtst_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t) __builtin_aarch64_cmtstv8qi ((int8x8_t) __a,
+ (int8x8_t) __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vtst_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t) __builtin_aarch64_cmtstv4hi ((int16x4_t) __a,
+ (int16x4_t) __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vtst_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t) __builtin_aarch64_cmtstv2si ((int32x2_t) __a,
+ (int32x2_t) __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vtst_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return (__a & __b) ? -1ll : 0ll;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vtstq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (uint8x16_t) __builtin_aarch64_cmtstv16qi (__a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vtstq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (uint16x8_t) __builtin_aarch64_cmtstv8hi (__a, __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vtstq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (uint32x4_t) __builtin_aarch64_cmtstv4si (__a, __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vtstq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (uint64x2_t) __builtin_aarch64_cmtstv2di (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vtstq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t) __builtin_aarch64_cmtstv16qi ((int8x16_t) __a,
+ (int8x16_t) __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vtstq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t) __builtin_aarch64_cmtstv8hi ((int16x8_t) __a,
+ (int16x8_t) __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vtstq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t) __builtin_aarch64_cmtstv4si ((int32x4_t) __a,
+ (int32x4_t) __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vtstq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return (uint64x2_t) __builtin_aarch64_cmtstv2di ((int64x2_t) __a,
+ (int64x2_t) __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vtstd_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (__a & __b) ? -1ll : 0ll;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vtstd_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return (__a & __b) ? -1ll : 0ll;
+}
+
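The vtst* intrinsics above are a per-lane "test bits" compare (CMTST): a result lane is all ones when the AND of the corresponding input lanes is non-zero, and zero otherwise; the scalar 64-bit variants compute the same predicate directly in C. A one-lane scalar model (illustrative sketch, not part of this header):

#include <stdint.h>

/* One lane of vtst_s8: all-ones if any common bit is set.  */
static uint8_t
vtst_lane_model (int8_t a, int8_t b)
{
  return (a & b) ? 0xff : 0x00;
}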
+/* vuqadd */
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vuqadd_s8 (int8x8_t __a, uint8x8_t __b)
+{
+ return (int8x8_t) __builtin_aarch64_suqaddv8qi (__a, (int8x8_t) __b);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vuqadd_s16 (int16x4_t __a, uint16x4_t __b)
+{
+ return (int16x4_t) __builtin_aarch64_suqaddv4hi (__a, (int16x4_t) __b);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vuqadd_s32 (int32x2_t __a, uint32x2_t __b)
+{
+ return (int32x2_t) __builtin_aarch64_suqaddv2si (__a, (int32x2_t) __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vuqadd_s64 (int64x1_t __a, uint64x1_t __b)
+{
+ return (int64x1_t) __builtin_aarch64_suqadddi (__a, (int64x1_t) __b);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vuqaddq_s8 (int8x16_t __a, uint8x16_t __b)
+{
+ return (int8x16_t) __builtin_aarch64_suqaddv16qi (__a, (int8x16_t) __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vuqaddq_s16 (int16x8_t __a, uint16x8_t __b)
+{
+ return (int16x8_t) __builtin_aarch64_suqaddv8hi (__a, (int16x8_t) __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vuqaddq_s32 (int32x4_t __a, uint32x4_t __b)
+{
+ return (int32x4_t) __builtin_aarch64_suqaddv4si (__a, (int32x4_t) __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vuqaddq_s64 (int64x2_t __a, uint64x2_t __b)
+{
+ return (int64x2_t) __builtin_aarch64_suqaddv2di (__a, (int64x2_t) __b);
+}
+
+__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
+vuqaddb_s8 (int8x1_t __a, uint8x1_t __b)
+{
+ return (int8x1_t) __builtin_aarch64_suqaddqi (__a, (int8x1_t) __b);
+}
+
+__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
+vuqaddh_s16 (int16x1_t __a, uint16x1_t __b)
+{
+ return (int16x1_t) __builtin_aarch64_suqaddhi (__a, (int16x1_t) __b);
+}
+
+__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
+vuqadds_s32 (int32x1_t __a, uint32x1_t __b)
+{
+ return (int32x1_t) __builtin_aarch64_suqaddsi (__a, (int32x1_t) __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vuqaddd_s64 (int64x1_t __a, uint64x1_t __b)
+{
+ return (int64x1_t) __builtin_aarch64_suqadddi (__a, (int64x1_t) __b);
+}
+
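vuqadd* maps to SUQADD, a signed saturating accumulate of an unsigned operand: the unsigned value is added to the signed accumulator and the result clamps to the signed range instead of wrapping. A scalar model for the 8-bit case (illustrative sketch; instruction selection is up to the compiler):

#include <stdint.h>

/* Model of vuqaddb_s8: e.g. 100 + 200u saturates to 127.  */
static int8_t
suqadd_model (int8_t a, uint8_t b)
{
  int sum = (int) a + (int) b;
  return sum > INT8_MAX ? INT8_MAX : (int8_t) sum;
}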
+#define __DEFINTERLEAVE(op, rettype, intype, funcsuffix, Q) \
+ __extension__ static __inline rettype \
+ __attribute__ ((__always_inline__)) \
+ v ## op ## Q ## _ ## funcsuffix (intype a, intype b) \
+ { \
+ return (rettype) {v ## op ## 1 ## Q ## _ ## funcsuffix (a, b), \
+ v ## op ## 2 ## Q ## _ ## funcsuffix (a, b)}; \
+ }
+
+#define __INTERLEAVE_LIST(op) \
+ __DEFINTERLEAVE (op, float32x2x2_t, float32x2_t, f32,) \
+ __DEFINTERLEAVE (op, poly8x8x2_t, poly8x8_t, p8,) \
+ __DEFINTERLEAVE (op, poly16x4x2_t, poly16x4_t, p16,) \
+ __DEFINTERLEAVE (op, int8x8x2_t, int8x8_t, s8,) \
+ __DEFINTERLEAVE (op, int16x4x2_t, int16x4_t, s16,) \
+ __DEFINTERLEAVE (op, int32x2x2_t, int32x2_t, s32,) \
+ __DEFINTERLEAVE (op, uint8x8x2_t, uint8x8_t, u8,) \
+ __DEFINTERLEAVE (op, uint16x4x2_t, uint16x4_t, u16,) \
+ __DEFINTERLEAVE (op, uint32x2x2_t, uint32x2_t, u32,) \
+ __DEFINTERLEAVE (op, float32x4x2_t, float32x4_t, f32, q) \
+ __DEFINTERLEAVE (op, poly8x16x2_t, poly8x16_t, p8, q) \
+ __DEFINTERLEAVE (op, poly16x8x2_t, poly16x8_t, p16, q) \
+ __DEFINTERLEAVE (op, int8x16x2_t, int8x16_t, s8, q) \
+ __DEFINTERLEAVE (op, int16x8x2_t, int16x8_t, s16, q) \
+ __DEFINTERLEAVE (op, int32x4x2_t, int32x4_t, s32, q) \
+ __DEFINTERLEAVE (op, uint8x16x2_t, uint8x16_t, u8, q) \
+ __DEFINTERLEAVE (op, uint16x8x2_t, uint16x8_t, u16, q) \
+ __DEFINTERLEAVE (op, uint32x4x2_t, uint32x4_t, u32, q)
+
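Each __DEFINTERLEAVE entry in __INTERLEAVE_LIST expands to one two-result wrapper built from the corresponding *1/*2 intrinsics. For example, the s8 entry of the uzp instantiation just below expands to roughly the following (a preprocessing sketch, shown only for orientation):

__extension__ static __inline int8x8x2_t
__attribute__ ((__always_inline__))
vuzp_s8 (int8x8_t a, int8x8_t b)
{
  return (int8x8x2_t) {vuzp1_s8 (a, b), vuzp2_s8 (a, b)};
}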
+/* vuzp */
+
+__INTERLEAVE_LIST (uzp)
+
+/* vzip */
+
+__INTERLEAVE_LIST (zip)
+
+#undef __INTERLEAVE_LIST
+#undef __DEFINTERLEAVE
+
+/* End of optimal implementations in approved order. */
+
+#undef __aarch64_vget_lane_any
+#undef __aarch64_vget_lane_f32
+#undef __aarch64_vget_lane_f64
+#undef __aarch64_vget_lane_p8
+#undef __aarch64_vget_lane_p16
+#undef __aarch64_vget_lane_s8
+#undef __aarch64_vget_lane_s16
+#undef __aarch64_vget_lane_s32
+#undef __aarch64_vget_lane_s64
+#undef __aarch64_vget_lane_u8
+#undef __aarch64_vget_lane_u16
+#undef __aarch64_vget_lane_u32
+#undef __aarch64_vget_lane_u64
+
+#undef __aarch64_vgetq_lane_f32
+#undef __aarch64_vgetq_lane_f64
+#undef __aarch64_vgetq_lane_p8
+#undef __aarch64_vgetq_lane_p16
+#undef __aarch64_vgetq_lane_s8
+#undef __aarch64_vgetq_lane_s16
+#undef __aarch64_vgetq_lane_s32
+#undef __aarch64_vgetq_lane_s64
+#undef __aarch64_vgetq_lane_u8
+#undef __aarch64_vgetq_lane_u16
+#undef __aarch64_vgetq_lane_u32
+#undef __aarch64_vgetq_lane_u64
+
+#undef __aarch64_vdup_lane_any
+#undef __aarch64_vdup_lane_f32
+#undef __aarch64_vdup_lane_f64
+#undef __aarch64_vdup_lane_p8
+#undef __aarch64_vdup_lane_p16
+#undef __aarch64_vdup_lane_s8
+#undef __aarch64_vdup_lane_s16
+#undef __aarch64_vdup_lane_s32
+#undef __aarch64_vdup_lane_s64
+#undef __aarch64_vdup_lane_u8
+#undef __aarch64_vdup_lane_u16
+#undef __aarch64_vdup_lane_u32
+#undef __aarch64_vdup_lane_u64
+#undef __aarch64_vdup_laneq_f32
+#undef __aarch64_vdup_laneq_f64
+#undef __aarch64_vdup_laneq_p8
+#undef __aarch64_vdup_laneq_p16
+#undef __aarch64_vdup_laneq_s8
+#undef __aarch64_vdup_laneq_s16
+#undef __aarch64_vdup_laneq_s32
+#undef __aarch64_vdup_laneq_s64
+#undef __aarch64_vdup_laneq_u8
+#undef __aarch64_vdup_laneq_u16
+#undef __aarch64_vdup_laneq_u32
+#undef __aarch64_vdup_laneq_u64
+#undef __aarch64_vdupq_lane_f32
+#undef __aarch64_vdupq_lane_f64
+#undef __aarch64_vdupq_lane_p8
+#undef __aarch64_vdupq_lane_p16
+#undef __aarch64_vdupq_lane_s8
+#undef __aarch64_vdupq_lane_s16
+#undef __aarch64_vdupq_lane_s32
+#undef __aarch64_vdupq_lane_s64
+#undef __aarch64_vdupq_lane_u8
+#undef __aarch64_vdupq_lane_u16
+#undef __aarch64_vdupq_lane_u32
+#undef __aarch64_vdupq_lane_u64
+#undef __aarch64_vdupq_laneq_f32
+#undef __aarch64_vdupq_laneq_f64
+#undef __aarch64_vdupq_laneq_p8
+#undef __aarch64_vdupq_laneq_p16
+#undef __aarch64_vdupq_laneq_s8
+#undef __aarch64_vdupq_laneq_s16
+#undef __aarch64_vdupq_laneq_s32
+#undef __aarch64_vdupq_laneq_s64
+#undef __aarch64_vdupq_laneq_u8
+#undef __aarch64_vdupq_laneq_u16
+#undef __aarch64_vdupq_laneq_u32
+#undef __aarch64_vdupq_laneq_u64
+
+#endif
diff --git a/gcc-4.9/gcc/config/aarch64/atomics.md b/gcc-4.9/gcc/config/aarch64/atomics.md
new file mode 100644
index 000000000..bffa465de
--- /dev/null
+++ b/gcc-4.9/gcc/config/aarch64/atomics.md
@@ -0,0 +1,382 @@
+;; Machine description for AArch64 processor synchronization primitives.
+;; Copyright (C) 2009-2014 Free Software Foundation, Inc.
+;; Contributed by ARM Ltd.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful, but
+;; WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;; General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+(define_c_enum "unspecv"
+ [
+ UNSPECV_LX ; Represent a load-exclusive.
+ UNSPECV_SX ; Represent a store-exclusive.
+ UNSPECV_LDA ; Represent an atomic load or load-acquire.
+ UNSPECV_STL ; Represent an atomic store or store-release.
+ UNSPECV_ATOMIC_CMPSW ; Represent an atomic compare swap.
+ UNSPECV_ATOMIC_EXCHG ; Represent an atomic exchange.
+ UNSPECV_ATOMIC_OP ; Represent an atomic operation.
+])
+
+(define_expand "atomic_compare_and_swap<mode>"
+ [(match_operand:SI 0 "register_operand" "") ;; bool out
+ (match_operand:ALLI 1 "register_operand" "") ;; val out
+ (match_operand:ALLI 2 "aarch64_sync_memory_operand" "") ;; memory
+ (match_operand:ALLI 3 "general_operand" "") ;; expected
+ (match_operand:ALLI 4 "register_operand" "") ;; desired
+ (match_operand:SI 5 "const_int_operand") ;; is_weak
+ (match_operand:SI 6 "const_int_operand") ;; mod_s
+ (match_operand:SI 7 "const_int_operand")] ;; mod_f
+ ""
+ {
+ aarch64_expand_compare_and_swap (operands);
+ DONE;
+ }
+)
+
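This expander is the entry point reached from the generic __atomic builtins; the mode-specific _1 patterns that follow are split after reload into an exclusive-load/store retry loop. A C-level view of what it implements (standard GCC builtin, shown only for orientation):

/* Attempt to take a simple spinlock; expected to be expanded through
   atomic_compare_and_swap<mode> on this port.  */
static int
try_lock (int *lock)
{
  int expected = 0;
  return __atomic_compare_exchange_n (lock, &expected, 1,
                                      0 /* strong */,
                                      __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}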
+(define_insn_and_split "atomic_compare_and_swap<mode>_1"
+ [(set (reg:CC CC_REGNUM) ;; bool out
+ (unspec_volatile:CC [(const_int 0)] UNSPECV_ATOMIC_CMPSW))
+ (set (match_operand:SI 0 "register_operand" "=&r") ;; val out
+ (zero_extend:SI
+ (match_operand:SHORT 1 "aarch64_sync_memory_operand" "+Q"))) ;; memory
+ (set (match_dup 1)
+ (unspec_volatile:SHORT
+ [(match_operand:SI 2 "aarch64_plus_operand" "rI") ;; expected
+ (match_operand:SHORT 3 "register_operand" "r") ;; desired
+ (match_operand:SI 4 "const_int_operand") ;; is_weak
+ (match_operand:SI 5 "const_int_operand") ;; mod_s
+ (match_operand:SI 6 "const_int_operand")] ;; mod_f
+ UNSPECV_ATOMIC_CMPSW))
+ (clobber (match_scratch:SI 7 "=&r"))]
+ ""
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+ {
+ aarch64_split_compare_and_swap (operands);
+ DONE;
+ }
+)
+
+(define_insn_and_split "atomic_compare_and_swap<mode>_1"
+ [(set (reg:CC CC_REGNUM) ;; bool out
+ (unspec_volatile:CC [(const_int 0)] UNSPECV_ATOMIC_CMPSW))
+ (set (match_operand:GPI 0 "register_operand" "=&r") ;; val out
+ (match_operand:GPI 1 "aarch64_sync_memory_operand" "+Q")) ;; memory
+ (set (match_dup 1)
+ (unspec_volatile:GPI
+ [(match_operand:GPI 2 "aarch64_plus_operand" "rI") ;; expect
+ (match_operand:GPI 3 "register_operand" "r") ;; desired
+ (match_operand:SI 4 "const_int_operand") ;; is_weak
+ (match_operand:SI 5 "const_int_operand") ;; mod_s
+ (match_operand:SI 6 "const_int_operand")] ;; mod_f
+ UNSPECV_ATOMIC_CMPSW))
+ (clobber (match_scratch:SI 7 "=&r"))]
+ ""
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+ {
+ aarch64_split_compare_and_swap (operands);
+ DONE;
+ }
+)
+
+(define_insn_and_split "atomic_exchange<mode>"
+ [(set (match_operand:ALLI 0 "register_operand" "=&r") ;; output
+ (match_operand:ALLI 1 "aarch64_sync_memory_operand" "+Q")) ;; memory
+ (set (match_dup 1)
+ (unspec_volatile:ALLI
+ [(match_operand:ALLI 2 "register_operand" "r") ;; input
+ (match_operand:SI 3 "const_int_operand" "")] ;; model
+ UNSPECV_ATOMIC_EXCHG))
+ (clobber (reg:CC CC_REGNUM))
+ (clobber (match_scratch:SI 4 "=&r"))]
+ ""
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+ {
+ aarch64_split_atomic_op (SET, operands[0], NULL, operands[1],
+ operands[2], operands[3], operands[4]);
+ DONE;
+ }
+)
+
+(define_insn_and_split "atomic_<atomic_optab><mode>"
+ [(set (match_operand:ALLI 0 "aarch64_sync_memory_operand" "+Q")
+ (unspec_volatile:ALLI
+ [(atomic_op:ALLI (match_dup 0)
+ (match_operand:ALLI 1 "<atomic_op_operand>" "rn"))
+ (match_operand:SI 2 "const_int_operand")] ;; model
+ UNSPECV_ATOMIC_OP))
+ (clobber (reg:CC CC_REGNUM))
+ (clobber (match_scratch:ALLI 3 "=&r"))
+ (clobber (match_scratch:SI 4 "=&r"))]
+ ""
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+ {
+ aarch64_split_atomic_op (<CODE>, NULL, operands[3], operands[0],
+ operands[1], operands[2], operands[4]);
+ DONE;
+ }
+)
+
+(define_insn_and_split "atomic_nand<mode>"
+ [(set (match_operand:ALLI 0 "aarch64_sync_memory_operand" "+Q")
+ (unspec_volatile:ALLI
+ [(not:ALLI
+ (and:ALLI (match_dup 0)
+ (match_operand:ALLI 1 "aarch64_logical_operand" "rn")))
+ (match_operand:SI 2 "const_int_operand")] ;; model
+ UNSPECV_ATOMIC_OP))
+ (clobber (reg:CC CC_REGNUM))
+ (clobber (match_scratch:ALLI 3 "=&r"))
+ (clobber (match_scratch:SI 4 "=&r"))]
+ ""
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+ {
+ aarch64_split_atomic_op (NOT, NULL, operands[3], operands[0],
+ operands[1], operands[2], operands[4]);
+ DONE;
+ }
+)
+
+(define_insn_and_split "atomic_fetch_<atomic_optab><mode>"
+ [(set (match_operand:ALLI 0 "register_operand" "=&r")
+ (match_operand:ALLI 1 "aarch64_sync_memory_operand" "+Q"))
+ (set (match_dup 1)
+ (unspec_volatile:ALLI
+ [(atomic_op:ALLI (match_dup 1)
+ (match_operand:ALLI 2 "<atomic_op_operand>" "rn"))
+ (match_operand:SI 3 "const_int_operand")] ;; model
+ UNSPECV_ATOMIC_OP))
+ (clobber (reg:CC CC_REGNUM))
+ (clobber (match_scratch:ALLI 4 "=&r"))
+ (clobber (match_scratch:SI 5 "=&r"))]
+ ""
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+ {
+ aarch64_split_atomic_op (<CODE>, operands[0], operands[4], operands[1],
+ operands[2], operands[3], operands[5]);
+ DONE;
+ }
+)
+
+(define_insn_and_split "atomic_fetch_nand<mode>"
+ [(set (match_operand:ALLI 0 "register_operand" "=&r")
+ (match_operand:ALLI 1 "aarch64_sync_memory_operand" "+Q"))
+ (set (match_dup 1)
+ (unspec_volatile:ALLI
+ [(not:ALLI
+ (and:ALLI (match_dup 1)
+ (match_operand:ALLI 2 "aarch64_logical_operand" "rn")))
+ (match_operand:SI 3 "const_int_operand")] ;; model
+ UNSPECV_ATOMIC_OP))
+ (clobber (reg:CC CC_REGNUM))
+ (clobber (match_scratch:ALLI 4 "=&r"))
+ (clobber (match_scratch:SI 5 "=&r"))]
+ ""
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+ {
+ aarch64_split_atomic_op (NOT, operands[0], operands[4], operands[1],
+ operands[2], operands[3], operands[5]);
+ DONE;
+ }
+)
+
+(define_insn_and_split "atomic_<atomic_optab>_fetch<mode>"
+ [(set (match_operand:ALLI 0 "register_operand" "=&r")
+ (atomic_op:ALLI
+ (match_operand:ALLI 1 "aarch64_sync_memory_operand" "+Q")
+ (match_operand:ALLI 2 "<atomic_op_operand>" "rn")))
+ (set (match_dup 1)
+ (unspec_volatile:ALLI
+ [(match_dup 1) (match_dup 2)
+ (match_operand:SI 3 "const_int_operand")] ;; model
+ UNSPECV_ATOMIC_OP))
+ (clobber (reg:CC CC_REGNUM))
+ (clobber (match_scratch:SI 4 "=&r"))]
+ ""
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+ {
+ aarch64_split_atomic_op (<CODE>, NULL, operands[0], operands[1],
+ operands[2], operands[3], operands[4]);
+ DONE;
+ }
+)
+
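The atomic_fetch_<op> and atomic_<op>_fetch patterns above differ only in which value ends up in operand 0: the value read before the operation or the value written back. At the C level (illustrative sketch):

/* Both forms expand through the patterns above into a
   load-exclusive / operate / store-exclusive retry loop.  */
static int
bump_twice (int *counter)
{
  int old_val = __atomic_fetch_add (counter, 1, __ATOMIC_RELAXED); /* value before */
  int new_val = __atomic_add_fetch (counter, 1, __ATOMIC_RELAXED); /* value after  */
  return old_val + new_val;
}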
+(define_insn_and_split "atomic_nand_fetch<mode>"
+ [(set (match_operand:ALLI 0 "register_operand" "=&r")
+ (not:ALLI
+ (and:ALLI
+ (match_operand:ALLI 1 "aarch64_sync_memory_operand" "+Q")
+ (match_operand:ALLI 2 "aarch64_logical_operand" "rn"))))
+ (set (match_dup 1)
+ (unspec_volatile:ALLI
+ [(match_dup 1) (match_dup 2)
+ (match_operand:SI 3 "const_int_operand")] ;; model
+ UNSPECV_ATOMIC_OP))
+ (clobber (reg:CC CC_REGNUM))
+ (clobber (match_scratch:SI 4 "=&r"))]
+ ""
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+ {
+ aarch64_split_atomic_op (NOT, NULL, operands[0], operands[1],
+ operands[2], operands[3], operands[4]);
+ DONE;
+ }
+)
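+
+;; The RMW patterns above all match as a single "#" placeholder and are
+;; split after reload: aarch64_split_atomic_op expands each one into a
+;; load-exclusive/store-exclusive retry loop.  As an illustrative sketch
+;; (register choice and the acquire/release forms depend on the memory
+;; model operand), a C caller such as
+;;
+;;   #include <stdatomic.h>
+;;   int add_one (atomic_int *p)
+;;   {
+;;     return atomic_fetch_add_explicit (p, 1, memory_order_relaxed);
+;;   }
+;;
+;; is expected to become a loop of roughly this shape:
+;;
+;;   1:  ldxr  w0, [x1]       ; load old value exclusively
+;;       add   w2, w0, #1     ; apply the operation
+;;       stxr  w3, w2, [x1]   ; try to store, w3 = status
+;;       cbnz  w3, 1b         ; retry if the exclusive store failed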
+
+(define_insn "atomic_load<mode>"
+ [(set (match_operand:ALLI 0 "register_operand" "=r")
+ (unspec_volatile:ALLI
+ [(match_operand:ALLI 1 "aarch64_sync_memory_operand" "Q")
+ (match_operand:SI 2 "const_int_operand")] ;; model
+ UNSPECV_LDA))]
+ ""
+ {
+ enum memmodel model = (enum memmodel) INTVAL (operands[2]);
+ if (model == MEMMODEL_RELAXED
+ || model == MEMMODEL_CONSUME
+ || model == MEMMODEL_RELEASE)
+ return "ldr<atomic_sfx>\t%<w>0, %1";
+ else
+ return "ldar<atomic_sfx>\t%<w>0, %1";
+ }
+)
+
+(define_insn "atomic_store<mode>"
+ [(set (match_operand:ALLI 0 "memory_operand" "=Q")
+ (unspec_volatile:ALLI
+ [(match_operand:ALLI 1 "general_operand" "rZ")
+ (match_operand:SI 2 "const_int_operand")] ;; model
+ UNSPECV_STL))]
+ ""
+ {
+ enum memmodel model = (enum memmodel) INTVAL (operands[2]);
+ if (model == MEMMODEL_RELAXED
+ || model == MEMMODEL_CONSUME
+ || model == MEMMODEL_ACQUIRE)
+ return "str<atomic_sfx>\t%<w>1, %0";
+ else
+ return "stlr<atomic_sfx>\t%<w>1, %0";
+ }
+)
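+
+;; Taken together with atomic_load above, the model checks mean (as an
+;; informal summary, not an exhaustive mapping): relaxed accesses keep the
+;; plain LDR/STR forms, while acquire loads, release stores and seq_cst
+;; accesses use LDAR/STLR.  For example:
+;;
+;;   #include <stdatomic.h>
+;;   int  load_acq (atomic_int *p)          { return atomic_load_explicit (p, memory_order_acquire); }  /* ldar */
+;;   int  load_rlx (atomic_int *p)          { return atomic_load_explicit (p, memory_order_relaxed); }  /* ldr  */
+;;   void store_rel (atomic_int *p, int v)  { atomic_store_explicit (p, v, memory_order_release); }     /* stlr */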
+
+(define_insn "aarch64_load_exclusive<mode>"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (zero_extend:SI
+ (unspec_volatile:SHORT
+ [(match_operand:SHORT 1 "aarch64_sync_memory_operand" "Q")
+ (match_operand:SI 2 "const_int_operand")]
+ UNSPECV_LX)))]
+ ""
+ {
+ enum memmodel model = (enum memmodel) INTVAL (operands[2]);
+ if (model == MEMMODEL_RELAXED
+ || model == MEMMODEL_CONSUME
+ || model == MEMMODEL_RELEASE)
+ return "ldxr<atomic_sfx>\t%w0, %1";
+ else
+ return "ldaxr<atomic_sfx>\t%w0, %1";
+ }
+)
+
+(define_insn "aarch64_load_exclusive<mode>"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (unspec_volatile:GPI
+ [(match_operand:GPI 1 "aarch64_sync_memory_operand" "Q")
+ (match_operand:SI 2 "const_int_operand")]
+ UNSPECV_LX))]
+ ""
+ {
+ enum memmodel model = (enum memmodel) INTVAL (operands[2]);
+ if (model == MEMMODEL_RELAXED
+ || model == MEMMODEL_CONSUME
+ || model == MEMMODEL_RELEASE)
+ return "ldxr\t%<w>0, %1";
+ else
+ return "ldaxr\t%<w>0, %1";
+ }
+)
+
+(define_insn "aarch64_store_exclusive<mode>"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec_volatile:SI [(const_int 0)] UNSPECV_SX))
+ (set (match_operand:ALLI 1 "aarch64_sync_memory_operand" "=Q")
+ (unspec_volatile:ALLI
+ [(match_operand:ALLI 2 "register_operand" "r")
+ (match_operand:SI 3 "const_int_operand")]
+ UNSPECV_SX))]
+ ""
+ {
+ enum memmodel model = (enum memmodel) INTVAL (operands[3]);
+ if (model == MEMMODEL_RELAXED
+ || model == MEMMODEL_CONSUME
+ || model == MEMMODEL_ACQUIRE)
+ return "stxr<atomic_sfx>\t%w0, %<w>2, %1";
+ else
+ return "stlxr<atomic_sfx>\t%w0, %<w>2, %1";
+ }
+)
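+
+;; Note: operand 0 of the store-exclusive pattern receives the STXR/STLXR
+;; status result: zero if the store succeeded, nonzero if the exclusive
+;; monitor was lost and the surrounding loop must retry.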
+
+(define_expand "mem_thread_fence"
+ [(match_operand:SI 0 "const_int_operand" "")]
+ ""
+ {
+ enum memmodel model = (enum memmodel) INTVAL (operands[0]);
+ if (model != MEMMODEL_RELAXED && model != MEMMODEL_CONSUME)
+ emit_insn (gen_dmb (operands[0]));
+ DONE;
+ }
+)
+
+(define_expand "dmb"
+ [(set (match_dup 1)
+ (unspec:BLK [(match_dup 1) (match_operand:SI 0 "const_int_operand")]
+ UNSPEC_MB))]
+ ""
+ {
+ operands[1] = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
+ MEM_VOLATILE_P (operands[1]) = 1;
+ }
+)
+
+(define_insn "*dmb"
+ [(set (match_operand:BLK 0 "" "")
+ (unspec:BLK [(match_dup 0) (match_operand:SI 1 "const_int_operand")]
+ UNSPEC_MB))]
+ ""
+ {
+ enum memmodel model = (enum memmodel) INTVAL (operands[1]);
+ if (model == MEMMODEL_ACQUIRE)
+ return "dmb\\tishld";
+ else
+ return "dmb\\tish";
+ }
+)
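+
+;; For illustration, the fence expander above means that a C11 fence such as
+;;
+;;   #include <stdatomic.h>
+;;   void fence_acq (void) { atomic_thread_fence (memory_order_acquire); }  /* dmb ishld */
+;;   void fence_sc  (void) { atomic_thread_fence (memory_order_seq_cst); }  /* dmb ish   */
+;;
+;; emits a DMB, while memory_order_relaxed and memory_order_consume fences
+;; emit no barrier at all.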
diff --git a/gcc-4.9/gcc/config/aarch64/biarchilp32.h b/gcc-4.9/gcc/config/aarch64/biarchilp32.h
new file mode 100644
index 000000000..579673ced
--- /dev/null
+++ b/gcc-4.9/gcc/config/aarch64/biarchilp32.h
@@ -0,0 +1,29 @@
+/* Make the configure-generated files produce a biarch compiler defaulting
+   to the ILP32 ABI.  This file must be included first; the OS-specific
+   file is included later so that it can override any defaults that would
+   otherwise be wrong.
+ Copyright (C) 2013-2014 Free Software Foundation, Inc.
+ Contributed by ARM Ltd.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#define AARCH64_ABI_DEFAULT AARCH64_ABI_ILP32
+#define TARGET_DATA_MODEL 2
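+
+/* Illustrative note: ILP32 makes int, long and pointers all 32 bits wide,
+   whereas the LP64 default selected by biarchlp64.h keeps int at 32 bits
+   and widens long and pointers to 64 bits.  A hypothetical sanity check
+   for the ILP32 case:
+
+     _Static_assert (sizeof (int) == 4, "int is 32 bits");
+     _Static_assert (sizeof (long) == 4, "long is 32 bits");
+     _Static_assert (sizeof (void *) == 4, "pointers are 32 bits");  */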
diff --git a/gcc-4.9/gcc/config/aarch64/biarchlp64.h b/gcc-4.9/gcc/config/aarch64/biarchlp64.h
new file mode 100644
index 000000000..03dd35508
--- /dev/null
+++ b/gcc-4.9/gcc/config/aarch64/biarchlp64.h
@@ -0,0 +1,29 @@
+/* Make the configure-generated files produce a biarch compiler defaulting
+   to the LP64 ABI.  This file must be included first; the OS-specific
+   file is included later so that it can override any defaults that would
+   otherwise be wrong.
+ Copyright (C) 2013-2014 Free Software Foundation, Inc.
+ Contributed by ARM Ltd.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#define AARCH64_ABI_DEFAULT AARCH64_ABI_LP64
+#define TARGET_DATA_MODEL 1
diff --git a/gcc-4.9/gcc/config/aarch64/constraints.md b/gcc-4.9/gcc/config/aarch64/constraints.md
new file mode 100644
index 000000000..12ab570c0
--- /dev/null
+++ b/gcc-4.9/gcc/config/aarch64/constraints.md
@@ -0,0 +1,188 @@
+;; Machine description for AArch64 architecture.
+;; Copyright (C) 2009-2014 Free Software Foundation, Inc.
+;; Contributed by ARM Ltd.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful, but
+;; WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;; General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+(define_register_constraint "k" "STACK_REG"
+ "@internal The stack register.")
+
+(define_register_constraint "w" "FP_REGS"
+ "Floating point and SIMD vector registers.")
+
+(define_register_constraint "x" "FP_LO_REGS"
+ "Floating point and SIMD vector registers V0 - V15.")
+
+(define_constraint "I"
+ "A constant that can be used with an ADD operation."
+ (and (match_code "const_int")
+ (match_test "aarch64_uimm12_shift (ival)")))
+
+(define_constraint "J"
+ "A constant that can be used with a SUB operation (once negated)."
+ (and (match_code "const_int")
+ (match_test "aarch64_uimm12_shift (-ival)")))
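+
+;; As an informal example of the two constraints above: "I" accepts the
+;; immediates that an ADD instruction can encode directly (a 12-bit value,
+;; optionally shifted left by 12 bits), so code such as
+;;
+;;   long f (long x) { return x + 4095; }      /* add x0, x0, 4095 */
+;;   long g (long x) { return x + 0xfff000; }  /* add x0, x0, 4095, lsl 12 */
+;;
+;; needs only a single ADD, while "J" accepts the same range for the
+;; negated value so that a SUB can be used instead.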
+
+;; We can't use the mode of a CONST_INT to determine the context in
+;; which it is being used, so we must have a separate constraint for
+;; each context.
+
+(define_constraint "K"
+ "A constant that can be used with a 32-bit logical operation."
+ (and (match_code "const_int")
+ (match_test "aarch64_bitmask_imm (ival, SImode)")))
+
+(define_constraint "L"
+ "A constant that can be used with a 64-bit logical operation."
+ (and (match_code "const_int")
+ (match_test "aarch64_bitmask_imm (ival, DImode)")))
+
+(define_constraint "M"
+ "A constant that can be used with a 32-bit MOV immediate operation."
+ (and (match_code "const_int")
+ (match_test "aarch64_move_imm (ival, SImode)")))
+
+(define_constraint "N"
+ "A constant that can be used with a 64-bit MOV immediate operation."
+ (and (match_code "const_int")
+ (match_test "aarch64_move_imm (ival, DImode)")))
+
+(define_constraint "S"
+ "A constraint that matches an absolute symbolic address."
+ (and (match_code "const,symbol_ref,label_ref")
+ (match_test "aarch64_symbolic_address_p (op)")))
+
+(define_constraint "Y"
+ "Floating point constant zero."
+ (and (match_code "const_double")
+ (match_test "aarch64_float_const_zero_rtx_p (op)")))
+
+(define_constraint "Z"
+ "Integer constant zero."
+ (match_test "op == const0_rtx"))
+
+(define_constraint "Ush"
+ "A constraint that matches an absolute symbolic address high part."
+ (and (match_code "high")
+ (match_test "aarch64_valid_symref (XEXP (op, 0), GET_MODE (XEXP (op, 0)))")))
+
+(define_constraint "Uss"
+ "@internal
+ A constraint that matches an immediate shift constant in SImode."
+ (and (match_code "const_int")
+ (match_test "(unsigned HOST_WIDE_INT) ival < 32")))
+
+(define_constraint "Usd"
+ "@internal
+ A constraint that matches an immediate shift constant in DImode."
+ (and (match_code "const_int")
+ (match_test "(unsigned HOST_WIDE_INT) ival < 64")))
+
+(define_constraint "UsM"
+ "@internal
+ A constraint that matches the immediate constant -1."
+ (match_test "op == constm1_rtx"))
+
+(define_constraint "Ui1"
+ "@internal
+ A constraint that matches the immediate constant +1."
+ (match_test "op == const1_rtx"))
+
+(define_constraint "Ui3"
+ "@internal
+ A constraint that matches the integers 0...4."
+ (and (match_code "const_int")
+ (match_test "(unsigned HOST_WIDE_INT) ival <= 4")))
+
+(define_constraint "Up3"
+ "@internal
+ A constraint that matches the integers 2^(0...4)."
+ (and (match_code "const_int")
+ (match_test "(unsigned) exact_log2 (ival) <= 4")))
+
+(define_memory_constraint "Q"
+ "A memory address which uses a single base register with no offset."
+ (and (match_code "mem")
+ (match_test "REG_P (XEXP (op, 0))")))
+
+(define_memory_constraint "Ump"
+ "@internal
+ A memory address suitable for a load/store pair operation."
+ (and (match_code "mem")
+ (match_test "aarch64_legitimate_address_p (GET_MODE (op), XEXP (op, 0),
+ PARALLEL, 1)")))
+
+(define_memory_constraint "Utv"
+ "@internal
+ An address valid for loading/storing opaque structure
+ types wider than TImode."
+ (and (match_code "mem")
+ (match_test "aarch64_simd_mem_operand_p (op)")))
+
+(define_constraint "Ufc"
+ "A floating point constant which can be used with an\
+ FMOV immediate operation."
+ (and (match_code "const_double")
+ (match_test "aarch64_float_const_representable_p (op)")))
+
+(define_constraint "Dn"
+ "@internal
+  A constraint that matches a vector of immediates.
+ (and (match_code "const_vector")
+ (match_test "aarch64_simd_valid_immediate (op, GET_MODE (op),
+ false, NULL)")))
+
+(define_constraint "Dh"
+ "@internal
+ A constraint that matches an immediate operand valid for\
+ AdvSIMD scalar move in HImode."
+ (and (match_code "const_int")
+ (match_test "aarch64_simd_scalar_immediate_valid_for_move (op,
+ HImode)")))
+
+(define_constraint "Dq"
+ "@internal
+ A constraint that matches an immediate operand valid for\
+ AdvSIMD scalar move in QImode."
+ (and (match_code "const_int")
+ (match_test "aarch64_simd_scalar_immediate_valid_for_move (op,
+ QImode)")))
+
+(define_constraint "Dl"
+ "@internal
+  A constraint that matches a vector of immediates for left shifts.
+ (and (match_code "const_vector")
+ (match_test "aarch64_simd_shift_imm_p (op, GET_MODE (op),
+ true)")))
+
+(define_constraint "Dr"
+ "@internal
+  A constraint that matches a vector of immediates for right shifts.
+ (and (match_code "const_vector")
+ (match_test "aarch64_simd_shift_imm_p (op, GET_MODE (op),
+						false)")))
+
+(define_constraint "Dz"
+ "@internal
+  A constraint that matches a vector of zero immediates.
+ (and (match_code "const_vector")
+ (match_test "aarch64_simd_imm_zero_p (op, GET_MODE (op))")))
+
+(define_constraint "Dd"
+ "@internal
+ A constraint that matches an immediate operand valid for AdvSIMD scalar."
+ (and (match_code "const_int")
+ (match_test "aarch64_simd_imm_scalar_p (op, GET_MODE (op))")))
diff --git a/gcc-4.9/gcc/config/aarch64/gentune.sh b/gcc-4.9/gcc/config/aarch64/gentune.sh
new file mode 100644
index 000000000..c0f2e794f
--- /dev/null
+++ b/gcc-4.9/gcc/config/aarch64/gentune.sh
@@ -0,0 +1,32 @@
+#!/bin/sh
+#
+# Copyright (C) 2011-2014 Free Software Foundation, Inc.
+# Contributed by ARM Ltd.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+# Generate aarch64-tune.md, a file containing the tune attribute from the list of
+# CPUs in aarch64-cores.def
+
+echo ";; -*- buffer-read-only: t -*-"
+echo ";; Generated automatically by gentune.sh from aarch64-cores.def"
+
+allcores=`awk -F'[(, ]+' '/^AARCH64_CORE/ { cores = cores$3"," } END { print cores } ' $1`
+
+echo "(define_attr \"tune\""
+echo " \"$allcores\"" | sed -e 's/,"$/"/'
+echo " (const (symbol_ref \"((enum attr_tune) aarch64_tune)\")))"
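+
+# For reference: if aarch64-cores.def lists, for instance, cortex-a53 and
+# cortex-a57 with the identifiers cortexa53 and cortexa57 (the exact set
+# and names come from that file), the generated aarch64-tune.md is
+# expected to look roughly like:
+#
+#   ;; -*- buffer-read-only: t -*-
+#   ;; Generated automatically by gentune.sh from aarch64-cores.def
+#   (define_attr "tune"
+#     "cortexa53,cortexa57"
+#     (const (symbol_ref "((enum attr_tune) aarch64_tune)")))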
diff --git a/gcc-4.9/gcc/config/aarch64/iterators.md b/gcc-4.9/gcc/config/aarch64/iterators.md
new file mode 100644
index 000000000..f1339b8cc
--- /dev/null
+++ b/gcc-4.9/gcc/config/aarch64/iterators.md
@@ -0,0 +1,997 @@
+;; Machine description for AArch64 architecture.
+;; Copyright (C) 2009-2014 Free Software Foundation, Inc.
+;; Contributed by ARM Ltd.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful, but
+;; WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;; General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; -------------------------------------------------------------------
+;; Mode Iterators
+;; -------------------------------------------------------------------
+
+
+;; Iterator for General Purpose Integer registers (32- and 64-bit modes)
+(define_mode_iterator GPI [SI DI])
+
+;; Iterator for QI and HI modes
+(define_mode_iterator SHORT [QI HI])
+
+;; Iterator for all integer modes (up to 64-bit)
+(define_mode_iterator ALLI [QI HI SI DI])
+
+;; Iterator for scalar modes (up to 64-bit)
+(define_mode_iterator SDQ_I [QI HI SI DI])
+
+;; Iterator for all integer modes that can be extended (up to 64-bit)
+(define_mode_iterator ALLX [QI HI SI])
+
+;; Iterator for General Purpose Floating-point registers (32- and 64-bit modes)
+(define_mode_iterator GPF [SF DF])
+
+;; Integer vector modes.
+(define_mode_iterator VDQ [V8QI V16QI V4HI V8HI V2SI V4SI V2DI])
+
+;; Integer vector modes.
+(define_mode_iterator VDQ_I [V8QI V16QI V4HI V8HI V2SI V4SI V2DI])
+
+;; vector and scalar, 64 & 128-bit container, all integer modes
+(define_mode_iterator VSDQ_I [V8QI V16QI V4HI V8HI V2SI V4SI V2DI QI HI SI DI])
+
+;; vector and scalar, 64 & 128-bit container: all vector integer modes;
+;; 64-bit scalar integer mode
+(define_mode_iterator VSDQ_I_DI [V8QI V16QI V4HI V8HI V2SI V4SI V2DI DI])
+
+;; Double vector modes.
+(define_mode_iterator VD [V8QI V4HI V2SI V2SF])
+
+;; vector, 64-bit container, all integer modes
+(define_mode_iterator VD_BHSI [V8QI V4HI V2SI])
+
+;; 128 and 64-bit container; 8, 16, 32-bit vector integer modes
+(define_mode_iterator VDQ_BHSI [V8QI V16QI V4HI V8HI V2SI V4SI])
+
+;; Quad vector modes.
+(define_mode_iterator VQ [V16QI V8HI V4SI V2DI V4SF V2DF])
+
+;; All vector modes, except double.
+(define_mode_iterator VQ_S [V8QI V16QI V4HI V8HI V2SI V4SI])
+
+;; Vector and scalar, 64 & 128-bit container: all vector integer modes;
+;; 8, 16, 32-bit scalar integer modes
+(define_mode_iterator VSDQ_I_BHSI [V8QI V16QI V4HI V8HI V2SI V4SI V2DI QI HI SI])
+
+;; Vector modes for moves.
+(define_mode_iterator VDQM [V8QI V16QI V4HI V8HI V2SI V4SI])
+
+;; This mode iterator allows :P to be used for patterns that operate on
+;; addresses in different modes. In LP64, only DI will match, while in
+;; ILP32, either can match.
+(define_mode_iterator P [(SI "ptr_mode == SImode || Pmode == SImode")
+ (DI "ptr_mode == DImode || Pmode == DImode")])
+
+;; This mode iterator allows :PTR to be used for patterns that operate on
+;; pointer-sized quantities. Exactly one of the two alternatives will match.
+(define_mode_iterator PTR [(SI "ptr_mode == SImode") (DI "ptr_mode == DImode")])
+
+;; Vector Float modes.
+(define_mode_iterator VDQF [V2SF V4SF V2DF])
+
+;; Vector single Float modes.
+(define_mode_iterator VDQSF [V2SF V4SF])
+
+;; Modes suitable to use as the return type of a vcond expression.
+(define_mode_iterator VDQF_COND [V2SF V2SI V4SF V4SI V2DF V2DI])
+
+;; All Float modes.
+(define_mode_iterator VALLF [V2SF V4SF V2DF SF DF])
+
+;; Vector Float modes with 2 elements.
+(define_mode_iterator V2F [V2SF V2DF])
+
+;; All modes.
+(define_mode_iterator VALL [V8QI V16QI V4HI V8HI V2SI V4SI V2DI V2SF V4SF V2DF])
+
+;; All vector modes and DI.
+(define_mode_iterator VALLDI [V8QI V16QI V4HI V8HI V2SI V4SI V2DI V2SF V4SF V2DF DI])
+
+;; All vector modes and DI and DF.
+(define_mode_iterator VALLDIF [V8QI V16QI V4HI V8HI V2SI V4SI
+ V2DI V2SF V4SF V2DF DI DF])
+
+;; Vector modes for Integer reduction across lanes.
+(define_mode_iterator VDQV [V8QI V16QI V4HI V8HI V4SI V2DI])
+
+;; Vector modes (except V2DI) for Integer reduction across lanes.
+(define_mode_iterator VDQV_S [V8QI V16QI V4HI V8HI V4SI])
+
+;; All double integer narrow-able modes.
+(define_mode_iterator VDN [V4HI V2SI DI])
+
+;; All quad integer narrow-able modes.
+(define_mode_iterator VQN [V8HI V4SI V2DI])
+
+;; All double integer widen-able modes.
+(define_mode_iterator VDW [V8QI V4HI V2SI])
+
+;; Vector and scalar 128-bit container: narrowable 16, 32, 64-bit integer modes
+(define_mode_iterator VSQN_HSDI [V8HI V4SI V2DI HI SI DI])
+
+;; All quad integer widen-able modes.
+(define_mode_iterator VQW [V16QI V8HI V4SI])
+
+;; Double vector modes for combines.
+(define_mode_iterator VDC [V8QI V4HI V2SI V2SF DI DF])
+
+;; Double vector modes for combines.
+(define_mode_iterator VDIC [V8QI V4HI V2SI])
+
+;; Double vector modes.
+(define_mode_iterator VD_RE [V8QI V4HI V2SI DI DF V2SF])
+
+;; Vector modes except double int.
+(define_mode_iterator VDQIF [V8QI V16QI V4HI V8HI V2SI V4SI V2SF V4SF V2DF])
+
+;; Vector modes for Q and H types.
+(define_mode_iterator VDQQH [V8QI V16QI V4HI V8HI])
+
+;; Vector modes for H and S types.
+(define_mode_iterator VDQHS [V4HI V8HI V2SI V4SI])
+
+;; Vector modes for Q, H and S types.
+(define_mode_iterator VDQQHS [V8QI V16QI V4HI V8HI V2SI V4SI])
+
+;; Vector and scalar integer modes for H and S
+(define_mode_iterator VSDQ_HSI [V4HI V8HI V2SI V4SI HI SI])
+
+;; Vector and scalar 64-bit container: 16, 32-bit integer modes
+(define_mode_iterator VSD_HSI [V4HI V2SI HI SI])
+
+;; Vector 64-bit container: 16, 32-bit integer modes
+(define_mode_iterator VD_HSI [V4HI V2SI])
+
+;; Scalar 64-bit container: 16, 32-bit integer modes
+(define_mode_iterator SD_HSI [HI SI])
+
+;; Vector 128-bit container: 16, 32-bit integer modes
+(define_mode_iterator VQ_HSI [V8HI V4SI])
+
+;; All byte modes.
+(define_mode_iterator VB [V8QI V16QI])
+
+(define_mode_iterator TX [TI TF])
+
+;; Opaque structure modes.
+(define_mode_iterator VSTRUCT [OI CI XI])
+
+;; Double scalar modes
+(define_mode_iterator DX [DI DF])
+
+;; Modes available for <f>mul lane operations.
+(define_mode_iterator VMUL [V4HI V8HI V2SI V4SI V2SF V4SF V2DF])
+
+;; Modes available for <f>mul lane operations changing lane count.
+(define_mode_iterator VMUL_CHANGE_NLANES [V4HI V8HI V2SI V4SI V2SF V4SF])
+
+;; ------------------------------------------------------------------
+;; Unspec enumerations for Advanced SIMD.  These could well go into
+;; aarch64.md but for their use in the int iterators defined here.
+;; ------------------------------------------------------------------
+
+(define_c_enum "unspec"
+ [
+    UNSPEC_ASHIFT_SIGNED	; Used in aarch64-simd.md.
+ UNSPEC_ASHIFT_UNSIGNED ; Used in aarch64-simd.md.
+ UNSPEC_FMAX ; Used in aarch64-simd.md.
+ UNSPEC_FMAXNMV ; Used in aarch64-simd.md.
+ UNSPEC_FMAXV ; Used in aarch64-simd.md.
+ UNSPEC_FMIN ; Used in aarch64-simd.md.
+ UNSPEC_FMINNMV ; Used in aarch64-simd.md.
+ UNSPEC_FMINV ; Used in aarch64-simd.md.
+ UNSPEC_FADDV ; Used in aarch64-simd.md.
+ UNSPEC_SADDV ; Used in aarch64-simd.md.
+ UNSPEC_UADDV ; Used in aarch64-simd.md.
+ UNSPEC_SMAXV ; Used in aarch64-simd.md.
+ UNSPEC_SMINV ; Used in aarch64-simd.md.
+ UNSPEC_UMAXV ; Used in aarch64-simd.md.
+ UNSPEC_UMINV ; Used in aarch64-simd.md.
+ UNSPEC_SHADD ; Used in aarch64-simd.md.
+ UNSPEC_UHADD ; Used in aarch64-simd.md.
+ UNSPEC_SRHADD ; Used in aarch64-simd.md.
+ UNSPEC_URHADD ; Used in aarch64-simd.md.
+ UNSPEC_SHSUB ; Used in aarch64-simd.md.
+ UNSPEC_UHSUB ; Used in aarch64-simd.md.
+ UNSPEC_SRHSUB ; Used in aarch64-simd.md.
+ UNSPEC_URHSUB ; Used in aarch64-simd.md.
+ UNSPEC_ADDHN ; Used in aarch64-simd.md.
+ UNSPEC_RADDHN ; Used in aarch64-simd.md.
+ UNSPEC_SUBHN ; Used in aarch64-simd.md.
+ UNSPEC_RSUBHN ; Used in aarch64-simd.md.
+ UNSPEC_ADDHN2 ; Used in aarch64-simd.md.
+ UNSPEC_RADDHN2 ; Used in aarch64-simd.md.
+ UNSPEC_SUBHN2 ; Used in aarch64-simd.md.
+ UNSPEC_RSUBHN2 ; Used in aarch64-simd.md.
+ UNSPEC_SQDMULH ; Used in aarch64-simd.md.
+ UNSPEC_SQRDMULH ; Used in aarch64-simd.md.
+ UNSPEC_PMUL ; Used in aarch64-simd.md.
+ UNSPEC_USQADD ; Used in aarch64-simd.md.
+ UNSPEC_SUQADD ; Used in aarch64-simd.md.
+ UNSPEC_SQXTUN ; Used in aarch64-simd.md.
+ UNSPEC_SQXTN ; Used in aarch64-simd.md.
+ UNSPEC_UQXTN ; Used in aarch64-simd.md.
+ UNSPEC_SSRA ; Used in aarch64-simd.md.
+ UNSPEC_USRA ; Used in aarch64-simd.md.
+ UNSPEC_SRSRA ; Used in aarch64-simd.md.
+ UNSPEC_URSRA ; Used in aarch64-simd.md.
+ UNSPEC_SRSHR ; Used in aarch64-simd.md.
+ UNSPEC_URSHR ; Used in aarch64-simd.md.
+ UNSPEC_SQSHLU ; Used in aarch64-simd.md.
+ UNSPEC_SQSHL ; Used in aarch64-simd.md.
+ UNSPEC_UQSHL ; Used in aarch64-simd.md.
+ UNSPEC_SQSHRUN ; Used in aarch64-simd.md.
+ UNSPEC_SQRSHRUN ; Used in aarch64-simd.md.
+ UNSPEC_SQSHRN ; Used in aarch64-simd.md.
+ UNSPEC_UQSHRN ; Used in aarch64-simd.md.
+ UNSPEC_SQRSHRN ; Used in aarch64-simd.md.
+ UNSPEC_UQRSHRN ; Used in aarch64-simd.md.
+ UNSPEC_SSHL ; Used in aarch64-simd.md.
+ UNSPEC_USHL ; Used in aarch64-simd.md.
+ UNSPEC_SRSHL ; Used in aarch64-simd.md.
+ UNSPEC_URSHL ; Used in aarch64-simd.md.
+ UNSPEC_SQRSHL ; Used in aarch64-simd.md.
+ UNSPEC_UQRSHL ; Used in aarch64-simd.md.
+ UNSPEC_SSLI ; Used in aarch64-simd.md.
+ UNSPEC_USLI ; Used in aarch64-simd.md.
+ UNSPEC_SSRI ; Used in aarch64-simd.md.
+ UNSPEC_USRI ; Used in aarch64-simd.md.
+ UNSPEC_SSHLL ; Used in aarch64-simd.md.
+ UNSPEC_USHLL ; Used in aarch64-simd.md.
+ UNSPEC_ADDP ; Used in aarch64-simd.md.
+ UNSPEC_TBL ; Used in vector permute patterns.
+ UNSPEC_CONCAT ; Used in vector permute patterns.
+ UNSPEC_ZIP1 ; Used in vector permute patterns.
+ UNSPEC_ZIP2 ; Used in vector permute patterns.
+ UNSPEC_UZP1 ; Used in vector permute patterns.
+ UNSPEC_UZP2 ; Used in vector permute patterns.
+ UNSPEC_TRN1 ; Used in vector permute patterns.
+ UNSPEC_TRN2 ; Used in vector permute patterns.
+ UNSPEC_AESE ; Used in aarch64-simd.md.
+ UNSPEC_AESD ; Used in aarch64-simd.md.
+ UNSPEC_AESMC ; Used in aarch64-simd.md.
+ UNSPEC_AESIMC ; Used in aarch64-simd.md.
+ UNSPEC_SHA1C ; Used in aarch64-simd.md.
+ UNSPEC_SHA1M ; Used in aarch64-simd.md.
+ UNSPEC_SHA1P ; Used in aarch64-simd.md.
+ UNSPEC_SHA1H ; Used in aarch64-simd.md.
+ UNSPEC_SHA1SU0 ; Used in aarch64-simd.md.
+ UNSPEC_SHA1SU1 ; Used in aarch64-simd.md.
+ UNSPEC_SHA256H ; Used in aarch64-simd.md.
+ UNSPEC_SHA256H2 ; Used in aarch64-simd.md.
+ UNSPEC_SHA256SU0 ; Used in aarch64-simd.md.
+ UNSPEC_SHA256SU1 ; Used in aarch64-simd.md.
+ UNSPEC_PMULL ; Used in aarch64-simd.md.
+ UNSPEC_PMULL2 ; Used in aarch64-simd.md.
+])
+
+;; -------------------------------------------------------------------
+;; Mode attributes
+;; -------------------------------------------------------------------
+
+;; In GPI templates, a string like "%<w>0" will expand to "%w0" in the
+;; 32-bit version and "%x0" in the 64-bit version.
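+;; For instance, an output template "add\t%<w>0, %<w>1, %<w>2" prints as
+;; "add w0, w1, w2" when the iterator stands for SImode and as
+;; "add x0, x1, x2" for DImode (register numbers chosen only for example).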
+(define_mode_attr w [(QI "w") (HI "w") (SI "w") (DI "x") (SF "s") (DF "d")])
+
+;; For constraints used in scalar immediate vector moves
+(define_mode_attr hq [(HI "h") (QI "q")])
+
+;; For scalar usage of vector/FP registers
+(define_mode_attr v [(QI "b") (HI "h") (SI "s") (DI "d")
+ (SF "s") (DF "d")
+ (V8QI "") (V16QI "")
+ (V4HI "") (V8HI "")
+ (V2SI "") (V4SI "")
+ (V2DI "") (V2SF "")
+ (V4SF "") (V2DF "")])
+
+;; For scalar usage of vector/FP registers, narrowing
+(define_mode_attr vn2 [(QI "") (HI "b") (SI "h") (DI "s")
+ (V8QI "") (V16QI "")
+ (V4HI "") (V8HI "")
+ (V2SI "") (V4SI "")
+ (V2DI "") (V2SF "")
+ (V4SF "") (V2DF "")])
+
+;; For scalar usage of vector/FP registers, widening
+(define_mode_attr vw2 [(DI "") (QI "h") (HI "s") (SI "d")
+ (V8QI "") (V16QI "")
+ (V4HI "") (V8HI "")
+ (V2SI "") (V4SI "")
+ (V2DI "") (V2SF "")
+ (V4SF "") (V2DF "")])
+
+;; Register Type Name and Vector Arrangement Specifier for when
+;; we are doing scalar for DI and SIMD for SI (ignoring all but
+;; lane 0).
+(define_mode_attr rtn [(DI "d") (SI "")])
+(define_mode_attr vas [(DI "") (SI ".2s")])
+
+;; Map a floating point mode to the appropriate register name prefix
+(define_mode_attr s [(SF "s") (DF "d")])
+
+;; Give the length suffix letter for a sign- or zero-extension.
+(define_mode_attr size [(QI "b") (HI "h") (SI "w")])
+
+;; Give the number of bits in the mode
+(define_mode_attr sizen [(QI "8") (HI "16") (SI "32") (DI "64")])
+
+;; Give the ordinal of the MSB in the mode
+(define_mode_attr sizem1 [(QI "#7") (HI "#15") (SI "#31") (DI "#63")])
+
+;; Attribute to describe constants acceptable in logical operations
+(define_mode_attr lconst [(SI "K") (DI "L")])
+
+;; Map a mode to a specific constraint character.
+(define_mode_attr cmode [(QI "q") (HI "h") (SI "s") (DI "d")])
+
+(define_mode_attr Vtype [(V8QI "8b") (V16QI "16b")
+ (V4HI "4h") (V8HI "8h")
+ (V2SI "2s") (V4SI "4s")
+ (DI "1d") (DF "1d")
+ (V2DI "2d") (V2SF "2s")
+ (V4SF "4s") (V2DF "2d")])
+
+(define_mode_attr Vmtype [(V8QI ".8b") (V16QI ".16b")
+ (V4HI ".4h") (V8HI ".8h")
+ (V2SI ".2s") (V4SI ".4s")
+ (V2DI ".2d") (V2SF ".2s")
+ (V4SF ".4s") (V2DF ".2d")
+ (DI "") (SI "")
+ (HI "") (QI "")
+ (TI "") (SF "")
+ (DF "")])
+
+;; Register suffix narrowed modes for VQN.
+(define_mode_attr Vmntype [(V8HI ".8b") (V4SI ".4h")
+ (V2DI ".2s")
+ (DI "") (SI "")
+ (HI "")])
+
+;; Mode-to-individual element type mapping.
+(define_mode_attr Vetype [(V8QI "b") (V16QI "b")
+ (V4HI "h") (V8HI "h")
+ (V2SI "s") (V4SI "s")
+ (V2DI "d") (V2SF "s")
+ (V4SF "s") (V2DF "d")
+ (SF "s") (DF "d")
+ (QI "b") (HI "h")
+ (SI "s") (DI "d")])
+
+;; Mode-to-bitwise operation type mapping.
+(define_mode_attr Vbtype [(V8QI "8b") (V16QI "16b")
+ (V4HI "8b") (V8HI "16b")
+ (V2SI "8b") (V4SI "16b")
+ (V2DI "16b") (V2SF "8b")
+ (V4SF "16b") (V2DF "16b")
+ (DI "8b") (DF "8b")])
+
+;; Define element mode for each vector mode.
+(define_mode_attr VEL [(V8QI "QI") (V16QI "QI")
+ (V4HI "HI") (V8HI "HI")
+ (V2SI "SI") (V4SI "SI")
+ (DI "DI") (V2DI "DI")
+ (V2SF "SF") (V4SF "SF")
+ (V2DF "DF") (DF "DF")
+ (SI "SI") (HI "HI")
+ (QI "QI")])
+
+;; Define container mode for lane selection.
+(define_mode_attr VCOND [(V4HI "V4HI") (V8HI "V4HI")
+ (V2SI "V2SI") (V4SI "V2SI")
+ (DI "DI") (V2DI "DI")
+ (V2SF "V2SF") (V4SF "V2SF")
+ (V2DF "DF")])
+
+;; Define container mode for lane selection.
+(define_mode_attr VCONQ [(V8QI "V16QI") (V16QI "V16QI")
+ (V4HI "V8HI") (V8HI "V8HI")
+ (V2SI "V4SI") (V4SI "V4SI")
+ (DI "V2DI") (V2DI "V2DI")
+ (V2SF "V2SF") (V4SF "V4SF")
+ (V2DF "V2DF") (SI "V4SI")
+ (HI "V8HI") (QI "V16QI")])
+
+;; Define container mode for lane selection.
+(define_mode_attr VCON [(V8QI "V16QI") (V16QI "V16QI")
+ (V4HI "V8HI") (V8HI "V8HI")
+ (V2SI "V4SI") (V4SI "V4SI")
+ (DI "V2DI") (V2DI "V2DI")
+ (V2SF "V4SF") (V4SF "V4SF")
+ (V2DF "V2DF") (SI "V4SI")
+ (HI "V8HI") (QI "V16QI")])
+
+;; Half modes of all vector modes.
+(define_mode_attr VHALF [(V8QI "V4QI") (V16QI "V8QI")
+ (V4HI "V2HI") (V8HI "V4HI")
+ (V2SI "SI") (V4SI "V2SI")
+ (V2DI "DI") (V2SF "SF")
+ (V4SF "V2SF") (V2DF "DF")])
+
+;; Double modes of vector modes.
+(define_mode_attr VDBL [(V8QI "V16QI") (V4HI "V8HI")
+ (V2SI "V4SI") (V2SF "V4SF")
+ (SI "V2SI") (DI "V2DI")
+ (DF "V2DF")])
+
+;; Double modes of vector modes (lower case).
+(define_mode_attr Vdbl [(V8QI "v16qi") (V4HI "v8hi")
+ (V2SI "v4si") (V2SF "v4sf")
+ (SI "v2si") (DI "v2di")
+ (DF "v2df")])
+
+;; Narrowed modes for VDN.
+(define_mode_attr VNARROWD [(V4HI "V8QI") (V2SI "V4HI")
+ (DI "V2SI")])
+
+;; Narrowed double-modes for VQN (Used for XTN).
+(define_mode_attr VNARROWQ [(V8HI "V8QI") (V4SI "V4HI")
+ (V2DI "V2SI")
+ (DI "SI") (SI "HI")
+ (HI "QI")])
+
+;; Narrowed quad-modes for VQN (Used for XTN2).
+(define_mode_attr VNARROWQ2 [(V8HI "V16QI") (V4SI "V8HI")
+ (V2DI "V4SI")])
+
+;; Register suffix narrowed modes for VQN.
+(define_mode_attr Vntype [(V8HI "8b") (V4SI "4h")
+ (V2DI "2s")])
+
+;; Register suffix narrowed modes for VQN.
+(define_mode_attr V2ntype [(V8HI "16b") (V4SI "8h")
+ (V2DI "4s")])
+
+;; Widened modes of vector modes.
+(define_mode_attr VWIDE [(V8QI "V8HI") (V4HI "V4SI")
+ (V2SI "V2DI") (V16QI "V8HI")
+ (V8HI "V4SI") (V4SI "V2DI")
+ (HI "SI") (SI "DI")]
+)
+
+;; Widened mode register suffixes for VDW/VQW.
+(define_mode_attr Vwtype [(V8QI "8h") (V4HI "4s")
+ (V2SI "2d") (V16QI "8h")
+ (V8HI "4s") (V4SI "2d")])
+
+;; Widened mode register suffixes for VDW/VQW.
+(define_mode_attr Vmwtype [(V8QI ".8h") (V4HI ".4s")
+ (V2SI ".2d") (V16QI ".8h")
+ (V8HI ".4s") (V4SI ".2d")
+ (SI "") (HI "")])
+
+;; Lower part register suffixes for VQW.
+(define_mode_attr Vhalftype [(V16QI "8b") (V8HI "4h")
+ (V4SI "2s")])
+
+;; Define corresponding core/FP element mode for each vector mode.
+(define_mode_attr vw [(V8QI "w") (V16QI "w")
+ (V4HI "w") (V8HI "w")
+ (V2SI "w") (V4SI "w")
+ (DI "x") (V2DI "x")
+ (V2SF "s") (V4SF "s")
+ (V2DF "d")])
+
+;; Corresponding core element mode for each vector mode. This is a
+;; variation on <vw> mapping FP modes to GP regs.
+(define_mode_attr vwcore [(V8QI "w") (V16QI "w")
+ (V4HI "w") (V8HI "w")
+ (V2SI "w") (V4SI "w")
+ (DI "x") (V2DI "x")
+ (V2SF "w") (V4SF "w")
+ (V2DF "x")])
+
+;; Double vector types for ALLX.
+(define_mode_attr Vallxd [(QI "8b") (HI "4h") (SI "2s")])
+
+;; Mode of result of comparison operations.
+(define_mode_attr V_cmp_result [(V8QI "V8QI") (V16QI "V16QI")
+ (V4HI "V4HI") (V8HI "V8HI")
+ (V2SI "V2SI") (V4SI "V4SI")
+ (DI "DI") (V2DI "V2DI")
+ (V2SF "V2SI") (V4SF "V4SI")
+ (V2DF "V2DI") (DF "DI")
+ (SF "SI")])
+
+;; Lower case mode of results of comparison operations.
+(define_mode_attr v_cmp_result [(V8QI "v8qi") (V16QI "v16qi")
+ (V4HI "v4hi") (V8HI "v8hi")
+ (V2SI "v2si") (V4SI "v4si")
+ (DI "di") (V2DI "v2di")
+ (V2SF "v2si") (V4SF "v4si")
+ (V2DF "v2di") (DF "di")
+ (SF "si")])
+
+;; Vm for lane instructions is restricted to FP_LO_REGS.
+(define_mode_attr vwx [(V4HI "x") (V8HI "x") (HI "x")
+ (V2SI "w") (V4SI "w") (SI "w")])
+
+(define_mode_attr Vendreg [(OI "T") (CI "U") (XI "V")])
+
+(define_mode_attr nregs [(OI "2") (CI "3") (XI "4")])
+
+(define_mode_attr VRL2 [(V8QI "V32QI") (V4HI "V16HI")
+ (V2SI "V8SI") (V2SF "V8SF")
+ (DI "V4DI") (DF "V4DF")
+ (V16QI "V32QI") (V8HI "V16HI")
+ (V4SI "V8SI") (V4SF "V8SF")
+ (V2DI "V4DI") (V2DF "V4DF")])
+
+(define_mode_attr VRL3 [(V8QI "V48QI") (V4HI "V24HI")
+ (V2SI "V12SI") (V2SF "V12SF")
+ (DI "V6DI") (DF "V6DF")
+ (V16QI "V48QI") (V8HI "V24HI")
+ (V4SI "V12SI") (V4SF "V12SF")
+ (V2DI "V6DI") (V2DF "V6DF")])
+
+(define_mode_attr VRL4 [(V8QI "V64QI") (V4HI "V32HI")
+ (V2SI "V16SI") (V2SF "V16SF")
+ (DI "V8DI") (DF "V8DF")
+ (V16QI "V64QI") (V8HI "V32HI")
+ (V4SI "V16SI") (V4SF "V16SF")
+ (V2DI "V8DI") (V2DF "V8DF")])
+
+(define_mode_attr VSTRUCT_DREG [(OI "TI") (CI "EI") (XI "OI")])
+
+;; Mode for atomic operation suffixes
+(define_mode_attr atomic_sfx
+ [(QI "b") (HI "h") (SI "") (DI "")])
+
+(define_mode_attr fcvt_target [(V2DF "v2di") (V4SF "v4si") (V2SF "v2si")])
+(define_mode_attr FCVT_TARGET [(V2DF "V2DI") (V4SF "V4SI") (V2SF "V2SI")])
+
+(define_mode_attr VSWAP_WIDTH [(V8QI "V16QI") (V16QI "V8QI")
+ (V4HI "V8HI") (V8HI "V4HI")
+ (V2SI "V4SI") (V4SI "V2SI")
+ (DI "V2DI") (V2DI "DI")
+ (V2SF "V4SF") (V4SF "V2SF")
+ (DF "V2DF") (V2DF "DF")])
+
+(define_mode_attr vswap_width_name [(V8QI "to_128") (V16QI "to_64")
+ (V4HI "to_128") (V8HI "to_64")
+ (V2SI "to_128") (V4SI "to_64")
+ (DI "to_128") (V2DI "to_64")
+ (V2SF "to_128") (V4SF "to_64")
+ (DF "to_128") (V2DF "to_64")])
+
+;; For certain vector-by-element multiplication instructions we must
+;; constrain the HI cases to use only V0-V15. This is covered by
+;; the 'x' constraint. All other modes may use the 'w' constraint.
+(define_mode_attr h_con [(V2SI "w") (V4SI "w")
+ (V4HI "x") (V8HI "x")
+ (V2SF "w") (V4SF "w")
+ (V2DF "w") (DF "w")])
+
+;; Defined to 'f' for types whose element type is a float type.
+(define_mode_attr f [(V8QI "") (V16QI "")
+ (V4HI "") (V8HI "")
+ (V2SI "") (V4SI "")
+ (DI "") (V2DI "")
+ (V2SF "f") (V4SF "f")
+ (V2DF "f") (DF "f")])
+
+;; Defined to '_fp' for types whose element type is a float type.
+(define_mode_attr fp [(V8QI "") (V16QI "")
+ (V4HI "") (V8HI "")
+ (V2SI "") (V4SI "")
+ (DI "") (V2DI "")
+ (V2SF "_fp") (V4SF "_fp")
+ (V2DF "_fp") (DF "_fp")
+ (SF "_fp")])
+
+;; Defined to '_q' for 128-bit types.
+(define_mode_attr q [(V8QI "") (V16QI "_q")
+ (V4HI "") (V8HI "_q")
+ (V2SI "") (V4SI "_q")
+ (DI "") (V2DI "_q")
+ (V2SF "") (V4SF "_q")
+ (V2DF "_q")
+ (QI "") (HI "") (SI "") (DI "") (SF "") (DF "")])
+
+(define_mode_attr vp [(V8QI "v") (V16QI "v")
+ (V4HI "v") (V8HI "v")
+ (V2SI "p") (V4SI "v")
+ (V2DI "p") (V2DF "p")
+ (V2SF "p") (V4SF "v")])
+
+;; -------------------------------------------------------------------
+;; Code Iterators
+;; -------------------------------------------------------------------
+
+;; This code iterator allows the various shifts supported on the core
+(define_code_iterator SHIFT [ashift ashiftrt lshiftrt rotatert])
+
+;; This code iterator allows the shifts supported in arithmetic instructions
+(define_code_iterator ASHIFT [ashift ashiftrt lshiftrt])
+
+;; Code iterator for logical operations
+(define_code_iterator LOGICAL [and ior xor])
+
+;; Code iterator for sign/zero extension
+(define_code_iterator ANY_EXTEND [sign_extend zero_extend])
+
+;; All division operations (signed/unsigned)
+(define_code_iterator ANY_DIV [div udiv])
+
+;; Code iterator for sign/zero extraction
+(define_code_iterator ANY_EXTRACT [sign_extract zero_extract])
+
+;; Code iterator for equality comparisons
+(define_code_iterator EQL [eq ne])
+
+;; Code iterator for less-than and greater/equal-to
+(define_code_iterator LTGE [lt ge])
+
+;; Iterator for __sync_<op> operations where the operation can be
+;; represented directly in RTL.  This is all of the sync operations bar
+;; nand.
+(define_code_iterator atomic_op [plus minus ior xor and])
+
+;; Iterator for integer conversions
+(define_code_iterator FIXUORS [fix unsigned_fix])
+
+;; Iterator for float conversions
+(define_code_iterator FLOATUORS [float unsigned_float])
+
+;; Code iterator for variants of vector max and min.
+(define_code_iterator MAXMIN [smax smin umax umin])
+
+(define_code_iterator FMAXMIN [smax smin])
+
+;; Code iterator for addition and subtraction.
+(define_code_iterator ADDSUB [plus minus])
+
+;; Code iterator for variants of vector saturating binary ops.
+(define_code_iterator BINQOPS [ss_plus us_plus ss_minus us_minus])
+
+;; Code iterator for variants of vector saturating unary ops.
+(define_code_iterator UNQOPS [ss_neg ss_abs])
+
+;; Code iterator for signed variants of vector saturating binary ops.
+(define_code_iterator SBINQOPS [ss_plus ss_minus])
+
+;; Comparison operators for <F>CM.
+(define_code_iterator COMPARISONS [lt le eq ge gt])
+
+;; Unsigned comparison operators.
+(define_code_iterator UCOMPARISONS [ltu leu geu gtu])
+
+;; Comparison operators for the FAC (floating-point absolute compare) patterns.
+(define_code_iterator FAC_COMPARISONS [lt le ge gt])
+
+;; -------------------------------------------------------------------
+;; Code Attributes
+;; -------------------------------------------------------------------
+;; Map rtl objects to optab names
+(define_code_attr optab [(ashift "ashl")
+ (ashiftrt "ashr")
+ (lshiftrt "lshr")
+ (rotatert "rotr")
+ (sign_extend "extend")
+ (zero_extend "zero_extend")
+ (sign_extract "extv")
+ (zero_extract "extzv")
+ (fix "fix")
+ (unsigned_fix "fixuns")
+ (float "float")
+ (unsigned_float "floatuns")
+ (and "and")
+ (ior "ior")
+ (xor "xor")
+ (not "one_cmpl")
+ (neg "neg")
+ (plus "add")
+ (minus "sub")
+ (ss_plus "qadd")
+ (us_plus "qadd")
+ (ss_minus "qsub")
+ (us_minus "qsub")
+ (ss_neg "qneg")
+ (ss_abs "qabs")
+ (eq "eq")
+ (ne "ne")
+ (lt "lt")
+ (ge "ge")
+ (le "le")
+ (gt "gt")
+ (ltu "ltu")
+ (leu "leu")
+ (geu "geu")
+ (gtu "gtu")])
+
+;; For comparison operators we use the FCM* and CM* instructions.
+;; As there are no CMLE or CMLT instructions which act on 3 vector
+;; operands, we must use CMGE or CMGT and swap the order of the
+;; source operands.
+
+(define_code_attr n_optab [(lt "gt") (le "ge") (eq "eq") (ge "ge") (gt "gt")
+ (ltu "hi") (leu "hs") (geu "hs") (gtu "hi")])
+(define_code_attr cmp_1 [(lt "2") (le "2") (eq "1") (ge "1") (gt "1")
+ (ltu "2") (leu "2") (geu "1") (gtu "1")])
+(define_code_attr cmp_2 [(lt "1") (le "1") (eq "2") (ge "2") (gt "2")
+ (ltu "1") (leu "1") (geu "2") (gtu "2")])
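+
+;; For example, a vector "a < b" (code lt) selects <n_optab> "gt" with
+;; <cmp_1> = 2 and <cmp_2> = 1, so it is emitted as CMGT with the source
+;; operands swapped (CMGT result, b, a), because no two-register CMLT
+;; form exists.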
+
+(define_code_attr CMP [(lt "LT") (le "LE") (eq "EQ") (ge "GE") (gt "GT")
+ (ltu "LTU") (leu "LEU") (geu "GEU") (gtu "GTU")])
+
+(define_code_attr fix_trunc_optab [(fix "fix_trunc")
+ (unsigned_fix "fixuns_trunc")])
+
+;; Optab prefix for sign/zero-extending operations
+(define_code_attr su_optab [(sign_extend "") (zero_extend "u")
+ (div "") (udiv "u")
+ (fix "") (unsigned_fix "u")
+ (float "s") (unsigned_float "u")
+ (ss_plus "s") (us_plus "u")
+ (ss_minus "s") (us_minus "u")])
+
+;; Similar for the instruction mnemonics
+(define_code_attr shift [(ashift "lsl") (ashiftrt "asr")
+ (lshiftrt "lsr") (rotatert "ror")])
+
+;; Map shift operators onto underlying bit-field instructions
+(define_code_attr bfshift [(ashift "ubfiz") (ashiftrt "sbfx")
+ (lshiftrt "ubfx") (rotatert "extr")])
+
+;; Logical operator instruction mnemonics
+(define_code_attr logical [(and "and") (ior "orr") (xor "eor")])
+
+;; Similar, but when not(op)
+(define_code_attr nlogical [(and "bic") (ior "orn") (xor "eon")])
+
+;; Sign- or zero-extending load
+(define_code_attr ldrxt [(sign_extend "ldrs") (zero_extend "ldr")])
+
+;; Sign- or zero-extending data-op
+(define_code_attr su [(sign_extend "s") (zero_extend "u")
+ (sign_extract "s") (zero_extract "u")
+ (fix "s") (unsigned_fix "u")
+ (div "s") (udiv "u")
+ (smax "s") (umax "u")
+ (smin "s") (umin "u")])
+
+;; Emit cbz/cbnz depending on comparison type.
+(define_code_attr cbz [(eq "cbz") (ne "cbnz") (lt "cbnz") (ge "cbz")])
+
+;; Emit tbz/tbnz depending on comparison type.
+(define_code_attr tbz [(eq "tbz") (ne "tbnz") (lt "tbnz") (ge "tbz")])
+
+;; Max/min attributes.
+(define_code_attr maxmin [(smax "max")
+ (smin "min")
+ (umax "max")
+ (umin "min")])
+
+;; MLA/MLS attributes.
+(define_code_attr as [(ss_plus "a") (ss_minus "s")])
+
+;; Atomic operations
+(define_code_attr atomic_optab
+ [(ior "or") (xor "xor") (and "and") (plus "add") (minus "sub")])
+
+(define_code_attr atomic_op_operand
+ [(ior "aarch64_logical_operand")
+ (xor "aarch64_logical_operand")
+ (and "aarch64_logical_operand")
+ (plus "aarch64_plus_operand")
+ (minus "aarch64_plus_operand")])
+
+;; -------------------------------------------------------------------
+;; Int Iterators.
+;; -------------------------------------------------------------------
+(define_int_iterator MAXMINV [UNSPEC_UMAXV UNSPEC_UMINV
+ UNSPEC_SMAXV UNSPEC_SMINV])
+
+(define_int_iterator FMAXMINV [UNSPEC_FMAXV UNSPEC_FMINV
+ UNSPEC_FMAXNMV UNSPEC_FMINNMV])
+
+(define_int_iterator SUADDV [UNSPEC_SADDV UNSPEC_UADDV])
+
+(define_int_iterator HADDSUB [UNSPEC_SHADD UNSPEC_UHADD
+ UNSPEC_SRHADD UNSPEC_URHADD
+ UNSPEC_SHSUB UNSPEC_UHSUB
+ UNSPEC_SRHSUB UNSPEC_URHSUB])
+
+
+(define_int_iterator ADDSUBHN [UNSPEC_ADDHN UNSPEC_RADDHN
+ UNSPEC_SUBHN UNSPEC_RSUBHN])
+
+(define_int_iterator ADDSUBHN2 [UNSPEC_ADDHN2 UNSPEC_RADDHN2
+ UNSPEC_SUBHN2 UNSPEC_RSUBHN2])
+
+(define_int_iterator FMAXMIN_UNS [UNSPEC_FMAX UNSPEC_FMIN])
+
+(define_int_iterator VQDMULH [UNSPEC_SQDMULH UNSPEC_SQRDMULH])
+
+(define_int_iterator USSUQADD [UNSPEC_SUQADD UNSPEC_USQADD])
+
+(define_int_iterator SUQMOVN [UNSPEC_SQXTN UNSPEC_UQXTN])
+
+(define_int_iterator VSHL [UNSPEC_SSHL UNSPEC_USHL
+ UNSPEC_SRSHL UNSPEC_URSHL])
+
+(define_int_iterator VSHLL [UNSPEC_SSHLL UNSPEC_USHLL])
+
+(define_int_iterator VQSHL [UNSPEC_SQSHL UNSPEC_UQSHL
+ UNSPEC_SQRSHL UNSPEC_UQRSHL])
+
+(define_int_iterator VSRA [UNSPEC_SSRA UNSPEC_USRA
+ UNSPEC_SRSRA UNSPEC_URSRA])
+
+(define_int_iterator VSLRI [UNSPEC_SSLI UNSPEC_USLI
+ UNSPEC_SSRI UNSPEC_USRI])
+
+
+(define_int_iterator VRSHR_N [UNSPEC_SRSHR UNSPEC_URSHR])
+
+(define_int_iterator VQSHL_N [UNSPEC_SQSHLU UNSPEC_SQSHL UNSPEC_UQSHL])
+
+(define_int_iterator VQSHRN_N [UNSPEC_SQSHRUN UNSPEC_SQRSHRUN
+ UNSPEC_SQSHRN UNSPEC_UQSHRN
+ UNSPEC_SQRSHRN UNSPEC_UQRSHRN])
+
+(define_int_iterator PERMUTE [UNSPEC_ZIP1 UNSPEC_ZIP2
+ UNSPEC_TRN1 UNSPEC_TRN2
+ UNSPEC_UZP1 UNSPEC_UZP2])
+
+(define_int_iterator FRINT [UNSPEC_FRINTZ UNSPEC_FRINTP UNSPEC_FRINTM
+ UNSPEC_FRINTN UNSPEC_FRINTI UNSPEC_FRINTX
+ UNSPEC_FRINTA])
+
+(define_int_iterator FCVT [UNSPEC_FRINTZ UNSPEC_FRINTP UNSPEC_FRINTM
+ UNSPEC_FRINTA UNSPEC_FRINTN])
+
+(define_int_iterator FRECP [UNSPEC_FRECPE UNSPEC_FRECPX])
+
+(define_int_iterator CRYPTO_AES [UNSPEC_AESE UNSPEC_AESD])
+(define_int_iterator CRYPTO_AESMC [UNSPEC_AESMC UNSPEC_AESIMC])
+
+(define_int_iterator CRYPTO_SHA1 [UNSPEC_SHA1C UNSPEC_SHA1M UNSPEC_SHA1P])
+
+(define_int_iterator CRYPTO_SHA256 [UNSPEC_SHA256H UNSPEC_SHA256H2])
+
+;; -------------------------------------------------------------------
+;; Int Iterators Attributes.
+;; -------------------------------------------------------------------
+(define_int_attr maxmin_uns [(UNSPEC_UMAXV "umax")
+ (UNSPEC_UMINV "umin")
+ (UNSPEC_SMAXV "smax")
+ (UNSPEC_SMINV "smin")
+ (UNSPEC_FMAX "smax_nan")
+ (UNSPEC_FMAXNMV "smax")
+ (UNSPEC_FMAXV "smax_nan")
+ (UNSPEC_FMIN "smin_nan")
+ (UNSPEC_FMINNMV "smin")
+ (UNSPEC_FMINV "smin_nan")])
+
+(define_int_attr maxmin_uns_op [(UNSPEC_UMAXV "umax")
+ (UNSPEC_UMINV "umin")
+ (UNSPEC_SMAXV "smax")
+ (UNSPEC_SMINV "smin")
+ (UNSPEC_FMAX "fmax")
+ (UNSPEC_FMAXNMV "fmaxnm")
+ (UNSPEC_FMAXV "fmax")
+ (UNSPEC_FMIN "fmin")
+ (UNSPEC_FMINNMV "fminnm")
+ (UNSPEC_FMINV "fmin")])
+
+(define_int_attr sur [(UNSPEC_SHADD "s") (UNSPEC_UHADD "u")
+ (UNSPEC_SRHADD "sr") (UNSPEC_URHADD "ur")
+ (UNSPEC_SHSUB "s") (UNSPEC_UHSUB "u")
+ (UNSPEC_SRHSUB "sr") (UNSPEC_URHSUB "ur")
+ (UNSPEC_ADDHN "") (UNSPEC_RADDHN "r")
+ (UNSPEC_SUBHN "") (UNSPEC_RSUBHN "r")
+ (UNSPEC_ADDHN2 "") (UNSPEC_RADDHN2 "r")
+ (UNSPEC_SUBHN2 "") (UNSPEC_RSUBHN2 "r")
+ (UNSPEC_SQXTN "s") (UNSPEC_UQXTN "u")
+ (UNSPEC_USQADD "us") (UNSPEC_SUQADD "su")
+ (UNSPEC_SADDV "s") (UNSPEC_UADDV "u")
+ (UNSPEC_SSLI "s") (UNSPEC_USLI "u")
+ (UNSPEC_SSRI "s") (UNSPEC_USRI "u")
+ (UNSPEC_USRA "u") (UNSPEC_SSRA "s")
+ (UNSPEC_URSRA "ur") (UNSPEC_SRSRA "sr")
+ (UNSPEC_URSHR "ur") (UNSPEC_SRSHR "sr")
+ (UNSPEC_SQSHLU "s") (UNSPEC_SQSHL "s")
+ (UNSPEC_UQSHL "u")
+ (UNSPEC_SQSHRUN "s") (UNSPEC_SQRSHRUN "s")
+ (UNSPEC_SQSHRN "s") (UNSPEC_UQSHRN "u")
+ (UNSPEC_SQRSHRN "s") (UNSPEC_UQRSHRN "u")
+ (UNSPEC_USHL "u") (UNSPEC_SSHL "s")
+ (UNSPEC_USHLL "u") (UNSPEC_SSHLL "s")
+ (UNSPEC_URSHL "ur") (UNSPEC_SRSHL "sr")
+ (UNSPEC_UQRSHL "u") (UNSPEC_SQRSHL "s")
+])
+
+(define_int_attr r [(UNSPEC_SQDMULH "") (UNSPEC_SQRDMULH "r")
+ (UNSPEC_SQSHRUN "") (UNSPEC_SQRSHRUN "r")
+ (UNSPEC_SQSHRN "") (UNSPEC_UQSHRN "")
+ (UNSPEC_SQRSHRN "r") (UNSPEC_UQRSHRN "r")
+ (UNSPEC_SQSHL "") (UNSPEC_UQSHL "")
+ (UNSPEC_SQRSHL "r")(UNSPEC_UQRSHL "r")
+])
+
+(define_int_attr lr [(UNSPEC_SSLI "l") (UNSPEC_USLI "l")
+ (UNSPEC_SSRI "r") (UNSPEC_USRI "r")])
+
+(define_int_attr u [(UNSPEC_SQSHLU "u") (UNSPEC_SQSHL "") (UNSPEC_UQSHL "")
+ (UNSPEC_SQSHRUN "u") (UNSPEC_SQRSHRUN "u")
+ (UNSPEC_SQSHRN "") (UNSPEC_UQSHRN "")
+ (UNSPEC_SQRSHRN "") (UNSPEC_UQRSHRN "")])
+
+(define_int_attr addsub [(UNSPEC_SHADD "add")
+ (UNSPEC_UHADD "add")
+ (UNSPEC_SRHADD "add")
+ (UNSPEC_URHADD "add")
+ (UNSPEC_SHSUB "sub")
+ (UNSPEC_UHSUB "sub")
+ (UNSPEC_SRHSUB "sub")
+ (UNSPEC_URHSUB "sub")
+ (UNSPEC_ADDHN "add")
+ (UNSPEC_SUBHN "sub")
+ (UNSPEC_RADDHN "add")
+ (UNSPEC_RSUBHN "sub")
+ (UNSPEC_ADDHN2 "add")
+ (UNSPEC_SUBHN2 "sub")
+ (UNSPEC_RADDHN2 "add")
+ (UNSPEC_RSUBHN2 "sub")])
+
+(define_int_attr offsetlr [(UNSPEC_SSLI "1") (UNSPEC_USLI "1")
+ (UNSPEC_SSRI "0") (UNSPEC_USRI "0")])
+
+;; Standard pattern names for floating-point rounding instructions.
+(define_int_attr frint_pattern [(UNSPEC_FRINTZ "btrunc")
+ (UNSPEC_FRINTP "ceil")
+ (UNSPEC_FRINTM "floor")
+ (UNSPEC_FRINTI "nearbyint")
+ (UNSPEC_FRINTX "rint")
+ (UNSPEC_FRINTA "round")
+ (UNSPEC_FRINTN "frintn")])
+
+;; frint suffix for floating-point rounding instructions.
+(define_int_attr frint_suffix [(UNSPEC_FRINTZ "z") (UNSPEC_FRINTP "p")
+ (UNSPEC_FRINTM "m") (UNSPEC_FRINTI "i")
+ (UNSPEC_FRINTX "x") (UNSPEC_FRINTA "a")
+ (UNSPEC_FRINTN "n")])
+
+(define_int_attr fcvt_pattern [(UNSPEC_FRINTZ "btrunc") (UNSPEC_FRINTA "round")
+ (UNSPEC_FRINTP "ceil") (UNSPEC_FRINTM "floor")
+ (UNSPEC_FRINTN "frintn")])
+
+(define_int_attr perm_insn [(UNSPEC_ZIP1 "zip") (UNSPEC_ZIP2 "zip")
+ (UNSPEC_TRN1 "trn") (UNSPEC_TRN2 "trn")
+ (UNSPEC_UZP1 "uzp") (UNSPEC_UZP2 "uzp")])
+
+(define_int_attr perm_hilo [(UNSPEC_ZIP1 "1") (UNSPEC_ZIP2 "2")
+ (UNSPEC_TRN1 "1") (UNSPEC_TRN2 "2")
+ (UNSPEC_UZP1 "1") (UNSPEC_UZP2 "2")])
+
+(define_int_attr frecp_suffix [(UNSPEC_FRECPE "e") (UNSPEC_FRECPX "x")])
+
+(define_int_attr aes_op [(UNSPEC_AESE "e") (UNSPEC_AESD "d")])
+(define_int_attr aesmc_op [(UNSPEC_AESMC "mc") (UNSPEC_AESIMC "imc")])
+
+(define_int_attr sha1_op [(UNSPEC_SHA1C "c") (UNSPEC_SHA1P "p")
+ (UNSPEC_SHA1M "m")])
+
+(define_int_attr sha256_op [(UNSPEC_SHA256H "") (UNSPEC_SHA256H2 "2")])
diff --git a/gcc-4.9/gcc/config/aarch64/predicates.md b/gcc-4.9/gcc/config/aarch64/predicates.md
new file mode 100644
index 000000000..c8e27d871
--- /dev/null
+++ b/gcc-4.9/gcc/config/aarch64/predicates.md
@@ -0,0 +1,302 @@
+;; Machine description for AArch64 architecture.
+;; Copyright (C) 2009-2014 Free Software Foundation, Inc.
+;; Contributed by ARM Ltd.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful, but
+;; WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;; General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+(define_special_predicate "cc_register"
+ (and (match_code "reg")
+ (and (match_test "REGNO (op) == CC_REGNUM")
+ (ior (match_test "mode == GET_MODE (op)")
+ (match_test "mode == VOIDmode
+ && GET_MODE_CLASS (GET_MODE (op)) == MODE_CC"))))
+)
+
+(define_predicate "aarch64_simd_register"
+ (and (match_code "reg")
+ (ior (match_test "REGNO_REG_CLASS (REGNO (op)) == FP_LO_REGS")
+ (match_test "REGNO_REG_CLASS (REGNO (op)) == FP_REGS"))))
+
+(define_predicate "aarch64_reg_or_zero"
+ (and (match_code "reg,subreg,const_int")
+ (ior (match_operand 0 "register_operand")
+ (match_test "op == const0_rtx"))))
+
+(define_predicate "aarch64_reg_or_fp_zero"
+ (and (match_code "reg,subreg,const_double")
+ (ior (match_operand 0 "register_operand")
+ (match_test "aarch64_float_const_zero_rtx_p (op)"))))
+
+(define_predicate "aarch64_reg_zero_or_m1_or_1"
+ (and (match_code "reg,subreg,const_int")
+ (ior (match_operand 0 "register_operand")
+ (ior (match_test "op == const0_rtx")
+ (ior (match_test "op == constm1_rtx")
+ (match_test "op == const1_rtx"))))))
+
+(define_predicate "aarch64_fp_compare_operand"
+ (ior (match_operand 0 "register_operand")
+ (and (match_code "const_double")
+ (match_test "aarch64_float_const_zero_rtx_p (op)"))))
+
+(define_predicate "aarch64_plus_immediate"
+ (and (match_code "const_int")
+ (ior (match_test "aarch64_uimm12_shift (INTVAL (op))")
+ (match_test "aarch64_uimm12_shift (-INTVAL (op))"))))
+
+(define_predicate "aarch64_plus_operand"
+ (ior (match_operand 0 "register_operand")
+ (match_operand 0 "aarch64_plus_immediate")))
+
+(define_predicate "aarch64_pluslong_immediate"
+ (and (match_code "const_int")
+ (match_test "(INTVAL (op) < 0xffffff && INTVAL (op) > -0xffffff)")))
+
+(define_predicate "aarch64_pluslong_operand"
+ (ior (match_operand 0 "register_operand")
+ (match_operand 0 "aarch64_pluslong_immediate")))
+
+(define_predicate "aarch64_logical_immediate"
+ (and (match_code "const_int")
+ (match_test "aarch64_bitmask_imm (INTVAL (op), mode)")))
+
+(define_predicate "aarch64_logical_operand"
+ (ior (match_operand 0 "register_operand")
+ (match_operand 0 "aarch64_logical_immediate")))
+
+(define_predicate "aarch64_shift_imm_si"
+ (and (match_code "const_int")
+ (match_test "(unsigned HOST_WIDE_INT) INTVAL (op) < 32")))
+
+(define_predicate "aarch64_shift_imm_di"
+ (and (match_code "const_int")
+ (match_test "(unsigned HOST_WIDE_INT) INTVAL (op) < 64")))
+
+(define_predicate "aarch64_shift_imm64_di"
+ (and (match_code "const_int")
+ (match_test "(unsigned HOST_WIDE_INT) INTVAL (op) <= 64")))
+
+(define_predicate "aarch64_reg_or_shift_imm_si"
+ (ior (match_operand 0 "register_operand")
+ (match_operand 0 "aarch64_shift_imm_si")))
+
+(define_predicate "aarch64_reg_or_shift_imm_di"
+ (ior (match_operand 0 "register_operand")
+ (match_operand 0 "aarch64_shift_imm_di")))
+
+;; The imm3 field is a 3-bit field that only accepts immediates in the
+;; range 0..4.
+(define_predicate "aarch64_imm3"
+ (and (match_code "const_int")
+ (match_test "(unsigned HOST_WIDE_INT) INTVAL (op) <= 4")))
+
+(define_predicate "aarch64_pwr_imm3"
+ (and (match_code "const_int")
+ (match_test "INTVAL (op) != 0
+ && (unsigned) exact_log2 (INTVAL (op)) <= 4")))
+
+(define_predicate "aarch64_pwr_2_si"
+ (and (match_code "const_int")
+ (match_test "INTVAL (op) != 0
+ && (unsigned) exact_log2 (INTVAL (op)) < 32")))
+
+(define_predicate "aarch64_pwr_2_di"
+ (and (match_code "const_int")
+ (match_test "INTVAL (op) != 0
+ && (unsigned) exact_log2 (INTVAL (op)) < 64")))
+
+(define_predicate "aarch64_mem_pair_operand"
+ (and (match_code "mem")
+ (match_test "aarch64_legitimate_address_p (mode, XEXP (op, 0), PARALLEL,
+ 0)")))
+
+(define_predicate "aarch64_valid_symref"
+ (match_code "const, symbol_ref, label_ref")
+{
+ return (aarch64_classify_symbolic_expression (op, SYMBOL_CONTEXT_ADR)
+ != SYMBOL_FORCE_TO_MEM);
+})
+
+(define_predicate "aarch64_tls_ie_symref"
+ (match_code "const, symbol_ref, label_ref")
+{
+ switch (GET_CODE (op))
+ {
+ case CONST:
+ op = XEXP (op, 0);
+ if (GET_CODE (op) != PLUS
+ || GET_CODE (XEXP (op, 0)) != SYMBOL_REF
+ || GET_CODE (XEXP (op, 1)) != CONST_INT)
+ return false;
+      op = XEXP (op, 0);
+      /* Fall through.  */
+
+    case SYMBOL_REF:
+ return SYMBOL_REF_TLS_MODEL (op) == TLS_MODEL_INITIAL_EXEC;
+
+ default:
+ gcc_unreachable ();
+ }
+})
+
+(define_predicate "aarch64_tls_le_symref"
+ (match_code "const, symbol_ref, label_ref")
+{
+ switch (GET_CODE (op))
+ {
+ case CONST:
+ op = XEXP (op, 0);
+ if (GET_CODE (op) != PLUS
+ || GET_CODE (XEXP (op, 0)) != SYMBOL_REF
+ || GET_CODE (XEXP (op, 1)) != CONST_INT)
+ return false;
+      op = XEXP (op, 0);
+      /* Fall through.  */
+
+    case SYMBOL_REF:
+ return SYMBOL_REF_TLS_MODEL (op) == TLS_MODEL_LOCAL_EXEC;
+
+ default:
+ gcc_unreachable ();
+ }
+})
+
+(define_predicate "aarch64_mov_operand"
+ (and (match_code "reg,subreg,mem,const,const_int,symbol_ref,label_ref,high")
+ (ior (match_operand 0 "register_operand")
+ (ior (match_operand 0 "memory_operand")
+ (match_test "aarch64_mov_operand_p (op, SYMBOL_CONTEXT_ADR, mode)")))))
+
+(define_predicate "aarch64_movti_operand"
+ (and (match_code "reg,subreg,mem,const_int")
+ (ior (match_operand 0 "register_operand")
+ (ior (match_operand 0 "memory_operand")
+ (match_operand 0 "const_int_operand")))))
+
+(define_predicate "aarch64_reg_or_imm"
+ (and (match_code "reg,subreg,const_int")
+ (ior (match_operand 0 "register_operand")
+ (match_operand 0 "const_int_operand"))))
+
+;; True for integer comparisons and for FP comparisons other than LTGT or UNEQ.
+(define_special_predicate "aarch64_comparison_operator"
+ (match_code "eq,ne,le,lt,ge,gt,geu,gtu,leu,ltu,unordered,ordered,unlt,unle,unge,ungt"))
+
+;; True if the operand is a memory reference suitable for a load/store exclusive.
+(define_predicate "aarch64_sync_memory_operand"
+ (and (match_operand 0 "memory_operand")
+ (match_code "reg" "0")))
+
+;; Predicates for parallel expanders based on mode.
+(define_special_predicate "vect_par_cnst_hi_half"
+ (match_code "parallel")
+{
+ HOST_WIDE_INT count = XVECLEN (op, 0);
+ int nunits = GET_MODE_NUNITS (mode);
+ int i;
+
+ if (count < 1
+ || count != nunits / 2)
+ return false;
+
+ if (!VECTOR_MODE_P (mode))
+ return false;
+
+ for (i = 0; i < count; i++)
+ {
+ rtx elt = XVECEXP (op, 0, i);
+ int val;
+
+ if (GET_CODE (elt) != CONST_INT)
+ return false;
+
+ val = INTVAL (elt);
+ if (val != (nunits / 2) + i)
+ return false;
+ }
+ return true;
+})
+
+(define_special_predicate "vect_par_cnst_lo_half"
+ (match_code "parallel")
+{
+ HOST_WIDE_INT count = XVECLEN (op, 0);
+ int nunits = GET_MODE_NUNITS (mode);
+ int i;
+
+ if (count < 1
+ || count != nunits / 2)
+ return false;
+
+ if (!VECTOR_MODE_P (mode))
+ return false;
+
+ for (i = 0; i < count; i++)
+ {
+ rtx elt = XVECEXP (op, 0, i);
+ int val;
+
+ if (GET_CODE (elt) != CONST_INT)
+ return false;
+
+ val = INTVAL (elt);
+ if (val != i)
+ return false;
+ }
+ return true;
+})
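+
+;; As a concrete example of the two predicates above, for V4SImode the
+;; high-half selector is (parallel [(const_int 2) (const_int 3)]) and the
+;; low-half selector is (parallel [(const_int 0) (const_int 1)]).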
+
+
+(define_special_predicate "aarch64_simd_lshift_imm"
+ (match_code "const_vector")
+{
+ return aarch64_simd_shift_imm_p (op, mode, true);
+})
+
+(define_special_predicate "aarch64_simd_rshift_imm"
+ (match_code "const_vector")
+{
+ return aarch64_simd_shift_imm_p (op, mode, false);
+})
+
+(define_predicate "aarch64_simd_reg_or_zero"
+ (and (match_code "reg,subreg,const_int,const_vector")
+ (ior (match_operand 0 "register_operand")
+ (ior (match_test "op == const0_rtx")
+ (match_test "aarch64_simd_imm_zero_p (op, mode)")))))
+
+(define_predicate "aarch64_simd_struct_operand"
+ (and (match_code "mem")
+ (match_test "TARGET_SIMD && aarch64_simd_mem_operand_p (op)")))
+
+;; Like general_operand but allow only valid SIMD addressing modes.
+(define_predicate "aarch64_simd_general_operand"
+ (and (match_operand 0 "general_operand")
+ (match_test "!MEM_P (op)
+ || GET_CODE (XEXP (op, 0)) == POST_INC
+ || GET_CODE (XEXP (op, 0)) == REG")))
+
+;; Like nonimmediate_operand but allow only valid SIMD addressing modes.
+(define_predicate "aarch64_simd_nonimmediate_operand"
+ (and (match_operand 0 "nonimmediate_operand")
+ (match_test "!MEM_P (op)
+ || GET_CODE (XEXP (op, 0)) == POST_INC
+ || GET_CODE (XEXP (op, 0)) == REG")))
+
+(define_special_predicate "aarch64_simd_imm_zero"
+ (match_code "const_vector")
+{
+ return aarch64_simd_imm_zero_p (op, mode);
+})
diff --git a/gcc-4.9/gcc/config/aarch64/t-aarch64 b/gcc-4.9/gcc/config/aarch64/t-aarch64
new file mode 100644
index 000000000..158fbb578
--- /dev/null
+++ b/gcc-4.9/gcc/config/aarch64/t-aarch64
@@ -0,0 +1,45 @@
+# Machine description for AArch64 architecture.
+# Copyright (C) 2009-2014 Free Software Foundation, Inc.
+# Contributed by ARM Ltd.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+TM_H += $(srcdir)/config/aarch64/aarch64-cores.def
+OPTIONS_H_EXTRA += $(srcdir)/config/aarch64/aarch64-cores.def
+
+$(srcdir)/config/aarch64/aarch64-tune.md: $(srcdir)/config/aarch64/gentune.sh \
+ $(srcdir)/config/aarch64/aarch64-cores.def
+ $(SHELL) $(srcdir)/config/aarch64/gentune.sh \
+ $(srcdir)/config/aarch64/aarch64-cores.def > \
+ $(srcdir)/config/aarch64/aarch64-tune.md
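+# (The generated aarch64-tune.md just defines the "tune" attribute from the
+# core names listed in aarch64-cores.def.)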
+
+aarch64-builtins.o: $(srcdir)/config/aarch64/aarch64-builtins.c $(CONFIG_H) \
+ $(SYSTEM_H) coretypes.h $(TM_H) \
+ $(RTL_H) $(TREE_H) expr.h $(TM_P_H) $(RECOG_H) langhooks.h \
+ $(DIAGNOSTIC_CORE_H) $(OPTABS_H) \
+ $(srcdir)/config/aarch64/aarch64-simd-builtins.def
+ $(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) \
+ $(srcdir)/config/aarch64/aarch64-builtins.c
+
+aarch-common.o: $(srcdir)/config/arm/aarch-common.c $(CONFIG_H) $(SYSTEM_H) \
+ coretypes.h $(TM_H) $(TM_P_H) $(RTL_H) $(TREE_H) output.h $(C_COMMON_H)
+ $(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) \
+ $(srcdir)/config/arm/aarch-common.c
+
+comma=,
+MULTILIB_OPTIONS = $(subst $(comma),/, $(patsubst %, mabi=%, $(subst $(comma),$(comma)mabi=,$(TM_MULTILIB_CONFIG))))
+MULTILIB_DIRNAMES = $(subst $(comma), ,$(TM_MULTILIB_CONFIG))
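+# For illustration, with TM_MULTILIB_CONFIG = lp64,ilp32 the substitutions
+# above expand to:
+#   MULTILIB_OPTIONS  = mabi=lp64/mabi=ilp32
+#   MULTILIB_DIRNAMES = lp64 ilp32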
diff --git a/gcc-4.9/gcc/config/aarch64/t-aarch64-linux b/gcc-4.9/gcc/config/aarch64/t-aarch64-linux
new file mode 100644
index 000000000..147452b04
--- /dev/null
+++ b/gcc-4.9/gcc/config/aarch64/t-aarch64-linux
@@ -0,0 +1,31 @@
+# Machine description for AArch64 architecture.
+# Copyright (C) 2009-2014 Free Software Foundation, Inc.
+# Contributed by ARM Ltd.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+LIB1ASMSRC = aarch64/lib1funcs.asm
+LIB1ASMFUNCS = _aarch64_sync_cache_range
+
+AARCH_BE = $(if $(findstring TARGET_BIG_ENDIAN_DEFAULT=1, $(tm_defines)),_be)
+MULTILIB_OSDIRNAMES = .=../lib64$(call if_multiarch,:aarch64$(AARCH_BE)-linux-gnu)
+MULTIARCH_DIRNAME = $(call if_multiarch,aarch64$(AARCH_BE)-linux-gnu)
+
+# Disable the multilib for linux-gnu targets for the time being; focus
+# on the baremetal targets.
+MULTILIB_OPTIONS =
+MULTILIB_DIRNAMES =
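+# (Being plain assignments, these also override any MULTILIB_OPTIONS and
+# MULTILIB_DIRNAMES inherited from t-aarch64, which is listed earlier in
+# tmake_file.)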