path: root/gcc-4.8.1/gcc/config/aarch64
Diffstat (limited to 'gcc-4.8.1/gcc/config/aarch64')
-rw-r--r--  gcc-4.8.1/gcc/config/aarch64/aarch64-arches.def  29
-rw-r--r--  gcc-4.8.1/gcc/config/aarch64/aarch64-builtins.c  1293
-rw-r--r--  gcc-4.8.1/gcc/config/aarch64/aarch64-cores.def  40
-rw-r--r--  gcc-4.8.1/gcc/config/aarch64/aarch64-elf-raw.h  32
-rw-r--r--  gcc-4.8.1/gcc/config/aarch64/aarch64-elf.h  132
-rw-r--r--  gcc-4.8.1/gcc/config/aarch64/aarch64-generic.md  38
-rw-r--r--  gcc-4.8.1/gcc/config/aarch64/aarch64-linux.h  44
-rw-r--r--  gcc-4.8.1/gcc/config/aarch64/aarch64-modes.def  54
-rw-r--r--  gcc-4.8.1/gcc/config/aarch64/aarch64-option-extensions.def  37
-rw-r--r--  gcc-4.8.1/gcc/config/aarch64/aarch64-opts.h  64
-rw-r--r--  gcc-4.8.1/gcc/config/aarch64/aarch64-protos.h  254
-rw-r--r--  gcc-4.8.1/gcc/config/aarch64/aarch64-simd-builtins.def  258
-rw-r--r--  gcc-4.8.1/gcc/config/aarch64/aarch64-simd.md  3716
-rw-r--r--  gcc-4.8.1/gcc/config/aarch64/aarch64-tune.md  5
-rw-r--r--  gcc-4.8.1/gcc/config/aarch64/aarch64.c  8007
-rw-r--r--  gcc-4.8.1/gcc/config/aarch64/aarch64.h  818
-rw-r--r--  gcc-4.8.1/gcc/config/aarch64/aarch64.md  3325
-rw-r--r--  gcc-4.8.1/gcc/config/aarch64/aarch64.opt  100
-rw-r--r--  gcc-4.8.1/gcc/config/aarch64/arm_neon.h  25535
-rw-r--r--  gcc-4.8.1/gcc/config/aarch64/atomics.md  382
-rw-r--r--  gcc-4.8.1/gcc/config/aarch64/constraints.md  178
-rw-r--r--  gcc-4.8.1/gcc/config/aarch64/gentune.sh  32
-rw-r--r--  gcc-4.8.1/gcc/config/aarch64/iterators.md  802
-rw-r--r--  gcc-4.8.1/gcc/config/aarch64/large.md  312
-rw-r--r--  gcc-4.8.1/gcc/config/aarch64/predicates.md  298
-rw-r--r--  gcc-4.8.1/gcc/config/aarch64/small.md  287
-rw-r--r--  gcc-4.8.1/gcc/config/aarch64/t-aarch64  36
-rw-r--r--  gcc-4.8.1/gcc/config/aarch64/t-aarch64-linux  25
28 files changed, 0 insertions, 46133 deletions
diff --git a/gcc-4.8.1/gcc/config/aarch64/aarch64-arches.def b/gcc-4.8.1/gcc/config/aarch64/aarch64-arches.def
deleted file mode 100644
index b66e33ec9..000000000
--- a/gcc-4.8.1/gcc/config/aarch64/aarch64-arches.def
+++ /dev/null
@@ -1,29 +0,0 @@
-/* Copyright (C) 2011-2013 Free Software Foundation, Inc.
- Contributed by ARM Ltd.
-
- This file is part of GCC.
-
- GCC is free software; you can redistribute it and/or modify it
- under the terms of the GNU General Public License as published
- by the Free Software Foundation; either version 3, or (at your
- option) any later version.
-
- GCC is distributed in the hope that it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
- License for more details.
-
- You should have received a copy of the GNU General Public License
- along with GCC; see the file COPYING3. If not see
- <http://www.gnu.org/licenses/>. */
-
-/* Before using #include to read this file, define a macro:
-
- AARCH64_ARCH(NAME, CORE, ARCH, FLAGS)
-
- The NAME is the name of the architecture, represented as a string
- constant. The CORE is the identifier for a core representative of
- this architecture. ARCH is the architecture revision. FLAGS are
- the flags implied by the architecture. */
-
-AARCH64_ARCH("armv8-a", generic, 8, AARCH64_FL_FOR_ARCH8)
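
The .def file above follows GCC's X-macro convention described in its header comment: a consumer defines AARCH64_ARCH, includes the file so that each entry expands into a table row or enumerator, then undefines the macro. A minimal sketch of such a consumer follows; the struct and table names are illustrative only (the real back end keeps a similar table in aarch64.c), and enum aarch64_processor and the AARCH64_FL_* flag macros are assumed to come from aarch64-opts.h and aarch64.h as shown elsewhere in this diff.

/* Illustrative consumer of aarch64-arches.def; names are hypothetical.  */
struct arch_entry
{
  const char *name;              /* architecture name, e.g. "armv8-a" */
  enum aarch64_processor core;   /* representative core */
  int architecture_version;      /* architecture revision */
  unsigned long flags;           /* implied AARCH64_FL_* feature flags */
};

#define AARCH64_ARCH(NAME, CORE, ARCH, FLAGS) \
  { NAME, CORE, ARCH, FLAGS },

static const struct arch_entry all_architectures[] =
{
#include "aarch64-arches.def"
  { NULL, aarch64_none, 0, 0 }
};
#undef AARCH64_ARCH
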
diff --git a/gcc-4.8.1/gcc/config/aarch64/aarch64-builtins.c b/gcc-4.8.1/gcc/config/aarch64/aarch64-builtins.c
deleted file mode 100644
index 1ea55a83e..000000000
--- a/gcc-4.8.1/gcc/config/aarch64/aarch64-builtins.c
+++ /dev/null
@@ -1,1293 +0,0 @@
-/* Builtins' description for AArch64 SIMD architecture.
- Copyright (C) 2011-2013 Free Software Foundation, Inc.
- Contributed by ARM Ltd.
-
- This file is part of GCC.
-
- GCC is free software; you can redistribute it and/or modify it
- under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 3, or (at your option)
- any later version.
-
- GCC is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with GCC; see the file COPYING3. If not see
- <http://www.gnu.org/licenses/>. */
-
-#include "config.h"
-#include "system.h"
-#include "coretypes.h"
-#include "tm.h"
-#include "rtl.h"
-#include "tree.h"
-#include "expr.h"
-#include "tm_p.h"
-#include "recog.h"
-#include "langhooks.h"
-#include "diagnostic-core.h"
-#include "optabs.h"
-
-enum aarch64_simd_builtin_type_mode
-{
- T_V8QI,
- T_V4HI,
- T_V2SI,
- T_V2SF,
- T_DI,
- T_DF,
- T_V16QI,
- T_V8HI,
- T_V4SI,
- T_V4SF,
- T_V2DI,
- T_V2DF,
- T_TI,
- T_EI,
- T_OI,
- T_XI,
- T_SI,
- T_HI,
- T_QI,
- T_MAX
-};
-
-#define v8qi_UP T_V8QI
-#define v4hi_UP T_V4HI
-#define v2si_UP T_V2SI
-#define v2sf_UP T_V2SF
-#define di_UP T_DI
-#define df_UP T_DF
-#define v16qi_UP T_V16QI
-#define v8hi_UP T_V8HI
-#define v4si_UP T_V4SI
-#define v4sf_UP T_V4SF
-#define v2di_UP T_V2DI
-#define v2df_UP T_V2DF
-#define ti_UP T_TI
-#define ei_UP T_EI
-#define oi_UP T_OI
-#define xi_UP T_XI
-#define si_UP T_SI
-#define hi_UP T_HI
-#define qi_UP T_QI
-
-#define UP(X) X##_UP
-
-typedef enum
-{
- AARCH64_SIMD_BINOP,
- AARCH64_SIMD_TERNOP,
- AARCH64_SIMD_QUADOP,
- AARCH64_SIMD_UNOP,
- AARCH64_SIMD_GETLANE,
- AARCH64_SIMD_SETLANE,
- AARCH64_SIMD_CREATE,
- AARCH64_SIMD_DUP,
- AARCH64_SIMD_DUPLANE,
- AARCH64_SIMD_COMBINE,
- AARCH64_SIMD_SPLIT,
- AARCH64_SIMD_LANEMUL,
- AARCH64_SIMD_LANEMULL,
- AARCH64_SIMD_LANEMULH,
- AARCH64_SIMD_LANEMAC,
- AARCH64_SIMD_SCALARMUL,
- AARCH64_SIMD_SCALARMULL,
- AARCH64_SIMD_SCALARMULH,
- AARCH64_SIMD_SCALARMAC,
- AARCH64_SIMD_CONVERT,
- AARCH64_SIMD_FIXCONV,
- AARCH64_SIMD_SELECT,
- AARCH64_SIMD_RESULTPAIR,
- AARCH64_SIMD_REINTERP,
- AARCH64_SIMD_VTBL,
- AARCH64_SIMD_VTBX,
- AARCH64_SIMD_LOAD1,
- AARCH64_SIMD_LOAD1LANE,
- AARCH64_SIMD_STORE1,
- AARCH64_SIMD_STORE1LANE,
- AARCH64_SIMD_LOADSTRUCT,
- AARCH64_SIMD_LOADSTRUCTLANE,
- AARCH64_SIMD_STORESTRUCT,
- AARCH64_SIMD_STORESTRUCTLANE,
- AARCH64_SIMD_LOGICBINOP,
- AARCH64_SIMD_SHIFTINSERT,
- AARCH64_SIMD_SHIFTIMM,
- AARCH64_SIMD_SHIFTACC
-} aarch64_simd_itype;
-
-typedef struct
-{
- const char *name;
- const aarch64_simd_itype itype;
- enum aarch64_simd_builtin_type_mode mode;
- const enum insn_code code;
- unsigned int fcode;
-} aarch64_simd_builtin_datum;
-
-#define CF(N, X) CODE_FOR_aarch64_##N##X
-
-#define VAR1(T, N, A) \
- {#N, AARCH64_SIMD_##T, UP (A), CF (N, A), 0},
-#define VAR2(T, N, A, B) \
- VAR1 (T, N, A) \
- VAR1 (T, N, B)
-#define VAR3(T, N, A, B, C) \
- VAR2 (T, N, A, B) \
- VAR1 (T, N, C)
-#define VAR4(T, N, A, B, C, D) \
- VAR3 (T, N, A, B, C) \
- VAR1 (T, N, D)
-#define VAR5(T, N, A, B, C, D, E) \
- VAR4 (T, N, A, B, C, D) \
- VAR1 (T, N, E)
-#define VAR6(T, N, A, B, C, D, E, F) \
- VAR5 (T, N, A, B, C, D, E) \
- VAR1 (T, N, F)
-#define VAR7(T, N, A, B, C, D, E, F, G) \
- VAR6 (T, N, A, B, C, D, E, F) \
- VAR1 (T, N, G)
-#define VAR8(T, N, A, B, C, D, E, F, G, H) \
- VAR7 (T, N, A, B, C, D, E, F, G) \
- VAR1 (T, N, H)
-#define VAR9(T, N, A, B, C, D, E, F, G, H, I) \
- VAR8 (T, N, A, B, C, D, E, F, G, H) \
- VAR1 (T, N, I)
-#define VAR10(T, N, A, B, C, D, E, F, G, H, I, J) \
- VAR9 (T, N, A, B, C, D, E, F, G, H, I) \
- VAR1 (T, N, J)
-#define VAR11(T, N, A, B, C, D, E, F, G, H, I, J, K) \
- VAR10 (T, N, A, B, C, D, E, F, G, H, I, J) \
- VAR1 (T, N, K)
-#define VAR12(T, N, A, B, C, D, E, F, G, H, I, J, K, L) \
- VAR11 (T, N, A, B, C, D, E, F, G, H, I, J, K) \
- VAR1 (T, N, L)
-
-/* BUILTIN_<ITERATOR> macros should expand to cover the same range of
- modes as is given for each define_mode_iterator in
- config/aarch64/iterators.md. */
-
-#define BUILTIN_DX(T, N) \
- VAR2 (T, N, di, df)
-#define BUILTIN_SDQ_I(T, N) \
- VAR4 (T, N, qi, hi, si, di)
-#define BUILTIN_SD_HSI(T, N) \
- VAR2 (T, N, hi, si)
-#define BUILTIN_V2F(T, N) \
- VAR2 (T, N, v2sf, v2df)
-#define BUILTIN_VALL(T, N) \
- VAR10 (T, N, v8qi, v16qi, v4hi, v8hi, v2si, v4si, v2di, v2sf, v4sf, v2df)
-#define BUILTIN_VB(T, N) \
- VAR2 (T, N, v8qi, v16qi)
-#define BUILTIN_VD(T, N) \
- VAR4 (T, N, v8qi, v4hi, v2si, v2sf)
-#define BUILTIN_VDC(T, N) \
- VAR6 (T, N, v8qi, v4hi, v2si, v2sf, di, df)
-#define BUILTIN_VDIC(T, N) \
- VAR3 (T, N, v8qi, v4hi, v2si)
-#define BUILTIN_VDN(T, N) \
- VAR3 (T, N, v4hi, v2si, di)
-#define BUILTIN_VDQ(T, N) \
- VAR7 (T, N, v8qi, v16qi, v4hi, v8hi, v2si, v4si, v2di)
-#define BUILTIN_VDQF(T, N) \
- VAR3 (T, N, v2sf, v4sf, v2df)
-#define BUILTIN_VDQHS(T, N) \
- VAR4 (T, N, v4hi, v8hi, v2si, v4si)
-#define BUILTIN_VDQIF(T, N) \
- VAR9 (T, N, v8qi, v16qi, v4hi, v8hi, v2si, v4si, v2sf, v4sf, v2df)
-#define BUILTIN_VDQM(T, N) \
- VAR6 (T, N, v8qi, v16qi, v4hi, v8hi, v2si, v4si)
-#define BUILTIN_VDQV(T, N) \
- VAR5 (T, N, v8qi, v16qi, v4hi, v8hi, v4si)
-#define BUILTIN_VDQ_BHSI(T, N) \
- VAR6 (T, N, v8qi, v16qi, v4hi, v8hi, v2si, v4si)
-#define BUILTIN_VDQ_I(T, N) \
- VAR7 (T, N, v8qi, v16qi, v4hi, v8hi, v2si, v4si, v2di)
-#define BUILTIN_VDW(T, N) \
- VAR3 (T, N, v8qi, v4hi, v2si)
-#define BUILTIN_VD_BHSI(T, N) \
- VAR3 (T, N, v8qi, v4hi, v2si)
-#define BUILTIN_VD_HSI(T, N) \
- VAR2 (T, N, v4hi, v2si)
-#define BUILTIN_VD_RE(T, N) \
- VAR6 (T, N, v8qi, v4hi, v2si, v2sf, di, df)
-#define BUILTIN_VQ(T, N) \
- VAR6 (T, N, v16qi, v8hi, v4si, v2di, v4sf, v2df)
-#define BUILTIN_VQN(T, N) \
- VAR3 (T, N, v8hi, v4si, v2di)
-#define BUILTIN_VQW(T, N) \
- VAR3 (T, N, v16qi, v8hi, v4si)
-#define BUILTIN_VQ_HSI(T, N) \
- VAR2 (T, N, v8hi, v4si)
-#define BUILTIN_VQ_S(T, N) \
- VAR6 (T, N, v8qi, v16qi, v4hi, v8hi, v2si, v4si)
-#define BUILTIN_VSDQ_HSI(T, N) \
- VAR6 (T, N, v4hi, v8hi, v2si, v4si, hi, si)
-#define BUILTIN_VSDQ_I(T, N) \
- VAR11 (T, N, v8qi, v16qi, v4hi, v8hi, v2si, v4si, v2di, qi, hi, si, di)
-#define BUILTIN_VSDQ_I_BHSI(T, N) \
- VAR10 (T, N, v8qi, v16qi, v4hi, v8hi, v2si, v4si, v2di, qi, hi, si)
-#define BUILTIN_VSDQ_I_DI(T, N) \
- VAR8 (T, N, v8qi, v16qi, v4hi, v8hi, v2si, v4si, v2di, di)
-#define BUILTIN_VSD_HSI(T, N) \
- VAR4 (T, N, v4hi, v2si, hi, si)
-#define BUILTIN_VSQN_HSDI(T, N) \
- VAR6 (T, N, v8hi, v4si, v2di, hi, si, di)
-#define BUILTIN_VSTRUCT(T, N) \
- VAR3 (T, N, oi, ci, xi)
-
-static aarch64_simd_builtin_datum aarch64_simd_builtin_data[] = {
-#include "aarch64-simd-builtins.def"
-};
-
-#undef VAR1
-#define VAR1(T, N, A) \
- AARCH64_SIMD_BUILTIN_##N##A,
-
-enum aarch64_builtins
-{
- AARCH64_BUILTIN_MIN,
- AARCH64_SIMD_BUILTIN_BASE,
-#include "aarch64-simd-builtins.def"
- AARCH64_SIMD_BUILTIN_MAX = AARCH64_SIMD_BUILTIN_BASE
- + ARRAY_SIZE (aarch64_simd_builtin_data),
- AARCH64_BUILTIN_MAX
-};
-
-#undef BUILTIN_DX
-#undef BUILTIN_SDQ_I
-#undef BUILTIN_SD_HSI
-#undef BUILTIN_V2F
-#undef BUILTIN_VALL
-#undef BUILTIN_VB
-#undef BUILTIN_VD
-#undef BUILTIN_VDC
-#undef BUILTIN_VDIC
-#undef BUILTIN_VDN
-#undef BUILTIN_VDQ
-#undef BUILTIN_VDQF
-#undef BUILTIN_VDQHS
-#undef BUILTIN_VDQIF
-#undef BUILTIN_VDQM
-#undef BUILTIN_VDQV
-#undef BUILTIN_VDQ_BHSI
-#undef BUILTIN_VDQ_I
-#undef BUILTIN_VDW
-#undef BUILTIN_VD_BHSI
-#undef BUILTIN_VD_HSI
-#undef BUILTIN_VD_RE
-#undef BUILTIN_VQ
-#undef BUILTIN_VQN
-#undef BUILTIN_VQW
-#undef BUILTIN_VQ_HSI
-#undef BUILTIN_VQ_S
-#undef BUILTIN_VSDQ_HSI
-#undef BUILTIN_VSDQ_I
-#undef BUILTIN_VSDQ_I_BHSI
-#undef BUILTIN_VSDQ_I_DI
-#undef BUILTIN_VSD_HSI
-#undef BUILTIN_VSQN_HSDI
-#undef BUILTIN_VSTRUCT
-#undef CF
-#undef VAR1
-#undef VAR2
-#undef VAR3
-#undef VAR4
-#undef VAR5
-#undef VAR6
-#undef VAR7
-#undef VAR8
-#undef VAR9
-#undef VAR10
-#undef VAR11
-
-static GTY(()) tree aarch64_builtin_decls[AARCH64_BUILTIN_MAX];
-
-#define NUM_DREG_TYPES 6
-#define NUM_QREG_TYPES 6
-
-static void
-aarch64_init_simd_builtins (void)
-{
- unsigned int i, fcode = AARCH64_SIMD_BUILTIN_BASE + 1;
-
- /* Scalar type nodes. */
- tree aarch64_simd_intQI_type_node;
- tree aarch64_simd_intHI_type_node;
- tree aarch64_simd_polyQI_type_node;
- tree aarch64_simd_polyHI_type_node;
- tree aarch64_simd_intSI_type_node;
- tree aarch64_simd_intDI_type_node;
- tree aarch64_simd_float_type_node;
- tree aarch64_simd_double_type_node;
-
- /* Pointer to scalar type nodes. */
- tree intQI_pointer_node;
- tree intHI_pointer_node;
- tree intSI_pointer_node;
- tree intDI_pointer_node;
- tree float_pointer_node;
- tree double_pointer_node;
-
- /* Const scalar type nodes. */
- tree const_intQI_node;
- tree const_intHI_node;
- tree const_intSI_node;
- tree const_intDI_node;
- tree const_float_node;
- tree const_double_node;
-
- /* Pointer to const scalar type nodes. */
- tree const_intQI_pointer_node;
- tree const_intHI_pointer_node;
- tree const_intSI_pointer_node;
- tree const_intDI_pointer_node;
- tree const_float_pointer_node;
- tree const_double_pointer_node;
-
- /* Vector type nodes. */
- tree V8QI_type_node;
- tree V4HI_type_node;
- tree V2SI_type_node;
- tree V2SF_type_node;
- tree V16QI_type_node;
- tree V8HI_type_node;
- tree V4SI_type_node;
- tree V4SF_type_node;
- tree V2DI_type_node;
- tree V2DF_type_node;
-
- /* Scalar unsigned type nodes. */
- tree intUQI_type_node;
- tree intUHI_type_node;
- tree intUSI_type_node;
- tree intUDI_type_node;
-
- /* Opaque integer types for structures of vectors. */
- tree intEI_type_node;
- tree intOI_type_node;
- tree intCI_type_node;
- tree intXI_type_node;
-
- /* Pointer to vector type nodes. */
- tree V8QI_pointer_node;
- tree V4HI_pointer_node;
- tree V2SI_pointer_node;
- tree V2SF_pointer_node;
- tree V16QI_pointer_node;
- tree V8HI_pointer_node;
- tree V4SI_pointer_node;
- tree V4SF_pointer_node;
- tree V2DI_pointer_node;
- tree V2DF_pointer_node;
-
- /* Operations which return results as pairs. */
- tree void_ftype_pv8qi_v8qi_v8qi;
- tree void_ftype_pv4hi_v4hi_v4hi;
- tree void_ftype_pv2si_v2si_v2si;
- tree void_ftype_pv2sf_v2sf_v2sf;
- tree void_ftype_pdi_di_di;
- tree void_ftype_pv16qi_v16qi_v16qi;
- tree void_ftype_pv8hi_v8hi_v8hi;
- tree void_ftype_pv4si_v4si_v4si;
- tree void_ftype_pv4sf_v4sf_v4sf;
- tree void_ftype_pv2di_v2di_v2di;
- tree void_ftype_pv2df_v2df_v2df;
-
- tree reinterp_ftype_dreg[NUM_DREG_TYPES][NUM_DREG_TYPES];
- tree reinterp_ftype_qreg[NUM_QREG_TYPES][NUM_QREG_TYPES];
- tree dreg_types[NUM_DREG_TYPES], qreg_types[NUM_QREG_TYPES];
-
- /* Create distinguished type nodes for AARCH64_SIMD vector element types,
- and pointers to values of such types, so we can detect them later. */
- aarch64_simd_intQI_type_node =
- make_signed_type (GET_MODE_PRECISION (QImode));
- aarch64_simd_intHI_type_node =
- make_signed_type (GET_MODE_PRECISION (HImode));
- aarch64_simd_polyQI_type_node =
- make_signed_type (GET_MODE_PRECISION (QImode));
- aarch64_simd_polyHI_type_node =
- make_signed_type (GET_MODE_PRECISION (HImode));
- aarch64_simd_intSI_type_node =
- make_signed_type (GET_MODE_PRECISION (SImode));
- aarch64_simd_intDI_type_node =
- make_signed_type (GET_MODE_PRECISION (DImode));
- aarch64_simd_float_type_node = make_node (REAL_TYPE);
- aarch64_simd_double_type_node = make_node (REAL_TYPE);
- TYPE_PRECISION (aarch64_simd_float_type_node) = FLOAT_TYPE_SIZE;
- TYPE_PRECISION (aarch64_simd_double_type_node) = DOUBLE_TYPE_SIZE;
- layout_type (aarch64_simd_float_type_node);
- layout_type (aarch64_simd_double_type_node);
-
- /* Define typedefs which exactly correspond to the modes we are basing vector
- types on. If you change these names you'll need to change
- the table used by aarch64_mangle_type too. */
- (*lang_hooks.types.register_builtin_type) (aarch64_simd_intQI_type_node,
- "__builtin_aarch64_simd_qi");
- (*lang_hooks.types.register_builtin_type) (aarch64_simd_intHI_type_node,
- "__builtin_aarch64_simd_hi");
- (*lang_hooks.types.register_builtin_type) (aarch64_simd_intSI_type_node,
- "__builtin_aarch64_simd_si");
- (*lang_hooks.types.register_builtin_type) (aarch64_simd_float_type_node,
- "__builtin_aarch64_simd_sf");
- (*lang_hooks.types.register_builtin_type) (aarch64_simd_intDI_type_node,
- "__builtin_aarch64_simd_di");
- (*lang_hooks.types.register_builtin_type) (aarch64_simd_double_type_node,
- "__builtin_aarch64_simd_df");
- (*lang_hooks.types.register_builtin_type) (aarch64_simd_polyQI_type_node,
- "__builtin_aarch64_simd_poly8");
- (*lang_hooks.types.register_builtin_type) (aarch64_simd_polyHI_type_node,
- "__builtin_aarch64_simd_poly16");
-
- intQI_pointer_node = build_pointer_type (aarch64_simd_intQI_type_node);
- intHI_pointer_node = build_pointer_type (aarch64_simd_intHI_type_node);
- intSI_pointer_node = build_pointer_type (aarch64_simd_intSI_type_node);
- intDI_pointer_node = build_pointer_type (aarch64_simd_intDI_type_node);
- float_pointer_node = build_pointer_type (aarch64_simd_float_type_node);
- double_pointer_node = build_pointer_type (aarch64_simd_double_type_node);
-
- /* Next create constant-qualified versions of the above types. */
- const_intQI_node = build_qualified_type (aarch64_simd_intQI_type_node,
- TYPE_QUAL_CONST);
- const_intHI_node = build_qualified_type (aarch64_simd_intHI_type_node,
- TYPE_QUAL_CONST);
- const_intSI_node = build_qualified_type (aarch64_simd_intSI_type_node,
- TYPE_QUAL_CONST);
- const_intDI_node = build_qualified_type (aarch64_simd_intDI_type_node,
- TYPE_QUAL_CONST);
- const_float_node = build_qualified_type (aarch64_simd_float_type_node,
- TYPE_QUAL_CONST);
- const_double_node = build_qualified_type (aarch64_simd_double_type_node,
- TYPE_QUAL_CONST);
-
- const_intQI_pointer_node = build_pointer_type (const_intQI_node);
- const_intHI_pointer_node = build_pointer_type (const_intHI_node);
- const_intSI_pointer_node = build_pointer_type (const_intSI_node);
- const_intDI_pointer_node = build_pointer_type (const_intDI_node);
- const_float_pointer_node = build_pointer_type (const_float_node);
- const_double_pointer_node = build_pointer_type (const_double_node);
-
- /* Now create vector types based on our AARCH64 SIMD element types. */
- /* 64-bit vectors. */
- V8QI_type_node =
- build_vector_type_for_mode (aarch64_simd_intQI_type_node, V8QImode);
- V4HI_type_node =
- build_vector_type_for_mode (aarch64_simd_intHI_type_node, V4HImode);
- V2SI_type_node =
- build_vector_type_for_mode (aarch64_simd_intSI_type_node, V2SImode);
- V2SF_type_node =
- build_vector_type_for_mode (aarch64_simd_float_type_node, V2SFmode);
- /* 128-bit vectors. */
- V16QI_type_node =
- build_vector_type_for_mode (aarch64_simd_intQI_type_node, V16QImode);
- V8HI_type_node =
- build_vector_type_for_mode (aarch64_simd_intHI_type_node, V8HImode);
- V4SI_type_node =
- build_vector_type_for_mode (aarch64_simd_intSI_type_node, V4SImode);
- V4SF_type_node =
- build_vector_type_for_mode (aarch64_simd_float_type_node, V4SFmode);
- V2DI_type_node =
- build_vector_type_for_mode (aarch64_simd_intDI_type_node, V2DImode);
- V2DF_type_node =
- build_vector_type_for_mode (aarch64_simd_double_type_node, V2DFmode);
-
- /* Unsigned integer types for various mode sizes. */
- intUQI_type_node = make_unsigned_type (GET_MODE_PRECISION (QImode));
- intUHI_type_node = make_unsigned_type (GET_MODE_PRECISION (HImode));
- intUSI_type_node = make_unsigned_type (GET_MODE_PRECISION (SImode));
- intUDI_type_node = make_unsigned_type (GET_MODE_PRECISION (DImode));
-
- (*lang_hooks.types.register_builtin_type) (intUQI_type_node,
- "__builtin_aarch64_simd_uqi");
- (*lang_hooks.types.register_builtin_type) (intUHI_type_node,
- "__builtin_aarch64_simd_uhi");
- (*lang_hooks.types.register_builtin_type) (intUSI_type_node,
- "__builtin_aarch64_simd_usi");
- (*lang_hooks.types.register_builtin_type) (intUDI_type_node,
- "__builtin_aarch64_simd_udi");
-
- /* Opaque integer types for structures of vectors. */
- intEI_type_node = make_signed_type (GET_MODE_PRECISION (EImode));
- intOI_type_node = make_signed_type (GET_MODE_PRECISION (OImode));
- intCI_type_node = make_signed_type (GET_MODE_PRECISION (CImode));
- intXI_type_node = make_signed_type (GET_MODE_PRECISION (XImode));
-
- (*lang_hooks.types.register_builtin_type) (intTI_type_node,
- "__builtin_aarch64_simd_ti");
- (*lang_hooks.types.register_builtin_type) (intEI_type_node,
- "__builtin_aarch64_simd_ei");
- (*lang_hooks.types.register_builtin_type) (intOI_type_node,
- "__builtin_aarch64_simd_oi");
- (*lang_hooks.types.register_builtin_type) (intCI_type_node,
- "__builtin_aarch64_simd_ci");
- (*lang_hooks.types.register_builtin_type) (intXI_type_node,
- "__builtin_aarch64_simd_xi");
-
- /* Pointers to vector types. */
- V8QI_pointer_node = build_pointer_type (V8QI_type_node);
- V4HI_pointer_node = build_pointer_type (V4HI_type_node);
- V2SI_pointer_node = build_pointer_type (V2SI_type_node);
- V2SF_pointer_node = build_pointer_type (V2SF_type_node);
- V16QI_pointer_node = build_pointer_type (V16QI_type_node);
- V8HI_pointer_node = build_pointer_type (V8HI_type_node);
- V4SI_pointer_node = build_pointer_type (V4SI_type_node);
- V4SF_pointer_node = build_pointer_type (V4SF_type_node);
- V2DI_pointer_node = build_pointer_type (V2DI_type_node);
- V2DF_pointer_node = build_pointer_type (V2DF_type_node);
-
- /* Operations which return results as pairs. */
- void_ftype_pv8qi_v8qi_v8qi =
- build_function_type_list (void_type_node, V8QI_pointer_node,
- V8QI_type_node, V8QI_type_node, NULL);
- void_ftype_pv4hi_v4hi_v4hi =
- build_function_type_list (void_type_node, V4HI_pointer_node,
- V4HI_type_node, V4HI_type_node, NULL);
- void_ftype_pv2si_v2si_v2si =
- build_function_type_list (void_type_node, V2SI_pointer_node,
- V2SI_type_node, V2SI_type_node, NULL);
- void_ftype_pv2sf_v2sf_v2sf =
- build_function_type_list (void_type_node, V2SF_pointer_node,
- V2SF_type_node, V2SF_type_node, NULL);
- void_ftype_pdi_di_di =
- build_function_type_list (void_type_node, intDI_pointer_node,
- aarch64_simd_intDI_type_node,
- aarch64_simd_intDI_type_node, NULL);
- void_ftype_pv16qi_v16qi_v16qi =
- build_function_type_list (void_type_node, V16QI_pointer_node,
- V16QI_type_node, V16QI_type_node, NULL);
- void_ftype_pv8hi_v8hi_v8hi =
- build_function_type_list (void_type_node, V8HI_pointer_node,
- V8HI_type_node, V8HI_type_node, NULL);
- void_ftype_pv4si_v4si_v4si =
- build_function_type_list (void_type_node, V4SI_pointer_node,
- V4SI_type_node, V4SI_type_node, NULL);
- void_ftype_pv4sf_v4sf_v4sf =
- build_function_type_list (void_type_node, V4SF_pointer_node,
- V4SF_type_node, V4SF_type_node, NULL);
- void_ftype_pv2di_v2di_v2di =
- build_function_type_list (void_type_node, V2DI_pointer_node,
- V2DI_type_node, V2DI_type_node, NULL);
- void_ftype_pv2df_v2df_v2df =
- build_function_type_list (void_type_node, V2DF_pointer_node,
- V2DF_type_node, V2DF_type_node, NULL);
-
- dreg_types[0] = V8QI_type_node;
- dreg_types[1] = V4HI_type_node;
- dreg_types[2] = V2SI_type_node;
- dreg_types[3] = V2SF_type_node;
- dreg_types[4] = aarch64_simd_intDI_type_node;
- dreg_types[5] = aarch64_simd_double_type_node;
-
- qreg_types[0] = V16QI_type_node;
- qreg_types[1] = V8HI_type_node;
- qreg_types[2] = V4SI_type_node;
- qreg_types[3] = V4SF_type_node;
- qreg_types[4] = V2DI_type_node;
- qreg_types[5] = V2DF_type_node;
-
- /* If NUM_DREG_TYPES != NUM_QREG_TYPES, we will need separate nested loops
- for qreg and dreg reinterp inits. */
- for (i = 0; i < NUM_DREG_TYPES; i++)
- {
- int j;
- for (j = 0; j < NUM_DREG_TYPES; j++)
- {
- reinterp_ftype_dreg[i][j]
- = build_function_type_list (dreg_types[i], dreg_types[j], NULL);
- reinterp_ftype_qreg[i][j]
- = build_function_type_list (qreg_types[i], qreg_types[j], NULL);
- }
- }
-
- for (i = 0; i < ARRAY_SIZE (aarch64_simd_builtin_data); i++, fcode++)
- {
- aarch64_simd_builtin_datum *d = &aarch64_simd_builtin_data[i];
- const char *const modenames[] =
- {
- "v8qi", "v4hi", "v2si", "v2sf", "di", "df",
- "v16qi", "v8hi", "v4si", "v4sf", "v2di", "v2df",
- "ti", "ei", "oi", "xi", "si", "hi", "qi"
- };
- char namebuf[60];
- tree ftype = NULL;
- tree fndecl = NULL;
- int is_load = 0;
- int is_store = 0;
-
- gcc_assert (ARRAY_SIZE (modenames) == T_MAX);
-
- d->fcode = fcode;
-
- switch (d->itype)
- {
- case AARCH64_SIMD_LOAD1:
- case AARCH64_SIMD_LOAD1LANE:
- case AARCH64_SIMD_LOADSTRUCT:
- case AARCH64_SIMD_LOADSTRUCTLANE:
- is_load = 1;
- /* Fall through. */
- case AARCH64_SIMD_STORE1:
- case AARCH64_SIMD_STORE1LANE:
- case AARCH64_SIMD_STORESTRUCT:
- case AARCH64_SIMD_STORESTRUCTLANE:
- if (!is_load)
- is_store = 1;
- /* Fall through. */
- case AARCH64_SIMD_UNOP:
- case AARCH64_SIMD_BINOP:
- case AARCH64_SIMD_TERNOP:
- case AARCH64_SIMD_QUADOP:
- case AARCH64_SIMD_COMBINE:
- case AARCH64_SIMD_CONVERT:
- case AARCH64_SIMD_CREATE:
- case AARCH64_SIMD_DUP:
- case AARCH64_SIMD_DUPLANE:
- case AARCH64_SIMD_FIXCONV:
- case AARCH64_SIMD_GETLANE:
- case AARCH64_SIMD_LANEMAC:
- case AARCH64_SIMD_LANEMUL:
- case AARCH64_SIMD_LANEMULH:
- case AARCH64_SIMD_LANEMULL:
- case AARCH64_SIMD_LOGICBINOP:
- case AARCH64_SIMD_SCALARMAC:
- case AARCH64_SIMD_SCALARMUL:
- case AARCH64_SIMD_SCALARMULH:
- case AARCH64_SIMD_SCALARMULL:
- case AARCH64_SIMD_SELECT:
- case AARCH64_SIMD_SETLANE:
- case AARCH64_SIMD_SHIFTACC:
- case AARCH64_SIMD_SHIFTIMM:
- case AARCH64_SIMD_SHIFTINSERT:
- case AARCH64_SIMD_SPLIT:
- case AARCH64_SIMD_VTBL:
- case AARCH64_SIMD_VTBX:
- {
- int k;
- tree return_type = void_type_node, args = void_list_node;
- tree eltype;
- /* Build a function type directly from the insn_data for this
- builtin. The build_function_type () function takes care of
- removing duplicates for us. */
-
- for (k = insn_data[d->code].n_operands -1; k >= 0; k--)
- {
- /* Skip an internal operand for vget_{low, high}. */
- if (k == 2 && d->itype == AARCH64_SIMD_SPLIT)
- continue;
-
- if (is_load && k == 1)
- {
- /* AdvSIMD load patterns always have the memory operand
- (a DImode pointer) in the operand 1 position. We
- want a const pointer to the element type in that
- position. */
- gcc_assert (insn_data[d->code].operand[k].mode == DImode);
-
- switch (d->mode)
- {
- case T_V8QI:
- case T_V16QI:
- eltype = const_intQI_pointer_node;
- break;
-
- case T_V4HI:
- case T_V8HI:
- eltype = const_intHI_pointer_node;
- break;
-
- case T_V2SI:
- case T_V4SI:
- eltype = const_intSI_pointer_node;
- break;
-
- case T_V2SF:
- case T_V4SF:
- eltype = const_float_pointer_node;
- break;
-
- case T_DI:
- case T_V2DI:
- eltype = const_intDI_pointer_node;
- break;
-
- case T_DF:
- case T_V2DF:
- eltype = const_double_pointer_node;
- break;
-
- default:
- gcc_unreachable ();
- }
- }
- else if (is_store && k == 0)
- {
- /* Similarly, AdvSIMD store patterns use operand 0 as
- the memory location to store to (a DImode pointer).
- Use a pointer to the element type of the store in
- that position. */
- gcc_assert (insn_data[d->code].operand[k].mode == DImode);
-
- switch (d->mode)
- {
- case T_V8QI:
- case T_V16QI:
- eltype = intQI_pointer_node;
- break;
-
- case T_V4HI:
- case T_V8HI:
- eltype = intHI_pointer_node;
- break;
-
- case T_V2SI:
- case T_V4SI:
- eltype = intSI_pointer_node;
- break;
-
- case T_V2SF:
- case T_V4SF:
- eltype = float_pointer_node;
- break;
-
- case T_DI:
- case T_V2DI:
- eltype = intDI_pointer_node;
- break;
-
- case T_DF:
- case T_V2DF:
- eltype = double_pointer_node;
- break;
-
- default:
- gcc_unreachable ();
- }
- }
- else
- {
- switch (insn_data[d->code].operand[k].mode)
- {
- case VOIDmode:
- eltype = void_type_node;
- break;
- /* Scalars. */
- case QImode:
- eltype = aarch64_simd_intQI_type_node;
- break;
- case HImode:
- eltype = aarch64_simd_intHI_type_node;
- break;
- case SImode:
- eltype = aarch64_simd_intSI_type_node;
- break;
- case SFmode:
- eltype = aarch64_simd_float_type_node;
- break;
- case DFmode:
- eltype = aarch64_simd_double_type_node;
- break;
- case DImode:
- eltype = aarch64_simd_intDI_type_node;
- break;
- case TImode:
- eltype = intTI_type_node;
- break;
- case EImode:
- eltype = intEI_type_node;
- break;
- case OImode:
- eltype = intOI_type_node;
- break;
- case CImode:
- eltype = intCI_type_node;
- break;
- case XImode:
- eltype = intXI_type_node;
- break;
- /* 64-bit vectors. */
- case V8QImode:
- eltype = V8QI_type_node;
- break;
- case V4HImode:
- eltype = V4HI_type_node;
- break;
- case V2SImode:
- eltype = V2SI_type_node;
- break;
- case V2SFmode:
- eltype = V2SF_type_node;
- break;
- /* 128-bit vectors. */
- case V16QImode:
- eltype = V16QI_type_node;
- break;
- case V8HImode:
- eltype = V8HI_type_node;
- break;
- case V4SImode:
- eltype = V4SI_type_node;
- break;
- case V4SFmode:
- eltype = V4SF_type_node;
- break;
- case V2DImode:
- eltype = V2DI_type_node;
- break;
- case V2DFmode:
- eltype = V2DF_type_node;
- break;
- default:
- gcc_unreachable ();
- }
- }
-
- if (k == 0 && !is_store)
- return_type = eltype;
- else
- args = tree_cons (NULL_TREE, eltype, args);
- }
- ftype = build_function_type (return_type, args);
- }
- break;
-
- case AARCH64_SIMD_RESULTPAIR:
- {
- switch (insn_data[d->code].operand[1].mode)
- {
- case V8QImode:
- ftype = void_ftype_pv8qi_v8qi_v8qi;
- break;
- case V4HImode:
- ftype = void_ftype_pv4hi_v4hi_v4hi;
- break;
- case V2SImode:
- ftype = void_ftype_pv2si_v2si_v2si;
- break;
- case V2SFmode:
- ftype = void_ftype_pv2sf_v2sf_v2sf;
- break;
- case DImode:
- ftype = void_ftype_pdi_di_di;
- break;
- case V16QImode:
- ftype = void_ftype_pv16qi_v16qi_v16qi;
- break;
- case V8HImode:
- ftype = void_ftype_pv8hi_v8hi_v8hi;
- break;
- case V4SImode:
- ftype = void_ftype_pv4si_v4si_v4si;
- break;
- case V4SFmode:
- ftype = void_ftype_pv4sf_v4sf_v4sf;
- break;
- case V2DImode:
- ftype = void_ftype_pv2di_v2di_v2di;
- break;
- case V2DFmode:
- ftype = void_ftype_pv2df_v2df_v2df;
- break;
- default:
- gcc_unreachable ();
- }
- }
- break;
-
- case AARCH64_SIMD_REINTERP:
- {
- /* We iterate over 6 doubleword types, then 6 quadword
- types. */
- int rhs_d = d->mode % NUM_DREG_TYPES;
- int rhs_q = (d->mode - NUM_DREG_TYPES) % NUM_QREG_TYPES;
- switch (insn_data[d->code].operand[0].mode)
- {
- case V8QImode:
- ftype = reinterp_ftype_dreg[0][rhs_d];
- break;
- case V4HImode:
- ftype = reinterp_ftype_dreg[1][rhs_d];
- break;
- case V2SImode:
- ftype = reinterp_ftype_dreg[2][rhs_d];
- break;
- case V2SFmode:
- ftype = reinterp_ftype_dreg[3][rhs_d];
- break;
- case DImode:
- ftype = reinterp_ftype_dreg[4][rhs_d];
- break;
- case DFmode:
- ftype = reinterp_ftype_dreg[5][rhs_d];
- break;
- case V16QImode:
- ftype = reinterp_ftype_qreg[0][rhs_q];
- break;
- case V8HImode:
- ftype = reinterp_ftype_qreg[1][rhs_q];
- break;
- case V4SImode:
- ftype = reinterp_ftype_qreg[2][rhs_q];
- break;
- case V4SFmode:
- ftype = reinterp_ftype_qreg[3][rhs_q];
- break;
- case V2DImode:
- ftype = reinterp_ftype_qreg[4][rhs_q];
- break;
- case V2DFmode:
- ftype = reinterp_ftype_qreg[5][rhs_q];
- break;
- default:
- gcc_unreachable ();
- }
- }
- break;
-
- default:
- gcc_unreachable ();
- }
- gcc_assert (ftype != NULL);
-
- snprintf (namebuf, sizeof (namebuf), "__builtin_aarch64_%s%s",
- d->name, modenames[d->mode]);
-
- fndecl = add_builtin_function (namebuf, ftype, fcode, BUILT_IN_MD,
- NULL, NULL_TREE);
- aarch64_builtin_decls[fcode] = fndecl;
- }
-}
-
-void
-aarch64_init_builtins (void)
-{
- if (TARGET_SIMD)
- aarch64_init_simd_builtins ();
-}
-
-tree
-aarch64_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
-{
- if (code >= AARCH64_BUILTIN_MAX)
- return error_mark_node;
-
- return aarch64_builtin_decls[code];
-}
-
-typedef enum
-{
- SIMD_ARG_COPY_TO_REG,
- SIMD_ARG_CONSTANT,
- SIMD_ARG_STOP
-} builtin_simd_arg;
-
-#define SIMD_MAX_BUILTIN_ARGS 5
-
-static rtx
-aarch64_simd_expand_args (rtx target, int icode, int have_retval,
- tree exp, ...)
-{
- va_list ap;
- rtx pat;
- tree arg[SIMD_MAX_BUILTIN_ARGS];
- rtx op[SIMD_MAX_BUILTIN_ARGS];
- enum machine_mode tmode = insn_data[icode].operand[0].mode;
- enum machine_mode mode[SIMD_MAX_BUILTIN_ARGS];
- int argc = 0;
-
- if (have_retval
- && (!target
- || GET_MODE (target) != tmode
- || !(*insn_data[icode].operand[0].predicate) (target, tmode)))
- target = gen_reg_rtx (tmode);
-
- va_start (ap, exp);
-
- for (;;)
- {
- builtin_simd_arg thisarg = (builtin_simd_arg) va_arg (ap, int);
-
- if (thisarg == SIMD_ARG_STOP)
- break;
- else
- {
- arg[argc] = CALL_EXPR_ARG (exp, argc);
- op[argc] = expand_normal (arg[argc]);
- mode[argc] = insn_data[icode].operand[argc + have_retval].mode;
-
- switch (thisarg)
- {
- case SIMD_ARG_COPY_TO_REG:
- /*gcc_assert (GET_MODE (op[argc]) == mode[argc]); */
- if (!(*insn_data[icode].operand[argc + have_retval].predicate)
- (op[argc], mode[argc]))
- op[argc] = copy_to_mode_reg (mode[argc], op[argc]);
- break;
-
- case SIMD_ARG_CONSTANT:
- if (!(*insn_data[icode].operand[argc + have_retval].predicate)
- (op[argc], mode[argc]))
- error_at (EXPR_LOCATION (exp), "incompatible type for argument %d, "
- "expected %<const int%>", argc + 1);
- break;
-
- case SIMD_ARG_STOP:
- gcc_unreachable ();
- }
-
- argc++;
- }
- }
-
- va_end (ap);
-
- if (have_retval)
- switch (argc)
- {
- case 1:
- pat = GEN_FCN (icode) (target, op[0]);
- break;
-
- case 2:
- pat = GEN_FCN (icode) (target, op[0], op[1]);
- break;
-
- case 3:
- pat = GEN_FCN (icode) (target, op[0], op[1], op[2]);
- break;
-
- case 4:
- pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3]);
- break;
-
- case 5:
- pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3], op[4]);
- break;
-
- default:
- gcc_unreachable ();
- }
- else
- switch (argc)
- {
- case 1:
- pat = GEN_FCN (icode) (op[0]);
- break;
-
- case 2:
- pat = GEN_FCN (icode) (op[0], op[1]);
- break;
-
- case 3:
- pat = GEN_FCN (icode) (op[0], op[1], op[2]);
- break;
-
- case 4:
- pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
- break;
-
- case 5:
- pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4]);
- break;
-
- default:
- gcc_unreachable ();
- }
-
- if (!pat)
- return 0;
-
- emit_insn (pat);
-
- return target;
-}
-
-/* Expand an AArch64 AdvSIMD builtin(intrinsic). */
-rtx
-aarch64_simd_expand_builtin (int fcode, tree exp, rtx target)
-{
- aarch64_simd_builtin_datum *d =
- &aarch64_simd_builtin_data[fcode - (AARCH64_SIMD_BUILTIN_BASE + 1)];
- aarch64_simd_itype itype = d->itype;
- enum insn_code icode = d->code;
-
- switch (itype)
- {
- case AARCH64_SIMD_UNOP:
- return aarch64_simd_expand_args (target, icode, 1, exp,
- SIMD_ARG_COPY_TO_REG,
- SIMD_ARG_STOP);
-
- case AARCH64_SIMD_BINOP:
- {
- rtx arg2 = expand_normal (CALL_EXPR_ARG (exp, 1));
- /* Handle constants only if the predicate allows it. */
- bool op1_const_int_p =
- (CONST_INT_P (arg2)
- && (*insn_data[icode].operand[2].predicate)
- (arg2, insn_data[icode].operand[2].mode));
- return aarch64_simd_expand_args
- (target, icode, 1, exp,
- SIMD_ARG_COPY_TO_REG,
- op1_const_int_p ? SIMD_ARG_CONSTANT : SIMD_ARG_COPY_TO_REG,
- SIMD_ARG_STOP);
- }
-
- case AARCH64_SIMD_TERNOP:
- return aarch64_simd_expand_args (target, icode, 1, exp,
- SIMD_ARG_COPY_TO_REG,
- SIMD_ARG_COPY_TO_REG,
- SIMD_ARG_COPY_TO_REG,
- SIMD_ARG_STOP);
-
- case AARCH64_SIMD_QUADOP:
- return aarch64_simd_expand_args (target, icode, 1, exp,
- SIMD_ARG_COPY_TO_REG,
- SIMD_ARG_COPY_TO_REG,
- SIMD_ARG_COPY_TO_REG,
- SIMD_ARG_COPY_TO_REG,
- SIMD_ARG_STOP);
- case AARCH64_SIMD_LOAD1:
- case AARCH64_SIMD_LOADSTRUCT:
- return aarch64_simd_expand_args (target, icode, 1, exp,
- SIMD_ARG_COPY_TO_REG, SIMD_ARG_STOP);
-
- case AARCH64_SIMD_STORESTRUCT:
- return aarch64_simd_expand_args (target, icode, 0, exp,
- SIMD_ARG_COPY_TO_REG,
- SIMD_ARG_COPY_TO_REG, SIMD_ARG_STOP);
-
- case AARCH64_SIMD_REINTERP:
- return aarch64_simd_expand_args (target, icode, 1, exp,
- SIMD_ARG_COPY_TO_REG, SIMD_ARG_STOP);
-
- case AARCH64_SIMD_CREATE:
- return aarch64_simd_expand_args (target, icode, 1, exp,
- SIMD_ARG_COPY_TO_REG, SIMD_ARG_STOP);
-
- case AARCH64_SIMD_COMBINE:
- return aarch64_simd_expand_args (target, icode, 1, exp,
- SIMD_ARG_COPY_TO_REG,
- SIMD_ARG_COPY_TO_REG, SIMD_ARG_STOP);
-
- case AARCH64_SIMD_GETLANE:
- return aarch64_simd_expand_args (target, icode, 1, exp,
- SIMD_ARG_COPY_TO_REG,
- SIMD_ARG_CONSTANT,
- SIMD_ARG_STOP);
-
- case AARCH64_SIMD_SETLANE:
- return aarch64_simd_expand_args (target, icode, 1, exp,
- SIMD_ARG_COPY_TO_REG,
- SIMD_ARG_COPY_TO_REG,
- SIMD_ARG_CONSTANT,
- SIMD_ARG_STOP);
-
- case AARCH64_SIMD_SHIFTIMM:
- return aarch64_simd_expand_args (target, icode, 1, exp,
- SIMD_ARG_COPY_TO_REG,
- SIMD_ARG_CONSTANT,
- SIMD_ARG_STOP);
-
- case AARCH64_SIMD_SHIFTACC:
- case AARCH64_SIMD_SHIFTINSERT:
- return aarch64_simd_expand_args (target, icode, 1, exp,
- SIMD_ARG_COPY_TO_REG,
- SIMD_ARG_COPY_TO_REG,
- SIMD_ARG_CONSTANT,
- SIMD_ARG_STOP);
-
- default:
- gcc_unreachable ();
- }
-}
-
-/* Expand an expression EXP that calls a built-in function,
- with result going to TARGET if that's convenient. */
-rtx
-aarch64_expand_builtin (tree exp,
- rtx target,
- rtx subtarget ATTRIBUTE_UNUSED,
- enum machine_mode mode ATTRIBUTE_UNUSED,
- int ignore ATTRIBUTE_UNUSED)
-{
- tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
- int fcode = DECL_FUNCTION_CODE (fndecl);
-
- if (fcode >= AARCH64_SIMD_BUILTIN_BASE)
- return aarch64_simd_expand_builtin (fcode, exp, target);
-
- return NULL_RTX;
-}
-
-tree
-aarch64_builtin_vectorized_function (tree fndecl, tree type_out, tree type_in)
-{
- enum machine_mode in_mode, out_mode;
- int in_n, out_n;
-
- if (TREE_CODE (type_out) != VECTOR_TYPE
- || TREE_CODE (type_in) != VECTOR_TYPE)
- return NULL_TREE;
-
- out_mode = TYPE_MODE (TREE_TYPE (type_out));
- out_n = TYPE_VECTOR_SUBPARTS (type_out);
- in_mode = TYPE_MODE (TREE_TYPE (type_in));
- in_n = TYPE_VECTOR_SUBPARTS (type_in);
-
-#undef AARCH64_CHECK_BUILTIN_MODE
-#define AARCH64_CHECK_BUILTIN_MODE(C, N) 1
-#define AARCH64_FIND_FRINT_VARIANT(N) \
- (AARCH64_CHECK_BUILTIN_MODE (2, D) \
- ? aarch64_builtin_decls[AARCH64_SIMD_BUILTIN_##N##v2df] \
- : (AARCH64_CHECK_BUILTIN_MODE (4, S) \
- ? aarch64_builtin_decls[AARCH64_SIMD_BUILTIN_##N##v4sf] \
- : (AARCH64_CHECK_BUILTIN_MODE (2, S) \
- ? aarch64_builtin_decls[AARCH64_SIMD_BUILTIN_##N##v2sf] \
- : NULL_TREE)))
- if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
- {
- enum built_in_function fn = DECL_FUNCTION_CODE (fndecl);
- switch (fn)
- {
-#undef AARCH64_CHECK_BUILTIN_MODE
-#define AARCH64_CHECK_BUILTIN_MODE(C, N) \
- (out_mode == N##Fmode && out_n == C \
- && in_mode == N##Fmode && in_n == C)
- case BUILT_IN_FLOOR:
- case BUILT_IN_FLOORF:
- return AARCH64_FIND_FRINT_VARIANT (frintm);
- case BUILT_IN_CEIL:
- case BUILT_IN_CEILF:
- return AARCH64_FIND_FRINT_VARIANT (frintp);
- case BUILT_IN_TRUNC:
- case BUILT_IN_TRUNCF:
- return AARCH64_FIND_FRINT_VARIANT (frintz);
- case BUILT_IN_ROUND:
- case BUILT_IN_ROUNDF:
- return AARCH64_FIND_FRINT_VARIANT (frinta);
- case BUILT_IN_NEARBYINT:
- case BUILT_IN_NEARBYINTF:
- return AARCH64_FIND_FRINT_VARIANT (frinti);
- case BUILT_IN_SQRT:
- case BUILT_IN_SQRTF:
- return AARCH64_FIND_FRINT_VARIANT (sqrt);
-#undef AARCH64_CHECK_BUILTIN_MODE
-#define AARCH64_CHECK_BUILTIN_MODE(C, N) \
- (out_mode == N##Imode && out_n == C \
- && in_mode == N##Fmode && in_n == C)
- case BUILT_IN_LFLOOR:
- return AARCH64_FIND_FRINT_VARIANT (fcvtms);
- case BUILT_IN_LCEIL:
- return AARCH64_FIND_FRINT_VARIANT (fcvtps);
- default:
- return NULL_TREE;
- }
- }
-
- return NULL_TREE;
-}
-#undef AARCH64_CHECK_BUILTIN_MODE
-#undef AARCH64_FIND_FRINT_VARIANT
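
The registration scheme in aarch64-builtins.c expands aarch64-simd-builtins.def twice through the VAR*/BUILTIN_* macros: once to fill aarch64_simd_builtin_data[] with (name, insn code, mode) records, and once more, after redefining VAR1, to generate matching enumerators in enum aarch64_builtins, so a builtin's function code minus (AARCH64_SIMD_BUILTIN_BASE + 1) is its index into the data table. Below is a reduced, self-contained sketch of that dual-expansion idiom; all names are toy names, and the two entries are written inline rather than pulled in with #include.

/* Toy version of the dual-expansion idiom; all names are illustrative.  */
struct my_builtin_datum
{
  const char *name;
  int icode;                       /* stand-in for enum insn_code */
};

/* First expansion: each entry becomes a row in the data table.  */
#define DEF_OP(NAME, ICODE) { #NAME, ICODE },
static const struct my_builtin_datum my_builtin_data[] =
{
  DEF_OP (addv4si, 101)
  DEF_OP (subv4si, 102)
};
#undef DEF_OP

/* Second expansion: the same entries become enumerators, so
   MY_BUILTIN_addv4si - (MY_BUILTIN_BASE + 1) indexes the row above.  */
#define DEF_OP(NAME, ICODE) MY_BUILTIN_##NAME,
enum my_builtins
{
  MY_BUILTIN_BASE,
  DEF_OP (addv4si, 101)
  DEF_OP (subv4si, 102)
  MY_BUILTIN_MAX
};
#undef DEF_OP
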
diff --git a/gcc-4.8.1/gcc/config/aarch64/aarch64-cores.def b/gcc-4.8.1/gcc/config/aarch64/aarch64-cores.def
deleted file mode 100644
index c840aa016..000000000
--- a/gcc-4.8.1/gcc/config/aarch64/aarch64-cores.def
+++ /dev/null
@@ -1,40 +0,0 @@
-/* Copyright (C) 2011-2013 Free Software Foundation, Inc.
- Contributed by ARM Ltd.
-
- This file is part of GCC.
-
- GCC is free software; you can redistribute it and/or modify it
- under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 3, or (at your option)
- any later version.
-
- GCC is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with GCC; see the file COPYING3. If not see
- <http://www.gnu.org/licenses/>. */
-
-/* This is a list of cores that implement AArch64.
-
- Before using #include to read this file, define a macro:
-
- AARCH64_CORE(CORE_NAME, CORE_IDENT, ARCH, FLAGS, COSTS)
-
- The CORE_NAME is the name of the core, represented as a string constant.
- The CORE_IDENT is the name of the core, represented as an identifier.
- ARCH is the architecture revision implemented by the chip.
- FLAGS are the bitwise-or of the traits that apply to that core.
- This need not include flags implied by the architecture.
- COSTS is the name of the rtx_costs routine to use. */
-
-/* V8 Architecture Processors.
- This list currently contains example CPUs that implement AArch64, and
- therefore serves as a template for adding more CPUs in the future. */
-
-AARCH64_CORE("cortex-a53", cortexa53, 8, AARCH64_FL_FPSIMD, generic)
-AARCH64_CORE("cortex-a57", cortexa57, 8, AARCH64_FL_FPSIMD, generic)
-AARCH64_CORE("example-1", large, 8, AARCH64_FL_FPSIMD, generic)
-AARCH64_CORE("example-2", small, 8, AARCH64_FL_FPSIMD, generic)
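
aarch64-cores.def is consumed the same way: aarch64-opts.h (later in this diff) expands AARCH64_CORE into the enumerators of enum aarch64_processor, and the back end can build a parallel table to search when handling -mcpu=/-mtune=. A hedged sketch of such a lookup follows; the struct, table, and function names are illustrative, and enum aarch64_processor is assumed from aarch64-opts.h.

/* Illustrative -mcpu string lookup over a table built from
   aarch64-cores.def; names are hypothetical.  */
#include <string.h>

struct core_entry
{
  const char *name;               /* core name, e.g. "cortex-a53" */
  enum aarch64_processor ident;   /* core identifier */
  int arch;                       /* architecture revision */
  unsigned long flags;            /* implied feature flags */
};

#define AARCH64_CORE(NAME, IDENT, ARCH, FLAGS, COSTS) \
  { NAME, IDENT, ARCH, FLAGS },
static const struct core_entry all_cores[] =
{
#include "aarch64-cores.def"
  { NULL, aarch64_none, 0, 0 }
};
#undef AARCH64_CORE

static const struct core_entry *
find_core (const char *cpu_string)
{
  const struct core_entry *c;
  for (c = all_cores; c->name != NULL; c++)
    if (strcmp (c->name, cpu_string) == 0)
      return c;
  return NULL;   /* unknown -mcpu value */
}
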
diff --git a/gcc-4.8.1/gcc/config/aarch64/aarch64-elf-raw.h b/gcc-4.8.1/gcc/config/aarch64/aarch64-elf-raw.h
deleted file mode 100644
index 1cd0155fc..000000000
--- a/gcc-4.8.1/gcc/config/aarch64/aarch64-elf-raw.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* Machine description for AArch64 architecture.
- Copyright (C) 2009-2013 Free Software Foundation, Inc.
- Contributed by ARM Ltd.
-
- This file is part of GCC.
-
- GCC is free software; you can redistribute it and/or modify it
- under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 3, or (at your option)
- any later version.
-
- GCC is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with GCC; see the file COPYING3. If not see
- <http://www.gnu.org/licenses/>. */
-
-/* Support for bare-metal builds. */
-#ifndef GCC_AARCH64_ELF_RAW_H
-#define GCC_AARCH64_ELF_RAW_H
-
-#define STARTFILE_SPEC " crti%O%s crtbegin%O%s crt0%O%s"
-#define ENDFILE_SPEC " crtend%O%s crtn%O%s"
-
-#ifndef LINK_SPEC
-#define LINK_SPEC "%{mbig-endian:-EB} %{mlittle-endian:-EL} -X"
-#endif
-
-#endif /* GCC_AARCH64_ELF_RAW_H */
diff --git a/gcc-4.8.1/gcc/config/aarch64/aarch64-elf.h b/gcc-4.8.1/gcc/config/aarch64/aarch64-elf.h
deleted file mode 100644
index db08031b1..000000000
--- a/gcc-4.8.1/gcc/config/aarch64/aarch64-elf.h
+++ /dev/null
@@ -1,132 +0,0 @@
-/* Machine description for AArch64 architecture.
- Copyright (C) 2009-2013 Free Software Foundation, Inc.
- Contributed by ARM Ltd.
-
- This file is part of GCC.
-
- GCC is free software; you can redistribute it and/or modify it
- under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 3, or (at your option)
- any later version.
-
- GCC is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with GCC; see the file COPYING3. If not see
- <http://www.gnu.org/licenses/>. */
-
-#ifndef GCC_AARCH64_ELF_H
-#define GCC_AARCH64_ELF_H
-
-
-#define ASM_OUTPUT_LABELREF(FILE, NAME) \
- aarch64_asm_output_labelref (FILE, NAME)
-
-#define ASM_OUTPUT_DEF(FILE, NAME1, NAME2) \
- do \
- { \
- assemble_name (FILE, NAME1); \
- fputs (" = ", FILE); \
- assemble_name (FILE, NAME2); \
- fputc ('\n', FILE); \
- } while (0)
-
-#define TEXT_SECTION_ASM_OP "\t.text"
-#define DATA_SECTION_ASM_OP "\t.data"
-#define BSS_SECTION_ASM_OP "\t.bss"
-
-#define CTORS_SECTION_ASM_OP "\t.section\t.init_array,\"aw\",%init_array"
-#define DTORS_SECTION_ASM_OP "\t.section\t.fini_array,\"aw\",%fini_array"
-
-#undef INIT_SECTION_ASM_OP
-#undef FINI_SECTION_ASM_OP
-#define INIT_ARRAY_SECTION_ASM_OP CTORS_SECTION_ASM_OP
-#define FINI_ARRAY_SECTION_ASM_OP DTORS_SECTION_ASM_OP
-
-/* Since we use .init_array/.fini_array we don't need the markers at
- the start and end of the ctors/dtors arrays. */
-#define CTOR_LIST_BEGIN asm (CTORS_SECTION_ASM_OP)
-#define CTOR_LIST_END /* empty */
-#define DTOR_LIST_BEGIN asm (DTORS_SECTION_ASM_OP)
-#define DTOR_LIST_END /* empty */
-
-#undef TARGET_ASM_CONSTRUCTOR
-#define TARGET_ASM_CONSTRUCTOR aarch64_elf_asm_constructor
-
-#undef TARGET_ASM_DESTRUCTOR
-#define TARGET_ASM_DESTRUCTOR aarch64_elf_asm_destructor
-
-#ifdef HAVE_GAS_MAX_SKIP_P2ALIGN
-/* Support for -falign-* switches. Use .p2align to ensure that code
- sections are padded with NOP instructions, rather than zeros. */
-#define ASM_OUTPUT_MAX_SKIP_ALIGN(FILE, LOG, MAX_SKIP) \
- do \
- { \
- if ((LOG) != 0) \
- { \
- if ((MAX_SKIP) == 0) \
- fprintf ((FILE), "\t.p2align %d\n", (int) (LOG)); \
- else \
- fprintf ((FILE), "\t.p2align %d,,%d\n", \
- (int) (LOG), (int) (MAX_SKIP)); \
- } \
- } while (0)
-
-#endif /* HAVE_GAS_MAX_SKIP_P2ALIGN */
-
-#define JUMP_TABLES_IN_TEXT_SECTION 0
-
-#define ASM_OUTPUT_ADDR_DIFF_ELT(STREAM, BODY, VALUE, REL) \
- do { \
- switch (GET_MODE (BODY)) \
- { \
- case QImode: \
- asm_fprintf (STREAM, "\t.byte\t(%LL%d - %LLrtx%d) / 4\n", \
- VALUE, REL); \
- break; \
- case HImode: \
- asm_fprintf (STREAM, "\t.2byte\t(%LL%d - %LLrtx%d) / 4\n", \
- VALUE, REL); \
- break; \
- case SImode: \
- case DImode: /* See comment in aarch64_output_casesi. */ \
- asm_fprintf (STREAM, "\t.word\t(%LL%d - %LLrtx%d) / 4\n", \
- VALUE, REL); \
- break; \
- default: \
- gcc_unreachable (); \
- } \
- } while (0)
-
-#define ASM_OUTPUT_ALIGN(STREAM, POWER) \
- fprintf(STREAM, "\t.align\t%d\n", (int)POWER)
-
-#define ASM_COMMENT_START "//"
-
-#define REGISTER_PREFIX ""
-#define LOCAL_LABEL_PREFIX "."
-#define USER_LABEL_PREFIX ""
-
-#define GLOBAL_ASM_OP "\t.global\t"
-
-#ifndef ASM_SPEC
-#define ASM_SPEC "\
-%{mbig-endian:-EB} \
-%{mlittle-endian:-EL} \
-%{mcpu=*:-mcpu=%*} \
-%{march=*:-march=%*}"
-#endif
-
-#undef TYPE_OPERAND_FMT
-#define TYPE_OPERAND_FMT "%%%s"
-
-#undef TARGET_ASM_NAMED_SECTION
-#define TARGET_ASM_NAMED_SECTION aarch64_elf_asm_named_section
-
-/* Stabs debug not required. */
-#undef DBX_DEBUGGING_INFO
-
-#endif /* GCC_AARCH64_ELF_H */
diff --git a/gcc-4.8.1/gcc/config/aarch64/aarch64-generic.md b/gcc-4.8.1/gcc/config/aarch64/aarch64-generic.md
deleted file mode 100644
index cbb756003..000000000
--- a/gcc-4.8.1/gcc/config/aarch64/aarch64-generic.md
+++ /dev/null
@@ -1,38 +0,0 @@
-;; Machine description for AArch64 architecture.
-;; Copyright (C) 2009-2013 Free Software Foundation, Inc.
-;; Contributed by ARM Ltd.
-;;
-;; This file is part of GCC.
-;;
-;; GCC is free software; you can redistribute it and/or modify it
-;; under the terms of the GNU General Public License as published by
-;; the Free Software Foundation; either version 3, or (at your option)
-;; any later version.
-;;
-;; GCC is distributed in the hope that it will be useful, but
-;; WITHOUT ANY WARRANTY; without even the implied warranty of
-;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-;; General Public License for more details.
-;;
-;; You should have received a copy of the GNU General Public License
-;; along with GCC; see the file COPYING3. If not see
-;; <http://www.gnu.org/licenses/>.
-
-;; Generic scheduler
-
-(define_automaton "aarch64")
-
-(define_cpu_unit "core" "aarch64")
-
-(define_attr "is_load" "yes,no"
- (if_then_else (eq_attr "v8type" "fpsimd_load,fpsimd_load2,load1,load2")
- (const_string "yes")
- (const_string "no")))
-
-(define_insn_reservation "load" 2
- (eq_attr "is_load" "yes")
- "core")
-
-(define_insn_reservation "nonload" 1
- (eq_attr "is_load" "no")
- "core")
diff --git a/gcc-4.8.1/gcc/config/aarch64/aarch64-linux.h b/gcc-4.8.1/gcc/config/aarch64/aarch64-linux.h
deleted file mode 100644
index e914ed27f..000000000
--- a/gcc-4.8.1/gcc/config/aarch64/aarch64-linux.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/* Machine description for AArch64 architecture.
- Copyright (C) 2009-2013 Free Software Foundation, Inc.
- Contributed by ARM Ltd.
-
- This file is part of GCC.
-
- GCC is free software; you can redistribute it and/or modify it
- under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 3, or (at your option)
- any later version.
-
- GCC is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with GCC; see the file COPYING3. If not see
- <http://www.gnu.org/licenses/>. */
-
-#ifndef GCC_AARCH64_LINUX_H
-#define GCC_AARCH64_LINUX_H
-
-#define GLIBC_DYNAMIC_LINKER "/lib/ld-linux-aarch64.so.1"
-
-#define LINUX_TARGET_LINK_SPEC "%{h*} \
- %{static:-Bstatic} \
- %{shared:-shared} \
- %{symbolic:-Bsymbolic} \
- %{rdynamic:-export-dynamic} \
- -dynamic-linker " GNU_USER_DYNAMIC_LINKER " \
- -X \
- %{mbig-endian:-EB} %{mlittle-endian:-EL}"
-
-#define LINK_SPEC LINUX_TARGET_LINK_SPEC
-
-#define TARGET_OS_CPP_BUILTINS() \
- do \
- { \
- GNU_USER_TARGET_OS_CPP_BUILTINS(); \
- } \
- while (0)
-
-#endif /* GCC_AARCH64_LINUX_H */
diff --git a/gcc-4.8.1/gcc/config/aarch64/aarch64-modes.def b/gcc-4.8.1/gcc/config/aarch64/aarch64-modes.def
deleted file mode 100644
index fc547c890..000000000
--- a/gcc-4.8.1/gcc/config/aarch64/aarch64-modes.def
+++ /dev/null
@@ -1,54 +0,0 @@
-/* Machine description for AArch64 architecture.
- Copyright (C) 2009-2013 Free Software Foundation, Inc.
- Contributed by ARM Ltd.
-
- This file is part of GCC.
-
- GCC is free software; you can redistribute it and/or modify it
- under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 3, or (at your option)
- any later version.
-
- GCC is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with GCC; see the file COPYING3. If not see
- <http://www.gnu.org/licenses/>. */
-
-CC_MODE (CCFP);
-CC_MODE (CCFPE);
-CC_MODE (CC_SWP);
-CC_MODE (CC_ZESWP); /* zero-extend LHS (but swap to make it RHS). */
-CC_MODE (CC_SESWP); /* sign-extend LHS (but swap to make it RHS). */
-CC_MODE (CC_NZ); /* Only N and Z bits of condition flags are valid. */
-
-/* Vector modes. */
-VECTOR_MODES (INT, 8); /* V8QI V4HI V2SI. */
-VECTOR_MODES (INT, 16); /* V16QI V8HI V4SI V2DI. */
-VECTOR_MODES (FLOAT, 8); /* V2SF. */
-VECTOR_MODES (FLOAT, 16); /* V4SF V2DF. */
-
-/* Oct Int: 256-bit integer mode needed for 32-byte vector arguments. */
-INT_MODE (OI, 32);
-
-/* Opaque integer modes for 3, 6 or 8 Neon double registers (2 is
- TImode). */
-INT_MODE (EI, 24);
-INT_MODE (CI, 48);
-INT_MODE (XI, 64);
-
-/* Vector modes for register lists. */
-VECTOR_MODES (INT, 32); /* V32QI V16HI V8SI V4DI. */
-VECTOR_MODES (FLOAT, 32); /* V8SF V4DF. */
-
-VECTOR_MODES (INT, 48); /* V48QI V24HI V12SI V6DI. */
-VECTOR_MODES (FLOAT, 48); /* V12SF V6DF. */
-
-VECTOR_MODES (INT, 64); /* V64QI V32HI V16SI V8DI. */
-VECTOR_MODES (FLOAT, 64); /* V16SF V8DF. */
-
-/* Quad float: 128-bit floating mode for long doubles. */
-FLOAT_MODE (TF, 16, ieee_quad_format);
diff --git a/gcc-4.8.1/gcc/config/aarch64/aarch64-option-extensions.def b/gcc-4.8.1/gcc/config/aarch64/aarch64-option-extensions.def
deleted file mode 100644
index 58e815471..000000000
--- a/gcc-4.8.1/gcc/config/aarch64/aarch64-option-extensions.def
+++ /dev/null
@@ -1,37 +0,0 @@
-/* Copyright (C) 2012-2013 Free Software Foundation, Inc.
- Contributed by ARM Ltd.
-
- This file is part of GCC.
-
- GCC is free software; you can redistribute it and/or modify it
- under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 3, or (at your option)
- any later version.
-
- GCC is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with GCC; see the file COPYING3. If not see
- <http://www.gnu.org/licenses/>. */
-
-/* This is a list of ISA extensions in AArch64.
-
- Before using #include to read this file, define a macro:
-
- AARCH64_OPT_EXTENSION(EXT_NAME, FLAGS_ON, FLAGS_OFF)
-
- EXT_NAME is the name of the extension, represented as a string constant.
- FLAGS_ON are the bitwise-or of the features that the extension adds.
- FLAGS_OFF are the bitwise-or of the features that the extension removes. */
-
-/* V8 Architecture Extensions.
- This list currently contains example extensions for CPUs that implement
- AArch64, and therefore serves as a template for adding more CPUs in the
- future. */
-
-AARCH64_OPT_EXTENSION("fp", AARCH64_FL_FP, AARCH64_FL_FPSIMD | AARCH64_FL_CRYPTO)
-AARCH64_OPT_EXTENSION("simd", AARCH64_FL_FPSIMD, AARCH64_FL_SIMD | AARCH64_FL_CRYPTO)
-AARCH64_OPT_EXTENSION("crypto", AARCH64_FL_CRYPTO | AARCH64_FL_FPSIMD, AARCH64_FL_CRYPTO)
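
Each entry above pairs an extension name with the feature bits that "+name" adds (FLAGS_ON) and the bits that "+noname" removes (FLAGS_OFF), which is what lets -march=/-mcpu= strings carry extension suffixes. A hedged sketch of how one such suffix might be applied against a table built from this file follows; the names are illustrative (the real parsing lives in aarch64.c), and the AARCH64_FL_* flag values used in the table are assumed from aarch64.h.

/* Illustrative handling of a single "+ext" / "+noext" suffix.  */
#include <string.h>

struct extension_entry
{
  const char *name;          /* e.g. "fp", "simd", "crypto" */
  unsigned long flags_on;    /* bits added by "+name" */
  unsigned long flags_off;   /* bits removed by "+noname" */
};

#define AARCH64_OPT_EXTENSION(NAME, FLAGS_ON, FLAGS_OFF) \
  { NAME, FLAGS_ON, FLAGS_OFF },
static const struct extension_entry all_extensions[] =
{
#include "aarch64-option-extensions.def"
  { NULL, 0, 0 }
};
#undef AARCH64_OPT_EXTENSION

static int
apply_extension (const char *ext, unsigned long *isa_flags)
{
  const struct extension_entry *e;
  int removing = strncmp (ext, "no", 2) == 0;
  const char *base = removing ? ext + 2 : ext;

  for (e = all_extensions; e->name != NULL; e++)
    if (strcmp (e->name, base) == 0)
      {
        if (removing)
          *isa_flags &= ~e->flags_off;
        else
          *isa_flags |= e->flags_on;
        return 1;
      }
  return 0;   /* unknown extension name */
}
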
diff --git a/gcc-4.8.1/gcc/config/aarch64/aarch64-opts.h b/gcc-4.8.1/gcc/config/aarch64/aarch64-opts.h
deleted file mode 100644
index 31e105f68..000000000
--- a/gcc-4.8.1/gcc/config/aarch64/aarch64-opts.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/* Copyright (C) 2011-2013 Free Software Foundation, Inc.
- Contributed by ARM Ltd.
-
- This file is part of GCC.
-
- GCC is free software; you can redistribute it and/or modify it
- under the terms of the GNU General Public License as published
- by the Free Software Foundation; either version 3, or (at your
- option) any later version.
-
- GCC is distributed in the hope that it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
- License for more details.
-
- You should have received a copy of the GNU General Public License
- along with GCC; see the file COPYING3. If not see
- <http://www.gnu.org/licenses/>. */
-
-/* Definitions for option handling for AArch64. */
-
-#ifndef GCC_AARCH64_OPTS_H
-#define GCC_AARCH64_OPTS_H
-
-/* The various cores that implement AArch64. */
-enum aarch64_processor
-{
-#define AARCH64_CORE(NAME, IDENT, ARCH, FLAGS, COSTS) \
- IDENT,
-#include "aarch64-cores.def"
-#undef AARCH64_CORE
- /* Used to indicate that no processor has been specified. */
- generic,
- /* Used to mark the end of the processor table. */
- aarch64_none
-};
-
-/* TLS types. */
-enum aarch64_tls_type {
- TLS_TRADITIONAL,
- TLS_DESCRIPTORS
-};
-
-/* The code model defines the address generation strategy.
- Most have a PIC and non-PIC variant. */
-enum aarch64_code_model {
- /* Static code and data fit within a 1MB region.
- Not fully implemented, mostly treated as SMALL. */
- AARCH64_CMODEL_TINY,
- /* Static code, data and GOT/PLT fit within a 1MB region.
- Not fully implemented, mostly treated as SMALL_PIC. */
- AARCH64_CMODEL_TINY_PIC,
- /* Static code and data fit within a 4GB region.
- The default non-PIC code model. */
- AARCH64_CMODEL_SMALL,
- /* Static code, data and GOT/PLT fit within a 4GB region.
- The default PIC code model. */
- AARCH64_CMODEL_SMALL_PIC,
- /* No assumptions about addresses of code and data.
- The PIC variant is not yet implemented. */
- AARCH64_CMODEL_LARGE
-};
-
-#endif
diff --git a/gcc-4.8.1/gcc/config/aarch64/aarch64-protos.h b/gcc-4.8.1/gcc/config/aarch64/aarch64-protos.h
deleted file mode 100644
index 5d0072f9d..000000000
--- a/gcc-4.8.1/gcc/config/aarch64/aarch64-protos.h
+++ /dev/null
@@ -1,254 +0,0 @@
-/* Machine description for AArch64 architecture.
- Copyright (C) 2009-2013 Free Software Foundation, Inc.
- Contributed by ARM Ltd.
-
- This file is part of GCC.
-
- GCC is free software; you can redistribute it and/or modify it
- under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 3, or (at your option)
- any later version.
-
- GCC is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with GCC; see the file COPYING3. If not see
- <http://www.gnu.org/licenses/>. */
-
-
-#ifndef GCC_AARCH64_PROTOS_H
-#define GCC_AARCH64_PROTOS_H
-
-/*
- SYMBOL_CONTEXT_ADR
- The symbol is used in a load-address operation.
- SYMBOL_CONTEXT_MEM
- The symbol is used as the address in a MEM.
- */
-enum aarch64_symbol_context
-{
- SYMBOL_CONTEXT_MEM,
- SYMBOL_CONTEXT_ADR
-};
-
-/* SYMBOL_SMALL_ABSOLUTE: Generate symbol accesses through
- high and lo relocs that calculate the base address using a PC
- relative reloc.
- So to get the address of foo, we generate
- adrp x0, foo
- add x0, x0, :lo12:foo
-
- To load or store something to foo, we could use the corresponding
- load store variants that generate an
- ldr x0, [x0,:lo12:foo]
- or
- str x1, [x0, :lo12:foo]
-
- This corresponds to the small code model of the compiler.
-
- SYMBOL_SMALL_GOT: Similar to the one above but this
- gives us the GOT entry of the symbol being referred to:
- Thus calculating the GOT entry for foo is done using the
- following sequence of instructions. The ADRP instruction
- gets us to the page containing the GOT entry of the symbol
- and the got_lo12 gets us the actual offset in it.
-
- adrp x0, :got:foo
- ldr x0, [x0, :gotoff_lo12:foo]
-
- This corresponds to the small PIC model of the compiler.
-
- SYMBOL_SMALL_TLSGD
- SYMBOL_SMALL_TLSDESC
- SYMBOL_SMALL_GOTTPREL
- SYMBOL_SMALL_TPREL
- Each of these represents a thread-local symbol, and corresponds to the
- thread local storage relocation operator for the symbol being referred to.
-
- SYMBOL_FORCE_TO_MEM: Global variables are addressed using the
- constant pool. All variable addresses are spilled into constant
- pools. The constant pools themselves are addressed using PC
- relative accesses. This only works for the large code model.
- */
-enum aarch64_symbol_type
-{
- SYMBOL_SMALL_ABSOLUTE,
- SYMBOL_SMALL_GOT,
- SYMBOL_SMALL_TLSGD,
- SYMBOL_SMALL_TLSDESC,
- SYMBOL_SMALL_GOTTPREL,
- SYMBOL_SMALL_TPREL,
- SYMBOL_FORCE_TO_MEM
-};
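
To make the small-code-model sequences described in the comment above concrete, a minimal sketch follows; it is not part of the deleted header, and the symbol name foo and register x0 are purely illustrative.

/* Taking the address of a global under the small code model.  */
extern int foo;

int *
address_of_foo (void)
{
  /* Non-PIC (SYMBOL_SMALL_ABSOLUTE) is expected to expand to roughly:
       adrp x0, foo
       add  x0, x0, :lo12:foo
     With PIC (SYMBOL_SMALL_GOT) the GOT entry is loaded instead:
       adrp x0, :got:foo
       ldr  x0, [x0, :gotoff_lo12:foo]  */
  return &foo;
}

int
load_foo (void)
{
  /* A plain load can fold the low 12 bits into the memory operand:
       adrp x0, foo
       ldr  w0, [x0, :lo12:foo]  */
  return foo;
}
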
-
-/* A set of tuning parameters contains references to size and time
- cost models and vectors for address cost calculations, register
- move costs and memory move costs. */
-
-/* Extra costs for specific insns. Only records the cost over and above
- that of a single insn. */
-
-struct cpu_rtx_cost_table
-{
- const int memory_load;
- const int memory_store;
- const int register_shift;
- const int int_divide;
- const int float_divide;
- const int double_divide;
- const int int_multiply;
- const int int_multiply_extend;
- const int int_multiply_add;
- const int int_multiply_extend_add;
- const int float_multiply;
- const int double_multiply;
-};
-
-/* Additional cost for addresses. */
-struct cpu_addrcost_table
-{
- const int pre_modify;
- const int post_modify;
- const int register_offset;
- const int register_extend;
- const int imm_offset;
-};
-
-/* Additional costs for register copies. Cost is for one register. */
-struct cpu_regmove_cost
-{
- const int GP2GP;
- const int GP2FP;
- const int FP2GP;
- const int FP2FP;
-};
-
-struct tune_params
-{
- const struct cpu_rtx_cost_table *const insn_extra_cost;
- const struct cpu_addrcost_table *const addr_cost;
- const struct cpu_regmove_cost *const regmove_cost;
- const int memmov_cost;
-};
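
A hypothetical instantiation, to show how these tables fit together; all values and identifiers below are invented for illustration (the real per-core tables live in aarch64.c).

/* Invented regmove costs: one cost per single-register copy.  */
static const struct cpu_regmove_cost example_regmove_cost =
{
  1, /* GP2GP  */
  2, /* GP2FP  */
  2, /* FP2GP  */
  2  /* FP2FP  */
};

/* Invented address costs.  */
static const struct cpu_addrcost_table example_addrcost_table =
{
  0, /* pre_modify  */
  0, /* post_modify  */
  0, /* register_offset  */
  0, /* register_extend  */
  0  /* imm_offset  */
};

/* A tuning record that a core entry in aarch64-cores.def could point at.  */
static const struct tune_params example_tunings =
{
  NULL,                    /* insn_extra_cost (cpu_rtx_cost_table)  */
  &example_addrcost_table, /* addr_cost  */
  &example_regmove_cost,   /* regmove_cost  */
  4                        /* memmov_cost  */
};
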
-
-HOST_WIDE_INT aarch64_initial_elimination_offset (unsigned, unsigned);
-bool aarch64_bitmask_imm (HOST_WIDE_INT val, enum machine_mode);
-bool aarch64_constant_address_p (rtx);
-bool aarch64_float_const_zero_rtx_p (rtx);
-bool aarch64_function_arg_regno_p (unsigned);
-bool aarch64_gen_movmemqi (rtx *);
-bool aarch64_is_extend_from_extract (enum machine_mode, rtx, rtx);
-bool aarch64_is_long_call_p (rtx);
-bool aarch64_label_mentioned_p (rtx);
-bool aarch64_legitimate_pic_operand_p (rtx);
-bool aarch64_move_imm (HOST_WIDE_INT, enum machine_mode);
-bool aarch64_pad_arg_upward (enum machine_mode, const_tree);
-bool aarch64_pad_reg_upward (enum machine_mode, const_tree, bool);
-bool aarch64_regno_ok_for_base_p (int, bool);
-bool aarch64_regno_ok_for_index_p (int, bool);
-bool aarch64_simd_imm_scalar_p (rtx x, enum machine_mode mode);
-bool aarch64_simd_imm_zero_p (rtx, enum machine_mode);
-bool aarch64_simd_shift_imm_p (rtx, enum machine_mode, bool);
-bool aarch64_symbolic_address_p (rtx);
-bool aarch64_symbolic_constant_p (rtx, enum aarch64_symbol_context,
- enum aarch64_symbol_type *);
-bool aarch64_uimm12_shift (HOST_WIDE_INT);
-const char *aarch64_output_casesi (rtx *);
-enum aarch64_symbol_type aarch64_classify_symbol (rtx,
- enum aarch64_symbol_context);
-enum aarch64_symbol_type aarch64_classify_tls_symbol (rtx);
-enum reg_class aarch64_regno_regclass (unsigned);
-int aarch64_asm_preferred_eh_data_format (int, int);
-int aarch64_hard_regno_mode_ok (unsigned, enum machine_mode);
-int aarch64_hard_regno_nregs (unsigned, enum machine_mode);
-int aarch64_simd_attr_length_move (rtx);
-int aarch64_simd_immediate_valid_for_move (rtx, enum machine_mode, rtx *,
- int *, unsigned char *, int *,
- int *);
-int aarch64_uxt_size (int, HOST_WIDE_INT);
-rtx aarch64_final_eh_return_addr (void);
-rtx aarch64_legitimize_reload_address (rtx *, enum machine_mode, int, int, int);
-const char *aarch64_output_move_struct (rtx *operands);
-rtx aarch64_return_addr (int, rtx);
-rtx aarch64_simd_gen_const_vector_dup (enum machine_mode, int);
-bool aarch64_simd_mem_operand_p (rtx);
-rtx aarch64_simd_vect_par_cnst_half (enum machine_mode, bool);
-rtx aarch64_tls_get_addr (void);
-unsigned aarch64_dbx_register_number (unsigned);
-unsigned aarch64_trampoline_size (void);
-void aarch64_asm_output_labelref (FILE *, const char *);
-void aarch64_elf_asm_named_section (const char *, unsigned, tree);
-void aarch64_expand_epilogue (bool);
-void aarch64_expand_mov_immediate (rtx, rtx);
-void aarch64_expand_prologue (void);
-void aarch64_expand_vector_init (rtx, rtx);
-void aarch64_function_profiler (FILE *, int);
-void aarch64_init_cumulative_args (CUMULATIVE_ARGS *, const_tree, rtx,
- const_tree, unsigned);
-void aarch64_init_expanders (void);
-void aarch64_print_operand (FILE *, rtx, char);
-void aarch64_print_operand_address (FILE *, rtx);
-
-/* Initialize builtins for SIMD intrinsics. */
-void init_aarch64_simd_builtins (void);
-
-void aarch64_simd_const_bounds (rtx, HOST_WIDE_INT, HOST_WIDE_INT);
-void aarch64_simd_disambiguate_copy (rtx *, rtx *, rtx *, unsigned int);
-
-/* Emit code to place an AdvSIMD pair result in memory locations (with equal
- registers). */
-void aarch64_simd_emit_pair_result_insn (enum machine_mode,
- rtx (*intfn) (rtx, rtx, rtx), rtx,
- rtx);
-
-/* Expand builtins for SIMD intrinsics. */
-rtx aarch64_simd_expand_builtin (int, tree, rtx);
-
-void aarch64_simd_lane_bounds (rtx, HOST_WIDE_INT, HOST_WIDE_INT);
-
-/* Emit code for reinterprets. */
-void aarch64_simd_reinterpret (rtx, rtx);
-
-void aarch64_split_128bit_move (rtx, rtx);
-
-bool aarch64_split_128bit_move_p (rtx, rtx);
-
-/* Check for a legitimate floating point constant for FMOV. */
-bool aarch64_float_const_representable_p (rtx);
-
-#if defined (RTX_CODE)
-
-bool aarch64_legitimate_address_p (enum machine_mode, rtx, RTX_CODE, bool);
-enum machine_mode aarch64_select_cc_mode (RTX_CODE, rtx, rtx);
-rtx aarch64_gen_compare_reg (RTX_CODE, rtx, rtx);
-rtx aarch64_load_tp (rtx);
-
-void aarch64_expand_compare_and_swap (rtx op[]);
-void aarch64_split_compare_and_swap (rtx op[]);
-void aarch64_split_atomic_op (enum rtx_code, rtx, rtx, rtx, rtx, rtx, rtx);
-
-#endif /* RTX_CODE */
-
-void aarch64_init_builtins (void);
-rtx aarch64_expand_builtin (tree exp,
- rtx target,
- rtx subtarget ATTRIBUTE_UNUSED,
- enum machine_mode mode ATTRIBUTE_UNUSED,
- int ignore ATTRIBUTE_UNUSED);
-tree aarch64_builtin_decl (unsigned, bool ATTRIBUTE_UNUSED);
-
-tree
-aarch64_builtin_vectorized_function (tree fndecl,
- tree type_out,
- tree type_in);
-
-extern void aarch64_split_combinev16qi (rtx operands[3]);
-extern void aarch64_expand_vec_perm (rtx target, rtx op0, rtx op1, rtx sel);
-extern bool
-aarch64_expand_vec_perm_const (rtx target, rtx op0, rtx op1, rtx sel);
-
-char* aarch64_output_simd_mov_immediate (rtx *, enum machine_mode, unsigned);
-#endif /* GCC_AARCH64_PROTOS_H */
diff --git a/gcc-4.8.1/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc-4.8.1/gcc/config/aarch64/aarch64-simd-builtins.def
deleted file mode 100644
index a6a5e12c7..000000000
--- a/gcc-4.8.1/gcc/config/aarch64/aarch64-simd-builtins.def
+++ /dev/null
@@ -1,258 +0,0 @@
-/* Machine description for AArch64 architecture.
- Copyright (C) 2012-2013 Free Software Foundation, Inc.
- Contributed by ARM Ltd.
-
- This file is part of GCC.
-
- GCC is free software; you can redistribute it and/or modify it
- under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 3, or (at your option)
- any later version.
-
- GCC is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with GCC; see the file COPYING3. If not see
- <http://www.gnu.org/licenses/>. */
-
-/* In the list below, the BUILTIN_<ITERATOR> macros should
- correspond to the iterator used to construct the instruction's
- patterns in aarch64-simd.md. A helpful idiom to follow when
- adding new builtins is to add a line for each pattern in the md
- file. Thus, ADDP, which has one pattern defined for the VD_BHSI
- iterator, and one for DImode, has two entries below. */
-
- BUILTIN_VD_RE (CREATE, create)
- BUILTIN_VQ_S (GETLANE, get_lane_signed)
- BUILTIN_VDQ (GETLANE, get_lane_unsigned)
- BUILTIN_VDQF (GETLANE, get_lane)
- VAR1 (GETLANE, get_lane, di)
- BUILTIN_VDC (COMBINE, combine)
- BUILTIN_VB (BINOP, pmul)
- BUILTIN_VDQF (UNOP, sqrt)
- BUILTIN_VD_BHSI (BINOP, addp)
- VAR1 (UNOP, addp, di)
-
- BUILTIN_VD_RE (REINTERP, reinterpretdi)
- BUILTIN_VDC (REINTERP, reinterpretv8qi)
- BUILTIN_VDC (REINTERP, reinterpretv4hi)
- BUILTIN_VDC (REINTERP, reinterpretv2si)
- BUILTIN_VDC (REINTERP, reinterpretv2sf)
- BUILTIN_VQ (REINTERP, reinterpretv16qi)
- BUILTIN_VQ (REINTERP, reinterpretv8hi)
- BUILTIN_VQ (REINTERP, reinterpretv4si)
- BUILTIN_VQ (REINTERP, reinterpretv4sf)
- BUILTIN_VQ (REINTERP, reinterpretv2di)
- BUILTIN_VQ (REINTERP, reinterpretv2df)
-
- BUILTIN_VDQ_I (BINOP, dup_lane)
- BUILTIN_SDQ_I (BINOP, dup_lane)
- /* Implemented by aarch64_<sur>q<r>shl<mode>. */
- BUILTIN_VSDQ_I (BINOP, sqshl)
- BUILTIN_VSDQ_I (BINOP, uqshl)
- BUILTIN_VSDQ_I (BINOP, sqrshl)
- BUILTIN_VSDQ_I (BINOP, uqrshl)
- /* Implemented by aarch64_<su_optab><optab><mode>. */
- BUILTIN_VSDQ_I (BINOP, sqadd)
- BUILTIN_VSDQ_I (BINOP, uqadd)
- BUILTIN_VSDQ_I (BINOP, sqsub)
- BUILTIN_VSDQ_I (BINOP, uqsub)
- /* Implemented by aarch64_<sur>qadd<mode>. */
- BUILTIN_VSDQ_I (BINOP, suqadd)
- BUILTIN_VSDQ_I (BINOP, usqadd)
-
- /* Implemented by aarch64_get_dreg<VSTRUCT:mode><VDC:mode>. */
- BUILTIN_VDC (GETLANE, get_dregoi)
- BUILTIN_VDC (GETLANE, get_dregci)
- BUILTIN_VDC (GETLANE, get_dregxi)
- /* Implemented by aarch64_get_qreg<VSTRUCT:mode><VQ:mode>. */
- BUILTIN_VQ (GETLANE, get_qregoi)
- BUILTIN_VQ (GETLANE, get_qregci)
- BUILTIN_VQ (GETLANE, get_qregxi)
- /* Implemented by aarch64_set_qreg<VSTRUCT:mode><VQ:mode>. */
- BUILTIN_VQ (SETLANE, set_qregoi)
- BUILTIN_VQ (SETLANE, set_qregci)
- BUILTIN_VQ (SETLANE, set_qregxi)
- /* Implemented by aarch64_ld<VSTRUCT:nregs><VDC:mode>. */
- BUILTIN_VDC (LOADSTRUCT, ld2)
- BUILTIN_VDC (LOADSTRUCT, ld3)
- BUILTIN_VDC (LOADSTRUCT, ld4)
- /* Implemented by aarch64_ld<VSTRUCT:nregs><VQ:mode>. */
- BUILTIN_VQ (LOADSTRUCT, ld2)
- BUILTIN_VQ (LOADSTRUCT, ld3)
- BUILTIN_VQ (LOADSTRUCT, ld4)
- /* Implemented by aarch64_st<VSTRUCT:nregs><VDC:mode>. */
- BUILTIN_VDC (STORESTRUCT, st2)
- BUILTIN_VDC (STORESTRUCT, st3)
- BUILTIN_VDC (STORESTRUCT, st4)
- /* Implemented by aarch64_st<VSTRUCT:nregs><VQ:mode>. */
- BUILTIN_VQ (STORESTRUCT, st2)
- BUILTIN_VQ (STORESTRUCT, st3)
- BUILTIN_VQ (STORESTRUCT, st4)
-
- BUILTIN_VQW (BINOP, saddl2)
- BUILTIN_VQW (BINOP, uaddl2)
- BUILTIN_VQW (BINOP, ssubl2)
- BUILTIN_VQW (BINOP, usubl2)
- BUILTIN_VQW (BINOP, saddw2)
- BUILTIN_VQW (BINOP, uaddw2)
- BUILTIN_VQW (BINOP, ssubw2)
- BUILTIN_VQW (BINOP, usubw2)
- /* Implemented by aarch64_<ANY_EXTEND:su><ADDSUB:optab>l<mode>. */
- BUILTIN_VDW (BINOP, saddl)
- BUILTIN_VDW (BINOP, uaddl)
- BUILTIN_VDW (BINOP, ssubl)
- BUILTIN_VDW (BINOP, usubl)
- /* Implemented by aarch64_<ANY_EXTEND:su><ADDSUB:optab>w<mode>. */
- BUILTIN_VDW (BINOP, saddw)
- BUILTIN_VDW (BINOP, uaddw)
- BUILTIN_VDW (BINOP, ssubw)
- BUILTIN_VDW (BINOP, usubw)
- /* Implemented by aarch64_<sur>h<addsub><mode>. */
- BUILTIN_VQ_S (BINOP, shadd)
- BUILTIN_VQ_S (BINOP, uhadd)
- BUILTIN_VQ_S (BINOP, srhadd)
- BUILTIN_VQ_S (BINOP, urhadd)
- /* Implemented by aarch64_<sur><addsub>hn<mode>. */
- BUILTIN_VQN (BINOP, addhn)
- BUILTIN_VQN (BINOP, raddhn)
- /* Implemented by aarch64_<sur><addsub>hn2<mode>. */
- BUILTIN_VQN (TERNOP, addhn2)
- BUILTIN_VQN (TERNOP, raddhn2)
-
- BUILTIN_VSQN_HSDI (UNOP, sqmovun)
- /* Implemented by aarch64_<sur>qmovn<mode>. */
- BUILTIN_VSQN_HSDI (UNOP, sqmovn)
- BUILTIN_VSQN_HSDI (UNOP, uqmovn)
- /* Implemented by aarch64_s<optab><mode>. */
- BUILTIN_VSDQ_I_BHSI (UNOP, sqabs)
- BUILTIN_VSDQ_I_BHSI (UNOP, sqneg)
-
- BUILTIN_VSD_HSI (QUADOP, sqdmlal_lane)
- BUILTIN_VSD_HSI (QUADOP, sqdmlsl_lane)
- BUILTIN_VSD_HSI (QUADOP, sqdmlal_laneq)
- BUILTIN_VSD_HSI (QUADOP, sqdmlsl_laneq)
- BUILTIN_VQ_HSI (TERNOP, sqdmlal2)
- BUILTIN_VQ_HSI (TERNOP, sqdmlsl2)
- BUILTIN_VQ_HSI (QUADOP, sqdmlal2_lane)
- BUILTIN_VQ_HSI (QUADOP, sqdmlsl2_lane)
- BUILTIN_VQ_HSI (QUADOP, sqdmlal2_laneq)
- BUILTIN_VQ_HSI (QUADOP, sqdmlsl2_laneq)
- BUILTIN_VQ_HSI (TERNOP, sqdmlal2_n)
- BUILTIN_VQ_HSI (TERNOP, sqdmlsl2_n)
- /* Implemented by aarch64_sqdml<SBINQOPS:as>l<mode>. */
- BUILTIN_VSD_HSI (TERNOP, sqdmlal)
- BUILTIN_VSD_HSI (TERNOP, sqdmlsl)
- /* Implemented by aarch64_sqdml<SBINQOPS:as>l_n<mode>. */
- BUILTIN_VD_HSI (TERNOP, sqdmlal_n)
- BUILTIN_VD_HSI (TERNOP, sqdmlsl_n)
-
- BUILTIN_VSD_HSI (BINOP, sqdmull)
- BUILTIN_VSD_HSI (TERNOP, sqdmull_lane)
- BUILTIN_VD_HSI (TERNOP, sqdmull_laneq)
- BUILTIN_VD_HSI (BINOP, sqdmull_n)
- BUILTIN_VQ_HSI (BINOP, sqdmull2)
- BUILTIN_VQ_HSI (TERNOP, sqdmull2_lane)
- BUILTIN_VQ_HSI (TERNOP, sqdmull2_laneq)
- BUILTIN_VQ_HSI (BINOP, sqdmull2_n)
- /* Implemented by aarch64_sq<r>dmulh<mode>. */
- BUILTIN_VSDQ_HSI (BINOP, sqdmulh)
- BUILTIN_VSDQ_HSI (BINOP, sqrdmulh)
- /* Implemented by aarch64_sq<r>dmulh_lane<q><mode>. */
- BUILTIN_VDQHS (TERNOP, sqdmulh_lane)
- BUILTIN_VDQHS (TERNOP, sqdmulh_laneq)
- BUILTIN_VDQHS (TERNOP, sqrdmulh_lane)
- BUILTIN_VDQHS (TERNOP, sqrdmulh_laneq)
- BUILTIN_SD_HSI (TERNOP, sqdmulh_lane)
- BUILTIN_SD_HSI (TERNOP, sqrdmulh_lane)
-
- BUILTIN_VSDQ_I_DI (BINOP, sshl_n)
- BUILTIN_VSDQ_I_DI (BINOP, ushl_n)
- /* Implemented by aarch64_<sur>shl<mode>. */
- BUILTIN_VSDQ_I_DI (BINOP, sshl)
- BUILTIN_VSDQ_I_DI (BINOP, ushl)
- BUILTIN_VSDQ_I_DI (BINOP, srshl)
- BUILTIN_VSDQ_I_DI (BINOP, urshl)
-
- BUILTIN_VSDQ_I_DI (SHIFTIMM, sshr_n)
- BUILTIN_VSDQ_I_DI (SHIFTIMM, ushr_n)
- /* Implemented by aarch64_<sur>shr_n<mode>. */
- BUILTIN_VSDQ_I_DI (SHIFTIMM, srshr_n)
- BUILTIN_VSDQ_I_DI (SHIFTIMM, urshr_n)
- /* Implemented by aarch64_<sur>sra_n<mode>. */
- BUILTIN_VSDQ_I_DI (SHIFTACC, ssra_n)
- BUILTIN_VSDQ_I_DI (SHIFTACC, usra_n)
- BUILTIN_VSDQ_I_DI (SHIFTACC, srsra_n)
- BUILTIN_VSDQ_I_DI (SHIFTACC, ursra_n)
- /* Implemented by aarch64_<sur>shll_n<mode>. */
- BUILTIN_VDW (SHIFTIMM, sshll_n)
- BUILTIN_VDW (SHIFTIMM, ushll_n)
- /* Implemented by aarch64_<sur>shll2_n<mode>. */
- BUILTIN_VQW (SHIFTIMM, sshll2_n)
- BUILTIN_VQW (SHIFTIMM, ushll2_n)
- /* Implemented by aarch64_<sur>q<r>shr<u>n_n<mode>. */
- BUILTIN_VSQN_HSDI (SHIFTIMM, sqshrun_n)
- BUILTIN_VSQN_HSDI (SHIFTIMM, sqrshrun_n)
- BUILTIN_VSQN_HSDI (SHIFTIMM, sqshrn_n)
- BUILTIN_VSQN_HSDI (SHIFTIMM, uqshrn_n)
- BUILTIN_VSQN_HSDI (SHIFTIMM, sqrshrn_n)
- BUILTIN_VSQN_HSDI (SHIFTIMM, uqrshrn_n)
- /* Implemented by aarch64_<sur>s<lr>i_n<mode>. */
- BUILTIN_VSDQ_I_DI (SHIFTINSERT, ssri_n)
- BUILTIN_VSDQ_I_DI (SHIFTINSERT, usri_n)
- BUILTIN_VSDQ_I_DI (SHIFTINSERT, ssli_n)
- BUILTIN_VSDQ_I_DI (SHIFTINSERT, usli_n)
- /* Implemented by aarch64_<sur>qshl<u>_n<mode>. */
- BUILTIN_VSDQ_I (SHIFTIMM, sqshlu_n)
- BUILTIN_VSDQ_I (SHIFTIMM, sqshl_n)
- BUILTIN_VSDQ_I (SHIFTIMM, uqshl_n)
-
- /* Implemented by aarch64_cm<cmp><mode>. */
- BUILTIN_VSDQ_I_DI (BINOP, cmeq)
- BUILTIN_VSDQ_I_DI (BINOP, cmge)
- BUILTIN_VSDQ_I_DI (BINOP, cmgt)
- BUILTIN_VSDQ_I_DI (BINOP, cmle)
- BUILTIN_VSDQ_I_DI (BINOP, cmlt)
- /* Implemented by aarch64_cm<cmp><mode>. */
- BUILTIN_VSDQ_I_DI (BINOP, cmhs)
- BUILTIN_VSDQ_I_DI (BINOP, cmhi)
- BUILTIN_VSDQ_I_DI (BINOP, cmtst)
-
- /* Implemented by aarch64_<fmaxmin><mode>. */
- BUILTIN_VDQF (BINOP, fmax)
- BUILTIN_VDQF (BINOP, fmin)
- /* Implemented by aarch64_<maxmin><mode>. */
- BUILTIN_VDQ_BHSI (BINOP, smax)
- BUILTIN_VDQ_BHSI (BINOP, smin)
- BUILTIN_VDQ_BHSI (BINOP, umax)
- BUILTIN_VDQ_BHSI (BINOP, umin)
-
- /* Implemented by aarch64_frint<frint_suffix><mode>. */
- BUILTIN_VDQF (UNOP, frintz)
- BUILTIN_VDQF (UNOP, frintp)
- BUILTIN_VDQF (UNOP, frintm)
- BUILTIN_VDQF (UNOP, frinti)
- BUILTIN_VDQF (UNOP, frintx)
- BUILTIN_VDQF (UNOP, frinta)
-
- /* Implemented by aarch64_fcvt<frint_suffix><su><mode>. */
- BUILTIN_VDQF (UNOP, fcvtzs)
- BUILTIN_VDQF (UNOP, fcvtzu)
- BUILTIN_VDQF (UNOP, fcvtas)
- BUILTIN_VDQF (UNOP, fcvtau)
- BUILTIN_VDQF (UNOP, fcvtps)
- BUILTIN_VDQF (UNOP, fcvtpu)
- BUILTIN_VDQF (UNOP, fcvtms)
- BUILTIN_VDQF (UNOP, fcvtmu)
-
- /* Implemented by
- aarch64_<PERMUTE:perm_insn><PERMUTE:perm_hilo><mode>. */
- BUILTIN_VALL (BINOP, zip1)
- BUILTIN_VALL (BINOP, zip2)
- BUILTIN_VALL (BINOP, uzp1)
- BUILTIN_VALL (BINOP, uzp2)
- BUILTIN_VALL (BINOP, trn1)
- BUILTIN_VALL (BINOP, trn2)
diff --git a/gcc-4.8.1/gcc/config/aarch64/aarch64-simd.md b/gcc-4.8.1/gcc/config/aarch64/aarch64-simd.md
deleted file mode 100644
index 79c309331..000000000
--- a/gcc-4.8.1/gcc/config/aarch64/aarch64-simd.md
+++ /dev/null
@@ -1,3716 +0,0 @@
-;; Machine description for AArch64 AdvSIMD architecture.
-;; Copyright (C) 2011-2013 Free Software Foundation, Inc.
-;; Contributed by ARM Ltd.
-;;
-;; This file is part of GCC.
-;;
-;; GCC is free software; you can redistribute it and/or modify it
-;; under the terms of the GNU General Public License as published by
-;; the Free Software Foundation; either version 3, or (at your option)
-;; any later version.
-;;
-;; GCC is distributed in the hope that it will be useful, but
-;; WITHOUT ANY WARRANTY; without even the implied warranty of
-;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-;; General Public License for more details.
-;;
-;; You should have received a copy of the GNU General Public License
-;; along with GCC; see the file COPYING3. If not see
-;; <http://www.gnu.org/licenses/>.
-
-
-; Main data types used by the instructions
-
-(define_attr "simd_mode" "unknown,none,V8QI,V16QI,V4HI,V8HI,V2SI,V4SI,V2DI,V2SF,V4SF,V2DF,OI,CI,XI,DI,DF,SI,HI,QI"
- (const_string "unknown"))
-
-
-; Classification of AdvSIMD instructions for scheduling purposes.
-; Do not set this attribute and the "v8type" attribute together in
-; any instruction pattern.
-
-; simd_abd integer absolute difference and accumulate.
-; simd_abdl integer absolute difference and accumulate (long).
-; simd_adal integer add and accumulate (long).
-; simd_add integer addition/subtraction.
-; simd_addl integer addition/subtraction (long).
-; simd_addlv across lanes integer sum (long).
-; simd_addn integer addition/subtraction (narrow).
-; simd_addn2 integer addition/subtraction (narrow, high).
-; simd_addv across lanes integer sum.
-; simd_cls count leading sign/zero bits.
-; simd_cmp compare / create mask.
-; simd_cnt population count.
-; simd_dup duplicate element.
-; simd_dupgp duplicate general purpose register.
-; simd_ext bitwise extract from pair.
-; simd_fadd floating point add/sub.
-; simd_fcmp floating point compare.
-; simd_fcvti floating point convert to integer.
-; simd_fcvtl floating-point convert upsize.
-; simd_fcvtn floating-point convert downsize (narrow).
-; simd_fcvtn2 floating-point convert downsize (narrow, high).
-; simd_fdiv floating point division.
-; simd_fminmax floating point min/max.
-; simd_fminmaxv across lanes floating point min/max.
-; simd_fmla floating point multiply-add.
-; simd_fmla_elt floating point multiply-add (by element).
-; simd_fmul floating point multiply.
-; simd_fmul_elt floating point multiply (by element).
-; simd_fnegabs floating point neg/abs.
-; simd_frcpe floating point reciprocal estimate.
-; simd_frcps floating point reciprocal step.
-; simd_frecx floating point reciprocal exponent.
-; simd_frint floating point round to integer.
-; simd_fsqrt floating point square root.
-; simd_icvtf integer convert to floating point.
-; simd_ins insert element.
-; simd_insgp insert general purpose register.
-; simd_load1 load multiple structures to one register (LD1).
-; simd_load1r load single structure to all lanes of one register (LD1R).
-; simd_load1s load single structure to one lane of one register (LD1 [index]).
-; simd_load2 load multiple structures to two registers (LD1, LD2).
-; simd_load2r load single structure to all lanes of two registers (LD1R, LD2R).
-; simd_load2s load single structure to one lane of two registers (LD2 [index]).
-; simd_load3 load multiple structures to three registers (LD1, LD3).
-; simd_load3r load single structure to all lanes of three registers (LD3R).
-; simd_load3s load single structure to one lane of three registers (LD3 [index]).
-; simd_load4 load multiple structures to four registers (LD1, LD2, LD4).
-; simd_load4r load single structure to all lanes of four registers (LD4R).
-; simd_load4s load single structure to one lane of four registers (LD4 [index]).
-; simd_logic logical operation.
-; simd_logic_imm logical operation (immediate).
-; simd_minmax integer min/max.
-; simd_minmaxv across lanes integer min/max.
-; simd_mla integer multiply-accumulate.
-; simd_mla_elt integer multiply-accumulate (by element).
-; simd_mlal integer multiply-accumulate (long).
-; simd_mlal_elt integer multiply-accumulate (by element, long).
-; simd_move move register.
-; simd_move_imm move immediate.
-; simd_movgp move element to general purpose register.
-; simd_mul integer multiply.
-; simd_mul_elt integer multiply (by element).
-; simd_mull integer multiply (long).
-; simd_mull_elt integer multiply (by element, long).
-; simd_negabs integer negate/absolute.
-; simd_rbit bitwise reverse.
-; simd_rcpe integer reciprocal estimate.
-; simd_rcps integer reciprocal square root.
-; simd_rev element reverse.
-; simd_sat_add integer saturating addition/subtraction.
-; simd_sat_mlal integer saturating multiply-accumulate (long).
-; simd_sat_mlal_elt integer saturating multiply-accumulate (by element, long).
-; simd_sat_mul integer saturating multiply.
-; simd_sat_mul_elt integer saturating multiply (by element).
-; simd_sat_mull integer saturating multiply (long).
-; simd_sat_mull_elt integer saturating multiply (by element, long).
-; simd_sat_negabs integer saturating negate/absolute.
-; simd_sat_shift integer saturating shift.
-; simd_sat_shift_imm integer saturating shift (immediate).
-; simd_sat_shiftn_imm integer saturating shift (narrow, immediate).
-; simd_sat_shiftn2_imm integer saturating shift (narrow, high, immediate).
-; simd_shift shift register/vector.
-; simd_shift_acc shift accumulate.
-; simd_shift_imm shift immediate.
-; simd_shift_imm_acc shift immediate and accumulate.
-; simd_shiftl shift register/vector (long).
-; simd_shiftl_imm shift register/vector (long, immediate).
-; simd_shiftn_imm shift register/vector (narrow, immediate).
-; simd_shiftn2_imm shift register/vector (narrow, high, immediate).
-; simd_store1 store multiple structures from one register (ST1).
-; simd_store1s store single structure from one lane of one register (ST1 [index]).
-; simd_store2 store multiple structures from two registers (ST1, ST2).
-; simd_store2s store single structure from one lane of two registers (ST2 [index]).
-; simd_store3 store multiple structures from three registers (ST1, ST3).
-; simd_store3s store single structure from one lane of three registers (ST3 [index]).
-; simd_store4 store multiple structures from four registers (ST1, ST2, ST4).
-; simd_store4s store single structure from one lane of four registers (ST4 [index]).
-; simd_tbl table lookup.
-; simd_trn transpose.
-; simd_uzp unzip.
-; simd_zip zip.
-
-(define_attr "simd_type"
- "simd_abd,\
- simd_abdl,\
- simd_adal,\
- simd_add,\
- simd_addl,\
- simd_addlv,\
- simd_addn,\
- simd_addn2,\
- simd_addv,\
- simd_cls,\
- simd_cmp,\
- simd_cnt,\
- simd_dup,\
- simd_dupgp,\
- simd_ext,\
- simd_fadd,\
- simd_fcmp,\
- simd_fcvti,\
- simd_fcvtl,\
- simd_fcvtn,\
- simd_fcvtn2,\
- simd_fdiv,\
- simd_fminmax,\
- simd_fminmaxv,\
- simd_fmla,\
- simd_fmla_elt,\
- simd_fmul,\
- simd_fmul_elt,\
- simd_fnegabs,\
- simd_frcpe,\
- simd_frcps,\
- simd_frecx,\
- simd_frint,\
- simd_fsqrt,\
- simd_icvtf,\
- simd_ins,\
- simd_insgp,\
- simd_load1,\
- simd_load1r,\
- simd_load1s,\
- simd_load2,\
- simd_load2r,\
- simd_load2s,\
- simd_load3,\
- simd_load3r,\
- simd_load3s,\
- simd_load4,\
- simd_load4r,\
- simd_load4s,\
- simd_logic,\
- simd_logic_imm,\
- simd_minmax,\
- simd_minmaxv,\
- simd_mla,\
- simd_mla_elt,\
- simd_mlal,\
- simd_mlal_elt,\
- simd_movgp,\
- simd_move,\
- simd_move_imm,\
- simd_mul,\
- simd_mul_elt,\
- simd_mull,\
- simd_mull_elt,\
- simd_negabs,\
- simd_rbit,\
- simd_rcpe,\
- simd_rcps,\
- simd_rev,\
- simd_sat_add,\
- simd_sat_mlal,\
- simd_sat_mlal_elt,\
- simd_sat_mul,\
- simd_sat_mul_elt,\
- simd_sat_mull,\
- simd_sat_mull_elt,\
- simd_sat_negabs,\
- simd_sat_shift,\
- simd_sat_shift_imm,\
- simd_sat_shiftn_imm,\
- simd_sat_shiftn2_imm,\
- simd_shift,\
- simd_shift_acc,\
- simd_shift_imm,\
- simd_shift_imm_acc,\
- simd_shiftl,\
- simd_shiftl_imm,\
- simd_shiftn_imm,\
- simd_shiftn2_imm,\
- simd_store1,\
- simd_store1s,\
- simd_store2,\
- simd_store2s,\
- simd_store3,\
- simd_store3s,\
- simd_store4,\
- simd_store4s,\
- simd_tbl,\
- simd_trn,\
- simd_uzp,\
- simd_zip,\
- none"
- (const_string "none"))
-
-
-; The "neon_type" attribute is used by the AArch32 backend. Below is a mapping
-; from "simd_type" to "neon_type".
-
-(define_attr "neon_type"
- "neon_int_1,neon_int_2,neon_int_3,neon_int_4,neon_int_5,neon_vqneg_vqabs,
- neon_vmov,neon_vaba,neon_vsma,neon_vaba_qqq,
- neon_mul_ddd_8_16_qdd_16_8_long_32_16_long,neon_mul_qqq_8_16_32_ddd_32,
- neon_mul_qdd_64_32_long_qqd_16_ddd_32_scalar_64_32_long_scalar,
- neon_mla_ddd_8_16_qdd_16_8_long_32_16_long,neon_mla_qqq_8_16,
- neon_mla_ddd_32_qqd_16_ddd_32_scalar_qdd_64_32_long_scalar_qdd_64_32_long,
- neon_mla_qqq_32_qqd_32_scalar,neon_mul_ddd_16_scalar_32_16_long_scalar,
- neon_mul_qqd_32_scalar,neon_mla_ddd_16_scalar_qdd_32_16_long_scalar,
- neon_shift_1,neon_shift_2,neon_shift_3,neon_vshl_ddd,
- neon_vqshl_vrshl_vqrshl_qqq,neon_vsra_vrsra,neon_fp_vadd_ddd_vabs_dd,
- neon_fp_vadd_qqq_vabs_qq,neon_fp_vsum,neon_fp_vmul_ddd,neon_fp_vmul_qqd,
- neon_fp_vmla_ddd,neon_fp_vmla_qqq,neon_fp_vmla_ddd_scalar,
- neon_fp_vmla_qqq_scalar,neon_fp_vrecps_vrsqrts_ddd,
- neon_fp_vrecps_vrsqrts_qqq,neon_bp_simple,neon_bp_2cycle,neon_bp_3cycle,
- neon_ldr,neon_str,neon_vld1_1_2_regs,neon_vld1_3_4_regs,
- neon_vld2_2_regs_vld1_vld2_all_lanes,neon_vld2_4_regs,neon_vld3_vld4,
- neon_vst1_1_2_regs_vst2_2_regs,neon_vst1_3_4_regs,
- neon_vst2_4_regs_vst3_vst4,neon_vst3_vst4,neon_vld1_vld2_lane,
- neon_vld3_vld4_lane,neon_vst1_vst2_lane,neon_vst3_vst4_lane,
- neon_vld3_vld4_all_lanes,neon_mcr,neon_mcr_2_mcrr,neon_mrc,neon_mrrc,
- neon_ldm_2,neon_stm_2,none,unknown"
- (cond [
- (eq_attr "simd_type" "simd_dup") (const_string "neon_bp_simple")
- (eq_attr "simd_type" "simd_movgp") (const_string "neon_bp_simple")
- (eq_attr "simd_type" "simd_add,simd_logic,simd_logic_imm") (const_string "neon_int_1")
- (eq_attr "simd_type" "simd_negabs,simd_addlv") (const_string "neon_int_3")
- (eq_attr "simd_type" "simd_addn,simd_addn2,simd_addl,simd_sat_add,simd_sat_negabs") (const_string "neon_int_4")
- (eq_attr "simd_type" "simd_move") (const_string "neon_vmov")
- (eq_attr "simd_type" "simd_ins") (const_string "neon_mcr")
- (and (eq_attr "simd_type" "simd_mul,simd_sat_mul") (eq_attr "simd_mode" "V8QI,V4HI")) (const_string "neon_mul_ddd_8_16_qdd_16_8_long_32_16_long")
- (and (eq_attr "simd_type" "simd_mul,simd_sat_mul") (eq_attr "simd_mode" "V2SI,V8QI,V16QI,V2SI")) (const_string "neon_mul_qqq_8_16_32_ddd_32")
- (and (eq_attr "simd_type" "simd_mull,simd_sat_mull") (eq_attr "simd_mode" "V8QI,V16QI,V4HI,V8HI")) (const_string "neon_mul_ddd_8_16_qdd_16_8_long_32_16_long")
- (and (eq_attr "simd_type" "simd_mull,simd_sat_mull") (eq_attr "simd_mode" "V2SI,V4SI,V2DI")) (const_string "neon_mul_qdd_64_32_long_qqd_16_ddd_32_scalar_64_32_long_scalar")
- (and (eq_attr "simd_type" "simd_mla,simd_sat_mlal") (eq_attr "simd_mode" "V8QI,V4HI")) (const_string "neon_mla_ddd_8_16_qdd_16_8_long_32_16_long")
- (and (eq_attr "simd_type" "simd_mla,simd_sat_mlal") (eq_attr "simd_mode" "V2SI")) (const_string "neon_mla_ddd_32_qqd_16_ddd_32_scalar_qdd_64_32_long_scalar_qdd_64_32_long")
- (and (eq_attr "simd_type" "simd_mla,simd_sat_mlal") (eq_attr "simd_mode" "V16QI,V8HI")) (const_string "neon_mla_qqq_8_16")
- (and (eq_attr "simd_type" "simd_mla,simd_sat_mlal") (eq_attr "simd_mode" "V4SI")) (const_string "neon_mla_qqq_32_qqd_32_scalar")
- (and (eq_attr "simd_type" "simd_mlal") (eq_attr "simd_mode" "V8QI,V16QI,V4HI,V8HI")) (const_string "neon_mla_ddd_8_16_qdd_16_8_long_32_16_long")
- (and (eq_attr "simd_type" "simd_mlal") (eq_attr "simd_mode" "V2SI,V4SI,V2DI")) (const_string "neon_mla_ddd_32_qqd_16_ddd_32_scalar_qdd_64_32_long_scalar_qdd_64_32_long")
- (and (eq_attr "simd_type" "simd_fmla") (eq_attr "simd_mode" "V2SF")) (const_string "neon_fp_vmla_ddd")
- (and (eq_attr "simd_type" "simd_fmla") (eq_attr "simd_mode" "V4SF,V2DF")) (const_string "neon_fp_vmla_qqq")
- (and (eq_attr "simd_type" "simd_fmla_elt") (eq_attr "simd_mode" "V2SF")) (const_string "neon_fp_vmla_ddd_scalar")
- (and (eq_attr "simd_type" "simd_fmla_elt") (eq_attr "simd_mode" "V4SF,V2DF")) (const_string "neon_fp_vmla_qqq_scalar")
- (and (eq_attr "simd_type" "simd_fmul,simd_fmul_elt,simd_fdiv,simd_fsqrt") (eq_attr "simd_mode" "V2SF")) (const_string "neon_fp_vmul_ddd")
- (and (eq_attr "simd_type" "simd_fmul,simd_fmul_elt,simd_fdiv,simd_fsqrt") (eq_attr "simd_mode" "V4SF,V2DF")) (const_string "neon_fp_vmul_qqd")
- (and (eq_attr "simd_type" "simd_fadd") (eq_attr "simd_mode" "V2SF")) (const_string "neon_fp_vadd_ddd_vabs_dd")
- (and (eq_attr "simd_type" "simd_fadd") (eq_attr "simd_mode" "V4SF,V2DF")) (const_string "neon_fp_vadd_qqq_vabs_qq")
- (and (eq_attr "simd_type" "simd_fnegabs,simd_fminmax,simd_fminmaxv") (eq_attr "simd_mode" "V2SF")) (const_string "neon_fp_vadd_ddd_vabs_dd")
- (and (eq_attr "simd_type" "simd_fnegabs,simd_fminmax,simd_fminmaxv") (eq_attr "simd_mode" "V4SF,V2DF")) (const_string "neon_fp_vadd_qqq_vabs_qq")
- (and (eq_attr "simd_type" "simd_shift,simd_shift_acc") (eq_attr "simd_mode" "V8QI,V4HI,V2SI")) (const_string "neon_vshl_ddd")
- (and (eq_attr "simd_type" "simd_shift,simd_shift_acc") (eq_attr "simd_mode" "V16QI,V8HI,V4SI,V2DI")) (const_string "neon_shift_3")
- (eq_attr "simd_type" "simd_minmax,simd_minmaxv") (const_string "neon_int_5")
- (eq_attr "simd_type" "simd_shiftn_imm,simd_shiftn2_imm,simd_shiftl_imm,") (const_string "neon_shift_1")
- (eq_attr "simd_type" "simd_load1,simd_load2") (const_string "neon_vld1_1_2_regs")
- (eq_attr "simd_type" "simd_load3,simd_load3") (const_string "neon_vld1_3_4_regs")
- (eq_attr "simd_type" "simd_load1r,simd_load2r,simd_load3r,simd_load4r") (const_string "neon_vld2_2_regs_vld1_vld2_all_lanes")
- (eq_attr "simd_type" "simd_load1s,simd_load2s") (const_string "neon_vld1_vld2_lane")
- (eq_attr "simd_type" "simd_load3s,simd_load4s") (const_string "neon_vld3_vld4_lane")
- (eq_attr "simd_type" "simd_store1,simd_store2") (const_string "neon_vst1_1_2_regs_vst2_2_regs")
- (eq_attr "simd_type" "simd_store3,simd_store4") (const_string "neon_vst1_3_4_regs")
- (eq_attr "simd_type" "simd_store1s,simd_store2s") (const_string "neon_vst1_vst2_lane")
- (eq_attr "simd_type" "simd_store3s,simd_store4s") (const_string "neon_vst3_vst4_lane")
- (and (eq_attr "simd_type" "simd_frcpe,simd_frcps") (eq_attr "simd_mode" "V2SF")) (const_string "neon_fp_vrecps_vrsqrts_ddd")
- (and (eq_attr "simd_type" "simd_frcpe,simd_frcps") (eq_attr "simd_mode" "V4SF,V2DF")) (const_string "neon_fp_vrecps_vrsqrts_qqq")
- (eq_attr "simd_type" "none") (const_string "none")
- ]
- (const_string "unknown")))
-
-
-(define_expand "mov<mode>"
- [(set (match_operand:VALL 0 "aarch64_simd_nonimmediate_operand" "")
- (match_operand:VALL 1 "aarch64_simd_general_operand" ""))]
- "TARGET_SIMD"
- "
- if (GET_CODE (operands[0]) == MEM)
- operands[1] = force_reg (<MODE>mode, operands[1]);
- "
-)
-
-(define_expand "movmisalign<mode>"
- [(set (match_operand:VALL 0 "aarch64_simd_nonimmediate_operand" "")
- (match_operand:VALL 1 "aarch64_simd_general_operand" ""))]
- "TARGET_SIMD"
-{
- /* This pattern is not permitted to fail during expansion: if both arguments
- are non-registers (e.g. memory := constant, which can be created by the
- auto-vectorizer), force operand 1 into a register. */
- if (!register_operand (operands[0], <MODE>mode)
- && !register_operand (operands[1], <MODE>mode))
- operands[1] = force_reg (<MODE>mode, operands[1]);
-})
-
-(define_insn "aarch64_simd_dup<mode>"
- [(set (match_operand:VDQ 0 "register_operand" "=w")
- (vec_duplicate:VDQ (match_operand:<VEL> 1 "register_operand" "r")))]
- "TARGET_SIMD"
- "dup\\t%0.<Vtype>, %<vw>1"
- [(set_attr "simd_type" "simd_dupgp")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-(define_insn "aarch64_dup_lane<mode>"
- [(set (match_operand:VDQ_I 0 "register_operand" "=w")
- (vec_duplicate:VDQ_I
- (vec_select:<VEL>
- (match_operand:<VCON> 1 "register_operand" "w")
- (parallel [(match_operand:SI 2 "immediate_operand" "i")])
- )))]
- "TARGET_SIMD"
- "dup\\t%<v>0<Vmtype>, %1.<Vetype>[%2]"
- [(set_attr "simd_type" "simd_dup")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-(define_insn "aarch64_dup_lane<mode>"
- [(set (match_operand:SDQ_I 0 "register_operand" "=w")
- (vec_select:<VEL>
- (match_operand:<VCON> 1 "register_operand" "w")
- (parallel [(match_operand:SI 2 "immediate_operand" "i")])
- ))]
- "TARGET_SIMD"
- "dup\\t%<v>0<Vmtype>, %1.<Vetype>[%2]"
- [(set_attr "simd_type" "simd_dup")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-(define_insn "aarch64_simd_dup<mode>"
- [(set (match_operand:VDQF 0 "register_operand" "=w")
- (vec_duplicate:VDQF (match_operand:<VEL> 1 "register_operand" "w")))]
- "TARGET_SIMD"
- "dup\\t%0.<Vtype>, %1.<Vetype>[0]"
- [(set_attr "simd_type" "simd_dup")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-(define_insn "*aarch64_simd_mov<mode>"
- [(set (match_operand:VD 0 "aarch64_simd_nonimmediate_operand"
- "=w, Utv, w, ?r, ?w, ?r, w")
- (match_operand:VD 1 "aarch64_simd_general_operand"
- "Utv, w, w, w, r, r, Dn"))]
- "TARGET_SIMD
- && (register_operand (operands[0], <MODE>mode)
- || register_operand (operands[1], <MODE>mode))"
-{
- switch (which_alternative)
- {
- case 0: return "ld1\t{%0.<Vtype>}, %1";
- case 1: return "st1\t{%1.<Vtype>}, %0";
- case 2: return "orr\t%0.<Vbtype>, %1.<Vbtype>, %1.<Vbtype>";
- case 3: return "umov\t%0, %1.d[0]";
- case 4: return "ins\t%0.d[0], %1";
- case 5: return "mov\t%0, %1";
- case 6:
- return aarch64_output_simd_mov_immediate (&operands[1],
- <MODE>mode, 64);
- default: gcc_unreachable ();
- }
-}
- [(set_attr "simd_type" "simd_load1,simd_store1,simd_move,simd_movgp,simd_insgp,simd_move,simd_move_imm")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-(define_insn "*aarch64_simd_mov<mode>"
- [(set (match_operand:VQ 0 "aarch64_simd_nonimmediate_operand"
- "=w, Utv, w, ?r, ?w, ?r, w")
- (match_operand:VQ 1 "aarch64_simd_general_operand"
- "Utv, w, w, w, r, r, Dn"))]
- "TARGET_SIMD
- && (register_operand (operands[0], <MODE>mode)
- || register_operand (operands[1], <MODE>mode))"
-{
- switch (which_alternative)
- {
- case 0: return "ld1\t{%0.<Vtype>}, %1";
- case 1: return "st1\t{%1.<Vtype>}, %0";
- case 2: return "orr\t%0.<Vbtype>, %1.<Vbtype>, %1.<Vbtype>";
- case 3: return "umov\t%0, %1.d[0]\;umov\t%H0, %1.d[1]";
- case 4: return "ins\t%0.d[0], %1\;ins\t%0.d[1], %H1";
- case 5: return "#";
- case 6:
- return aarch64_output_simd_mov_immediate (&operands[1],
- <MODE>mode, 128);
- default: gcc_unreachable ();
- }
-}
- [(set_attr "simd_type" "simd_load1,simd_store1,simd_move,simd_movgp,simd_insgp,simd_move,simd_move_imm")
- (set_attr "simd_mode" "<MODE>")
- (set_attr "length" "4,4,4,8,8,8,4")]
-)
-
-(define_split
- [(set (match_operand:VQ 0 "register_operand" "")
- (match_operand:VQ 1 "register_operand" ""))]
- "TARGET_SIMD && reload_completed
- && GP_REGNUM_P (REGNO (operands[0]))
- && GP_REGNUM_P (REGNO (operands[1]))"
- [(set (match_dup 0) (match_dup 1))
- (set (match_dup 2) (match_dup 3))]
-{
- int rdest = REGNO (operands[0]);
- int rsrc = REGNO (operands[1]);
- rtx dest[2], src[2];
-
- dest[0] = gen_rtx_REG (DImode, rdest);
- src[0] = gen_rtx_REG (DImode, rsrc);
- dest[1] = gen_rtx_REG (DImode, rdest + 1);
- src[1] = gen_rtx_REG (DImode, rsrc + 1);
-
- aarch64_simd_disambiguate_copy (operands, dest, src, 2);
-})
-
-(define_insn "orn<mode>3"
- [(set (match_operand:VDQ 0 "register_operand" "=w")
- (ior:VDQ (not:VDQ (match_operand:VDQ 1 "register_operand" "w"))
- (match_operand:VDQ 2 "register_operand" "w")))]
- "TARGET_SIMD"
- "orn\t%0.<Vbtype>, %2.<Vbtype>, %1.<Vbtype>"
- [(set_attr "simd_type" "simd_logic")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-(define_insn "bic<mode>3"
- [(set (match_operand:VDQ 0 "register_operand" "=w")
- (and:VDQ (not:VDQ (match_operand:VDQ 1 "register_operand" "w"))
- (match_operand:VDQ 2 "register_operand" "w")))]
- "TARGET_SIMD"
- "bic\t%0.<Vbtype>, %2.<Vbtype>, %1.<Vbtype>"
- [(set_attr "simd_type" "simd_logic")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-(define_insn "add<mode>3"
- [(set (match_operand:VDQ 0 "register_operand" "=w")
- (plus:VDQ (match_operand:VDQ 1 "register_operand" "w")
- (match_operand:VDQ 2 "register_operand" "w")))]
- "TARGET_SIMD"
- "add\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
- [(set_attr "simd_type" "simd_add")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-(define_insn "sub<mode>3"
- [(set (match_operand:VDQ 0 "register_operand" "=w")
- (minus:VDQ (match_operand:VDQ 1 "register_operand" "w")
- (match_operand:VDQ 2 "register_operand" "w")))]
- "TARGET_SIMD"
- "sub\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
- [(set_attr "simd_type" "simd_add")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-(define_insn "mul<mode>3"
- [(set (match_operand:VDQM 0 "register_operand" "=w")
- (mult:VDQM (match_operand:VDQM 1 "register_operand" "w")
- (match_operand:VDQM 2 "register_operand" "w")))]
- "TARGET_SIMD"
- "mul\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
- [(set_attr "simd_type" "simd_mul")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-(define_insn "neg<mode>2"
- [(set (match_operand:VDQM 0 "register_operand" "=w")
- (neg:VDQM (match_operand:VDQM 1 "register_operand" "w")))]
- "TARGET_SIMD"
- "neg\t%0.<Vtype>, %1.<Vtype>"
- [(set_attr "simd_type" "simd_negabs")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-(define_insn "abs<mode>2"
- [(set (match_operand:VDQ 0 "register_operand" "=w")
- (abs:VDQ (match_operand:VDQ 1 "register_operand" "w")))]
- "TARGET_SIMD"
- "abs\t%0.<Vtype>, %1.<Vtype>"
- [(set_attr "simd_type" "simd_negabs")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-(define_insn "and<mode>3"
- [(set (match_operand:VDQ 0 "register_operand" "=w")
- (and:VDQ (match_operand:VDQ 1 "register_operand" "w")
- (match_operand:VDQ 2 "register_operand" "w")))]
- "TARGET_SIMD"
- "and\t%0.<Vbtype>, %1.<Vbtype>, %2.<Vbtype>"
- [(set_attr "simd_type" "simd_logic")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-(define_insn "ior<mode>3"
- [(set (match_operand:VDQ 0 "register_operand" "=w")
- (ior:VDQ (match_operand:VDQ 1 "register_operand" "w")
- (match_operand:VDQ 2 "register_operand" "w")))]
- "TARGET_SIMD"
- "orr\t%0.<Vbtype>, %1.<Vbtype>, %2.<Vbtype>"
- [(set_attr "simd_type" "simd_logic")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-(define_insn "xor<mode>3"
- [(set (match_operand:VDQ 0 "register_operand" "=w")
- (xor:VDQ (match_operand:VDQ 1 "register_operand" "w")
- (match_operand:VDQ 2 "register_operand" "w")))]
- "TARGET_SIMD"
- "eor\t%0.<Vbtype>, %1.<Vbtype>, %2.<Vbtype>"
- [(set_attr "simd_type" "simd_logic")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-(define_insn "one_cmpl<mode>2"
- [(set (match_operand:VDQ 0 "register_operand" "=w")
- (not:VDQ (match_operand:VDQ 1 "register_operand" "w")))]
- "TARGET_SIMD"
- "not\t%0.<Vbtype>, %1.<Vbtype>"
- [(set_attr "simd_type" "simd_logic")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-(define_insn "aarch64_simd_vec_set<mode>"
- [(set (match_operand:VQ_S 0 "register_operand" "=w")
- (vec_merge:VQ_S
- (vec_duplicate:VQ_S
- (match_operand:<VEL> 1 "register_operand" "r"))
- (match_operand:VQ_S 3 "register_operand" "0")
- (match_operand:SI 2 "immediate_operand" "i")))]
- "TARGET_SIMD"
- "ins\t%0.<Vetype>[%p2], %w1";
- [(set_attr "simd_type" "simd_insgp")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-(define_insn "aarch64_simd_lshr<mode>"
- [(set (match_operand:VDQ 0 "register_operand" "=w")
- (lshiftrt:VDQ (match_operand:VDQ 1 "register_operand" "w")
- (match_operand:VDQ 2 "aarch64_simd_rshift_imm" "Dr")))]
- "TARGET_SIMD"
- "ushr\t%0.<Vtype>, %1.<Vtype>, %2"
- [(set_attr "simd_type" "simd_shift_imm")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-(define_insn "aarch64_simd_ashr<mode>"
- [(set (match_operand:VDQ 0 "register_operand" "=w")
- (ashiftrt:VDQ (match_operand:VDQ 1 "register_operand" "w")
- (match_operand:VDQ 2 "aarch64_simd_rshift_imm" "Dr")))]
- "TARGET_SIMD"
- "sshr\t%0.<Vtype>, %1.<Vtype>, %2"
- [(set_attr "simd_type" "simd_shift_imm")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-(define_insn "aarch64_simd_imm_shl<mode>"
- [(set (match_operand:VDQ 0 "register_operand" "=w")
- (ashift:VDQ (match_operand:VDQ 1 "register_operand" "w")
- (match_operand:VDQ 2 "aarch64_simd_lshift_imm" "Dl")))]
- "TARGET_SIMD"
- "shl\t%0.<Vtype>, %1.<Vtype>, %2"
- [(set_attr "simd_type" "simd_shift_imm")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-(define_insn "aarch64_simd_reg_sshl<mode>"
- [(set (match_operand:VDQ 0 "register_operand" "=w")
- (ashift:VDQ (match_operand:VDQ 1 "register_operand" "w")
- (match_operand:VDQ 2 "register_operand" "w")))]
- "TARGET_SIMD"
- "sshl\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
- [(set_attr "simd_type" "simd_shift")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-(define_insn "aarch64_simd_reg_shl<mode>_unsigned"
- [(set (match_operand:VDQ 0 "register_operand" "=w")
- (unspec:VDQ [(match_operand:VDQ 1 "register_operand" "w")
- (match_operand:VDQ 2 "register_operand" "w")]
- UNSPEC_ASHIFT_UNSIGNED))]
- "TARGET_SIMD"
- "ushl\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
- [(set_attr "simd_type" "simd_shift")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-(define_insn "aarch64_simd_reg_shl<mode>_signed"
- [(set (match_operand:VDQ 0 "register_operand" "=w")
- (unspec:VDQ [(match_operand:VDQ 1 "register_operand" "w")
- (match_operand:VDQ 2 "register_operand" "w")]
- UNSPEC_ASHIFT_SIGNED))]
- "TARGET_SIMD"
- "sshl\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
- [(set_attr "simd_type" "simd_shift")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-(define_expand "ashl<mode>3"
- [(match_operand:VDQ 0 "register_operand" "")
- (match_operand:VDQ 1 "register_operand" "")
- (match_operand:SI 2 "general_operand" "")]
- "TARGET_SIMD"
-{
- int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
- int shift_amount;
-
- if (CONST_INT_P (operands[2]))
- {
- shift_amount = INTVAL (operands[2]);
- if (shift_amount >= 0 && shift_amount < bit_width)
- {
- rtx tmp = aarch64_simd_gen_const_vector_dup (<MODE>mode,
- shift_amount);
- emit_insn (gen_aarch64_simd_imm_shl<mode> (operands[0],
- operands[1],
- tmp));
- DONE;
- }
- else
- {
- operands[2] = force_reg (SImode, operands[2]);
- }
- }
- else if (MEM_P (operands[2]))
- {
- operands[2] = force_reg (SImode, operands[2]);
- }
-
- if (REG_P (operands[2]))
- {
- rtx tmp = gen_reg_rtx (<MODE>mode);
- emit_insn (gen_aarch64_simd_dup<mode> (tmp,
- convert_to_mode (<VEL>mode,
- operands[2],
- 0)));
- emit_insn (gen_aarch64_simd_reg_sshl<mode> (operands[0], operands[1],
- tmp));
- DONE;
- }
- else
- FAIL;
-}
-)
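
The expander above splits on whether the shift count is a compile-time constant in range (immediate form) or must be broadcast into a vector register (register form). Two loops of the kind that would be expected to exercise the two paths when auto-vectorised; the function names are illustrative and the asm shown is only approximate.

/* Constant, in-range count: expected to use the immediate form
   (roughly "shl v0.4s, v0.4s, 3") via aarch64_simd_imm_shl.  */
void
shift_left_by_3 (int *a, int n)
{
  for (int i = 0; i < n; i++)
    a[i] <<= 3;
}

/* Variable count: expected to be duplicated across lanes first (dup)
   and then shifted with the register form (sshl).  */
void
shift_left_by_s (int *a, int n, int s)
{
  for (int i = 0; i < n; i++)
    a[i] <<= s;
}
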
-
-(define_expand "lshr<mode>3"
- [(match_operand:VDQ 0 "register_operand" "")
- (match_operand:VDQ 1 "register_operand" "")
- (match_operand:SI 2 "general_operand" "")]
- "TARGET_SIMD"
-{
- int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
- int shift_amount;
-
- if (CONST_INT_P (operands[2]))
- {
- shift_amount = INTVAL (operands[2]);
- if (shift_amount > 0 && shift_amount <= bit_width)
- {
- rtx tmp = aarch64_simd_gen_const_vector_dup (<MODE>mode,
- shift_amount);
- emit_insn (gen_aarch64_simd_lshr<mode> (operands[0],
- operands[1],
- tmp));
- DONE;
- }
- else
- operands[2] = force_reg (SImode, operands[2]);
- }
- else if (MEM_P (operands[2]))
- {
- operands[2] = force_reg (SImode, operands[2]);
- }
-
- if (REG_P (operands[2]))
- {
- rtx tmp = gen_reg_rtx (SImode);
- rtx tmp1 = gen_reg_rtx (<MODE>mode);
- emit_insn (gen_negsi2 (tmp, operands[2]));
- emit_insn (gen_aarch64_simd_dup<mode> (tmp1,
- convert_to_mode (<VEL>mode,
- tmp, 0)));
- emit_insn (gen_aarch64_simd_reg_shl<mode>_unsigned (operands[0],
- operands[1],
- tmp1));
- DONE;
- }
- else
- FAIL;
-}
-)
-
-(define_expand "ashr<mode>3"
- [(match_operand:VDQ 0 "register_operand" "")
- (match_operand:VDQ 1 "register_operand" "")
- (match_operand:SI 2 "general_operand" "")]
- "TARGET_SIMD"
-{
- int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
- int shift_amount;
-
- if (CONST_INT_P (operands[2]))
- {
- shift_amount = INTVAL (operands[2]);
- if (shift_amount > 0 && shift_amount <= bit_width)
- {
- rtx tmp = aarch64_simd_gen_const_vector_dup (<MODE>mode,
- shift_amount);
- emit_insn (gen_aarch64_simd_ashr<mode> (operands[0],
- operands[1],
- tmp));
- DONE;
- }
- else
- operands[2] = force_reg (SImode, operands[2]);
- }
- else if (MEM_P (operands[2]))
- {
- operands[2] = force_reg (SImode, operands[2]);
- }
-
- if (REG_P (operands[2]))
- {
- rtx tmp = gen_reg_rtx (SImode);
- rtx tmp1 = gen_reg_rtx (<MODE>mode);
- emit_insn (gen_negsi2 (tmp, operands[2]));
- emit_insn (gen_aarch64_simd_dup<mode> (tmp1,
- convert_to_mode (<VEL>mode,
- tmp, 0)));
- emit_insn (gen_aarch64_simd_reg_shl<mode>_signed (operands[0],
- operands[1],
- tmp1));
- DONE;
- }
- else
- FAIL;
-}
-)
-
-(define_expand "vashl<mode>3"
- [(match_operand:VDQ 0 "register_operand" "")
- (match_operand:VDQ 1 "register_operand" "")
- (match_operand:VDQ 2 "register_operand" "")]
- "TARGET_SIMD"
-{
- emit_insn (gen_aarch64_simd_reg_sshl<mode> (operands[0], operands[1],
- operands[2]));
- DONE;
-})
-
-;; Using mode VQ_S as there is no V2DImode neg!
-;; Negating individual lanes most certainly offsets the
-;; gain from vectorization.
-(define_expand "vashr<mode>3"
- [(match_operand:VQ_S 0 "register_operand" "")
- (match_operand:VQ_S 1 "register_operand" "")
- (match_operand:VQ_S 2 "register_operand" "")]
- "TARGET_SIMD"
-{
- rtx neg = gen_reg_rtx (<MODE>mode);
- emit (gen_neg<mode>2 (neg, operands[2]));
- emit_insn (gen_aarch64_simd_reg_shl<mode>_signed (operands[0], operands[1],
- neg));
- DONE;
-})
-
-(define_expand "vlshr<mode>3"
- [(match_operand:VQ_S 0 "register_operand" "")
- (match_operand:VQ_S 1 "register_operand" "")
- (match_operand:VQ_S 2 "register_operand" "")]
- "TARGET_SIMD"
-{
- rtx neg = gen_reg_rtx (<MODE>mode);
- emit (gen_neg<mode>2 (neg, operands[2]));
- emit_insn (gen_aarch64_simd_reg_shl<mode>_unsigned (operands[0], operands[1],
- neg));
- DONE;
-})
-
-(define_expand "vec_set<mode>"
- [(match_operand:VQ_S 0 "register_operand" "+w")
- (match_operand:<VEL> 1 "register_operand" "r")
- (match_operand:SI 2 "immediate_operand" "")]
- "TARGET_SIMD"
- {
- HOST_WIDE_INT elem = (HOST_WIDE_INT) 1 << INTVAL (operands[2]);
- emit_insn (gen_aarch64_simd_vec_set<mode> (operands[0], operands[1],
- GEN_INT (elem), operands[0]));
- DONE;
- }
-)
-
-(define_insn "aarch64_simd_vec_setv2di"
- [(set (match_operand:V2DI 0 "register_operand" "=w")
- (vec_merge:V2DI
- (vec_duplicate:V2DI
- (match_operand:DI 1 "register_operand" "r"))
- (match_operand:V2DI 3 "register_operand" "0")
- (match_operand:SI 2 "immediate_operand" "i")))]
- "TARGET_SIMD"
- "ins\t%0.d[%p2], %1";
- [(set_attr "simd_type" "simd_insgp")
- (set_attr "simd_mode" "V2DI")]
-)
-
-(define_expand "vec_setv2di"
- [(match_operand:V2DI 0 "register_operand" "+w")
- (match_operand:DI 1 "register_operand" "r")
- (match_operand:SI 2 "immediate_operand" "")]
- "TARGET_SIMD"
- {
- HOST_WIDE_INT elem = (HOST_WIDE_INT) 1 << INTVAL (operands[2]);
- emit_insn (gen_aarch64_simd_vec_setv2di (operands[0], operands[1],
- GEN_INT (elem), operands[0]));
- DONE;
- }
-)
-
-(define_insn "aarch64_simd_vec_set<mode>"
- [(set (match_operand:VDQF 0 "register_operand" "=w")
- (vec_merge:VDQF
- (vec_duplicate:VDQF
- (match_operand:<VEL> 1 "register_operand" "w"))
- (match_operand:VDQF 3 "register_operand" "0")
- (match_operand:SI 2 "immediate_operand" "i")))]
- "TARGET_SIMD"
- "ins\t%0.<Vetype>[%p2], %1.<Vetype>[0]";
- [(set_attr "simd_type" "simd_ins")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-(define_expand "vec_set<mode>"
- [(match_operand:VDQF 0 "register_operand" "+w")
- (match_operand:<VEL> 1 "register_operand" "w")
- (match_operand:SI 2 "immediate_operand" "")]
- "TARGET_SIMD"
- {
- HOST_WIDE_INT elem = (HOST_WIDE_INT) 1 << INTVAL (operands[2]);
- emit_insn (gen_aarch64_simd_vec_set<mode> (operands[0], operands[1],
- GEN_INT (elem), operands[0]));
- DONE;
- }
-)
-
-
-(define_insn "aarch64_mla<mode>"
- [(set (match_operand:VQ_S 0 "register_operand" "=w")
- (plus:VQ_S (mult:VQ_S (match_operand:VQ_S 2 "register_operand" "w")
- (match_operand:VQ_S 3 "register_operand" "w"))
- (match_operand:VQ_S 1 "register_operand" "0")))]
- "TARGET_SIMD"
- "mla\t%0.<Vtype>, %2.<Vtype>, %3.<Vtype>"
- [(set_attr "simd_type" "simd_mla")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-(define_insn "aarch64_mls<mode>"
- [(set (match_operand:VQ_S 0 "register_operand" "=w")
- (minus:VQ_S (match_operand:VQ_S 1 "register_operand" "0")
- (mult:VQ_S (match_operand:VQ_S 2 "register_operand" "w")
- (match_operand:VQ_S 3 "register_operand" "w"))))]
- "TARGET_SIMD"
- "mls\t%0.<Vtype>, %2.<Vtype>, %3.<Vtype>"
- [(set_attr "simd_type" "simd_mla")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-;; Max/Min operations.
-(define_insn "<maxmin><mode>3"
- [(set (match_operand:VQ_S 0 "register_operand" "=w")
- (MAXMIN:VQ_S (match_operand:VQ_S 1 "register_operand" "w")
- (match_operand:VQ_S 2 "register_operand" "w")))]
- "TARGET_SIMD"
- "<maxmin>\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
- [(set_attr "simd_type" "simd_minmax")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-;; Move into low-half clearing high half to 0.
-
-(define_insn "move_lo_quad_<mode>"
- [(set (match_operand:VQ 0 "register_operand" "=w")
- (vec_concat:VQ
- (match_operand:<VHALF> 1 "register_operand" "w")
- (vec_duplicate:<VHALF> (const_int 0))))]
- "TARGET_SIMD"
- "mov\\t%d0, %d1";
- [(set_attr "simd_type" "simd_dup")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-;; Move into high-half.
-
-(define_insn "aarch64_simd_move_hi_quad_<mode>"
- [(set (match_operand:VQ 0 "register_operand" "+w")
- (vec_concat:VQ
- (vec_select:<VHALF>
- (match_dup 0)
- (match_operand:VQ 2 "vect_par_cnst_lo_half" ""))
- (match_operand:<VHALF> 1 "register_operand" "w")))]
- "TARGET_SIMD"
- "ins\\t%0.d[1], %1.d[0]";
- [(set_attr "simd_type" "simd_ins")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-(define_expand "move_hi_quad_<mode>"
- [(match_operand:VQ 0 "register_operand" "")
- (match_operand:<VHALF> 1 "register_operand" "")]
- "TARGET_SIMD"
-{
- rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, false);
- emit_insn (gen_aarch64_simd_move_hi_quad_<mode> (operands[0],
- operands[1], p));
- DONE;
-})
-
-;; Narrowing operations.
-
-;; For doubles.
-(define_insn "aarch64_simd_vec_pack_trunc_<mode>"
- [(set (match_operand:<VNARROWQ> 0 "register_operand" "=w")
- (truncate:<VNARROWQ> (match_operand:VQN 1 "register_operand" "w")))]
- "TARGET_SIMD"
- "xtn\\t%0.<Vntype>, %1.<Vtype>"
- [(set_attr "simd_type" "simd_shiftn_imm")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-(define_expand "vec_pack_trunc_<mode>"
- [(match_operand:<VNARROWD> 0 "register_operand" "")
- (match_operand:VDN 1 "register_operand" "")
- (match_operand:VDN 2 "register_operand" "")]
- "TARGET_SIMD"
-{
- rtx tempreg = gen_reg_rtx (<VDBL>mode);
-
- emit_insn (gen_move_lo_quad_<Vdbl> (tempreg, operands[1]));
- emit_insn (gen_move_hi_quad_<Vdbl> (tempreg, operands[2]));
- emit_insn (gen_aarch64_simd_vec_pack_trunc_<Vdbl> (operands[0], tempreg));
- DONE;
-})
-
-;; For quads.
-
-(define_insn "vec_pack_trunc_<mode>"
- [(set (match_operand:<VNARROWQ2> 0 "register_operand" "+&w")
- (vec_concat:<VNARROWQ2>
- (truncate:<VNARROWQ> (match_operand:VQN 1 "register_operand" "w"))
- (truncate:<VNARROWQ> (match_operand:VQN 2 "register_operand" "w"))))]
- "TARGET_SIMD"
- "xtn\\t%0.<Vntype>, %1.<Vtype>\;xtn2\\t%0.<V2ntype>, %2.<Vtype>"
- [(set_attr "simd_type" "simd_shiftn2_imm")
- (set_attr "simd_mode" "<MODE>")
- (set_attr "length" "8")]
-)
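
A narrowing loop of the kind the vectoriser is expected to lower through the vec_pack_trunc patterns above; the function name is illustrative.

/* int -> short narrowing; on the vector side this is expected to end in
   the xtn/xtn2 output of the patterns above.  */
void
narrow_int_to_short (short *dst, const int *src, int n)
{
  for (int i = 0; i < n; i++)
    dst[i] = (short) src[i];
}
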
-
-;; Widening operations.
-
-(define_insn "aarch64_simd_vec_unpack<su>_lo_<mode>"
- [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
- (ANY_EXTEND:<VWIDE> (vec_select:<VHALF>
- (match_operand:VQW 1 "register_operand" "w")
- (match_operand:VQW 2 "vect_par_cnst_lo_half" "")
- )))]
- "TARGET_SIMD"
- "<su>shll %0.<Vwtype>, %1.<Vhalftype>, 0"
- [(set_attr "simd_type" "simd_shiftl_imm")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-(define_insn "aarch64_simd_vec_unpack<su>_hi_<mode>"
- [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
- (ANY_EXTEND:<VWIDE> (vec_select:<VHALF>
- (match_operand:VQW 1 "register_operand" "w")
- (match_operand:VQW 2 "vect_par_cnst_hi_half" "")
- )))]
- "TARGET_SIMD"
- "<su>shll2 %0.<Vwtype>, %1.<Vtype>, 0"
- [(set_attr "simd_type" "simd_shiftl_imm")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-(define_expand "vec_unpack<su>_hi_<mode>"
- [(match_operand:<VWIDE> 0 "register_operand" "")
- (ANY_EXTEND:<VWIDE> (match_operand:VQW 1 "register_operand"))]
- "TARGET_SIMD"
- {
- rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
- emit_insn (gen_aarch64_simd_vec_unpack<su>_hi_<mode> (operands[0],
- operands[1], p));
- DONE;
- }
-)
-
-(define_expand "vec_unpack<su>_lo_<mode>"
- [(match_operand:<VWIDE> 0 "register_operand" "")
- (ANY_EXTEND:<VWIDE> (match_operand:VQW 1 "register_operand" ""))]
- "TARGET_SIMD"
- {
- rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, false);
- emit_insn (gen_aarch64_simd_vec_unpack<su>_lo_<mode> (operands[0],
- operands[1], p));
- DONE;
- }
-)
-
-;; Widening arithmetic.
-
-(define_insn "aarch64_simd_vec_<su>mult_lo_<mode>"
- [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
- (mult:<VWIDE> (ANY_EXTEND:<VWIDE> (vec_select:<VHALF>
- (match_operand:VQW 1 "register_operand" "w")
- (match_operand:VQW 3 "vect_par_cnst_lo_half" "")))
- (ANY_EXTEND:<VWIDE> (vec_select:<VHALF>
- (match_operand:VQW 2 "register_operand" "w")
- (match_dup 3)))))]
- "TARGET_SIMD"
- "<su>mull\\t%0.<Vwtype>, %1.<Vhalftype>, %2.<Vhalftype>"
- [(set_attr "simd_type" "simd_mull")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-(define_expand "vec_widen_<su>mult_lo_<mode>"
- [(match_operand:<VWIDE> 0 "register_operand" "")
- (ANY_EXTEND:<VWIDE> (match_operand:VQW 1 "register_operand" ""))
- (ANY_EXTEND:<VWIDE> (match_operand:VQW 2 "register_operand" ""))]
- "TARGET_SIMD"
- {
- rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, false);
- emit_insn (gen_aarch64_simd_vec_<su>mult_lo_<mode> (operands[0],
- operands[1],
- operands[2], p));
- DONE;
- }
-)
-
-(define_insn "aarch64_simd_vec_<su>mult_hi_<mode>"
- [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
- (mult:<VWIDE> (ANY_EXTEND:<VWIDE> (vec_select:<VHALF>
- (match_operand:VQW 1 "register_operand" "w")
- (match_operand:VQW 3 "vect_par_cnst_hi_half" "")))
- (ANY_EXTEND:<VWIDE> (vec_select:<VHALF>
- (match_operand:VQW 2 "register_operand" "w")
- (match_dup 3)))))]
- "TARGET_SIMD"
- "<su>mull2\\t%0.<Vwtype>, %1.<Vtype>, %2.<Vtype>"
- [(set_attr "simd_type" "simd_mull")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-(define_expand "vec_widen_<su>mult_hi_<mode>"
- [(match_operand:<VWIDE> 0 "register_operand" "")
- (ANY_EXTEND:<VWIDE> (match_operand:VQW 1 "register_operand" ""))
- (ANY_EXTEND:<VWIDE> (match_operand:VQW 2 "register_operand" ""))]
- "TARGET_SIMD"
- {
- rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
- emit_insn (gen_aarch64_simd_vec_<su>mult_hi_<mode> (operands[0],
- operands[1],
- operands[2], p));
- DONE;
-
- }
-)
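
A widening multiply of the kind the vec_widen_<su>mult_lo/hi expanders above are expected to serve; the function name is illustrative.

/* short * short -> int widening multiply; expected to be lowered through
   the lo/hi expanders above into smull / smull2.  */
void
widen_mult (int *dst, const short *a, const short *b, int n)
{
  for (int i = 0; i < n; i++)
    dst[i] = (int) a[i] * (int) b[i];
}
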
-
-;; FP vector operations.
-;; AArch64 AdvSIMD supports single-precision (32-bit) and
-;; double-precision (64-bit) floating-point data types and arithmetic as
-;; defined by the IEEE 754-2008 standard. This makes them vectorizable
-;; without the need for -ffast-math or -funsafe-math-optimizations.
-;;
-;; Floating-point operations can raise an exception. Vectorizing such
-;; operations is safe for the reasons explained below.
-;;
-;; ARMv8 permits an extension to enable trapped floating-point
-;; exception handling, however this is an optional feature. In the
-;; event of a floating-point exception being raised by vectorised
-;; code then:
-;; 1. If trapped floating-point exceptions are available, then a trap
-;; will be taken when any lane raises an enabled exception. A trap
-;; handler may determine which lane raised the exception.
-;; 2. Alternatively a sticky exception flag is set in the
-;; floating-point status register (FPSR). Software may explicitly
-;; test the exception flags, in which case the tests will either
-;; prevent vectorisation, allowing precise identification of the
-;; failing operation, or if tested outside of vectorisable regions
-;; then the specific operation and lane are not of interest.
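-;;
-;; As a rough sketch of case 2 above (plain C99 <fenv.h>, nothing specific
-;; to this port), code that inspects the sticky flags around a vectorisable
-;; region might look like:
-;;
-;;   #include <fenv.h>
-;;
-;;   int
-;;   sum_and_check (const double *a, int n, double *sum)
-;;   {
-;;     double s = 0.0;
-;;     feclearexcept (FE_ALL_EXCEPT);
-;;     for (int i = 0; i < n; i++)
-;;       s += a[i];
-;;     *sum = s;
-;;     /* Sticky: this only reports that some lane of some operation
-;;        raised FE_INVALID, not which one.  */
-;;     return fetestexcept (FE_INVALID) != 0;
-;;   }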
-
-;; FP arithmetic operations.
-
-(define_insn "add<mode>3"
- [(set (match_operand:VDQF 0 "register_operand" "=w")
- (plus:VDQF (match_operand:VDQF 1 "register_operand" "w")
- (match_operand:VDQF 2 "register_operand" "w")))]
- "TARGET_SIMD"
- "fadd\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
- [(set_attr "simd_type" "simd_fadd")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-(define_insn "sub<mode>3"
- [(set (match_operand:VDQF 0 "register_operand" "=w")
- (minus:VDQF (match_operand:VDQF 1 "register_operand" "w")
- (match_operand:VDQF 2 "register_operand" "w")))]
- "TARGET_SIMD"
- "fsub\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
- [(set_attr "simd_type" "simd_fadd")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-(define_insn "mul<mode>3"
- [(set (match_operand:VDQF 0 "register_operand" "=w")
- (mult:VDQF (match_operand:VDQF 1 "register_operand" "w")
- (match_operand:VDQF 2 "register_operand" "w")))]
- "TARGET_SIMD"
- "fmul\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
- [(set_attr "simd_type" "simd_fmul")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-(define_insn "div<mode>3"
- [(set (match_operand:VDQF 0 "register_operand" "=w")
- (div:VDQF (match_operand:VDQF 1 "register_operand" "w")
- (match_operand:VDQF 2 "register_operand" "w")))]
- "TARGET_SIMD"
- "fdiv\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
- [(set_attr "simd_type" "simd_fdiv")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-(define_insn "neg<mode>2"
- [(set (match_operand:VDQF 0 "register_operand" "=w")
- (neg:VDQF (match_operand:VDQF 1 "register_operand" "w")))]
- "TARGET_SIMD"
- "fneg\\t%0.<Vtype>, %1.<Vtype>"
- [(set_attr "simd_type" "simd_fnegabs")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-(define_insn "abs<mode>2"
- [(set (match_operand:VDQF 0 "register_operand" "=w")
- (abs:VDQF (match_operand:VDQF 1 "register_operand" "w")))]
- "TARGET_SIMD"
- "fabs\\t%0.<Vtype>, %1.<Vtype>"
- [(set_attr "simd_type" "simd_fnegabs")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-(define_insn "fma<mode>4"
- [(set (match_operand:VDQF 0 "register_operand" "=w")
- (fma:VDQF (match_operand:VDQF 1 "register_operand" "w")
- (match_operand:VDQF 2 "register_operand" "w")
- (match_operand:VDQF 3 "register_operand" "0")))]
- "TARGET_SIMD"
- "fmla\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
- [(set_attr "simd_type" "simd_fmla")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-(define_insn "aarch64_frint<frint_suffix><mode>"
- [(set (match_operand:VDQF 0 "register_operand" "=w")
- (unspec:VDQF [(match_operand:VDQF 1 "register_operand" "w")]
- FRINT))]
- "TARGET_SIMD"
- "frint<frint_suffix>\\t%0.<Vtype>, %1.<Vtype>"
- [(set_attr "simd_type" "simd_frint")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-;; Vector versions of the floating-point frint patterns.
-;; Expands to btrunc, ceil, floor, nearbyint, rint, round.
-(define_expand "<frint_pattern><mode>2"
- [(set (match_operand:VDQF 0 "register_operand")
- (unspec:VDQF [(match_operand:VDQF 1 "register_operand")]
- FRINT))]
- "TARGET_SIMD"
- {})
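-
-;; As a hypothetical example, when the target and options permit
-;; vectorisation, a loop over floorf such as the one below can use the
-;; floorv4sf2 expansion above, and hence typically a single frintm per
-;; vector of inputs:
-;;
-;;   #include <math.h>
-;;
-;;   void
-;;   vfloor (float *restrict d, const float *restrict s, int n)
-;;   {
-;;     for (int i = 0; i < n; i++)
-;;       d[i] = floorf (s[i]);
-;;   }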
-
-(define_insn "aarch64_fcvt<frint_suffix><su><mode>"
- [(set (match_operand:<FCVT_TARGET> 0 "register_operand" "=w")
- (FIXUORS:<FCVT_TARGET> (unspec:<FCVT_TARGET>
- [(match_operand:VDQF 1 "register_operand" "w")]
- FCVT)))]
- "TARGET_SIMD"
- "fcvt<frint_suffix><su>\\t%0.<Vtype>, %1.<Vtype>"
- [(set_attr "simd_type" "simd_fcvti")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-;; Vector versions of the fcvt standard patterns.
-;; Expands to lbtrunc, lround, lceil, lfloor
-(define_expand "l<fcvt_pattern><su_optab><fcvt_target><VDQF:mode>2"
- [(set (match_operand:<FCVT_TARGET> 0 "register_operand")
- (FIXUORS:<FCVT_TARGET> (unspec:<FCVT_TARGET>
- [(match_operand:VDQF 1 "register_operand")]
- FCVT)))]
- "TARGET_SIMD"
- {})
-
-(define_insn "aarch64_vmls<mode>"
- [(set (match_operand:VDQF 0 "register_operand" "=w")
- (minus:VDQF (match_operand:VDQF 1 "register_operand" "0")
- (mult:VDQF (match_operand:VDQF 2 "register_operand" "w")
- (match_operand:VDQF 3 "register_operand" "w"))))]
- "TARGET_SIMD"
- "fmls\\t%0.<Vtype>, %2.<Vtype>, %3.<Vtype>"
- [(set_attr "simd_type" "simd_fmla")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-;; FP Max/Min
-;; Max/Min are introduced by GCC's mid-end via idiom recognition.  An
-;; expression like:
-;; a = (b < c) ? b : c;
-;; is idiom-matched as MIN_EXPR<b,c> only if -ffinite-math-only is enabled
-;; either explicitly or indirectly via -ffast-math.
-;;
-;; MIN_EXPR and MAX_EXPR eventually map to 'smin' and 'smax' in RTL.
-;; The 'smax' and 'smin' RTL standard pattern names do not specify which
-;; operand will be returned when both operands are zero (i.e. they may not
-;; honour signed zeroes), or when either operand is NaN. Therefore GCC
-;; only introduces MIN_EXPR/MAX_EXPR in fast math mode or when not honouring
-;; NaNs.
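-;;
-;; As a hypothetical example, compiled with -ffinite-math-only (for
-;; instance via -ffast-math) the loop below is recognised as a MIN_EXPR
-;; and can then be vectorised using the fminnm pattern that follows:
-;;
-;;   void
-;;   vmin (float *restrict d, const float *restrict a,
-;;         const float *restrict b, int n)
-;;   {
-;;     for (int i = 0; i < n; i++)
-;;       d[i] = (a[i] < b[i]) ? a[i] : b[i];
-;;   }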
-
-(define_insn "smax<mode>3"
- [(set (match_operand:VDQF 0 "register_operand" "=w")
- (smax:VDQF (match_operand:VDQF 1 "register_operand" "w")
- (match_operand:VDQF 2 "register_operand" "w")))]
- "TARGET_SIMD"
- "fmaxnm\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
- [(set_attr "simd_type" "simd_fminmax")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-(define_insn "smin<mode>3"
- [(set (match_operand:VDQF 0 "register_operand" "=w")
- (smin:VDQF (match_operand:VDQF 1 "register_operand" "w")
- (match_operand:VDQF 2 "register_operand" "w")))]
- "TARGET_SIMD"
- "fminnm\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
- [(set_attr "simd_type" "simd_fminmax")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-;; FP 'across lanes' max and min ops.
-
-(define_insn "reduc_s<fmaxminv>_v4sf"
- [(set (match_operand:V4SF 0 "register_operand" "=w")
- (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "w")]
- FMAXMINV))]
- "TARGET_SIMD"
-  "f<fmaxminv>nmv\\t%s0, %1.4s"
- [(set_attr "simd_type" "simd_fminmaxv")
- (set_attr "simd_mode" "V4SF")]
-)
-
-(define_insn "reduc_s<fmaxminv>_<mode>"
- [(set (match_operand:V2F 0 "register_operand" "=w")
- (unspec:V2F [(match_operand:V2F 1 "register_operand" "w")]
- FMAXMINV))]
- "TARGET_SIMD"
-  "f<fmaxminv>nmp\\t%0.<Vtype>, %1.<Vtype>, %1.<Vtype>"
- [(set_attr "simd_type" "simd_fminmax")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-;; FP 'across lanes' add.
-
-(define_insn "aarch64_addvv4sf"
- [(set (match_operand:V4SF 0 "register_operand" "=w")
- (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "w")]
- UNSPEC_FADDV))]
- "TARGET_SIMD"
- "faddp\\t%0.4s, %1.4s, %1.4s"
- [(set_attr "simd_type" "simd_fadd")
- (set_attr "simd_mode" "V4SF")]
-)
-
-(define_expand "reduc_uplus_v4sf"
- [(set (match_operand:V4SF 0 "register_operand" "=w")
- (match_operand:V4SF 1 "register_operand" "w"))]
- "TARGET_SIMD"
-{
- rtx tmp = gen_reg_rtx (V4SFmode);
- emit_insn (gen_aarch64_addvv4sf (tmp, operands[1]));
- emit_insn (gen_aarch64_addvv4sf (operands[0], tmp));
- DONE;
-})
-
-(define_expand "reduc_splus_v4sf"
- [(set (match_operand:V4SF 0 "register_operand" "=w")
- (match_operand:V4SF 1 "register_operand" "w"))]
- "TARGET_SIMD"
-{
- rtx tmp = gen_reg_rtx (V4SFmode);
- emit_insn (gen_aarch64_addvv4sf (tmp, operands[1]));
- emit_insn (gen_aarch64_addvv4sf (operands[0], tmp));
- DONE;
-})
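-
-;; As a worked example of the two-step V4SF reduction above: starting from
-;; x = {1, 2, 3, 4}, the first faddp gives {1+2, 3+4, 1+2, 3+4} = {3, 7, 3, 7}
-;; and the second gives {10, 10, 10, 10}, so every lane (in particular the
-;; lane read back by the reduc_* users) holds the full sum.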
-
-(define_insn "aarch64_addv<mode>"
- [(set (match_operand:V2F 0 "register_operand" "=w")
- (unspec:V2F [(match_operand:V2F 1 "register_operand" "w")]
- UNSPEC_FADDV))]
- "TARGET_SIMD"
- "faddp\\t%<Vetype>0, %1.<Vtype>"
- [(set_attr "simd_type" "simd_fadd")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-(define_expand "reduc_uplus_<mode>"
- [(set (match_operand:V2F 0 "register_operand" "=w")
- (unspec:V2F [(match_operand:V2F 1 "register_operand" "w")]
- UNSPEC_FADDV))]
- "TARGET_SIMD"
- ""
-)
-
-(define_expand "reduc_splus_<mode>"
- [(set (match_operand:V2F 0 "register_operand" "=w")
- (unspec:V2F [(match_operand:V2F 1 "register_operand" "w")]
- UNSPEC_FADDV))]
- "TARGET_SIMD"
- ""
-)
-
-;; Reduction across lanes.
-
-(define_insn "aarch64_addv<mode>"
- [(set (match_operand:VDQV 0 "register_operand" "=w")
- (unspec:VDQV [(match_operand:VDQV 1 "register_operand" "w")]
- UNSPEC_ADDV))]
- "TARGET_SIMD"
- "addv\\t%<Vetype>0, %1.<Vtype>"
- [(set_attr "simd_type" "simd_addv")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-(define_expand "reduc_splus_<mode>"
- [(set (match_operand:VDQV 0 "register_operand" "=w")
- (unspec:VDQV [(match_operand:VDQV 1 "register_operand" "w")]
- UNSPEC_ADDV))]
- "TARGET_SIMD"
- ""
-)
-
-(define_expand "reduc_uplus_<mode>"
- [(set (match_operand:VDQV 0 "register_operand" "=w")
- (unspec:VDQV [(match_operand:VDQV 1 "register_operand" "w")]
- UNSPEC_ADDV))]
- "TARGET_SIMD"
- ""
-)
-
-(define_insn "aarch64_addvv2di"
- [(set (match_operand:V2DI 0 "register_operand" "=w")
- (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "w")]
- UNSPEC_ADDV))]
- "TARGET_SIMD"
- "addp\\t%d0, %1.2d"
- [(set_attr "simd_type" "simd_add")
- (set_attr "simd_mode" "V2DI")]
-)
-
-(define_expand "reduc_uplus_v2di"
- [(set (match_operand:V2DI 0 "register_operand" "=w")
- (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "w")]
- UNSPEC_ADDV))]
- "TARGET_SIMD"
- ""
-)
-
-(define_expand "reduc_splus_v2di"
- [(set (match_operand:V2DI 0 "register_operand" "=w")
- (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "w")]
- UNSPEC_ADDV))]
- "TARGET_SIMD"
- ""
-)
-
-(define_insn "aarch64_addvv2si"
- [(set (match_operand:V2SI 0 "register_operand" "=w")
- (unspec:V2SI [(match_operand:V2SI 1 "register_operand" "w")]
- UNSPEC_ADDV))]
- "TARGET_SIMD"
- "addp\\t%0.2s, %1.2s, %1.2s"
- [(set_attr "simd_type" "simd_add")
- (set_attr "simd_mode" "V2SI")]
-)
-
-(define_expand "reduc_uplus_v2si"
- [(set (match_operand:V2SI 0 "register_operand" "=w")
- (unspec:V2SI [(match_operand:V2SI 1 "register_operand" "w")]
- UNSPEC_ADDV))]
- "TARGET_SIMD"
- ""
-)
-
-(define_expand "reduc_splus_v2si"
- [(set (match_operand:V2SI 0 "register_operand" "=w")
- (unspec:V2SI [(match_operand:V2SI 1 "register_operand" "w")]
- UNSPEC_ADDV))]
- "TARGET_SIMD"
- ""
-)
-
-(define_insn "reduc_<maxminv>_<mode>"
- [(set (match_operand:VDQV 0 "register_operand" "=w")
- (unspec:VDQV [(match_operand:VDQV 1 "register_operand" "w")]
- MAXMINV))]
- "TARGET_SIMD"
- "<maxminv>v\\t%<Vetype>0, %1.<Vtype>"
- [(set_attr "simd_type" "simd_minmaxv")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-(define_insn "reduc_<maxminv>_v2si"
- [(set (match_operand:V2SI 0 "register_operand" "=w")
- (unspec:V2SI [(match_operand:V2SI 1 "register_operand" "w")]
- MAXMINV))]
- "TARGET_SIMD"
- "<maxminv>p\\t%0.2s, %1.2s, %1.2s"
- [(set_attr "simd_type" "simd_minmax")
- (set_attr "simd_mode" "V2SI")]
-)
-
-;; vbsl_* intrinsics may compile to any of bsl/bif/bit depending on register
-;; allocation.  For an intrinsic of the form:
-;; vD = bsl_* (vS, vN, vM)
-;; We can use any of:
-;; bsl vS, vN, vM (if D = S)
-;; bit vD, vN, vS (if D = M, so 1-bits in vS choose bits from vN, else vM)
-;; bif vD, vM, vS (if D = N, so 0-bits in vS choose bits from vM, else vN)
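-;;
-;; Whichever of the three forms is chosen, the result is the same bitwise
-;; select; roughly, per element (a plain C sketch of the semantics):
-;;
-;;   #include <stdint.h>
-;;
-;;   /* d = bsl (s, n, m): each result bit comes from n where the
-;;      corresponding bit of s is 1, and from m where it is 0.  */
-;;   uint32_t
-;;   bsl_bit (uint32_t s, uint32_t n, uint32_t m)
-;;   {
-;;     return (s & n) | (~s & m);
-;;   }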
-
-(define_insn "aarch64_simd_bsl<mode>_internal"
- [(set (match_operand:VALL 0 "register_operand" "=w,w,w")
- (unspec:VALL
- [(match_operand:<V_cmp_result> 1 "register_operand" " 0,w,w")
- (match_operand:VALL 2 "register_operand" " w,w,0")
- (match_operand:VALL 3 "register_operand" " w,0,w")]
- UNSPEC_BSL))]
- "TARGET_SIMD"
- "@
- bsl\\t%0.<Vbtype>, %2.<Vbtype>, %3.<Vbtype>
- bit\\t%0.<Vbtype>, %2.<Vbtype>, %1.<Vbtype>
- bif\\t%0.<Vbtype>, %3.<Vbtype>, %1.<Vbtype>"
-)
-
-(define_expand "aarch64_simd_bsl<mode>"
- [(set (match_operand:VALL 0 "register_operand")
- (unspec:VALL [(match_operand:<V_cmp_result> 1 "register_operand")
- (match_operand:VALL 2 "register_operand")
- (match_operand:VALL 3 "register_operand")]
- UNSPEC_BSL))]
- "TARGET_SIMD"
-{
- /* We can't alias operands together if they have different modes. */
- operands[1] = gen_lowpart (<V_cmp_result>mode, operands[1]);
-})
-
-(define_expand "aarch64_vcond_internal<mode>"
- [(set (match_operand:VDQ 0 "register_operand")
- (if_then_else:VDQ
- (match_operator 3 "comparison_operator"
- [(match_operand:VDQ 4 "register_operand")
- (match_operand:VDQ 5 "nonmemory_operand")])
- (match_operand:VDQ 1 "register_operand")
- (match_operand:VDQ 2 "register_operand")))]
- "TARGET_SIMD"
-{
- int inverse = 0, has_zero_imm_form = 0;
- rtx mask = gen_reg_rtx (<MODE>mode);
-
- switch (GET_CODE (operands[3]))
- {
- case LE:
- case LT:
- case NE:
- inverse = 1;
- /* Fall through. */
- case GE:
- case GT:
- case EQ:
- has_zero_imm_form = 1;
- break;
- case LEU:
- case LTU:
- inverse = 1;
- break;
- default:
- break;
- }
-
- if (!REG_P (operands[5])
- && (operands[5] != CONST0_RTX (<MODE>mode) || !has_zero_imm_form))
- operands[5] = force_reg (<MODE>mode, operands[5]);
-
- switch (GET_CODE (operands[3]))
- {
- case LT:
- case GE:
- emit_insn (gen_aarch64_cmge<mode> (mask, operands[4], operands[5]));
- break;
-
- case LE:
- case GT:
- emit_insn (gen_aarch64_cmgt<mode> (mask, operands[4], operands[5]));
- break;
-
- case LTU:
- case GEU:
- emit_insn (gen_aarch64_cmhs<mode> (mask, operands[4], operands[5]));
- break;
-
- case LEU:
- case GTU:
- emit_insn (gen_aarch64_cmhi<mode> (mask, operands[4], operands[5]));
- break;
-
- case NE:
- case EQ:
- emit_insn (gen_aarch64_cmeq<mode> (mask, operands[4], operands[5]));
- break;
-
- default:
- gcc_unreachable ();
- }
-
- if (inverse)
- emit_insn (gen_aarch64_simd_bsl<mode> (operands[0], mask, operands[2],
- operands[1]));
- else
- emit_insn (gen_aarch64_simd_bsl<mode> (operands[0], mask, operands[1],
- operands[2]));
-
- DONE;
-})
-
-(define_expand "aarch64_vcond_internal<mode>"
- [(set (match_operand:VDQF 0 "register_operand")
- (if_then_else:VDQF
- (match_operator 3 "comparison_operator"
- [(match_operand:VDQF 4 "register_operand")
- (match_operand:VDQF 5 "nonmemory_operand")])
- (match_operand:VDQF 1 "register_operand")
- (match_operand:VDQF 2 "register_operand")))]
- "TARGET_SIMD"
-{
- int inverse = 0;
- int use_zero_form = 0;
- int swap_bsl_operands = 0;
- rtx mask = gen_reg_rtx (<V_cmp_result>mode);
- rtx tmp = gen_reg_rtx (<V_cmp_result>mode);
-
- rtx (*base_comparison) (rtx, rtx, rtx);
- rtx (*complimentary_comparison) (rtx, rtx, rtx);
-
- switch (GET_CODE (operands[3]))
- {
- case GE:
- case GT:
- case LE:
- case LT:
- case EQ:
- if (operands[5] == CONST0_RTX (<MODE>mode))
- {
- use_zero_form = 1;
- break;
- }
- /* Fall through. */
- default:
- if (!REG_P (operands[5]))
- operands[5] = force_reg (<MODE>mode, operands[5]);
- }
-
- switch (GET_CODE (operands[3]))
- {
- case LT:
- case UNLT:
- inverse = 1;
- /* Fall through. */
- case GE:
- case UNGE:
- case ORDERED:
- case UNORDERED:
- base_comparison = gen_aarch64_cmge<mode>;
- complimentary_comparison = gen_aarch64_cmgt<mode>;
- break;
- case LE:
- case UNLE:
- inverse = 1;
- /* Fall through. */
- case GT:
- case UNGT:
- base_comparison = gen_aarch64_cmgt<mode>;
- complimentary_comparison = gen_aarch64_cmge<mode>;
- break;
- case EQ:
- case NE:
- case UNEQ:
- base_comparison = gen_aarch64_cmeq<mode>;
- complimentary_comparison = gen_aarch64_cmeq<mode>;
- break;
- default:
- gcc_unreachable ();
- }
-
- switch (GET_CODE (operands[3]))
- {
- case LT:
- case LE:
- case GT:
- case GE:
- case EQ:
- /* The easy case. Here we emit one of FCMGE, FCMGT or FCMEQ.
-     As a LT b <=> b GT a && a LE b <=> b GE a.  Our transformations are:
- a GE b -> a GE b
- a GT b -> a GT b
- a LE b -> b GE a
- a LT b -> b GT a
- a EQ b -> a EQ b
- Note that there also exist direct comparison against 0 forms,
- so catch those as a special case. */
- if (use_zero_form)
- {
- inverse = 0;
- switch (GET_CODE (operands[3]))
- {
- case LT:
- base_comparison = gen_aarch64_cmlt<mode>;
- break;
- case LE:
- base_comparison = gen_aarch64_cmle<mode>;
- break;
- default:
- /* Do nothing, other zero form cases already have the correct
- base_comparison. */
- break;
- }
- }
-
- if (!inverse)
- emit_insn (base_comparison (mask, operands[4], operands[5]));
- else
- emit_insn (complimentary_comparison (mask, operands[5], operands[4]));
- break;
- case UNLT:
- case UNLE:
- case UNGT:
- case UNGE:
- case NE:
-      /* FCM returns false for lanes which are unordered, so if we emit
-	 the inverse of the comparison we actually want and then swap the
-	 operands to BSL, we will end up with the correct result.
- Note that a NE NaN and NaN NE b are true for all a, b.
-
- Our transformations are:
- a GE b -> !(b GT a)
- a GT b -> !(b GE a)
- a LE b -> !(a GT b)
- a LT b -> !(a GE b)
- a NE b -> !(a EQ b) */
-
- if (inverse)
- emit_insn (base_comparison (mask, operands[4], operands[5]));
- else
- emit_insn (complimentary_comparison (mask, operands[5], operands[4]));
-
- swap_bsl_operands = 1;
- break;
- case UNEQ:
-      /* We check (a > b || b > a).  Combining these comparisons gives us
-	 a mask that is true iff (a != b && a ORDERED b); swapping the
-	 operands to BSL then selects its complement,
-	 (a == b || a UNORDERED b), as intended.  */
-
- emit_insn (gen_aarch64_cmgt<mode> (mask, operands[4], operands[5]));
- emit_insn (gen_aarch64_cmgt<mode> (tmp, operands[5], operands[4]));
- emit_insn (gen_ior<v_cmp_result>3 (mask, mask, tmp));
- swap_bsl_operands = 1;
- break;
- case UNORDERED:
- /* Operands are ORDERED iff (a > b || b >= a).
- Swapping the operands to BSL will give the UNORDERED case. */
- swap_bsl_operands = 1;
- /* Fall through. */
- case ORDERED:
- emit_insn (gen_aarch64_cmgt<mode> (tmp, operands[4], operands[5]));
- emit_insn (gen_aarch64_cmge<mode> (mask, operands[5], operands[4]));
- emit_insn (gen_ior<v_cmp_result>3 (mask, mask, tmp));
- break;
- default:
- gcc_unreachable ();
- }
-
- if (swap_bsl_operands)
- emit_insn (gen_aarch64_simd_bsl<mode> (operands[0], mask, operands[2],
- operands[1]));
- else
- emit_insn (gen_aarch64_simd_bsl<mode> (operands[0], mask, operands[1],
- operands[2]));
- DONE;
-})
-
-(define_expand "vcond<mode><mode>"
- [(set (match_operand:VALL 0 "register_operand")
- (if_then_else:VALL
- (match_operator 3 "comparison_operator"
- [(match_operand:VALL 4 "register_operand")
- (match_operand:VALL 5 "nonmemory_operand")])
- (match_operand:VALL 1 "register_operand")
- (match_operand:VALL 2 "register_operand")))]
- "TARGET_SIMD"
-{
- emit_insn (gen_aarch64_vcond_internal<mode> (operands[0], operands[1],
- operands[2], operands[3],
- operands[4], operands[5]));
- DONE;
-})
-
-
-(define_expand "vcondu<mode><mode>"
- [(set (match_operand:VDQ 0 "register_operand")
- (if_then_else:VDQ
- (match_operator 3 "comparison_operator"
- [(match_operand:VDQ 4 "register_operand")
- (match_operand:VDQ 5 "nonmemory_operand")])
- (match_operand:VDQ 1 "register_operand")
- (match_operand:VDQ 2 "register_operand")))]
- "TARGET_SIMD"
-{
- emit_insn (gen_aarch64_vcond_internal<mode> (operands[0], operands[1],
- operands[2], operands[3],
- operands[4], operands[5]));
- DONE;
-})
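-
-;; As a hypothetical illustration of the vcond route above: a loop of the
-;; form below can be vectorised via a VEC_COND_EXPR, which these expanders
-;; turn into a compare (cmgt here) whose mask then feeds the bsl pattern
-;; defined earlier.
-;;
-;;   void
-;;   select_gt (int *restrict d, const int *restrict a,
-;;              const int *restrict b, const int *restrict c, int n)
-;;   {
-;;     for (int i = 0; i < n; i++)
-;;       d[i] = (a[i] > b[i]) ? b[i] : c[i];
-;;   }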
-
-;; Patterns for AArch64 SIMD Intrinsics.
-
-(define_expand "aarch64_create<mode>"
- [(match_operand:VD_RE 0 "register_operand" "")
- (match_operand:DI 1 "general_operand" "")]
- "TARGET_SIMD"
-{
- rtx src = gen_lowpart (<MODE>mode, operands[1]);
- emit_move_insn (operands[0], src);
- DONE;
-})
-
-(define_insn "aarch64_get_lane_signed<mode>"
- [(set (match_operand:<VEL> 0 "register_operand" "=r")
- (sign_extend:<VEL>
- (vec_select:<VEL>
- (match_operand:VQ_S 1 "register_operand" "w")
- (parallel [(match_operand:SI 2 "immediate_operand" "i")]))))]
- "TARGET_SIMD"
- "smov\\t%0, %1.<Vetype>[%2]"
- [(set_attr "simd_type" "simd_movgp")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-(define_insn "aarch64_get_lane_unsigned<mode>"
- [(set (match_operand:<VEL> 0 "register_operand" "=r")
- (zero_extend:<VEL>
- (vec_select:<VEL>
- (match_operand:VDQ 1 "register_operand" "w")
- (parallel [(match_operand:SI 2 "immediate_operand" "i")]))))]
- "TARGET_SIMD"
- "umov\\t%<vw>0, %1.<Vetype>[%2]"
- [(set_attr "simd_type" "simd_movgp")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-(define_insn "aarch64_get_lane<mode>"
- [(set (match_operand:<VEL> 0 "register_operand" "=w")
- (vec_select:<VEL>
- (match_operand:VDQF 1 "register_operand" "w")
- (parallel [(match_operand:SI 2 "immediate_operand" "i")])))]
- "TARGET_SIMD"
- "mov\\t%0.<Vetype>[0], %1.<Vetype>[%2]"
- [(set_attr "simd_type" "simd_ins")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-(define_expand "aarch64_get_lanedi"
- [(match_operand:DI 0 "register_operand" "=r")
- (match_operand:DI 1 "register_operand" "w")
- (match_operand:SI 2 "immediate_operand" "i")]
- "TARGET_SIMD"
-{
- aarch64_simd_lane_bounds (operands[2], 0, 1);
- emit_move_insn (operands[0], operands[1]);
- DONE;
-})
-
-(define_expand "aarch64_reinterpretv8qi<mode>"
- [(match_operand:V8QI 0 "register_operand" "")
- (match_operand:VDC 1 "register_operand" "")]
- "TARGET_SIMD"
-{
- aarch64_simd_reinterpret (operands[0], operands[1]);
- DONE;
-})
-
-(define_expand "aarch64_reinterpretv4hi<mode>"
- [(match_operand:V4HI 0 "register_operand" "")
- (match_operand:VDC 1 "register_operand" "")]
- "TARGET_SIMD"
-{
- aarch64_simd_reinterpret (operands[0], operands[1]);
- DONE;
-})
-
-(define_expand "aarch64_reinterpretv2si<mode>"
- [(match_operand:V2SI 0 "register_operand" "")
- (match_operand:VDC 1 "register_operand" "")]
- "TARGET_SIMD"
-{
- aarch64_simd_reinterpret (operands[0], operands[1]);
- DONE;
-})
-
-(define_expand "aarch64_reinterpretv2sf<mode>"
- [(match_operand:V2SF 0 "register_operand" "")
- (match_operand:VDC 1 "register_operand" "")]
- "TARGET_SIMD"
-{
- aarch64_simd_reinterpret (operands[0], operands[1]);
- DONE;
-})
-
-(define_expand "aarch64_reinterpretdi<mode>"
- [(match_operand:DI 0 "register_operand" "")
- (match_operand:VD_RE 1 "register_operand" "")]
- "TARGET_SIMD"
-{
- aarch64_simd_reinterpret (operands[0], operands[1]);
- DONE;
-})
-
-(define_expand "aarch64_reinterpretv16qi<mode>"
- [(match_operand:V16QI 0 "register_operand" "")
- (match_operand:VQ 1 "register_operand" "")]
- "TARGET_SIMD"
-{
- aarch64_simd_reinterpret (operands[0], operands[1]);
- DONE;
-})
-
-(define_expand "aarch64_reinterpretv8hi<mode>"
- [(match_operand:V8HI 0 "register_operand" "")
- (match_operand:VQ 1 "register_operand" "")]
- "TARGET_SIMD"
-{
- aarch64_simd_reinterpret (operands[0], operands[1]);
- DONE;
-})
-
-(define_expand "aarch64_reinterpretv4si<mode>"
- [(match_operand:V4SI 0 "register_operand" "")
- (match_operand:VQ 1 "register_operand" "")]
- "TARGET_SIMD"
-{
- aarch64_simd_reinterpret (operands[0], operands[1]);
- DONE;
-})
-
-(define_expand "aarch64_reinterpretv4sf<mode>"
- [(match_operand:V4SF 0 "register_operand" "")
- (match_operand:VQ 1 "register_operand" "")]
- "TARGET_SIMD"
-{
- aarch64_simd_reinterpret (operands[0], operands[1]);
- DONE;
-})
-
-(define_expand "aarch64_reinterpretv2di<mode>"
- [(match_operand:V2DI 0 "register_operand" "")
- (match_operand:VQ 1 "register_operand" "")]
- "TARGET_SIMD"
-{
- aarch64_simd_reinterpret (operands[0], operands[1]);
- DONE;
-})
-
-(define_expand "aarch64_reinterpretv2df<mode>"
- [(match_operand:V2DF 0 "register_operand" "")
- (match_operand:VQ 1 "register_operand" "")]
- "TARGET_SIMD"
-{
- aarch64_simd_reinterpret (operands[0], operands[1]);
- DONE;
-})
-
-;; In this insn, operand 1 should be the low part and operand 2 the high
-;; part of the destination vector.
-
-(define_insn "*aarch64_combinez<mode>"
- [(set (match_operand:<VDBL> 0 "register_operand" "=&w")
- (vec_concat:<VDBL>
- (match_operand:VDIC 1 "register_operand" "w")
- (match_operand:VDIC 2 "aarch64_simd_imm_zero" "Dz")))]
- "TARGET_SIMD"
- "mov\\t%0.8b, %1.8b"
- [(set_attr "simd_type" "simd_move")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-(define_insn "aarch64_combine<mode>"
- [(set (match_operand:<VDBL> 0 "register_operand" "=&w")
- (vec_concat:<VDBL> (match_operand:VDC 1 "register_operand" "w")
- (match_operand:VDC 2 "register_operand" "w")))]
- "TARGET_SIMD"
- "mov\\t%0.d[0], %1.d[0]\;ins\\t%0.d[1], %2.d[0]"
- [(set_attr "simd_type" "simd_ins")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-;; <su><addsub>l<q>.
-
-(define_insn "aarch64_<ANY_EXTEND:su><ADDSUB:optab>l2<mode>_internal"
- [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
- (ADDSUB:<VWIDE> (ANY_EXTEND:<VWIDE> (vec_select:<VHALF>
- (match_operand:VQW 1 "register_operand" "w")
- (match_operand:VQW 3 "vect_par_cnst_hi_half" "")))
- (ANY_EXTEND:<VWIDE> (vec_select:<VHALF>
- (match_operand:VQW 2 "register_operand" "w")
- (match_dup 3)))))]
- "TARGET_SIMD"
- "<ANY_EXTEND:su><ADDSUB:optab>l2 %0.<Vwtype>, %1.<Vtype>, %2.<Vtype>"
- [(set_attr "simd_type" "simd_addl")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-(define_expand "aarch64_saddl2<mode>"
- [(match_operand:<VWIDE> 0 "register_operand" "=w")
- (match_operand:VQW 1 "register_operand" "w")
- (match_operand:VQW 2 "register_operand" "w")]
- "TARGET_SIMD"
-{
- rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
- emit_insn (gen_aarch64_saddl2<mode>_internal (operands[0], operands[1],
- operands[2], p));
- DONE;
-})
-
-(define_expand "aarch64_uaddl2<mode>"
- [(match_operand:<VWIDE> 0 "register_operand" "=w")
- (match_operand:VQW 1 "register_operand" "w")
- (match_operand:VQW 2 "register_operand" "w")]
- "TARGET_SIMD"
-{
- rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
- emit_insn (gen_aarch64_uaddl2<mode>_internal (operands[0], operands[1],
- operands[2], p));
- DONE;
-})
-
-(define_expand "aarch64_ssubl2<mode>"
- [(match_operand:<VWIDE> 0 "register_operand" "=w")
- (match_operand:VQW 1 "register_operand" "w")
- (match_operand:VQW 2 "register_operand" "w")]
- "TARGET_SIMD"
-{
- rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
- emit_insn (gen_aarch64_ssubl2<mode>_internal (operands[0], operands[1],
- operands[2], p));
- DONE;
-})
-
-(define_expand "aarch64_usubl2<mode>"
- [(match_operand:<VWIDE> 0 "register_operand" "=w")
- (match_operand:VQW 1 "register_operand" "w")
- (match_operand:VQW 2 "register_operand" "w")]
- "TARGET_SIMD"
-{
- rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
- emit_insn (gen_aarch64_usubl2<mode>_internal (operands[0], operands[1],
- operands[2], p));
- DONE;
-})
-
-(define_insn "aarch64_<ANY_EXTEND:su><ADDSUB:optab>l<mode>"
- [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
- (ADDSUB:<VWIDE> (ANY_EXTEND:<VWIDE>
- (match_operand:VDW 1 "register_operand" "w"))
- (ANY_EXTEND:<VWIDE>
- (match_operand:VDW 2 "register_operand" "w"))))]
- "TARGET_SIMD"
- "<ANY_EXTEND:su><ADDSUB:optab>l %0.<Vwtype>, %1.<Vtype>, %2.<Vtype>"
- [(set_attr "simd_type" "simd_addl")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-;; <su><addsub>w<q>.
-
-(define_insn "aarch64_<ANY_EXTEND:su><ADDSUB:optab>w<mode>"
- [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
- (ADDSUB:<VWIDE> (match_operand:<VWIDE> 1 "register_operand" "w")
- (ANY_EXTEND:<VWIDE>
- (match_operand:VDW 2 "register_operand" "w"))))]
- "TARGET_SIMD"
- "<ANY_EXTEND:su><ADDSUB:optab>w\\t%0.<Vwtype>, %1.<Vwtype>, %2.<Vtype>"
- [(set_attr "simd_type" "simd_addl")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-(define_insn "aarch64_<ANY_EXTEND:su><ADDSUB:optab>w2<mode>_internal"
- [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
- (ADDSUB:<VWIDE> (match_operand:<VWIDE> 1 "register_operand" "w")
- (ANY_EXTEND:<VWIDE>
- (vec_select:<VHALF>
- (match_operand:VQW 2 "register_operand" "w")
- (match_operand:VQW 3 "vect_par_cnst_hi_half" "")))))]
- "TARGET_SIMD"
- "<ANY_EXTEND:su><ADDSUB:optab>w2\\t%0.<Vwtype>, %1.<Vwtype>, %2.<Vtype>"
- [(set_attr "simd_type" "simd_addl")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-(define_expand "aarch64_saddw2<mode>"
- [(match_operand:<VWIDE> 0 "register_operand" "=w")
- (match_operand:<VWIDE> 1 "register_operand" "w")
- (match_operand:VQW 2 "register_operand" "w")]
- "TARGET_SIMD"
-{
- rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
- emit_insn (gen_aarch64_saddw2<mode>_internal (operands[0], operands[1],
- operands[2], p));
- DONE;
-})
-
-(define_expand "aarch64_uaddw2<mode>"
- [(match_operand:<VWIDE> 0 "register_operand" "=w")
- (match_operand:<VWIDE> 1 "register_operand" "w")
- (match_operand:VQW 2 "register_operand" "w")]
- "TARGET_SIMD"
-{
- rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
- emit_insn (gen_aarch64_uaddw2<mode>_internal (operands[0], operands[1],
- operands[2], p));
- DONE;
-})
-
-
-(define_expand "aarch64_ssubw2<mode>"
- [(match_operand:<VWIDE> 0 "register_operand" "=w")
- (match_operand:<VWIDE> 1 "register_operand" "w")
- (match_operand:VQW 2 "register_operand" "w")]
- "TARGET_SIMD"
-{
- rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
- emit_insn (gen_aarch64_ssubw2<mode>_internal (operands[0], operands[1],
- operands[2], p));
- DONE;
-})
-
-(define_expand "aarch64_usubw2<mode>"
- [(match_operand:<VWIDE> 0 "register_operand" "=w")
- (match_operand:<VWIDE> 1 "register_operand" "w")
- (match_operand:VQW 2 "register_operand" "w")]
- "TARGET_SIMD"
-{
- rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
- emit_insn (gen_aarch64_usubw2<mode>_internal (operands[0], operands[1],
- operands[2], p));
- DONE;
-})
-
-;; <su><r>h<addsub>.
-
-(define_insn "aarch64_<sur>h<addsub><mode>"
- [(set (match_operand:VQ_S 0 "register_operand" "=w")
- (unspec:VQ_S [(match_operand:VQ_S 1 "register_operand" "w")
- (match_operand:VQ_S 2 "register_operand" "w")]
- HADDSUB))]
- "TARGET_SIMD"
- "<sur>h<addsub>\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
- [(set_attr "simd_type" "simd_add")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-;; <r><addsub>hn<q>.
-
-(define_insn "aarch64_<sur><addsub>hn<mode>"
- [(set (match_operand:<VNARROWQ> 0 "register_operand" "=w")
- (unspec:<VNARROWQ> [(match_operand:VQN 1 "register_operand" "w")
- (match_operand:VQN 2 "register_operand" "w")]
- ADDSUBHN))]
- "TARGET_SIMD"
- "<sur><addsub>hn\\t%0.<Vntype>, %1.<Vtype>, %2.<Vtype>"
- [(set_attr "simd_type" "simd_addn")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-(define_insn "aarch64_<sur><addsub>hn2<mode>"
- [(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
- (unspec:<VNARROWQ2> [(match_operand:<VNARROWQ> 1 "register_operand" "0")
- (match_operand:VQN 2 "register_operand" "w")
- (match_operand:VQN 3 "register_operand" "w")]
- ADDSUBHN2))]
- "TARGET_SIMD"
- "<sur><addsub>hn2\\t%0.<V2ntype>, %2.<Vtype>, %3.<Vtype>"
- [(set_attr "simd_type" "simd_addn2")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-;; pmul.
-
-(define_insn "aarch64_pmul<mode>"
- [(set (match_operand:VB 0 "register_operand" "=w")
- (unspec:VB [(match_operand:VB 1 "register_operand" "w")
- (match_operand:VB 2 "register_operand" "w")]
- UNSPEC_PMUL))]
- "TARGET_SIMD"
- "pmul\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
- [(set_attr "simd_type" "simd_mul")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-;; <su>q<addsub>
-
-(define_insn "aarch64_<su_optab><optab><mode>"
- [(set (match_operand:VSDQ_I 0 "register_operand" "=w")
- (BINQOPS:VSDQ_I (match_operand:VSDQ_I 1 "register_operand" "w")
- (match_operand:VSDQ_I 2 "register_operand" "w")))]
- "TARGET_SIMD"
- "<su_optab><optab>\\t%<v>0<Vmtype>, %<v>1<Vmtype>, %<v>2<Vmtype>"
- [(set_attr "simd_type" "simd_add")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-;; suqadd and usqadd
-
-(define_insn "aarch64_<sur>qadd<mode>"
- [(set (match_operand:VSDQ_I 0 "register_operand" "=w")
- (unspec:VSDQ_I [(match_operand:VSDQ_I 1 "register_operand" "0")
- (match_operand:VSDQ_I 2 "register_operand" "w")]
- USSUQADD))]
- "TARGET_SIMD"
- "<sur>qadd\\t%<v>0<Vmtype>, %<v>2<Vmtype>"
- [(set_attr "simd_type" "simd_sat_add")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-;; sqmovun
-
-(define_insn "aarch64_sqmovun<mode>"
- [(set (match_operand:<VNARROWQ> 0 "register_operand" "=w")
- (unspec:<VNARROWQ> [(match_operand:VSQN_HSDI 1 "register_operand" "w")]
- UNSPEC_SQXTUN))]
- "TARGET_SIMD"
- "sqxtun\\t%<vn2>0<Vmntype>, %<v>1<Vmtype>"
- [(set_attr "simd_type" "simd_sat_shiftn_imm")
- (set_attr "simd_mode" "<MODE>")]
- )
-
-;; sqmovn and uqmovn
-
-(define_insn "aarch64_<sur>qmovn<mode>"
- [(set (match_operand:<VNARROWQ> 0 "register_operand" "=w")
- (unspec:<VNARROWQ> [(match_operand:VSQN_HSDI 1 "register_operand" "w")]
- SUQMOVN))]
- "TARGET_SIMD"
- "<sur>qxtn\\t%<vn2>0<Vmntype>, %<v>1<Vmtype>"
- [(set_attr "simd_type" "simd_sat_shiftn_imm")
- (set_attr "simd_mode" "<MODE>")]
- )
-
-;; <su>q<absneg>
-
-(define_insn "aarch64_s<optab><mode>"
- [(set (match_operand:VSDQ_I_BHSI 0 "register_operand" "=w")
- (UNQOPS:VSDQ_I_BHSI
- (match_operand:VSDQ_I_BHSI 1 "register_operand" "w")))]
- "TARGET_SIMD"
- "s<optab>\\t%<v>0<Vmtype>, %<v>1<Vmtype>"
- [(set_attr "simd_type" "simd_sat_negabs")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-;; sq<r>dmulh.
-
-(define_insn "aarch64_sq<r>dmulh<mode>"
- [(set (match_operand:VSDQ_HSI 0 "register_operand" "=w")
- (unspec:VSDQ_HSI
- [(match_operand:VSDQ_HSI 1 "register_operand" "w")
- (match_operand:VSDQ_HSI 2 "register_operand" "w")]
- VQDMULH))]
- "TARGET_SIMD"
- "sq<r>dmulh\\t%<v>0<Vmtype>, %<v>1<Vmtype>, %<v>2<Vmtype>"
- [(set_attr "simd_type" "simd_sat_mul")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-;; sq<r>dmulh_lane
-
-(define_insn "aarch64_sq<r>dmulh_lane<mode>"
- [(set (match_operand:VDQHS 0 "register_operand" "=w")
- (unspec:VDQHS
- [(match_operand:VDQHS 1 "register_operand" "w")
- (vec_select:<VEL>
- (match_operand:<VCOND> 2 "register_operand" "<vwx>")
- (parallel [(match_operand:SI 3 "immediate_operand" "i")]))]
- VQDMULH))]
- "TARGET_SIMD"
- "*
- aarch64_simd_lane_bounds (operands[3], 0, GET_MODE_NUNITS (<VCOND>mode));
- return \"sq<r>dmulh\\t%0.<Vtype>, %1.<Vtype>, %2.<Vetype>[%3]\";"
- [(set_attr "simd_type" "simd_sat_mul")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-(define_insn "aarch64_sq<r>dmulh_laneq<mode>"
- [(set (match_operand:VDQHS 0 "register_operand" "=w")
- (unspec:VDQHS
- [(match_operand:VDQHS 1 "register_operand" "w")
- (vec_select:<VEL>
- (match_operand:<VCONQ> 2 "register_operand" "<vwx>")
- (parallel [(match_operand:SI 3 "immediate_operand" "i")]))]
- VQDMULH))]
- "TARGET_SIMD"
- "*
- aarch64_simd_lane_bounds (operands[3], 0, GET_MODE_NUNITS (<VCONQ>mode));
- return \"sq<r>dmulh\\t%0.<Vtype>, %1.<Vtype>, %2.<Vetype>[%3]\";"
- [(set_attr "simd_type" "simd_sat_mul")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-(define_insn "aarch64_sq<r>dmulh_lane<mode>"
- [(set (match_operand:SD_HSI 0 "register_operand" "=w")
- (unspec:SD_HSI
- [(match_operand:SD_HSI 1 "register_operand" "w")
- (vec_select:<VEL>
- (match_operand:<VCONQ> 2 "register_operand" "<vwx>")
- (parallel [(match_operand:SI 3 "immediate_operand" "i")]))]
- VQDMULH))]
- "TARGET_SIMD"
- "*
- aarch64_simd_lane_bounds (operands[3], 0, GET_MODE_NUNITS (<VCONQ>mode));
- return \"sq<r>dmulh\\t%<v>0, %<v>1, %2.<v>[%3]\";"
- [(set_attr "simd_type" "simd_sat_mul")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-;; vqdml[sa]l
-
-(define_insn "aarch64_sqdml<SBINQOPS:as>l<mode>"
- [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
- (SBINQOPS:<VWIDE>
- (match_operand:<VWIDE> 1 "register_operand" "0")
- (ss_ashift:<VWIDE>
- (mult:<VWIDE>
- (sign_extend:<VWIDE>
- (match_operand:VSD_HSI 2 "register_operand" "w"))
- (sign_extend:<VWIDE>
- (match_operand:VSD_HSI 3 "register_operand" "w")))
- (const_int 1))))]
- "TARGET_SIMD"
- "sqdml<SBINQOPS:as>l\\t%<vw2>0<Vmwtype>, %<v>2<Vmtype>, %<v>3<Vmtype>"
- [(set_attr "simd_type" "simd_sat_mlal")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-;; vqdml[sa]l_lane
-
-(define_insn "aarch64_sqdml<SBINQOPS:as>l_lane<mode>_internal"
- [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
- (SBINQOPS:<VWIDE>
- (match_operand:<VWIDE> 1 "register_operand" "0")
- (ss_ashift:<VWIDE>
- (mult:<VWIDE>
- (sign_extend:<VWIDE>
- (match_operand:VD_HSI 2 "register_operand" "w"))
- (sign_extend:<VWIDE>
- (vec_duplicate:VD_HSI
- (vec_select:<VEL>
- (match_operand:<VCON> 3 "register_operand" "<vwx>")
- (parallel [(match_operand:SI 4 "immediate_operand" "i")])))
- ))
- (const_int 1))))]
- "TARGET_SIMD"
- "sqdml<SBINQOPS:as>l\\t%<vw2>0<Vmwtype>, %<v>2<Vmtype>, %3.<Vetype>[%4]"
- [(set_attr "simd_type" "simd_sat_mlal")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-(define_insn "aarch64_sqdml<SBINQOPS:as>l_lane<mode>_internal"
- [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
- (SBINQOPS:<VWIDE>
- (match_operand:<VWIDE> 1 "register_operand" "0")
- (ss_ashift:<VWIDE>
- (mult:<VWIDE>
- (sign_extend:<VWIDE>
- (match_operand:SD_HSI 2 "register_operand" "w"))
- (sign_extend:<VWIDE>
- (vec_select:<VEL>
- (match_operand:<VCON> 3 "register_operand" "<vwx>")
- (parallel [(match_operand:SI 4 "immediate_operand" "i")])))
- )
- (const_int 1))))]
- "TARGET_SIMD"
- "sqdml<SBINQOPS:as>l\\t%<vw2>0<Vmwtype>, %<v>2<Vmtype>, %3.<Vetype>[%4]"
- [(set_attr "simd_type" "simd_sat_mlal")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-(define_expand "aarch64_sqdmlal_lane<mode>"
- [(match_operand:<VWIDE> 0 "register_operand" "=w")
- (match_operand:<VWIDE> 1 "register_operand" "0")
- (match_operand:VSD_HSI 2 "register_operand" "w")
- (match_operand:<VCON> 3 "register_operand" "<vwx>")
- (match_operand:SI 4 "immediate_operand" "i")]
- "TARGET_SIMD"
-{
- aarch64_simd_lane_bounds (operands[4], 0, GET_MODE_NUNITS (<VCON>mode) / 2);
- emit_insn (gen_aarch64_sqdmlal_lane<mode>_internal (operands[0], operands[1],
- operands[2], operands[3],
- operands[4]));
- DONE;
-})
-
-(define_expand "aarch64_sqdmlal_laneq<mode>"
- [(match_operand:<VWIDE> 0 "register_operand" "=w")
- (match_operand:<VWIDE> 1 "register_operand" "0")
- (match_operand:VSD_HSI 2 "register_operand" "w")
- (match_operand:<VCON> 3 "register_operand" "<vwx>")
- (match_operand:SI 4 "immediate_operand" "i")]
- "TARGET_SIMD"
-{
- aarch64_simd_lane_bounds (operands[4], 0, GET_MODE_NUNITS (<VCON>mode));
- emit_insn (gen_aarch64_sqdmlal_lane<mode>_internal (operands[0], operands[1],
- operands[2], operands[3],
- operands[4]));
- DONE;
-})
-
-(define_expand "aarch64_sqdmlsl_lane<mode>"
- [(match_operand:<VWIDE> 0 "register_operand" "=w")
- (match_operand:<VWIDE> 1 "register_operand" "0")
- (match_operand:VSD_HSI 2 "register_operand" "w")
- (match_operand:<VCON> 3 "register_operand" "<vwx>")
- (match_operand:SI 4 "immediate_operand" "i")]
- "TARGET_SIMD"
-{
- aarch64_simd_lane_bounds (operands[4], 0, GET_MODE_NUNITS (<VCON>mode) / 2);
- emit_insn (gen_aarch64_sqdmlsl_lane<mode>_internal (operands[0], operands[1],
- operands[2], operands[3],
- operands[4]));
- DONE;
-})
-
-(define_expand "aarch64_sqdmlsl_laneq<mode>"
- [(match_operand:<VWIDE> 0 "register_operand" "=w")
- (match_operand:<VWIDE> 1 "register_operand" "0")
- (match_operand:VSD_HSI 2 "register_operand" "w")
- (match_operand:<VCON> 3 "register_operand" "<vwx>")
- (match_operand:SI 4 "immediate_operand" "i")]
- "TARGET_SIMD"
-{
- aarch64_simd_lane_bounds (operands[4], 0, GET_MODE_NUNITS (<VCON>mode));
- emit_insn (gen_aarch64_sqdmlsl_lane<mode>_internal (operands[0], operands[1],
- operands[2], operands[3],
- operands[4]));
- DONE;
-})
-
-;; vqdml[sa]l_n
-
-(define_insn "aarch64_sqdml<SBINQOPS:as>l_n<mode>"
- [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
- (SBINQOPS:<VWIDE>
- (match_operand:<VWIDE> 1 "register_operand" "0")
- (ss_ashift:<VWIDE>
- (mult:<VWIDE>
- (sign_extend:<VWIDE>
- (match_operand:VD_HSI 2 "register_operand" "w"))
- (sign_extend:<VWIDE>
- (vec_duplicate:VD_HSI
- (match_operand:<VEL> 3 "register_operand" "w"))))
- (const_int 1))))]
- "TARGET_SIMD"
- "sqdml<SBINQOPS:as>l\\t%<vw2>0<Vmwtype>, %<v>2<Vmtype>, %3.<Vetype>[0]"
- [(set_attr "simd_type" "simd_sat_mlal")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-;; sqdml[as]l2
-
-(define_insn "aarch64_sqdml<SBINQOPS:as>l2<mode>_internal"
- [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
- (SBINQOPS:<VWIDE>
- (match_operand:<VWIDE> 1 "register_operand" "0")
- (ss_ashift:<VWIDE>
- (mult:<VWIDE>
- (sign_extend:<VWIDE>
- (vec_select:<VHALF>
- (match_operand:VQ_HSI 2 "register_operand" "w")
- (match_operand:VQ_HSI 4 "vect_par_cnst_hi_half" "")))
- (sign_extend:<VWIDE>
- (vec_select:<VHALF>
- (match_operand:VQ_HSI 3 "register_operand" "w")
- (match_dup 4))))
- (const_int 1))))]
- "TARGET_SIMD"
- "sqdml<SBINQOPS:as>l2\\t%<vw2>0<Vmwtype>, %<v>2<Vmtype>, %<v>3<Vmtype>"
- [(set_attr "simd_type" "simd_sat_mlal")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-(define_expand "aarch64_sqdmlal2<mode>"
- [(match_operand:<VWIDE> 0 "register_operand" "=w")
- (match_operand:<VWIDE> 1 "register_operand" "w")
- (match_operand:VQ_HSI 2 "register_operand" "w")
- (match_operand:VQ_HSI 3 "register_operand" "w")]
- "TARGET_SIMD"
-{
- rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
- emit_insn (gen_aarch64_sqdmlal2<mode>_internal (operands[0], operands[1],
- operands[2], operands[3], p));
- DONE;
-})
-
-(define_expand "aarch64_sqdmlsl2<mode>"
- [(match_operand:<VWIDE> 0 "register_operand" "=w")
- (match_operand:<VWIDE> 1 "register_operand" "w")
- (match_operand:VQ_HSI 2 "register_operand" "w")
- (match_operand:VQ_HSI 3 "register_operand" "w")]
- "TARGET_SIMD"
-{
- rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
- emit_insn (gen_aarch64_sqdmlsl2<mode>_internal (operands[0], operands[1],
- operands[2], operands[3], p));
- DONE;
-})
-
-;; vqdml[sa]l2_lane
-
-(define_insn "aarch64_sqdml<SBINQOPS:as>l2_lane<mode>_internal"
- [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
- (SBINQOPS:<VWIDE>
- (match_operand:<VWIDE> 1 "register_operand" "0")
- (ss_ashift:<VWIDE>
- (mult:<VWIDE>
- (sign_extend:<VWIDE>
- (vec_select:<VHALF>
- (match_operand:VQ_HSI 2 "register_operand" "w")
- (match_operand:VQ_HSI 5 "vect_par_cnst_hi_half" "")))
- (sign_extend:<VWIDE>
- (vec_duplicate:<VHALF>
- (vec_select:<VEL>
- (match_operand:<VCON> 3 "register_operand" "<vwx>")
- (parallel [(match_operand:SI 4 "immediate_operand" "i")])
- ))))
- (const_int 1))))]
- "TARGET_SIMD"
- "sqdml<SBINQOPS:as>l2\\t%<vw2>0<Vmwtype>, %<v>2<Vmtype>, %3.<Vetype>[%4]"
- [(set_attr "simd_type" "simd_sat_mlal")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-(define_expand "aarch64_sqdmlal2_lane<mode>"
- [(match_operand:<VWIDE> 0 "register_operand" "=w")
- (match_operand:<VWIDE> 1 "register_operand" "w")
- (match_operand:VQ_HSI 2 "register_operand" "w")
- (match_operand:<VCON> 3 "register_operand" "<vwx>")
- (match_operand:SI 4 "immediate_operand" "i")]
- "TARGET_SIMD"
-{
- rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
- aarch64_simd_lane_bounds (operands[4], 0, GET_MODE_NUNITS (<MODE>mode) / 2);
- emit_insn (gen_aarch64_sqdmlal2_lane<mode>_internal (operands[0], operands[1],
- operands[2], operands[3],
- operands[4], p));
- DONE;
-})
-
-(define_expand "aarch64_sqdmlal2_laneq<mode>"
- [(match_operand:<VWIDE> 0 "register_operand" "=w")
- (match_operand:<VWIDE> 1 "register_operand" "w")
- (match_operand:VQ_HSI 2 "register_operand" "w")
- (match_operand:<VCON> 3 "register_operand" "<vwx>")
- (match_operand:SI 4 "immediate_operand" "i")]
- "TARGET_SIMD"
-{
- rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
- aarch64_simd_lane_bounds (operands[4], 0, GET_MODE_NUNITS (<MODE>mode));
- emit_insn (gen_aarch64_sqdmlal2_lane<mode>_internal (operands[0], operands[1],
- operands[2], operands[3],
- operands[4], p));
- DONE;
-})
-
-(define_expand "aarch64_sqdmlsl2_lane<mode>"
- [(match_operand:<VWIDE> 0 "register_operand" "=w")
- (match_operand:<VWIDE> 1 "register_operand" "w")
- (match_operand:VQ_HSI 2 "register_operand" "w")
- (match_operand:<VCON> 3 "register_operand" "<vwx>")
- (match_operand:SI 4 "immediate_operand" "i")]
- "TARGET_SIMD"
-{
- rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
- aarch64_simd_lane_bounds (operands[4], 0, GET_MODE_NUNITS (<MODE>mode) / 2);
- emit_insn (gen_aarch64_sqdmlsl2_lane<mode>_internal (operands[0], operands[1],
- operands[2], operands[3],
- operands[4], p));
- DONE;
-})
-
-(define_expand "aarch64_sqdmlsl2_laneq<mode>"
- [(match_operand:<VWIDE> 0 "register_operand" "=w")
- (match_operand:<VWIDE> 1 "register_operand" "w")
- (match_operand:VQ_HSI 2 "register_operand" "w")
- (match_operand:<VCON> 3 "register_operand" "<vwx>")
- (match_operand:SI 4 "immediate_operand" "i")]
- "TARGET_SIMD"
-{
- rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
- aarch64_simd_lane_bounds (operands[4], 0, GET_MODE_NUNITS (<MODE>mode));
- emit_insn (gen_aarch64_sqdmlsl2_lane<mode>_internal (operands[0], operands[1],
- operands[2], operands[3],
- operands[4], p));
- DONE;
-})
-
-(define_insn "aarch64_sqdml<SBINQOPS:as>l2_n<mode>_internal"
- [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
- (SBINQOPS:<VWIDE>
- (match_operand:<VWIDE> 1 "register_operand" "0")
- (ss_ashift:<VWIDE>
- (mult:<VWIDE>
- (sign_extend:<VWIDE>
- (vec_select:<VHALF>
- (match_operand:VQ_HSI 2 "register_operand" "w")
- (match_operand:VQ_HSI 4 "vect_par_cnst_hi_half" "")))
- (sign_extend:<VWIDE>
- (vec_duplicate:<VHALF>
- (match_operand:<VEL> 3 "register_operand" "w"))))
- (const_int 1))))]
- "TARGET_SIMD"
- "sqdml<SBINQOPS:as>l2\\t%<vw2>0<Vmwtype>, %<v>2<Vmtype>, %3.<Vetype>[0]"
- [(set_attr "simd_type" "simd_sat_mlal")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-(define_expand "aarch64_sqdmlal2_n<mode>"
- [(match_operand:<VWIDE> 0 "register_operand" "=w")
- (match_operand:<VWIDE> 1 "register_operand" "w")
- (match_operand:VQ_HSI 2 "register_operand" "w")
- (match_operand:<VEL> 3 "register_operand" "w")]
- "TARGET_SIMD"
-{
- rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
- emit_insn (gen_aarch64_sqdmlal2_n<mode>_internal (operands[0], operands[1],
- operands[2], operands[3],
- p));
- DONE;
-})
-
-(define_expand "aarch64_sqdmlsl2_n<mode>"
- [(match_operand:<VWIDE> 0 "register_operand" "=w")
- (match_operand:<VWIDE> 1 "register_operand" "w")
- (match_operand:VQ_HSI 2 "register_operand" "w")
- (match_operand:<VEL> 3 "register_operand" "w")]
- "TARGET_SIMD"
-{
- rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
- emit_insn (gen_aarch64_sqdmlsl2_n<mode>_internal (operands[0], operands[1],
- operands[2], operands[3],
- p));
- DONE;
-})
-
-;; vqdmull
-
-(define_insn "aarch64_sqdmull<mode>"
- [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
- (ss_ashift:<VWIDE>
- (mult:<VWIDE>
- (sign_extend:<VWIDE>
- (match_operand:VSD_HSI 1 "register_operand" "w"))
- (sign_extend:<VWIDE>
- (match_operand:VSD_HSI 2 "register_operand" "w")))
- (const_int 1)))]
- "TARGET_SIMD"
- "sqdmull\\t%<vw2>0<Vmwtype>, %<v>1<Vmtype>, %<v>2<Vmtype>"
- [(set_attr "simd_type" "simd_sat_mul")
- (set_attr "simd_mode" "<MODE>")]
-)
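-
-;; The ss_ashift by (const_int 1) above expresses the "saturating doubling"
-;; of SQDMULL: per lane the result is sat (2 * (a * b)) in the double-width
-;; type.  For 16-bit inputs the only case that actually saturates is
-;; a = b = -32768, where 2 * (a * b) = 2^31 and the result is clamped to
-;; 0x7fffffff.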
-
-;; vqdmull_lane
-
-(define_insn "aarch64_sqdmull_lane<mode>_internal"
- [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
- (ss_ashift:<VWIDE>
- (mult:<VWIDE>
- (sign_extend:<VWIDE>
- (match_operand:VD_HSI 1 "register_operand" "w"))
- (sign_extend:<VWIDE>
- (vec_duplicate:VD_HSI
- (vec_select:<VEL>
- (match_operand:<VCON> 2 "register_operand" "<vwx>")
- (parallel [(match_operand:SI 3 "immediate_operand" "i")])))
- ))
- (const_int 1)))]
- "TARGET_SIMD"
- "sqdmull\\t%<vw2>0<Vmwtype>, %<v>1<Vmtype>, %2.<Vetype>[%3]"
- [(set_attr "simd_type" "simd_sat_mul")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-(define_insn "aarch64_sqdmull_lane<mode>_internal"
- [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
- (ss_ashift:<VWIDE>
- (mult:<VWIDE>
- (sign_extend:<VWIDE>
- (match_operand:SD_HSI 1 "register_operand" "w"))
- (sign_extend:<VWIDE>
- (vec_select:<VEL>
- (match_operand:<VCON> 2 "register_operand" "<vwx>")
- (parallel [(match_operand:SI 3 "immediate_operand" "i")]))
- ))
- (const_int 1)))]
- "TARGET_SIMD"
- "sqdmull\\t%<vw2>0<Vmwtype>, %<v>1<Vmtype>, %2.<Vetype>[%3]"
- [(set_attr "simd_type" "simd_sat_mul")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-(define_expand "aarch64_sqdmull_lane<mode>"
- [(match_operand:<VWIDE> 0 "register_operand" "=w")
- (match_operand:VSD_HSI 1 "register_operand" "w")
- (match_operand:<VCON> 2 "register_operand" "<vwx>")
- (match_operand:SI 3 "immediate_operand" "i")]
- "TARGET_SIMD"
-{
- aarch64_simd_lane_bounds (operands[3], 0, GET_MODE_NUNITS (<VCON>mode) / 2);
- emit_insn (gen_aarch64_sqdmull_lane<mode>_internal (operands[0], operands[1],
- operands[2], operands[3]));
- DONE;
-})
-
-(define_expand "aarch64_sqdmull_laneq<mode>"
- [(match_operand:<VWIDE> 0 "register_operand" "=w")
- (match_operand:VD_HSI 1 "register_operand" "w")
- (match_operand:<VCON> 2 "register_operand" "<vwx>")
- (match_operand:SI 3 "immediate_operand" "i")]
- "TARGET_SIMD"
-{
- aarch64_simd_lane_bounds (operands[3], 0, GET_MODE_NUNITS (<VCON>mode));
- emit_insn (gen_aarch64_sqdmull_lane<mode>_internal
- (operands[0], operands[1], operands[2], operands[3]));
- DONE;
-})
-
-;; vqdmull_n
-
-(define_insn "aarch64_sqdmull_n<mode>"
- [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
- (ss_ashift:<VWIDE>
- (mult:<VWIDE>
- (sign_extend:<VWIDE>
- (match_operand:VD_HSI 1 "register_operand" "w"))
- (sign_extend:<VWIDE>
- (vec_duplicate:VD_HSI
- (match_operand:<VEL> 2 "register_operand" "w")))
- )
- (const_int 1)))]
- "TARGET_SIMD"
- "sqdmull\\t%<vw2>0<Vmwtype>, %<v>1<Vmtype>, %2.<Vetype>[0]"
- [(set_attr "simd_type" "simd_sat_mul")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-;; vqdmull2
-
-(define_insn "aarch64_sqdmull2<mode>_internal"
- [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
- (ss_ashift:<VWIDE>
- (mult:<VWIDE>
- (sign_extend:<VWIDE>
- (vec_select:<VHALF>
- (match_operand:VQ_HSI 1 "register_operand" "w")
- (match_operand:VQ_HSI 3 "vect_par_cnst_hi_half" "")))
- (sign_extend:<VWIDE>
- (vec_select:<VHALF>
- (match_operand:VQ_HSI 2 "register_operand" "w")
- (match_dup 3)))
- )
- (const_int 1)))]
- "TARGET_SIMD"
- "sqdmull2\\t%<vw2>0<Vmwtype>, %<v>1<Vmtype>, %<v>2<Vmtype>"
- [(set_attr "simd_type" "simd_sat_mul")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-(define_expand "aarch64_sqdmull2<mode>"
- [(match_operand:<VWIDE> 0 "register_operand" "=w")
- (match_operand:VQ_HSI 1 "register_operand" "w")
- (match_operand:<VCON> 2 "register_operand" "w")]
- "TARGET_SIMD"
-{
- rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
- emit_insn (gen_aarch64_sqdmull2<mode>_internal (operands[0], operands[1],
- operands[2], p));
- DONE;
-})
-
-;; vqdmull2_lane
-
-(define_insn "aarch64_sqdmull2_lane<mode>_internal"
- [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
- (ss_ashift:<VWIDE>
- (mult:<VWIDE>
- (sign_extend:<VWIDE>
- (vec_select:<VHALF>
- (match_operand:VQ_HSI 1 "register_operand" "w")
- (match_operand:VQ_HSI 4 "vect_par_cnst_hi_half" "")))
- (sign_extend:<VWIDE>
- (vec_duplicate:<VHALF>
- (vec_select:<VEL>
- (match_operand:<VCON> 2 "register_operand" "<vwx>")
- (parallel [(match_operand:SI 3 "immediate_operand" "i")])))
- ))
- (const_int 1)))]
- "TARGET_SIMD"
- "sqdmull2\\t%<vw2>0<Vmwtype>, %<v>1<Vmtype>, %2.<Vetype>[%3]"
- [(set_attr "simd_type" "simd_sat_mul")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-(define_expand "aarch64_sqdmull2_lane<mode>"
- [(match_operand:<VWIDE> 0 "register_operand" "=w")
- (match_operand:VQ_HSI 1 "register_operand" "w")
- (match_operand:<VCON> 2 "register_operand" "<vwx>")
- (match_operand:SI 3 "immediate_operand" "i")]
- "TARGET_SIMD"
-{
- rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
- aarch64_simd_lane_bounds (operands[3], 0, GET_MODE_NUNITS (<MODE>mode) / 2);
- emit_insn (gen_aarch64_sqdmull2_lane<mode>_internal (operands[0], operands[1],
- operands[2], operands[3],
- p));
- DONE;
-})
-
-(define_expand "aarch64_sqdmull2_laneq<mode>"
- [(match_operand:<VWIDE> 0 "register_operand" "=w")
- (match_operand:VQ_HSI 1 "register_operand" "w")
- (match_operand:<VCON> 2 "register_operand" "<vwx>")
- (match_operand:SI 3 "immediate_operand" "i")]
- "TARGET_SIMD"
-{
- rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
- aarch64_simd_lane_bounds (operands[3], 0, GET_MODE_NUNITS (<MODE>mode));
- emit_insn (gen_aarch64_sqdmull2_lane<mode>_internal (operands[0], operands[1],
- operands[2], operands[3],
- p));
- DONE;
-})
-
-;; vqdmull2_n
-
-(define_insn "aarch64_sqdmull2_n<mode>_internal"
- [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
- (ss_ashift:<VWIDE>
- (mult:<VWIDE>
- (sign_extend:<VWIDE>
- (vec_select:<VHALF>
- (match_operand:VQ_HSI 1 "register_operand" "w")
- (match_operand:VQ_HSI 3 "vect_par_cnst_hi_half" "")))
- (sign_extend:<VWIDE>
- (vec_duplicate:<VHALF>
- (match_operand:<VEL> 2 "register_operand" "w")))
- )
- (const_int 1)))]
- "TARGET_SIMD"
- "sqdmull2\\t%<vw2>0<Vmwtype>, %<v>1<Vmtype>, %2.<Vetype>[0]"
- [(set_attr "simd_type" "simd_sat_mul")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-(define_expand "aarch64_sqdmull2_n<mode>"
- [(match_operand:<VWIDE> 0 "register_operand" "=w")
- (match_operand:VQ_HSI 1 "register_operand" "w")
- (match_operand:<VEL> 2 "register_operand" "w")]
- "TARGET_SIMD"
-{
- rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
- emit_insn (gen_aarch64_sqdmull2_n<mode>_internal (operands[0], operands[1],
- operands[2], p));
- DONE;
-})
-
-;; vshl
-
-(define_insn "aarch64_<sur>shl<mode>"
- [(set (match_operand:VSDQ_I_DI 0 "register_operand" "=w")
- (unspec:VSDQ_I_DI
- [(match_operand:VSDQ_I_DI 1 "register_operand" "w")
- (match_operand:VSDQ_I_DI 2 "register_operand" "w")]
- VSHL))]
- "TARGET_SIMD"
-  "<sur>shl\\t%<v>0<Vmtype>, %<v>1<Vmtype>, %<v>2<Vmtype>"
- [(set_attr "simd_type" "simd_shift")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-
-;; vqshl
-
-(define_insn "aarch64_<sur>q<r>shl<mode>"
- [(set (match_operand:VSDQ_I 0 "register_operand" "=w")
- (unspec:VSDQ_I
- [(match_operand:VSDQ_I 1 "register_operand" "w")
- (match_operand:VSDQ_I 2 "register_operand" "w")]
- VQSHL))]
- "TARGET_SIMD"
-  "<sur>q<r>shl\\t%<v>0<Vmtype>, %<v>1<Vmtype>, %<v>2<Vmtype>"
- [(set_attr "simd_type" "simd_sat_shift")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-;; vshl_n
-
-(define_expand "aarch64_sshl_n<mode>"
- [(match_operand:VSDQ_I_DI 0 "register_operand" "=w")
- (match_operand:VSDQ_I_DI 1 "register_operand" "w")
- (match_operand:SI 2 "immediate_operand" "i")]
- "TARGET_SIMD"
-{
- emit_insn (gen_ashl<mode>3 (operands[0], operands[1], operands[2]));
- DONE;
-})
-
-(define_expand "aarch64_ushl_n<mode>"
- [(match_operand:VSDQ_I_DI 0 "register_operand" "=w")
- (match_operand:VSDQ_I_DI 1 "register_operand" "w")
- (match_operand:SI 2 "immediate_operand" "i")]
- "TARGET_SIMD"
-{
- emit_insn (gen_ashl<mode>3 (operands[0], operands[1], operands[2]));
- DONE;
-})
-
-;; vshll_n
-
-(define_insn "aarch64_<sur>shll_n<mode>"
- [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
- (unspec:<VWIDE> [(match_operand:VDW 1 "register_operand" "w")
- (match_operand:SI 2 "immediate_operand" "i")]
- VSHLL))]
- "TARGET_SIMD"
- "*
- int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
- aarch64_simd_const_bounds (operands[2], 0, bit_width + 1);
- if (INTVAL (operands[2]) == bit_width)
- {
- return \"shll\\t%0.<Vwtype>, %1.<Vtype>, %2\";
- }
- else {
- return \"<sur>shll\\t%0.<Vwtype>, %1.<Vtype>, %2\";
- }"
- [(set_attr "simd_type" "simd_shift_imm")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-;; vshll_high_n
-
-(define_insn "aarch64_<sur>shll2_n<mode>"
- [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
- (unspec:<VWIDE> [(match_operand:VQW 1 "register_operand" "w")
- (match_operand:SI 2 "immediate_operand" "i")]
- VSHLL))]
- "TARGET_SIMD"
- "*
- int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
- aarch64_simd_const_bounds (operands[2], 0, bit_width + 1);
- if (INTVAL (operands[2]) == bit_width)
- {
- return \"shll2\\t%0.<Vwtype>, %1.<Vtype>, %2\";
- }
- else {
- return \"<sur>shll2\\t%0.<Vwtype>, %1.<Vtype>, %2\";
- }"
- [(set_attr "simd_type" "simd_shift_imm")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-;; vshr_n
-
-(define_expand "aarch64_sshr_n<mode>"
- [(match_operand:VSDQ_I_DI 0 "register_operand" "=w")
- (match_operand:VSDQ_I_DI 1 "register_operand" "w")
- (match_operand:SI 2 "immediate_operand" "i")]
- "TARGET_SIMD"
-{
- emit_insn (gen_ashr<mode>3 (operands[0], operands[1], operands[2]));
- DONE;
-})
-
-(define_expand "aarch64_ushr_n<mode>"
- [(match_operand:VSDQ_I_DI 0 "register_operand" "=w")
- (match_operand:VSDQ_I_DI 1 "register_operand" "w")
- (match_operand:SI 2 "immediate_operand" "i")]
- "TARGET_SIMD"
-{
- emit_insn (gen_lshr<mode>3 (operands[0], operands[1], operands[2]));
- DONE;
-})
-
-;; vrshr_n
-
-(define_insn "aarch64_<sur>shr_n<mode>"
- [(set (match_operand:VSDQ_I_DI 0 "register_operand" "=w")
- (unspec:VSDQ_I_DI [(match_operand:VSDQ_I_DI 1 "register_operand" "w")
- (match_operand:SI 2 "immediate_operand" "i")]
- VRSHR_N))]
- "TARGET_SIMD"
- "*
- int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
- aarch64_simd_const_bounds (operands[2], 1, bit_width + 1);
- return \"<sur>shr\\t%<v>0<Vmtype>, %<v>1<Vmtype>, %2\";"
- [(set_attr "simd_type" "simd_shift_imm")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-;; v(r)sra_n
-
-(define_insn "aarch64_<sur>sra_n<mode>"
- [(set (match_operand:VSDQ_I_DI 0 "register_operand" "=w")
- (unspec:VSDQ_I_DI [(match_operand:VSDQ_I_DI 1 "register_operand" "0")
- (match_operand:VSDQ_I_DI 2 "register_operand" "w")
- (match_operand:SI 3 "immediate_operand" "i")]
- VSRA))]
- "TARGET_SIMD"
- "*
- int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
- aarch64_simd_const_bounds (operands[3], 1, bit_width + 1);
- return \"<sur>sra\\t%<v>0<Vmtype>, %<v>2<Vmtype>, %3\";"
- [(set_attr "simd_type" "simd_shift_imm_acc")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-;; vs<lr>i_n
-
-(define_insn "aarch64_<sur>s<lr>i_n<mode>"
- [(set (match_operand:VSDQ_I_DI 0 "register_operand" "=w")
- (unspec:VSDQ_I_DI [(match_operand:VSDQ_I_DI 1 "register_operand" "0")
- (match_operand:VSDQ_I_DI 2 "register_operand" "w")
- (match_operand:SI 3 "immediate_operand" "i")]
- VSLRI))]
- "TARGET_SIMD"
- "*
- int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
- aarch64_simd_const_bounds (operands[3], 1 - <VSLRI:offsetlr>,
- bit_width - <VSLRI:offsetlr> + 1);
- return \"s<lr>i\\t%<v>0<Vmtype>, %<v>2<Vmtype>, %3\";"
- [(set_attr "simd_type" "simd_shift_imm")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-;; vqshl(u)
-
-(define_insn "aarch64_<sur>qshl<u>_n<mode>"
- [(set (match_operand:VSDQ_I 0 "register_operand" "=w")
- (unspec:VSDQ_I [(match_operand:VSDQ_I 1 "register_operand" "w")
- (match_operand:SI 2 "immediate_operand" "i")]
- VQSHL_N))]
- "TARGET_SIMD"
- "*
- int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
- aarch64_simd_const_bounds (operands[2], 0, bit_width);
- return \"<sur>qshl<u>\\t%<v>0<Vmtype>, %<v>1<Vmtype>, %2\";"
- [(set_attr "simd_type" "simd_sat_shift_imm")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-
-;; vq(r)shr(u)n_n
-
-(define_insn "aarch64_<sur>q<r>shr<u>n_n<mode>"
- [(set (match_operand:<VNARROWQ> 0 "register_operand" "=w")
- (unspec:<VNARROWQ> [(match_operand:VSQN_HSDI 1 "register_operand" "w")
- (match_operand:SI 2 "immediate_operand" "i")]
- VQSHRN_N))]
- "TARGET_SIMD"
- "*
- int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
- aarch64_simd_const_bounds (operands[2], 1, bit_width + 1);
- return \"<sur>q<r>shr<u>n\\t%<vn2>0<Vmntype>, %<v>1<Vmtype>, %2\";"
- [(set_attr "simd_type" "simd_sat_shiftn_imm")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-
-;; cm(eq|ge|le|lt|gt)
-
-(define_insn "aarch64_cm<cmp><mode>"
- [(set (match_operand:<V_cmp_result> 0 "register_operand" "=w,w")
- (unspec:<V_cmp_result>
- [(match_operand:VSDQ_I_DI 1 "register_operand" "w,w")
- (match_operand:VSDQ_I_DI 2 "aarch64_simd_reg_or_zero" "w,Z")]
- VCMP_S))]
- "TARGET_SIMD"
- "@
- cm<cmp>\t%<v>0<Vmtype>, %<v>1<Vmtype>, %<v>2<Vmtype>
- cm<cmp>\t%<v>0<Vmtype>, %<v>1<Vmtype>, #0"
- [(set_attr "simd_type" "simd_cmp")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-;; cm(hs|hi|tst)
-
-(define_insn "aarch64_cm<cmp><mode>"
- [(set (match_operand:<V_cmp_result> 0 "register_operand" "=w")
- (unspec:<V_cmp_result>
- [(match_operand:VSDQ_I_DI 1 "register_operand" "w")
- (match_operand:VSDQ_I_DI 2 "register_operand" "w")]
- VCMP_U))]
- "TARGET_SIMD"
- "cm<cmp>\t%<v>0<Vmtype>, %<v>1<Vmtype>, %<v>2<Vmtype>"
- [(set_attr "simd_type" "simd_cmp")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-;; fcm(eq|ge|le|lt|gt)
-
-(define_insn "aarch64_cm<cmp><mode>"
- [(set (match_operand:<V_cmp_result> 0 "register_operand" "=w,w")
- (unspec:<V_cmp_result>
- [(match_operand:VDQF 1 "register_operand" "w,w")
- (match_operand:VDQF 2 "aarch64_simd_reg_or_zero" "w,Dz")]
- VCMP_S))]
- "TARGET_SIMD"
- "@
- fcm<cmp>\t%<v>0<Vmtype>, %<v>1<Vmtype>, %<v>2<Vmtype>
- fcm<cmp>\t%<v>0<Vmtype>, %<v>1<Vmtype>, 0"
- [(set_attr "simd_type" "simd_fcmp")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-;; addp
-
-(define_insn "aarch64_addp<mode>"
- [(set (match_operand:VD_BHSI 0 "register_operand" "=w")
- (unspec:VD_BHSI
- [(match_operand:VD_BHSI 1 "register_operand" "w")
- (match_operand:VD_BHSI 2 "register_operand" "w")]
- UNSPEC_ADDP))]
- "TARGET_SIMD"
- "addp\t%<v>0<Vmtype>, %<v>1<Vmtype>, %<v>2<Vmtype>"
- [(set_attr "simd_type" "simd_add")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-(define_insn "aarch64_addpdi"
- [(set (match_operand:DI 0 "register_operand" "=w")
- (unspec:DI
- [(match_operand:V2DI 1 "register_operand" "w")]
- UNSPEC_ADDP))]
- "TARGET_SIMD"
- "addp\t%d0, %1.2d"
- [(set_attr "simd_type" "simd_add")
- (set_attr "simd_mode" "DI")]
-)
-
-;; v(max|min)
-
-(define_expand "aarch64_<maxmin><mode>"
- [(set (match_operand:VDQ_BHSI 0 "register_operand" "=w")
- (MAXMIN:VDQ_BHSI (match_operand:VDQ_BHSI 1 "register_operand" "w")
- (match_operand:VDQ_BHSI 2 "register_operand" "w")))]
- "TARGET_SIMD"
-{
- emit_insn (gen_<maxmin><mode>3 (operands[0], operands[1], operands[2]));
- DONE;
-})
-
-
-(define_insn "aarch64_<fmaxmin><mode>"
- [(set (match_operand:VDQF 0 "register_operand" "=w")
- (unspec:VDQF [(match_operand:VDQF 1 "register_operand" "w")
- (match_operand:VDQF 2 "register_operand" "w")]
- FMAXMIN))]
- "TARGET_SIMD"
- "<fmaxmin>\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
- [(set_attr "simd_type" "simd_fminmax")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-;; sqrt
-
-(define_insn "sqrt<mode>2"
- [(set (match_operand:VDQF 0 "register_operand" "=w")
- (sqrt:VDQF (match_operand:VDQF 1 "register_operand" "w")))]
- "TARGET_SIMD"
- "fsqrt\\t%0.<Vtype>, %1.<Vtype>"
- [(set_attr "simd_type" "simd_fsqrt")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-(define_expand "aarch64_sqrt<mode>"
- [(match_operand:VDQF 0 "register_operand" "=w")
- (match_operand:VDQF 1 "register_operand" "w")]
- "TARGET_SIMD"
-{
- emit_insn (gen_sqrt<mode>2 (operands[0], operands[1]));
- DONE;
-})
-
-
-;; Patterns for vector struct loads and stores.
-
-(define_insn "vec_load_lanesoi<mode>"
- [(set (match_operand:OI 0 "register_operand" "=w")
- (unspec:OI [(match_operand:OI 1 "aarch64_simd_struct_operand" "Utv")
- (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
- UNSPEC_LD2))]
- "TARGET_SIMD"
- "ld2\\t{%S0.<Vtype> - %T0.<Vtype>}, %1"
- [(set_attr "simd_type" "simd_load2")
- (set_attr "simd_mode" "<MODE>")])
-
-(define_insn "vec_store_lanesoi<mode>"
- [(set (match_operand:OI 0 "aarch64_simd_struct_operand" "=Utv")
- (unspec:OI [(match_operand:OI 1 "register_operand" "w")
- (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
- UNSPEC_ST2))]
- "TARGET_SIMD"
- "st2\\t{%S1.<Vtype> - %T1.<Vtype>}, %0"
- [(set_attr "simd_type" "simd_store2")
- (set_attr "simd_mode" "<MODE>")])
-
-(define_insn "vec_load_lanesci<mode>"
- [(set (match_operand:CI 0 "register_operand" "=w")
- (unspec:CI [(match_operand:CI 1 "aarch64_simd_struct_operand" "Utv")
- (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
- UNSPEC_LD3))]
- "TARGET_SIMD"
- "ld3\\t{%S0.<Vtype> - %U0.<Vtype>}, %1"
- [(set_attr "simd_type" "simd_load3")
- (set_attr "simd_mode" "<MODE>")])
-
-(define_insn "vec_store_lanesci<mode>"
- [(set (match_operand:CI 0 "aarch64_simd_struct_operand" "=Utv")
- (unspec:CI [(match_operand:CI 1 "register_operand" "w")
- (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
- UNSPEC_ST3))]
- "TARGET_SIMD"
- "st3\\t{%S1.<Vtype> - %U1.<Vtype>}, %0"
- [(set_attr "simd_type" "simd_store3")
- (set_attr "simd_mode" "<MODE>")])
-
-(define_insn "vec_load_lanesxi<mode>"
- [(set (match_operand:XI 0 "register_operand" "=w")
- (unspec:XI [(match_operand:XI 1 "aarch64_simd_struct_operand" "Utv")
- (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
- UNSPEC_LD4))]
- "TARGET_SIMD"
- "ld4\\t{%S0.<Vtype> - %V0.<Vtype>}, %1"
- [(set_attr "simd_type" "simd_load4")
- (set_attr "simd_mode" "<MODE>")])
-
-(define_insn "vec_store_lanesxi<mode>"
- [(set (match_operand:XI 0 "aarch64_simd_struct_operand" "=Utv")
- (unspec:XI [(match_operand:XI 1 "register_operand" "w")
- (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
- UNSPEC_ST4))]
- "TARGET_SIMD"
- "st4\\t{%S1.<Vtype> - %V1.<Vtype>}, %0"
- [(set_attr "simd_type" "simd_store4")
- (set_attr "simd_mode" "<MODE>")])
-
-;; Reload patterns for AdvSIMD register list operands.
-
-(define_expand "mov<mode>"
- [(set (match_operand:VSTRUCT 0 "aarch64_simd_nonimmediate_operand" "")
- (match_operand:VSTRUCT 1 "aarch64_simd_general_operand" ""))]
- "TARGET_SIMD"
-{
- if (can_create_pseudo_p ())
- {
- if (GET_CODE (operands[0]) != REG)
- operands[1] = force_reg (<MODE>mode, operands[1]);
- }
-})
-
-(define_insn "*aarch64_mov<mode>"
- [(set (match_operand:VSTRUCT 0 "aarch64_simd_nonimmediate_operand" "=w,Utv,w")
- (match_operand:VSTRUCT 1 "aarch64_simd_general_operand" " w,w,Utv"))]
- "TARGET_SIMD
- && (register_operand (operands[0], <MODE>mode)
- || register_operand (operands[1], <MODE>mode))"
-
-{
- switch (which_alternative)
- {
- case 0: return "#";
- case 1: return "st1\\t{%S1.16b - %<Vendreg>1.16b}, %0";
- case 2: return "ld1\\t{%S0.16b - %<Vendreg>0.16b}, %1";
- default: gcc_unreachable ();
- }
-}
- [(set_attr "simd_type" "simd_move,simd_store<nregs>,simd_load<nregs>")
- (set (attr "length") (symbol_ref "aarch64_simd_attr_length_move (insn)"))
- (set_attr "simd_mode" "<MODE>")])
-
-(define_split
- [(set (match_operand:OI 0 "register_operand" "")
- (match_operand:OI 1 "register_operand" ""))]
- "TARGET_SIMD && reload_completed"
- [(set (match_dup 0) (match_dup 1))
- (set (match_dup 2) (match_dup 3))]
-{
- int rdest = REGNO (operands[0]);
- int rsrc = REGNO (operands[1]);
- rtx dest[2], src[2];
-
- dest[0] = gen_rtx_REG (TFmode, rdest);
- src[0] = gen_rtx_REG (TFmode, rsrc);
- dest[1] = gen_rtx_REG (TFmode, rdest + 1);
- src[1] = gen_rtx_REG (TFmode, rsrc + 1);
-
- aarch64_simd_disambiguate_copy (operands, dest, src, 2);
-})
-
-(define_split
- [(set (match_operand:CI 0 "register_operand" "")
- (match_operand:CI 1 "register_operand" ""))]
- "TARGET_SIMD && reload_completed"
- [(set (match_dup 0) (match_dup 1))
- (set (match_dup 2) (match_dup 3))
- (set (match_dup 4) (match_dup 5))]
-{
- int rdest = REGNO (operands[0]);
- int rsrc = REGNO (operands[1]);
- rtx dest[3], src[3];
-
- dest[0] = gen_rtx_REG (TFmode, rdest);
- src[0] = gen_rtx_REG (TFmode, rsrc);
- dest[1] = gen_rtx_REG (TFmode, rdest + 1);
- src[1] = gen_rtx_REG (TFmode, rsrc + 1);
- dest[2] = gen_rtx_REG (TFmode, rdest + 2);
- src[2] = gen_rtx_REG (TFmode, rsrc + 2);
-
- aarch64_simd_disambiguate_copy (operands, dest, src, 3);
-})
-
-(define_split
- [(set (match_operand:XI 0 "register_operand" "")
- (match_operand:XI 1 "register_operand" ""))]
- "TARGET_SIMD && reload_completed"
- [(set (match_dup 0) (match_dup 1))
- (set (match_dup 2) (match_dup 3))
- (set (match_dup 4) (match_dup 5))
- (set (match_dup 6) (match_dup 7))]
-{
- int rdest = REGNO (operands[0]);
- int rsrc = REGNO (operands[1]);
- rtx dest[4], src[4];
-
- dest[0] = gen_rtx_REG (TFmode, rdest);
- src[0] = gen_rtx_REG (TFmode, rsrc);
- dest[1] = gen_rtx_REG (TFmode, rdest + 1);
- src[1] = gen_rtx_REG (TFmode, rsrc + 1);
- dest[2] = gen_rtx_REG (TFmode, rdest + 2);
- src[2] = gen_rtx_REG (TFmode, rsrc + 2);
- dest[3] = gen_rtx_REG (TFmode, rdest + 3);
- src[3] = gen_rtx_REG (TFmode, rsrc + 3);
-
- aarch64_simd_disambiguate_copy (operands, dest, src, 4);
-})
-
-(define_insn "aarch64_ld2<mode>_dreg"
- [(set (match_operand:OI 0 "register_operand" "=w")
- (subreg:OI
- (vec_concat:<VRL2>
- (vec_concat:<VDBL>
- (unspec:VD [(match_operand:TI 1 "aarch64_simd_struct_operand" "Utv")]
- UNSPEC_LD2)
- (vec_duplicate:VD (const_int 0)))
- (vec_concat:<VDBL>
- (unspec:VD [(match_dup 1)]
- UNSPEC_LD2)
- (vec_duplicate:VD (const_int 0)))) 0))]
- "TARGET_SIMD"
- "ld2\\t{%S0.<Vtype> - %T0.<Vtype>}, %1"
- [(set_attr "simd_type" "simd_load2")
- (set_attr "simd_mode" "<MODE>")])
-
-(define_insn "aarch64_ld2<mode>_dreg"
- [(set (match_operand:OI 0 "register_operand" "=w")
- (subreg:OI
- (vec_concat:<VRL2>
- (vec_concat:<VDBL>
- (unspec:DX [(match_operand:TI 1 "aarch64_simd_struct_operand" "Utv")]
- UNSPEC_LD2)
- (const_int 0))
- (vec_concat:<VDBL>
- (unspec:DX [(match_dup 1)]
- UNSPEC_LD2)
- (const_int 0))) 0))]
- "TARGET_SIMD"
- "ld1\\t{%S0.1d - %T0.1d}, %1"
- [(set_attr "simd_type" "simd_load2")
- (set_attr "simd_mode" "<MODE>")])
-
-(define_insn "aarch64_ld3<mode>_dreg"
- [(set (match_operand:CI 0 "register_operand" "=w")
- (subreg:CI
- (vec_concat:<VRL3>
- (vec_concat:<VRL2>
- (vec_concat:<VDBL>
- (unspec:VD [(match_operand:EI 1 "aarch64_simd_struct_operand" "Utv")]
- UNSPEC_LD3)
- (vec_duplicate:VD (const_int 0)))
- (vec_concat:<VDBL>
- (unspec:VD [(match_dup 1)]
- UNSPEC_LD3)
- (vec_duplicate:VD (const_int 0))))
- (vec_concat:<VDBL>
- (unspec:VD [(match_dup 1)]
- UNSPEC_LD3)
- (vec_duplicate:VD (const_int 0)))) 0))]
- "TARGET_SIMD"
- "ld3\\t{%S0.<Vtype> - %U0.<Vtype>}, %1"
- [(set_attr "simd_type" "simd_load3")
- (set_attr "simd_mode" "<MODE>")])
-
-(define_insn "aarch64_ld3<mode>_dreg"
- [(set (match_operand:CI 0 "register_operand" "=w")
- (subreg:CI
- (vec_concat:<VRL3>
- (vec_concat:<VRL2>
- (vec_concat:<VDBL>
- (unspec:DX [(match_operand:EI 1 "aarch64_simd_struct_operand" "Utv")]
- UNSPEC_LD3)
- (const_int 0))
- (vec_concat:<VDBL>
- (unspec:DX [(match_dup 1)]
- UNSPEC_LD3)
- (const_int 0)))
- (vec_concat:<VDBL>
- (unspec:DX [(match_dup 1)]
- UNSPEC_LD3)
- (const_int 0))) 0))]
- "TARGET_SIMD"
- "ld1\\t{%S0.1d - %U0.1d}, %1"
- [(set_attr "simd_type" "simd_load3")
- (set_attr "simd_mode" "<MODE>")])
-
-(define_insn "aarch64_ld4<mode>_dreg"
- [(set (match_operand:XI 0 "register_operand" "=w")
- (subreg:XI
- (vec_concat:<VRL4>
- (vec_concat:<VRL2>
- (vec_concat:<VDBL>
- (unspec:VD [(match_operand:OI 1 "aarch64_simd_struct_operand" "Utv")]
- UNSPEC_LD4)
- (vec_duplicate:VD (const_int 0)))
- (vec_concat:<VDBL>
- (unspec:VD [(match_dup 1)]
- UNSPEC_LD4)
- (vec_duplicate:VD (const_int 0))))
- (vec_concat:<VRL2>
- (vec_concat:<VDBL>
- (unspec:VD [(match_dup 1)]
- UNSPEC_LD4)
- (vec_duplicate:VD (const_int 0)))
- (vec_concat:<VDBL>
- (unspec:VD [(match_dup 1)]
- UNSPEC_LD4)
- (vec_duplicate:VD (const_int 0))))) 0))]
- "TARGET_SIMD"
- "ld4\\t{%S0.<Vtype> - %V0.<Vtype>}, %1"
- [(set_attr "simd_type" "simd_load4")
- (set_attr "simd_mode" "<MODE>")])
-
-(define_insn "aarch64_ld4<mode>_dreg"
- [(set (match_operand:XI 0 "register_operand" "=w")
- (subreg:XI
- (vec_concat:<VRL4>
- (vec_concat:<VRL2>
- (vec_concat:<VDBL>
- (unspec:DX [(match_operand:OI 1 "aarch64_simd_struct_operand" "Utv")]
- UNSPEC_LD4)
- (const_int 0))
- (vec_concat:<VDBL>
- (unspec:DX [(match_dup 1)]
- UNSPEC_LD4)
- (const_int 0)))
- (vec_concat:<VRL2>
- (vec_concat:<VDBL>
- (unspec:DX [(match_dup 1)]
- UNSPEC_LD4)
- (const_int 0))
- (vec_concat:<VDBL>
- (unspec:DX [(match_dup 1)]
- UNSPEC_LD4)
- (const_int 0)))) 0))]
- "TARGET_SIMD"
- "ld1\\t{%S0.1d - %V0.1d}, %1"
- [(set_attr "simd_type" "simd_load4")
- (set_attr "simd_mode" "<MODE>")])
-
-(define_expand "aarch64_ld<VSTRUCT:nregs><VDC:mode>"
- [(match_operand:VSTRUCT 0 "register_operand" "=w")
- (match_operand:DI 1 "register_operand" "r")
- (unspec:VDC [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
- "TARGET_SIMD"
-{
- enum machine_mode mode = <VSTRUCT:VSTRUCT_DREG>mode;
- rtx mem = gen_rtx_MEM (mode, operands[1]);
-
- emit_insn (gen_aarch64_ld<VSTRUCT:nregs><VDC:mode>_dreg (operands[0], mem));
- DONE;
-})
-
-(define_expand "aarch64_ld<VSTRUCT:nregs><VQ:mode>"
- [(match_operand:VSTRUCT 0 "register_operand" "=w")
- (match_operand:DI 1 "register_operand" "r")
- (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
- "TARGET_SIMD"
-{
- enum machine_mode mode = <VSTRUCT:MODE>mode;
- rtx mem = gen_rtx_MEM (mode, operands[1]);
-
- emit_insn (gen_vec_load_lanes<VSTRUCT:mode><VQ:mode> (operands[0], mem));
- DONE;
-})
-
-;; Expanders for builtins to extract vector registers from large
-;; opaque integer modes.
-
-;; D-register list.
-
-(define_expand "aarch64_get_dreg<VSTRUCT:mode><VDC:mode>"
- [(match_operand:VDC 0 "register_operand" "=w")
- (match_operand:VSTRUCT 1 "register_operand" "w")
- (match_operand:SI 2 "immediate_operand" "i")]
- "TARGET_SIMD"
-{
- int part = INTVAL (operands[2]);
- rtx temp = gen_reg_rtx (<VDC:VDBL>mode);
- int offset = part * 16;
-
- emit_move_insn (temp, gen_rtx_SUBREG (<VDC:VDBL>mode, operands[1], offset));
- emit_move_insn (operands[0], gen_lowpart (<VDC:MODE>mode, temp));
- DONE;
-})
-
-;; Q-register list.
-
-(define_expand "aarch64_get_qreg<VSTRUCT:mode><VQ:mode>"
- [(match_operand:VQ 0 "register_operand" "=w")
- (match_operand:VSTRUCT 1 "register_operand" "w")
- (match_operand:SI 2 "immediate_operand" "i")]
- "TARGET_SIMD"
-{
- int part = INTVAL (operands[2]);
- int offset = part * 16;
-
- emit_move_insn (operands[0],
- gen_rtx_SUBREG (<VQ:MODE>mode, operands[1], offset));
- DONE;
-})
-
-;; Permuted-store expanders for neon intrinsics.
-
-;; Permute instructions
-
-;; vec_perm support
-
-(define_expand "vec_perm_const<mode>"
- [(match_operand:VALL 0 "register_operand")
- (match_operand:VALL 1 "register_operand")
- (match_operand:VALL 2 "register_operand")
- (match_operand:<V_cmp_result> 3)]
- "TARGET_SIMD"
-{
- if (aarch64_expand_vec_perm_const (operands[0], operands[1],
- operands[2], operands[3]))
- DONE;
- else
- FAIL;
-})
-
-(define_expand "vec_perm<mode>"
- [(match_operand:VB 0 "register_operand")
- (match_operand:VB 1 "register_operand")
- (match_operand:VB 2 "register_operand")
- (match_operand:VB 3 "register_operand")]
- "TARGET_SIMD"
-{
- aarch64_expand_vec_perm (operands[0], operands[1],
- operands[2], operands[3]);
- DONE;
-})
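For illustration, a small example that exercises the vec_perm expander, assuming GCC's generic __builtin_shuffle with a variable selector (a constant selector would instead be tried through vec_perm_const above):

    typedef unsigned char u8x16 __attribute__ ((vector_size (16)));

    u8x16
    permute (u8x16 x, u8x16 sel)
    {
      /* Runtime byte permutation; expected to expand through vec_permv16qi
         and end up as a tbl instruction.  */
      return __builtin_shuffle (x, sel);
    }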
-
-(define_insn "aarch64_tbl1<mode>"
- [(set (match_operand:VB 0 "register_operand" "=w")
- (unspec:VB [(match_operand:V16QI 1 "register_operand" "w")
- (match_operand:VB 2 "register_operand" "w")]
- UNSPEC_TBL))]
- "TARGET_SIMD"
- "tbl\\t%0.<Vtype>, {%1.16b}, %2.<Vtype>"
- [(set_attr "simd_type" "simd_tbl")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-;; Two source registers.
-
-(define_insn "aarch64_tbl2v16qi"
- [(set (match_operand:V16QI 0 "register_operand" "=w")
- (unspec:V16QI [(match_operand:OI 1 "register_operand" "w")
- (match_operand:V16QI 2 "register_operand" "w")]
- UNSPEC_TBL))]
- "TARGET_SIMD"
- "tbl\\t%0.16b, {%S1.16b - %T1.16b}, %2.16b"
- [(set_attr "simd_type" "simd_tbl")
- (set_attr "simd_mode" "V16QI")]
-)
-
-(define_insn_and_split "aarch64_combinev16qi"
- [(set (match_operand:OI 0 "register_operand" "=w")
- (unspec:OI [(match_operand:V16QI 1 "register_operand" "w")
- (match_operand:V16QI 2 "register_operand" "w")]
- UNSPEC_CONCAT))]
- "TARGET_SIMD"
- "#"
- "&& reload_completed"
- [(const_int 0)]
-{
- aarch64_split_combinev16qi (operands);
- DONE;
-})
-
-(define_insn "aarch64_<PERMUTE:perm_insn><PERMUTE:perm_hilo><mode>"
- [(set (match_operand:VALL 0 "register_operand" "=w")
- (unspec:VALL [(match_operand:VALL 1 "register_operand" "w")
- (match_operand:VALL 2 "register_operand" "w")]
- PERMUTE))]
- "TARGET_SIMD"
- "<PERMUTE:perm_insn><PERMUTE:perm_hilo>\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
- [(set_attr "simd_type" "simd_<PERMUTE:perm_insn>")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-(define_insn "aarch64_st2<mode>_dreg"
- [(set (match_operand:TI 0 "aarch64_simd_struct_operand" "=Utv")
- (unspec:TI [(match_operand:OI 1 "register_operand" "w")
- (unspec:VD [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
- UNSPEC_ST2))]
- "TARGET_SIMD"
- "st2\\t{%S1.<Vtype> - %T1.<Vtype>}, %0"
- [(set_attr "simd_type" "simd_store2")
- (set_attr "simd_mode" "<MODE>")])
-
-(define_insn "aarch64_st2<mode>_dreg"
- [(set (match_operand:TI 0 "aarch64_simd_struct_operand" "=Utv")
- (unspec:TI [(match_operand:OI 1 "register_operand" "w")
- (unspec:DX [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
- UNSPEC_ST2))]
- "TARGET_SIMD"
- "st1\\t{%S1.1d - %T1.1d}, %0"
- [(set_attr "simd_type" "simd_store2")
- (set_attr "simd_mode" "<MODE>")])
-
-(define_insn "aarch64_st3<mode>_dreg"
- [(set (match_operand:EI 0 "aarch64_simd_struct_operand" "=Utv")
- (unspec:EI [(match_operand:CI 1 "register_operand" "w")
- (unspec:VD [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
- UNSPEC_ST3))]
- "TARGET_SIMD"
- "st3\\t{%S1.<Vtype> - %U1.<Vtype>}, %0"
- [(set_attr "simd_type" "simd_store3")
- (set_attr "simd_mode" "<MODE>")])
-
-(define_insn "aarch64_st3<mode>_dreg"
- [(set (match_operand:EI 0 "aarch64_simd_struct_operand" "=Utv")
- (unspec:EI [(match_operand:CI 1 "register_operand" "w")
- (unspec:DX [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
- UNSPEC_ST3))]
- "TARGET_SIMD"
- "st1\\t{%S1.1d - %U1.1d}, %0"
- [(set_attr "simd_type" "simd_store3")
- (set_attr "simd_mode" "<MODE>")])
-
-(define_insn "aarch64_st4<mode>_dreg"
- [(set (match_operand:OI 0 "aarch64_simd_struct_operand" "=Utv")
- (unspec:OI [(match_operand:XI 1 "register_operand" "w")
- (unspec:VD [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
- UNSPEC_ST4))]
- "TARGET_SIMD"
- "st4\\t{%S1.<Vtype> - %V1.<Vtype>}, %0"
- [(set_attr "simd_type" "simd_store4")
- (set_attr "simd_mode" "<MODE>")])
-
-(define_insn "aarch64_st4<mode>_dreg"
- [(set (match_operand:OI 0 "aarch64_simd_struct_operand" "=Utv")
- (unspec:OI [(match_operand:XI 1 "register_operand" "w")
- (unspec:DX [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
- UNSPEC_ST4))]
- "TARGET_SIMD"
- "st1\\t{%S1.1d - %V1.1d}, %0"
- [(set_attr "simd_type" "simd_store4")
- (set_attr "simd_mode" "<MODE>")])
-
-(define_expand "aarch64_st<VSTRUCT:nregs><VDC:mode>"
- [(match_operand:DI 0 "register_operand" "r")
- (match_operand:VSTRUCT 1 "register_operand" "w")
- (unspec:VDC [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
- "TARGET_SIMD"
-{
- enum machine_mode mode = <VSTRUCT:VSTRUCT_DREG>mode;
- rtx mem = gen_rtx_MEM (mode, operands[0]);
-
- emit_insn (gen_aarch64_st<VSTRUCT:nregs><VDC:mode>_dreg (mem, operands[1]));
- DONE;
-})
-
-(define_expand "aarch64_st<VSTRUCT:nregs><VQ:mode>"
- [(match_operand:DI 0 "register_operand" "r")
- (match_operand:VSTRUCT 1 "register_operand" "w")
- (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
- "TARGET_SIMD"
-{
- enum machine_mode mode = <VSTRUCT:MODE>mode;
- rtx mem = gen_rtx_MEM (mode, operands[0]);
-
- emit_insn (gen_vec_store_lanes<VSTRUCT:mode><VQ:mode> (mem, operands[1]));
- DONE;
-})
-
-;; Expander for builtins to insert vector registers into large
-;; opaque integer modes.
-
-;; Q-register list. We don't need a D-reg inserter as we zero
-;; extend them in arm_neon.h and insert the resulting Q-regs.
-
-(define_expand "aarch64_set_qreg<VSTRUCT:mode><VQ:mode>"
- [(match_operand:VSTRUCT 0 "register_operand" "+w")
- (match_operand:VSTRUCT 1 "register_operand" "0")
- (match_operand:VQ 2 "register_operand" "w")
- (match_operand:SI 3 "immediate_operand" "i")]
- "TARGET_SIMD"
-{
- int part = INTVAL (operands[3]);
- int offset = part * 16;
-
- emit_move_insn (operands[0], operands[1]);
- emit_move_insn (gen_rtx_SUBREG (<VQ:MODE>mode, operands[0], offset),
- operands[2]);
- DONE;
-})
-
-;; Standard pattern name vec_init<mode>.
-
-(define_expand "vec_init<mode>"
- [(match_operand:VALL 0 "register_operand" "")
- (match_operand 1 "" "")]
- "TARGET_SIMD"
-{
- aarch64_expand_vector_init (operands[0], operands[1]);
- DONE;
-})
-
-(define_insn "*aarch64_simd_ld1r<mode>"
- [(set (match_operand:VALLDI 0 "register_operand" "=w")
- (vec_duplicate:VALLDI
- (match_operand:<VEL> 1 "aarch64_simd_struct_operand" "Utv")))]
- "TARGET_SIMD"
- "ld1r\\t{%0.<Vtype>}, %1"
- [(set_attr "simd_type" "simd_load1r")
- (set_attr "simd_mode" "<MODE>")])
diff --git a/gcc-4.8.1/gcc/config/aarch64/aarch64-tune.md b/gcc-4.8.1/gcc/config/aarch64/aarch64-tune.md
deleted file mode 100644
index 02699e35c..000000000
--- a/gcc-4.8.1/gcc/config/aarch64/aarch64-tune.md
+++ /dev/null
@@ -1,5 +0,0 @@
-;; -*- buffer-read-only: t -*-
-;; Generated automatically by gentune.sh from aarch64-cores.def
-(define_attr "tune"
- "cortexa53,cortexa57,large,small"
- (const (symbol_ref "((enum attr_tune) aarch64_tune)")))
diff --git a/gcc-4.8.1/gcc/config/aarch64/aarch64.c b/gcc-4.8.1/gcc/config/aarch64/aarch64.c
deleted file mode 100644
index 4166782b8..000000000
--- a/gcc-4.8.1/gcc/config/aarch64/aarch64.c
+++ /dev/null
@@ -1,8007 +0,0 @@
-/* Machine description for AArch64 architecture.
- Copyright (C) 2009-2013 Free Software Foundation, Inc.
- Contributed by ARM Ltd.
-
- This file is part of GCC.
-
- GCC is free software; you can redistribute it and/or modify it
- under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 3, or (at your option)
- any later version.
-
- GCC is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with GCC; see the file COPYING3. If not see
- <http://www.gnu.org/licenses/>. */
-
-#include "config.h"
-#include "system.h"
-#include "coretypes.h"
-#include "tm.h"
-#include "insn-codes.h"
-#include "rtl.h"
-#include "insn-attr.h"
-#include "tree.h"
-#include "regs.h"
-#include "df.h"
-#include "hard-reg-set.h"
-#include "output.h"
-#include "expr.h"
-#include "reload.h"
-#include "toplev.h"
-#include "target.h"
-#include "target-def.h"
-#include "targhooks.h"
-#include "ggc.h"
-#include "function.h"
-#include "tm_p.h"
-#include "recog.h"
-#include "langhooks.h"
-#include "diagnostic-core.h"
-#include "gimple.h"
-#include "optabs.h"
-#include "dwarf2.h"
-
-/* Classifies an address.
-
- ADDRESS_REG_IMM
- A simple base register plus immediate offset.
-
- ADDRESS_REG_WB
- A base register indexed by immediate offset with writeback.
-
- ADDRESS_REG_REG
- A base register indexed by (optionally scaled) register.
-
- ADDRESS_REG_UXTW
- A base register indexed by (optionally scaled) zero-extended register.
-
- ADDRESS_REG_SXTW
- A base register indexed by (optionally scaled) sign-extended register.
-
- ADDRESS_LO_SUM
- A LO_SUM rtx with a base register and "LO12" symbol relocation.
-
- ADDRESS_SYMBOLIC
- A constant symbolic address, in the pc-relative literal pool. */
-
-enum aarch64_address_type {
- ADDRESS_REG_IMM,
- ADDRESS_REG_WB,
- ADDRESS_REG_REG,
- ADDRESS_REG_UXTW,
- ADDRESS_REG_SXTW,
- ADDRESS_LO_SUM,
- ADDRESS_SYMBOLIC
-};
-
-struct aarch64_address_info {
- enum aarch64_address_type type;
- rtx base;
- rtx offset;
- int shift;
- enum aarch64_symbol_type symbol_type;
-};
-
-/* The current code model. */
-enum aarch64_code_model aarch64_cmodel;
-
-#ifdef HAVE_AS_TLS
-#undef TARGET_HAVE_TLS
-#define TARGET_HAVE_TLS 1
-#endif
-
-static bool aarch64_composite_type_p (const_tree, enum machine_mode);
-static bool aarch64_vfp_is_call_or_return_candidate (enum machine_mode,
- const_tree,
- enum machine_mode *, int *,
- bool *);
-static void aarch64_elf_asm_constructor (rtx, int) ATTRIBUTE_UNUSED;
-static void aarch64_elf_asm_destructor (rtx, int) ATTRIBUTE_UNUSED;
-static void aarch64_override_options_after_change (void);
-static int aarch64_simd_valid_immediate (rtx, enum machine_mode, int, rtx *,
- int *, unsigned char *, int *, int *);
-static bool aarch64_vector_mode_supported_p (enum machine_mode);
-static unsigned bit_count (unsigned HOST_WIDE_INT);
-static bool aarch64_const_vec_all_same_int_p (rtx,
- HOST_WIDE_INT, HOST_WIDE_INT);
-
-static bool aarch64_vectorize_vec_perm_const_ok (enum machine_mode vmode,
- const unsigned char *sel);
-
-/* The processor for which instructions should be scheduled. */
-enum aarch64_processor aarch64_tune = generic;
-
-/* The current tuning set. */
-const struct tune_params *aarch64_tune_params;
-
-/* Mask to specify which instructions we are allowed to generate. */
-unsigned long aarch64_isa_flags = 0;
-
-/* Mask to specify which instruction scheduling options should be used. */
-unsigned long aarch64_tune_flags = 0;
-
-/* Tuning parameters. */
-
-#if HAVE_DESIGNATED_INITIALIZERS
-#define NAMED_PARAM(NAME, VAL) .NAME = (VAL)
-#else
-#define NAMED_PARAM(NAME, VAL) (VAL)
-#endif
-
-#if HAVE_DESIGNATED_INITIALIZERS && GCC_VERSION >= 2007
-__extension__
-#endif
-static const struct cpu_rtx_cost_table generic_rtx_cost_table =
-{
- NAMED_PARAM (memory_load, COSTS_N_INSNS (1)),
- NAMED_PARAM (memory_store, COSTS_N_INSNS (0)),
- NAMED_PARAM (register_shift, COSTS_N_INSNS (1)),
- NAMED_PARAM (int_divide, COSTS_N_INSNS (6)),
- NAMED_PARAM (float_divide, COSTS_N_INSNS (2)),
- NAMED_PARAM (double_divide, COSTS_N_INSNS (6)),
- NAMED_PARAM (int_multiply, COSTS_N_INSNS (1)),
- NAMED_PARAM (int_multiply_extend, COSTS_N_INSNS (1)),
- NAMED_PARAM (int_multiply_add, COSTS_N_INSNS (1)),
- NAMED_PARAM (int_multiply_extend_add, COSTS_N_INSNS (1)),
- NAMED_PARAM (float_multiply, COSTS_N_INSNS (0)),
- NAMED_PARAM (double_multiply, COSTS_N_INSNS (1))
-};
-
-#if HAVE_DESIGNATED_INITIALIZERS && GCC_VERSION >= 2007
-__extension__
-#endif
-static const struct cpu_addrcost_table generic_addrcost_table =
-{
- NAMED_PARAM (pre_modify, 0),
- NAMED_PARAM (post_modify, 0),
- NAMED_PARAM (register_offset, 0),
- NAMED_PARAM (register_extend, 0),
- NAMED_PARAM (imm_offset, 0)
-};
-
-#if HAVE_DESIGNATED_INITIALIZERS && GCC_VERSION >= 2007
-__extension__
-#endif
-static const struct cpu_regmove_cost generic_regmove_cost =
-{
- NAMED_PARAM (GP2GP, 1),
- NAMED_PARAM (GP2FP, 2),
- NAMED_PARAM (FP2GP, 2),
- /* We currently do not provide direct support for TFmode Q->Q move.
- Therefore we need to raise the cost above 2 in order to have
- reload handle the situation. */
- NAMED_PARAM (FP2FP, 4)
-};
-
-#if HAVE_DESIGNATED_INITIALIZERS && GCC_VERSION >= 2007
-__extension__
-#endif
-static const struct tune_params generic_tunings =
-{
- &generic_rtx_cost_table,
- &generic_addrcost_table,
- &generic_regmove_cost,
- NAMED_PARAM (memmov_cost, 4)
-};
-
-/* A processor implementing AArch64. */
-struct processor
-{
- const char *const name;
- enum aarch64_processor core;
- const char *arch;
- const unsigned long flags;
- const struct tune_params *const tune;
-};
-
-/* Processor cores implementing AArch64. */
-static const struct processor all_cores[] =
-{
-#define AARCH64_CORE(NAME, IDENT, ARCH, FLAGS, COSTS) \
- {NAME, IDENT, #ARCH, FLAGS | AARCH64_FL_FOR_ARCH##ARCH, &COSTS##_tunings},
-#include "aarch64-cores.def"
-#undef AARCH64_CORE
- {"generic", generic, "8", AARCH64_FL_FPSIMD | AARCH64_FL_FOR_ARCH8, &generic_tunings},
- {NULL, aarch64_none, NULL, 0, NULL}
-};
-
-/* Architectures implementing AArch64. */
-static const struct processor all_architectures[] =
-{
-#define AARCH64_ARCH(NAME, CORE, ARCH, FLAGS) \
- {NAME, CORE, #ARCH, FLAGS, NULL},
-#include "aarch64-arches.def"
-#undef AARCH64_ARCH
- {"generic", generic, "8", AARCH64_FL_FOR_ARCH8, NULL},
- {NULL, aarch64_none, NULL, 0, NULL}
-};
-
-/* Target specification. These are populated as commandline arguments
- are processed, or NULL if not specified. */
-static const struct processor *selected_arch;
-static const struct processor *selected_cpu;
-static const struct processor *selected_tune;
-
-#define AARCH64_CPU_DEFAULT_FLAGS ((selected_cpu) ? selected_cpu->flags : 0)
-
-/* An ISA extension in the co-processor and main instruction set space. */
-struct aarch64_option_extension
-{
- const char *const name;
- const unsigned long flags_on;
- const unsigned long flags_off;
-};
-
-/* ISA extensions in AArch64. */
-static const struct aarch64_option_extension all_extensions[] =
-{
-#define AARCH64_OPT_EXTENSION(NAME, FLAGS_ON, FLAGS_OFF) \
- {NAME, FLAGS_ON, FLAGS_OFF},
-#include "aarch64-option-extensions.def"
-#undef AARCH64_OPT_EXTENSION
- {NULL, 0, 0}
-};
-
-/* Used to track the size of an address when generating a pre/post
- increment address. */
-static enum machine_mode aarch64_memory_reference_mode;
-
-/* Used to force GTY into this file. */
-static GTY(()) int gty_dummy;
-
-/* A table of valid AArch64 "bitmask immediate" values for
- logical instructions. */
-
-#define AARCH64_NUM_BITMASKS 5334
-static unsigned HOST_WIDE_INT aarch64_bitmasks[AARCH64_NUM_BITMASKS];
-
-/* Did we set flag_omit_frame_pointer just so
- aarch64_frame_pointer_required would be called? */
-static bool faked_omit_frame_pointer;
-
-typedef enum aarch64_cond_code
-{
- AARCH64_EQ = 0, AARCH64_NE, AARCH64_CS, AARCH64_CC, AARCH64_MI, AARCH64_PL,
- AARCH64_VS, AARCH64_VC, AARCH64_HI, AARCH64_LS, AARCH64_GE, AARCH64_LT,
- AARCH64_GT, AARCH64_LE, AARCH64_AL, AARCH64_NV
-}
-aarch64_cc;
-
-#define AARCH64_INVERSE_CONDITION_CODE(X) ((aarch64_cc) (((int) X) ^ 1))
-
-/* The condition codes of the processor, and the inverse function. */
-static const char * const aarch64_condition_codes[] =
-{
- "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
- "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"
-};
-
-/* Provide a mapping from gcc register numbers to dwarf register numbers. */
-unsigned
-aarch64_dbx_register_number (unsigned regno)
-{
- if (GP_REGNUM_P (regno))
- return AARCH64_DWARF_R0 + regno - R0_REGNUM;
- else if (regno == SP_REGNUM)
- return AARCH64_DWARF_SP;
- else if (FP_REGNUM_P (regno))
- return AARCH64_DWARF_V0 + regno - V0_REGNUM;
-
- /* Return values >= DWARF_FRAME_REGISTERS indicate that there is no
- equivalent DWARF register. */
- return DWARF_FRAME_REGISTERS;
-}
-
-/* Return TRUE if MODE is any of the large INT modes. */
-static bool
-aarch64_vect_struct_mode_p (enum machine_mode mode)
-{
- return mode == OImode || mode == CImode || mode == XImode;
-}
-
-/* Return TRUE if MODE is any of the vector modes. */
-static bool
-aarch64_vector_mode_p (enum machine_mode mode)
-{
- return aarch64_vector_mode_supported_p (mode)
- || aarch64_vect_struct_mode_p (mode);
-}
-
-/* Implement target hook TARGET_ARRAY_MODE_SUPPORTED_P. */
-static bool
-aarch64_array_mode_supported_p (enum machine_mode mode,
- unsigned HOST_WIDE_INT nelems)
-{
- if (TARGET_SIMD
- && AARCH64_VALID_SIMD_QREG_MODE (mode)
- && (nelems >= 2 && nelems <= 4))
- return true;
-
- return false;
-}
-
-/* Implement HARD_REGNO_NREGS. */
-
-int
-aarch64_hard_regno_nregs (unsigned regno, enum machine_mode mode)
-{
- switch (aarch64_regno_regclass (regno))
- {
- case FP_REGS:
- case FP_LO_REGS:
- return (GET_MODE_SIZE (mode) + UNITS_PER_VREG - 1) / UNITS_PER_VREG;
- default:
- return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
- }
- gcc_unreachable ();
-}
-
-/* Implement HARD_REGNO_MODE_OK. */
-
-int
-aarch64_hard_regno_mode_ok (unsigned regno, enum machine_mode mode)
-{
- if (GET_MODE_CLASS (mode) == MODE_CC)
- return regno == CC_REGNUM;
-
- if (regno == SP_REGNUM || regno == FRAME_POINTER_REGNUM
- || regno == ARG_POINTER_REGNUM)
- return mode == Pmode;
-
- if (GP_REGNUM_P (regno) && ! aarch64_vect_struct_mode_p (mode))
- return 1;
-
- if (FP_REGNUM_P (regno))
- {
- if (aarch64_vect_struct_mode_p (mode))
- return
- (regno + aarch64_hard_regno_nregs (regno, mode) - 1) <= V31_REGNUM;
- else
- return 1;
- }
-
- return 0;
-}
-
-/* Return true if calls to DECL should be treated as
- long-calls (ie called via a register). */
-static bool
-aarch64_decl_is_long_call_p (const_tree decl ATTRIBUTE_UNUSED)
-{
- return false;
-}
-
-/* Return true if calls to symbol-ref SYM should be treated as
- long-calls (ie called via a register). */
-bool
-aarch64_is_long_call_p (rtx sym)
-{
- return aarch64_decl_is_long_call_p (SYMBOL_REF_DECL (sym));
-}
-
-/* Return true if the offsets to a zero/sign-extract operation
- represent an expression that matches an extend operation. The
- operands represent the parameters from
-
- (extract (mult (reg) (mult_imm)) (extract_imm) (const_int 0)). */
-bool
-aarch64_is_extend_from_extract (enum machine_mode mode, rtx mult_imm,
- rtx extract_imm)
-{
- HOST_WIDE_INT mult_val, extract_val;
-
- if (! CONST_INT_P (mult_imm) || ! CONST_INT_P (extract_imm))
- return false;
-
- mult_val = INTVAL (mult_imm);
- extract_val = INTVAL (extract_imm);
-
- if (extract_val > 8
- && extract_val < GET_MODE_BITSIZE (mode)
- && exact_log2 (extract_val & ~7) > 0
- && (extract_val & 7) <= 4
- && mult_val == (1 << (extract_val & 7)))
- return true;
-
- return false;
-}
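A rough self-contained check of the arithmetic above, under assumed example values (extract_imm = 35, mult_imm = 8 for DImode); this combination matches a 32-bit index extended and scaled by 8, i.e. the uxtw/sxtw #3 addressing form:

    #include <assert.h>

    int
    main (void)
    {
      const long extract_val = 35, mult_val = 8, mode_bits = 64;
      const long hi = extract_val & ~7;   /* 32: a power of two greater than 1.  */

      assert (extract_val > 8 && extract_val < mode_bits);
      assert (hi > 1 && (hi & (hi - 1)) == 0);
      assert ((extract_val & 7) <= 4);    /* Shift amount is 3.  */
      assert (mult_val == (1L << (extract_val & 7)));
      return 0;
    }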
-
-/* Emit an insn that's a simple single-set. Both the operands must be
- known to be valid. */
-inline static rtx
-emit_set_insn (rtx x, rtx y)
-{
- return emit_insn (gen_rtx_SET (VOIDmode, x, y));
-}
-
-/* X and Y are two things to compare using CODE. Emit the compare insn and
- return the rtx for register 0 in the proper mode. */
-rtx
-aarch64_gen_compare_reg (RTX_CODE code, rtx x, rtx y)
-{
- enum machine_mode mode = SELECT_CC_MODE (code, x, y);
- rtx cc_reg = gen_rtx_REG (mode, CC_REGNUM);
-
- emit_set_insn (cc_reg, gen_rtx_COMPARE (mode, x, y));
- return cc_reg;
-}
-
-/* Build the SYMBOL_REF for __tls_get_addr. */
-
-static GTY(()) rtx tls_get_addr_libfunc;
-
-rtx
-aarch64_tls_get_addr (void)
-{
- if (!tls_get_addr_libfunc)
- tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
- return tls_get_addr_libfunc;
-}
-
-/* Return the TLS model to use for ADDR. */
-
-static enum tls_model
-tls_symbolic_operand_type (rtx addr)
-{
- enum tls_model tls_kind = TLS_MODEL_NONE;
- rtx sym, addend;
-
- if (GET_CODE (addr) == CONST)
- {
- split_const (addr, &sym, &addend);
- if (GET_CODE (sym) == SYMBOL_REF)
- tls_kind = SYMBOL_REF_TLS_MODEL (sym);
- }
- else if (GET_CODE (addr) == SYMBOL_REF)
- tls_kind = SYMBOL_REF_TLS_MODEL (addr);
-
- return tls_kind;
-}
-
-/* We'll allow lo_sum's in our legitimate addresses so that combine
- can take care of combining addresses where necessary, but for
- generation purposes, we'll generate the address as:
- RTL Absolute
- tmp = hi (symbol_ref); adrp x1, foo
- dest = lo_sum (tmp, symbol_ref); add dest, x1, :lo_12:foo
- nop
-
- PIC TLS
- adrp x1, :got:foo adrp tmp, :tlsgd:foo
- ldr x1, [:got_lo12:foo] add dest, tmp, :tlsgd_lo12:foo
- bl __tls_get_addr
- nop
-
- Load TLS symbol, depending on TLS mechanism and TLS access model.
-
- Global Dynamic - Traditional TLS:
- adrp tmp, :tlsgd:imm
- add dest, tmp, #:tlsgd_lo12:imm
- bl __tls_get_addr
-
- Global Dynamic - TLS Descriptors:
- adrp dest, :tlsdesc:imm
- ldr tmp, [dest, #:tlsdesc_lo12:imm]
- add dest, dest, #:tlsdesc_lo12:imm
- blr tmp
- mrs tp, tpidr_el0
- add dest, dest, tp
-
- Initial Exec:
- mrs tp, tpidr_el0
- adrp tmp, :gottprel:imm
- ldr dest, [tmp, #:gottprel_lo12:imm]
- add dest, dest, tp
-
- Local Exec:
- mrs tp, tpidr_el0
- add t0, tp, #:tprel_hi12:imm
- add t0, #:tprel_lo12_nc:imm
-*/
-
-static void
-aarch64_load_symref_appropriately (rtx dest, rtx imm,
- enum aarch64_symbol_type type)
-{
- switch (type)
- {
- case SYMBOL_SMALL_ABSOLUTE:
- {
- rtx tmp_reg = dest;
- if (can_create_pseudo_p ())
- {
- tmp_reg = gen_reg_rtx (Pmode);
- }
-
- emit_move_insn (tmp_reg, gen_rtx_HIGH (Pmode, imm));
- emit_insn (gen_add_losym (dest, tmp_reg, imm));
- return;
- }
-
- case SYMBOL_SMALL_GOT:
- {
- rtx tmp_reg = dest;
- if (can_create_pseudo_p ())
- {
- tmp_reg = gen_reg_rtx (Pmode);
- }
- emit_move_insn (tmp_reg, gen_rtx_HIGH (Pmode, imm));
- emit_insn (gen_ldr_got_small (dest, tmp_reg, imm));
- return;
- }
-
- case SYMBOL_SMALL_TLSGD:
- {
- rtx insns;
- rtx result = gen_rtx_REG (Pmode, R0_REGNUM);
-
- start_sequence ();
- emit_call_insn (gen_tlsgd_small (result, imm));
- insns = get_insns ();
- end_sequence ();
-
- RTL_CONST_CALL_P (insns) = 1;
- emit_libcall_block (insns, dest, result, imm);
- return;
- }
-
- case SYMBOL_SMALL_TLSDESC:
- {
- rtx x0 = gen_rtx_REG (Pmode, R0_REGNUM);
- rtx tp;
-
- emit_insn (gen_tlsdesc_small (imm));
- tp = aarch64_load_tp (NULL);
- emit_insn (gen_rtx_SET (Pmode, dest, gen_rtx_PLUS (Pmode, tp, x0)));
- set_unique_reg_note (get_last_insn (), REG_EQUIV, imm);
- return;
- }
-
- case SYMBOL_SMALL_GOTTPREL:
- {
- rtx tmp_reg = gen_reg_rtx (Pmode);
- rtx tp = aarch64_load_tp (NULL);
- emit_insn (gen_tlsie_small (tmp_reg, imm));
- emit_insn (gen_rtx_SET (Pmode, dest, gen_rtx_PLUS (Pmode, tp, tmp_reg)));
- set_unique_reg_note (get_last_insn (), REG_EQUIV, imm);
- return;
- }
-
- case SYMBOL_SMALL_TPREL:
- {
- rtx tp = aarch64_load_tp (NULL);
- emit_insn (gen_tlsle_small (dest, tp, imm));
- set_unique_reg_note (get_last_insn (), REG_EQUIV, imm);
- return;
- }
-
- default:
- gcc_unreachable ();
- }
-}
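To make the sequences in the comment above concrete, a tiny hedged example (which sequence is actually chosen depends on -fpic, symbol visibility and the selected TLS dialect):

    /* A thread-local variable.  A static executable is expected to use the
       local-exec or initial-exec sequence (mrs of tpidr_el0 plus
       tprel/gottprel relocations); a shared object using the traditional
       dialect goes through the global-dynamic __tls_get_addr call.  */
    __thread int counter;

    int
    bump (void)
    {
      return ++counter;
    }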
-
-/* Emit a move from SRC to DEST. Assume that the move expanders can
- handle all moves if !can_create_pseudo_p (). The distinction is
- important because, unlike emit_move_insn, the move expanders know
- how to force Pmode objects into the constant pool even when the
- constant pool address is not itself legitimate. */
-static rtx
-aarch64_emit_move (rtx dest, rtx src)
-{
- return (can_create_pseudo_p ()
- ? emit_move_insn (dest, src)
- : emit_move_insn_1 (dest, src));
-}
-
-void
-aarch64_split_128bit_move (rtx dst, rtx src)
-{
- rtx low_dst;
-
- gcc_assert (GET_MODE (dst) == TImode);
-
- if (REG_P (dst) && REG_P (src))
- {
- int src_regno = REGNO (src);
- int dst_regno = REGNO (dst);
-
- gcc_assert (GET_MODE (src) == TImode);
-
- /* Handle r -> w, w -> r. */
- if (FP_REGNUM_P (dst_regno) && GP_REGNUM_P (src_regno))
- {
- emit_insn (gen_aarch64_movtilow_di (dst,
- gen_lowpart (word_mode, src)));
- emit_insn (gen_aarch64_movtihigh_di (dst,
- gen_highpart (word_mode, src)));
- return;
- }
- else if (GP_REGNUM_P (dst_regno) && FP_REGNUM_P (src_regno))
- {
- emit_insn (gen_aarch64_movdi_tilow (gen_lowpart (word_mode, dst),
- src));
- emit_insn (gen_aarch64_movdi_tihigh (gen_highpart (word_mode, dst),
- src));
- return;
- }
- /* Fall through to r -> r cases. */
- }
-
- low_dst = gen_lowpart (word_mode, dst);
- if (REG_P (low_dst)
- && reg_overlap_mentioned_p (low_dst, src))
- {
- aarch64_emit_move (gen_highpart (word_mode, dst),
- gen_highpart_mode (word_mode, TImode, src));
- aarch64_emit_move (low_dst, gen_lowpart (word_mode, src));
- }
- else
- {
- aarch64_emit_move (low_dst, gen_lowpart (word_mode, src));
- aarch64_emit_move (gen_highpart (word_mode, dst),
- gen_highpart_mode (word_mode, TImode, src));
- }
-}
-
-bool
-aarch64_split_128bit_move_p (rtx dst, rtx src)
-{
- return (! REG_P (src)
- || ! (FP_REGNUM_P (REGNO (dst)) && FP_REGNUM_P (REGNO (src))));
-}
-
-static rtx
-aarch64_force_temporary (rtx x, rtx value)
-{
- if (can_create_pseudo_p ())
- return force_reg (Pmode, value);
- else
- {
- x = aarch64_emit_move (x, value);
- return x;
- }
-}
-
-
-static rtx
-aarch64_add_offset (enum machine_mode mode, rtx temp, rtx reg, HOST_WIDE_INT offset)
-{
- if (!aarch64_plus_immediate (GEN_INT (offset), DImode))
- {
- rtx high;
- /* Load the full offset into a register. This
- might be improvable in the future. */
- high = GEN_INT (offset);
- offset = 0;
- high = aarch64_force_temporary (temp, high);
- reg = aarch64_force_temporary (temp, gen_rtx_PLUS (Pmode, high, reg));
- }
- return plus_constant (mode, reg, offset);
-}
-
-void
-aarch64_expand_mov_immediate (rtx dest, rtx imm)
-{
- enum machine_mode mode = GET_MODE (dest);
- unsigned HOST_WIDE_INT mask;
- int i;
- bool first;
- unsigned HOST_WIDE_INT val;
- bool subtargets;
- rtx subtarget;
- int one_match, zero_match;
-
- gcc_assert (mode == SImode || mode == DImode);
-
- /* Check on what type of symbol it is. */
- if (GET_CODE (imm) == SYMBOL_REF
- || GET_CODE (imm) == LABEL_REF
- || GET_CODE (imm) == CONST)
- {
- rtx mem, base, offset;
- enum aarch64_symbol_type sty;
-
- /* If we have (const (plus symbol offset)), separate out the offset
- before we start classifying the symbol. */
- split_const (imm, &base, &offset);
-
- sty = aarch64_classify_symbol (base, SYMBOL_CONTEXT_ADR);
- switch (sty)
- {
- case SYMBOL_FORCE_TO_MEM:
- if (offset != const0_rtx
- && targetm.cannot_force_const_mem (mode, imm))
- {
- gcc_assert(can_create_pseudo_p ());
- base = aarch64_force_temporary (dest, base);
- base = aarch64_add_offset (mode, NULL, base, INTVAL (offset));
- aarch64_emit_move (dest, base);
- return;
- }
- mem = force_const_mem (mode, imm);
- gcc_assert (mem);
- emit_insn (gen_rtx_SET (VOIDmode, dest, mem));
- return;
-
- case SYMBOL_SMALL_TLSGD:
- case SYMBOL_SMALL_TLSDESC:
- case SYMBOL_SMALL_GOTTPREL:
- case SYMBOL_SMALL_GOT:
- if (offset != const0_rtx)
- {
- gcc_assert(can_create_pseudo_p ());
- base = aarch64_force_temporary (dest, base);
- base = aarch64_add_offset (mode, NULL, base, INTVAL (offset));
- aarch64_emit_move (dest, base);
- return;
- }
- /* FALLTHRU */
-
- case SYMBOL_SMALL_TPREL:
- case SYMBOL_SMALL_ABSOLUTE:
- aarch64_load_symref_appropriately (dest, imm, sty);
- return;
-
- default:
- gcc_unreachable ();
- }
- }
-
- if (CONST_INT_P (imm) && aarch64_move_imm (INTVAL (imm), mode))
- {
- emit_insn (gen_rtx_SET (VOIDmode, dest, imm));
- return;
- }
-
- if (!CONST_INT_P (imm))
- {
- if (GET_CODE (imm) == HIGH)
- emit_insn (gen_rtx_SET (VOIDmode, dest, imm));
- else
- {
- rtx mem = force_const_mem (mode, imm);
- gcc_assert (mem);
- emit_insn (gen_rtx_SET (VOIDmode, dest, mem));
- }
-
- return;
- }
-
- if (mode == SImode)
- {
- /* We know we can't do this in 1 insn, and we must be able to do it
- in two; so don't mess around looking for sequences that don't buy
- us anything. */
- emit_insn (gen_rtx_SET (VOIDmode, dest, GEN_INT (INTVAL (imm) & 0xffff)));
- emit_insn (gen_insv_immsi (dest, GEN_INT (16),
- GEN_INT ((INTVAL (imm) >> 16) & 0xffff)));
- return;
- }
-
- /* Remaining cases are all for DImode. */
-
- val = INTVAL (imm);
- subtargets = optimize && can_create_pseudo_p ();
-
- one_match = 0;
- zero_match = 0;
- mask = 0xffff;
-
- for (i = 0; i < 64; i += 16, mask <<= 16)
- {
- if ((val & mask) == 0)
- zero_match++;
- else if ((val & mask) == mask)
- one_match++;
- }
-
- if (one_match == 2)
- {
- mask = 0xffff;
- for (i = 0; i < 64; i += 16, mask <<= 16)
- {
- if ((val & mask) != mask)
- {
- emit_insn (gen_rtx_SET (VOIDmode, dest, GEN_INT (val | mask)));
- emit_insn (gen_insv_immdi (dest, GEN_INT (i),
- GEN_INT ((val >> i) & 0xffff)));
- return;
- }
- }
- gcc_unreachable ();
- }
-
- if (zero_match == 2)
- goto simple_sequence;
-
- mask = 0x0ffff0000UL;
- for (i = 16; i < 64; i += 16, mask <<= 16)
- {
- HOST_WIDE_INT comp = mask & ~(mask - 1);
-
- if (aarch64_uimm12_shift (val - (val & mask)))
- {
- subtarget = subtargets ? gen_reg_rtx (DImode) : dest;
-
- emit_insn (gen_rtx_SET (VOIDmode, subtarget, GEN_INT (val & mask)));
- emit_insn (gen_adddi3 (dest, subtarget,
- GEN_INT (val - (val & mask))));
- return;
- }
- else if (aarch64_uimm12_shift (-(val - ((val + comp) & mask))))
- {
- subtarget = subtargets ? gen_reg_rtx (DImode) : dest;
-
- emit_insn (gen_rtx_SET (VOIDmode, subtarget,
- GEN_INT ((val + comp) & mask)));
- emit_insn (gen_adddi3 (dest, subtarget,
- GEN_INT (val - ((val + comp) & mask))));
- return;
- }
- else if (aarch64_uimm12_shift (val - ((val - comp) | ~mask)))
- {
- subtarget = subtargets ? gen_reg_rtx (DImode) : dest;
-
- emit_insn (gen_rtx_SET (VOIDmode, subtarget,
- GEN_INT ((val - comp) | ~mask)));
- emit_insn (gen_adddi3 (dest, subtarget,
- GEN_INT (val - ((val - comp) | ~mask))));
- return;
- }
- else if (aarch64_uimm12_shift (-(val - (val | ~mask))))
- {
- subtarget = subtargets ? gen_reg_rtx (DImode) : dest;
-
- emit_insn (gen_rtx_SET (VOIDmode, subtarget,
- GEN_INT (val | ~mask)));
- emit_insn (gen_adddi3 (dest, subtarget,
- GEN_INT (val - (val | ~mask))));
- return;
- }
- }
-
- /* See if we can do it by arithmetically combining two
- immediates. */
- for (i = 0; i < AARCH64_NUM_BITMASKS; i++)
- {
- int j;
- mask = 0xffff;
-
- if (aarch64_uimm12_shift (val - aarch64_bitmasks[i])
- || aarch64_uimm12_shift (-val + aarch64_bitmasks[i]))
- {
- subtarget = subtargets ? gen_reg_rtx (DImode) : dest;
- emit_insn (gen_rtx_SET (VOIDmode, subtarget,
- GEN_INT (aarch64_bitmasks[i])));
- emit_insn (gen_adddi3 (dest, subtarget,
- GEN_INT (val - aarch64_bitmasks[i])));
- return;
- }
-
- for (j = 0; j < 64; j += 16, mask <<= 16)
- {
- if ((aarch64_bitmasks[i] & ~mask) == (val & ~mask))
- {
- emit_insn (gen_rtx_SET (VOIDmode, dest,
- GEN_INT (aarch64_bitmasks[i])));
- emit_insn (gen_insv_immdi (dest, GEN_INT (j),
- GEN_INT ((val >> j) & 0xffff)));
- return;
- }
- }
- }
-
- /* See if we can do it by logically combining two immediates. */
- for (i = 0; i < AARCH64_NUM_BITMASKS; i++)
- {
- if ((aarch64_bitmasks[i] & val) == aarch64_bitmasks[i])
- {
- int j;
-
- for (j = i + 1; j < AARCH64_NUM_BITMASKS; j++)
- if (val == (aarch64_bitmasks[i] | aarch64_bitmasks[j]))
- {
- subtarget = subtargets ? gen_reg_rtx (mode) : dest;
- emit_insn (gen_rtx_SET (VOIDmode, subtarget,
- GEN_INT (aarch64_bitmasks[i])));
- emit_insn (gen_iordi3 (dest, subtarget,
- GEN_INT (aarch64_bitmasks[j])));
- return;
- }
- }
- else if ((val & aarch64_bitmasks[i]) == val)
- {
- int j;
-
- for (j = i + 1; j < AARCH64_NUM_BITMASKS; j++)
- if (val == (aarch64_bitmasks[j] & aarch64_bitmasks[i]))
- {
-
- subtarget = subtargets ? gen_reg_rtx (mode) : dest;
- emit_insn (gen_rtx_SET (VOIDmode, subtarget,
- GEN_INT (aarch64_bitmasks[j])));
- emit_insn (gen_anddi3 (dest, subtarget,
- GEN_INT (aarch64_bitmasks[i])));
- return;
- }
- }
- }
-
- simple_sequence:
- first = true;
- mask = 0xffff;
- for (i = 0; i < 64; i += 16, mask <<= 16)
- {
- if ((val & mask) != 0)
- {
- if (first)
- {
- emit_insn (gen_rtx_SET (VOIDmode, dest,
- GEN_INT (val & mask)));
- first = false;
- }
- else
- emit_insn (gen_insv_immdi (dest, GEN_INT (i),
- GEN_INT ((val >> i) & 0xffff)));
- }
- }
-}
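The simple_sequence fallback above materialises a DImode constant one 16-bit halfword at a time (a mov for the first non-zero halfword, movk for the rest). A standalone sketch of that decomposition, for illustration only:

    #include <stdint.h>
    #include <stdio.h>

    /* Print the halfword moves the simple_sequence loop would emit for VAL,
       e.g. 0x1234000056780000 -> mov #0x5678, lsl 16; movk #0x1234, lsl 48.  */
    static void
    print_halfwords (uint64_t val)
    {
      int i, first = 1;

      for (i = 0; i < 64; i += 16)
        {
          uint64_t chunk = (val >> i) & 0xffff;

          if (chunk != 0)
            {
              printf ("%s\t#0x%04llx, lsl %d\n", first ? "mov" : "movk",
                      (unsigned long long) chunk, i);
              first = 0;
            }
        }
    }

    int
    main (void)
    {
      print_halfwords (0x1234000056780000ULL);
      return 0;
    }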
-
-static bool
-aarch64_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
-{
- /* Indirect calls are not currently supported. */
- if (decl == NULL)
- return false;
-
- /* Cannot tail-call to long-calls, since these are outside of the
- range of a branch instruction (we could handle this if we added
- support for indirect tail-calls). */
- if (aarch64_decl_is_long_call_p (decl))
- return false;
-
- return true;
-}
-
-/* Implement TARGET_PASS_BY_REFERENCE. */
-
-static bool
-aarch64_pass_by_reference (cumulative_args_t pcum ATTRIBUTE_UNUSED,
- enum machine_mode mode,
- const_tree type,
- bool named ATTRIBUTE_UNUSED)
-{
- HOST_WIDE_INT size;
- enum machine_mode dummymode;
- int nregs;
-
- /* GET_MODE_SIZE (BLKmode) is useless since it is 0. */
- size = (mode == BLKmode && type)
- ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
-
- if (type)
- {
- /* Arrays always passed by reference. */
- if (TREE_CODE (type) == ARRAY_TYPE)
- return true;
- /* Other aggregates based on their size. */
- if (AGGREGATE_TYPE_P (type))
- size = int_size_in_bytes (type);
- }
-
- /* Variable sized arguments are always passed by reference. */
- if (size < 0)
- return true;
-
- /* Can this be a candidate to be passed in fp/simd register(s)? */
- if (aarch64_vfp_is_call_or_return_candidate (mode, type,
- &dummymode, &nregs,
- NULL))
- return false;
-
- /* Arguments which are variable sized or larger than 2 registers are
- passed by reference unless they are homogeneous floating-point
- aggregates. */
- return size > 2 * UNITS_PER_WORD;
-}
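For illustration of the size rule above (the declarations are examples only): aggregates larger than two registers go by reference unless they form an HFA/HVA, which stays in SIMD registers.

    struct big  { long a, b, c; };       /* 24 bytes, not an HFA: by reference.  */
    struct pair { long a, b; };          /* 16 bytes: passed in x0/x1.  */
    struct hfa  { double a, b, c, d; };  /* HFA of four doubles: passed in d0-d3.  */

    long   take_big  (struct big s)  { return s.a; }
    long   take_pair (struct pair s) { return s.a; }
    double take_hfa  (struct hfa s)  { return s.a; }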
-
-/* Return TRUE if VALTYPE is padded to its least significant bits. */
-static bool
-aarch64_return_in_msb (const_tree valtype)
-{
- enum machine_mode dummy_mode;
- int dummy_int;
-
- /* Never happens in little-endian mode. */
- if (!BYTES_BIG_ENDIAN)
- return false;
-
- /* Only composite types smaller than or equal to 16 bytes can
- be potentially returned in registers. */
- if (!aarch64_composite_type_p (valtype, TYPE_MODE (valtype))
- || int_size_in_bytes (valtype) <= 0
- || int_size_in_bytes (valtype) > 16)
- return false;
-
- /* But not a composite that is an HFA (Homogeneous Floating-point Aggregate)
- or an HVA (Homogeneous Short-Vector Aggregate); such a special composite
- is always passed/returned in the least significant bits of fp/simd
- register(s). */
- if (aarch64_vfp_is_call_or_return_candidate (TYPE_MODE (valtype), valtype,
- &dummy_mode, &dummy_int, NULL))
- return false;
-
- return true;
-}
-
-/* Implement TARGET_FUNCTION_VALUE.
- Define how to find the value returned by a function. */
-
-static rtx
-aarch64_function_value (const_tree type, const_tree func,
- bool outgoing ATTRIBUTE_UNUSED)
-{
- enum machine_mode mode;
- int unsignedp;
- int count;
- enum machine_mode ag_mode;
-
- mode = TYPE_MODE (type);
- if (INTEGRAL_TYPE_P (type))
- mode = promote_function_mode (type, mode, &unsignedp, func, 1);
-
- if (aarch64_return_in_msb (type))
- {
- HOST_WIDE_INT size = int_size_in_bytes (type);
-
- if (size % UNITS_PER_WORD != 0)
- {
- size += UNITS_PER_WORD - size % UNITS_PER_WORD;
- mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
- }
- }
-
- if (aarch64_vfp_is_call_or_return_candidate (mode, type,
- &ag_mode, &count, NULL))
- {
- if (!aarch64_composite_type_p (type, mode))
- {
- gcc_assert (count == 1 && mode == ag_mode);
- return gen_rtx_REG (mode, V0_REGNUM);
- }
- else
- {
- int i;
- rtx par;
-
- par = gen_rtx_PARALLEL (mode, rtvec_alloc (count));
- for (i = 0; i < count; i++)
- {
- rtx tmp = gen_rtx_REG (ag_mode, V0_REGNUM + i);
- tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp,
- GEN_INT (i * GET_MODE_SIZE (ag_mode)));
- XVECEXP (par, 0, i) = tmp;
- }
- return par;
- }
- }
- else
- return gen_rtx_REG (mode, R0_REGNUM);
-}
-
-/* Implements TARGET_FUNCTION_VALUE_REGNO_P.
- Return true if REGNO is the number of a hard register in which the values
- of a called function may come back. */
-
-static bool
-aarch64_function_value_regno_p (const unsigned int regno)
-{
- /* Maximum of 16 bytes can be returned in the general registers. Examples
- of 16-byte return values are: 128-bit integers and 16-byte small
- structures (excluding homogeneous floating-point aggregates). */
- if (regno == R0_REGNUM || regno == R1_REGNUM)
- return true;
-
- /* Up to four fp/simd registers can return a function value, e.g. a
- homogeneous floating-point aggregate having four members. */
- if (regno >= V0_REGNUM && regno < V0_REGNUM + HA_MAX_NUM_FLDS)
- return !TARGET_GENERAL_REGS_ONLY;
-
- return false;
-}
-
-/* Implement TARGET_RETURN_IN_MEMORY.
-
- If the type T of the result of a function is such that
- void func (T arg)
- would require that arg be passed as a value in a register (or set of
- registers) according to the parameter passing rules, then the result
- is returned in the same registers as would be used for such an
- argument. */
-
-static bool
-aarch64_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
-{
- HOST_WIDE_INT size;
- enum machine_mode ag_mode;
- int count;
-
- if (!AGGREGATE_TYPE_P (type)
- && TREE_CODE (type) != COMPLEX_TYPE
- && TREE_CODE (type) != VECTOR_TYPE)
- /* Simple scalar types always returned in registers. */
- return false;
-
- if (aarch64_vfp_is_call_or_return_candidate (TYPE_MODE (type),
- type,
- &ag_mode,
- &count,
- NULL))
- return false;
-
- /* Types larger than 2 registers returned in memory. */
- size = int_size_in_bytes (type);
- return (size < 0 || size > 2 * UNITS_PER_WORD);
-}
-
-static bool
-aarch64_vfp_is_call_candidate (cumulative_args_t pcum_v, enum machine_mode mode,
- const_tree type, int *nregs)
-{
- CUMULATIVE_ARGS *pcum = get_cumulative_args (pcum_v);
- return aarch64_vfp_is_call_or_return_candidate (mode,
- type,
- &pcum->aapcs_vfp_rmode,
- nregs,
- NULL);
-}
-
-/* Given MODE and TYPE of a function argument, return the alignment in
- bits. The idea is to suppress any stronger alignment requested by
- the user and opt for the natural alignment (specified in AAPCS64 \S 4.1).
- This is a helper function for local use only. */
-
-static unsigned int
-aarch64_function_arg_alignment (enum machine_mode mode, const_tree type)
-{
- unsigned int alignment;
-
- if (type)
- {
- if (!integer_zerop (TYPE_SIZE (type)))
- {
- if (TYPE_MODE (type) == mode)
- alignment = TYPE_ALIGN (type);
- else
- alignment = GET_MODE_ALIGNMENT (mode);
- }
- else
- alignment = 0;
- }
- else
- alignment = GET_MODE_ALIGNMENT (mode);
-
- return alignment;
-}
-
-/* Layout a function argument according to the AAPCS64 rules. The rule
- numbers refer to the rule numbers in the AAPCS64. */
-
-static void
-aarch64_layout_arg (cumulative_args_t pcum_v, enum machine_mode mode,
- const_tree type,
- bool named ATTRIBUTE_UNUSED)
-{
- CUMULATIVE_ARGS *pcum = get_cumulative_args (pcum_v);
- int ncrn, nvrn, nregs;
- bool allocate_ncrn, allocate_nvrn;
-
- /* We need to do this once per argument. */
- if (pcum->aapcs_arg_processed)
- return;
-
- pcum->aapcs_arg_processed = true;
-
- allocate_ncrn = (type) ? !(FLOAT_TYPE_P (type)) : !FLOAT_MODE_P (mode);
- allocate_nvrn = aarch64_vfp_is_call_candidate (pcum_v,
- mode,
- type,
- &nregs);
-
- /* allocate_ncrn may be a false positive, but allocate_nvrn is quite reliable.
- The following code thus handles passing by SIMD/FP registers first. */
-
- nvrn = pcum->aapcs_nvrn;
-
- /* C1 - C5 for floating point, homogeneous floating-point aggregates (HFA)
- and homogeneous short-vector aggregates (HVA). */
- if (allocate_nvrn)
- {
- if (nvrn + nregs <= NUM_FP_ARG_REGS)
- {
- pcum->aapcs_nextnvrn = nvrn + nregs;
- if (!aarch64_composite_type_p (type, mode))
- {
- gcc_assert (nregs == 1);
- pcum->aapcs_reg = gen_rtx_REG (mode, V0_REGNUM + nvrn);
- }
- else
- {
- rtx par;
- int i;
- par = gen_rtx_PARALLEL (mode, rtvec_alloc (nregs));
- for (i = 0; i < nregs; i++)
- {
- rtx tmp = gen_rtx_REG (pcum->aapcs_vfp_rmode,
- V0_REGNUM + nvrn + i);
- tmp = gen_rtx_EXPR_LIST
- (VOIDmode, tmp,
- GEN_INT (i * GET_MODE_SIZE (pcum->aapcs_vfp_rmode)));
- XVECEXP (par, 0, i) = tmp;
- }
- pcum->aapcs_reg = par;
- }
- return;
- }
- else
- {
- /* C.3 NSRN is set to 8. */
- pcum->aapcs_nextnvrn = NUM_FP_ARG_REGS;
- goto on_stack;
- }
- }
-
- ncrn = pcum->aapcs_ncrn;
- nregs = ((type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode))
- + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
-
-
- /* C6 - C9, though the sign and zero extension semantics are
- handled elsewhere. This is the case where the argument fits
- entirely in general registers. */
- if (allocate_ncrn && (ncrn + nregs <= NUM_ARG_REGS))
- {
- unsigned int alignment = aarch64_function_arg_alignment (mode, type);
-
- gcc_assert (nregs == 0 || nregs == 1 || nregs == 2);
-
- /* C.8: if the argument has an alignment of 16 bytes then the NGRN is
- rounded up to the next even number. */
- if (nregs == 2 && alignment == 16 * BITS_PER_UNIT && ncrn % 2)
- {
- ++ncrn;
- gcc_assert (ncrn + nregs <= NUM_ARG_REGS);
- }
- /* NREGS can be 0 when e.g. an empty structure is to be passed.
- A reg is still generated for it, but the caller should be smart
- enough not to use it. */
- if (nregs == 0 || nregs == 1 || GET_MODE_CLASS (mode) == MODE_INT)
- {
- pcum->aapcs_reg = gen_rtx_REG (mode, R0_REGNUM + ncrn);
- }
- else
- {
- rtx par;
- int i;
-
- par = gen_rtx_PARALLEL (mode, rtvec_alloc (nregs));
- for (i = 0; i < nregs; i++)
- {
- rtx tmp = gen_rtx_REG (word_mode, R0_REGNUM + ncrn + i);
- tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp,
- GEN_INT (i * UNITS_PER_WORD));
- XVECEXP (par, 0, i) = tmp;
- }
- pcum->aapcs_reg = par;
- }
-
- pcum->aapcs_nextncrn = ncrn + nregs;
- return;
- }
-
- /* C.11 */
- pcum->aapcs_nextncrn = NUM_ARG_REGS;
-
- /* The argument is passed on the stack; record the needed number of words for
- this argument (we can re-use NREGS) and align the total size if
- necessary. */
-on_stack:
- pcum->aapcs_stack_words = nregs;
- if (aarch64_function_arg_alignment (mode, type) == 16 * BITS_PER_UNIT)
- pcum->aapcs_stack_size = AARCH64_ROUND_UP (pcum->aapcs_stack_size,
- 16 / UNITS_PER_WORD) + 1;
- return;
-}
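
A standalone sketch of the C.8 rounding step handled above (not GCC code; the sketch_* names are hypothetical, and 128 bits mirrors the 16 * BITS_PER_UNIT test):

#include <assert.h>

/* Round the next general register number (NGRN) up to an even value
   when a two-register argument demands 16-byte alignment.  */
static int
sketch_align_ngrn (int ncrn, int nregs, int alignment_bits)
{
  if (nregs == 2 && alignment_bits == 128 && (ncrn % 2) != 0)
    ++ncrn;                       /* Skip the odd-numbered register.  */
  return ncrn;
}

int
main (void)
{
  assert (sketch_align_ngrn (1, 2, 128) == 2); /* e.g. an __int128 after one int argument.  */
  assert (sketch_align_ngrn (2, 2, 128) == 2); /* Already even: unchanged.  */
  assert (sketch_align_ngrn (1, 2, 64) == 1);  /* 8-byte-aligned pair: unchanged.  */
  return 0;
}
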
-
-/* Implement TARGET_FUNCTION_ARG. */
-
-static rtx
-aarch64_function_arg (cumulative_args_t pcum_v, enum machine_mode mode,
- const_tree type, bool named)
-{
- CUMULATIVE_ARGS *pcum = get_cumulative_args (pcum_v);
- gcc_assert (pcum->pcs_variant == ARM_PCS_AAPCS64);
-
- if (mode == VOIDmode)
- return NULL_RTX;
-
- aarch64_layout_arg (pcum_v, mode, type, named);
- return pcum->aapcs_reg;
-}
-
-void
-aarch64_init_cumulative_args (CUMULATIVE_ARGS *pcum,
- const_tree fntype ATTRIBUTE_UNUSED,
- rtx libname ATTRIBUTE_UNUSED,
- const_tree fndecl ATTRIBUTE_UNUSED,
- unsigned n_named ATTRIBUTE_UNUSED)
-{
- pcum->aapcs_ncrn = 0;
- pcum->aapcs_nvrn = 0;
- pcum->aapcs_nextncrn = 0;
- pcum->aapcs_nextnvrn = 0;
- pcum->pcs_variant = ARM_PCS_AAPCS64;
- pcum->aapcs_reg = NULL_RTX;
- pcum->aapcs_arg_processed = false;
- pcum->aapcs_stack_words = 0;
- pcum->aapcs_stack_size = 0;
-
- return;
-}
-
-static void
-aarch64_function_arg_advance (cumulative_args_t pcum_v,
- enum machine_mode mode,
- const_tree type,
- bool named)
-{
- CUMULATIVE_ARGS *pcum = get_cumulative_args (pcum_v);
- if (pcum->pcs_variant == ARM_PCS_AAPCS64)
- {
- aarch64_layout_arg (pcum_v, mode, type, named);
- gcc_assert ((pcum->aapcs_reg != NULL_RTX)
- != (pcum->aapcs_stack_words != 0));
- pcum->aapcs_arg_processed = false;
- pcum->aapcs_ncrn = pcum->aapcs_nextncrn;
- pcum->aapcs_nvrn = pcum->aapcs_nextnvrn;
- pcum->aapcs_stack_size += pcum->aapcs_stack_words;
- pcum->aapcs_stack_words = 0;
- pcum->aapcs_reg = NULL_RTX;
- }
-}
-
-bool
-aarch64_function_arg_regno_p (unsigned regno)
-{
- return ((GP_REGNUM_P (regno) && regno < R0_REGNUM + NUM_ARG_REGS)
- || (FP_REGNUM_P (regno) && regno < V0_REGNUM + NUM_FP_ARG_REGS));
-}
-
-/* Implement FUNCTION_ARG_BOUNDARY. Every parameter gets at least
- PARM_BOUNDARY bits of alignment, but will be given anything up
- to STACK_BOUNDARY bits if the type requires it. This makes sure
- that both before and after the layout of each argument, the Next
- Stacked Argument Address (NSAA) will have a minimum alignment of
- 8 bytes. */
-
-static unsigned int
-aarch64_function_arg_boundary (enum machine_mode mode, const_tree type)
-{
- unsigned int alignment = aarch64_function_arg_alignment (mode, type);
-
- if (alignment < PARM_BOUNDARY)
- alignment = PARM_BOUNDARY;
- if (alignment > STACK_BOUNDARY)
- alignment = STACK_BOUNDARY;
- return alignment;
-}
-
-/* For use by FUNCTION_ARG_PADDING (MODE, TYPE).
-
- Return true if an argument passed on the stack should be padded upwards,
- i.e. if the least-significant byte of the stack slot has useful data.
-
- Small aggregate types are placed in the lowest memory address.
-
- The related parameter passing rules are B.4, C.3, C.5 and C.14. */
-
-bool
-aarch64_pad_arg_upward (enum machine_mode mode, const_tree type)
-{
- /* On little-endian targets, the least significant byte of every stack
- argument is passed at the lowest byte address of the stack slot. */
- if (!BYTES_BIG_ENDIAN)
- return true;
-
- /* Otherwise, integral types and floating point types are padded downward:
- the least significant byte of a stack argument is passed at the highest
- byte address of the stack slot. */
- if (type
- ? (INTEGRAL_TYPE_P (type) || SCALAR_FLOAT_TYPE_P (type))
- : (SCALAR_INT_MODE_P (mode) || SCALAR_FLOAT_MODE_P (mode)))
- return false;
-
- /* Everything else padded upward, i.e. data in first byte of stack slot. */
- return true;
-}
-
-/* Similarly, for use by BLOCK_REG_PADDING (MODE, TYPE, FIRST).
-
- It specifies padding for the last (possibly the only)
- element of a block move between registers and memory. Viewing
- the block as it lies in memory, padding upward means that the
- last element is padded after its most significant byte, while
- with downward padding the last element is padded on its least
- significant byte side.
-
- Small aggregates and small complex types are always padded
- upwards.
-
- We don't need to worry about homogeneous floating-point or
- short-vector aggregates; their move is not affected by the
- padding direction determined here. Regardless of endianness,
- each element of such an aggregate is put in the least
- significant bits of a fp/simd register.
-
- Return !BYTES_BIG_ENDIAN if the least significant byte of the
- register has useful data, and return the opposite if the most
- significant byte does. */
-
-bool
-aarch64_pad_reg_upward (enum machine_mode mode, const_tree type,
- bool first ATTRIBUTE_UNUSED)
-{
-
- /* Small composite types are always padded upward. */
- if (BYTES_BIG_ENDIAN && aarch64_composite_type_p (type, mode))
- {
- HOST_WIDE_INT size = (type ? int_size_in_bytes (type)
- : GET_MODE_SIZE (mode));
- if (size < 2 * UNITS_PER_WORD)
- return true;
- }
-
- /* Otherwise, use the default padding. */
- return !BYTES_BIG_ENDIAN;
-}
-
-static enum machine_mode
-aarch64_libgcc_cmp_return_mode (void)
-{
- return SImode;
-}
-
-static bool
-aarch64_frame_pointer_required (void)
-{
- /* If the function contains dynamic stack allocations, we need to
- use the frame pointer to access the static parts of the frame. */
- if (cfun->calls_alloca)
- return true;
-
- /* We may have turned flag_omit_frame_pointer on in order to have this
- function called; if we did, we also set the 'faked_omit_frame_pointer'
- flag, which we check here.
- If flag_omit_frame_pointer was really set normally, then we return false
- (no frame pointer required) in all cases. */
-
- if (flag_omit_frame_pointer && !faked_omit_frame_pointer)
- return false;
- else if (flag_omit_leaf_frame_pointer)
- return !crtl->is_leaf;
- return true;
-}
-
-/* Mark the registers that need to be saved by the callee and calculate
- the size of the callee-saved registers area and frame record (both FP
- and LR may be omitted). */
-static void
-aarch64_layout_frame (void)
-{
- HOST_WIDE_INT offset = 0;
- int regno;
-
- if (reload_completed && cfun->machine->frame.laid_out)
- return;
-
- cfun->machine->frame.fp_lr_offset = 0;
-
- /* First mark all the registers that really need to be saved... */
- for (regno = R0_REGNUM; regno <= R30_REGNUM; regno++)
- cfun->machine->frame.reg_offset[regno] = -1;
-
- for (regno = V0_REGNUM; regno <= V31_REGNUM; regno++)
- cfun->machine->frame.reg_offset[regno] = -1;
-
- /* ... that includes the eh data registers (if needed)... */
- if (crtl->calls_eh_return)
- for (regno = 0; EH_RETURN_DATA_REGNO (regno) != INVALID_REGNUM; regno++)
- cfun->machine->frame.reg_offset[EH_RETURN_DATA_REGNO (regno)] = 0;
-
- /* ... and any callee saved register that dataflow says is live. */
- for (regno = R0_REGNUM; regno <= R30_REGNUM; regno++)
- if (df_regs_ever_live_p (regno)
- && !call_used_regs[regno])
- cfun->machine->frame.reg_offset[regno] = 0;
-
- for (regno = V0_REGNUM; regno <= V31_REGNUM; regno++)
- if (df_regs_ever_live_p (regno)
- && !call_used_regs[regno])
- cfun->machine->frame.reg_offset[regno] = 0;
-
- if (frame_pointer_needed)
- {
- cfun->machine->frame.reg_offset[R30_REGNUM] = 0;
- cfun->machine->frame.reg_offset[R29_REGNUM] = 0;
- cfun->machine->frame.hardfp_offset = 2 * UNITS_PER_WORD;
- }
-
- /* Now assign stack slots for them. */
- for (regno = R0_REGNUM; regno <= R28_REGNUM; regno++)
- if (cfun->machine->frame.reg_offset[regno] != -1)
- {
- cfun->machine->frame.reg_offset[regno] = offset;
- offset += UNITS_PER_WORD;
- }
-
- for (regno = V0_REGNUM; regno <= V31_REGNUM; regno++)
- if (cfun->machine->frame.reg_offset[regno] != -1)
- {
- cfun->machine->frame.reg_offset[regno] = offset;
- offset += UNITS_PER_WORD;
- }
-
- if (frame_pointer_needed)
- {
- cfun->machine->frame.reg_offset[R29_REGNUM] = offset;
- offset += UNITS_PER_WORD;
- cfun->machine->frame.fp_lr_offset = UNITS_PER_WORD;
- }
-
- if (cfun->machine->frame.reg_offset[R30_REGNUM] != -1)
- {
- cfun->machine->frame.reg_offset[R30_REGNUM] = offset;
- offset += UNITS_PER_WORD;
- cfun->machine->frame.fp_lr_offset += UNITS_PER_WORD;
- }
-
- cfun->machine->frame.padding0 =
- (AARCH64_ROUND_UP (offset, STACK_BOUNDARY / BITS_PER_UNIT) - offset);
- offset = AARCH64_ROUND_UP (offset, STACK_BOUNDARY / BITS_PER_UNIT);
-
- cfun->machine->frame.saved_regs_size = offset;
- cfun->machine->frame.laid_out = true;
-}
-
-/* Make the last instruction frame-related and note that it performs
- the operation described by FRAME_PATTERN. */
-
-static void
-aarch64_set_frame_expr (rtx frame_pattern)
-{
- rtx insn;
-
- insn = get_last_insn ();
- RTX_FRAME_RELATED_P (insn) = 1;
- RTX_FRAME_RELATED_P (frame_pattern) = 1;
- REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR,
- frame_pattern,
- REG_NOTES (insn));
-}
-
-static bool
-aarch64_register_saved_on_entry (int regno)
-{
- return cfun->machine->frame.reg_offset[regno] != -1;
-}
-
-
-static void
-aarch64_save_or_restore_fprs (int start_offset, int increment,
- bool restore, rtx base_rtx)
-
-{
- unsigned regno;
- unsigned regno2;
- rtx insn;
- rtx (*gen_mem_ref)(enum machine_mode, rtx) = (frame_pointer_needed)? gen_frame_mem : gen_rtx_MEM;
-
-
- for (regno = V0_REGNUM; regno <= V31_REGNUM; regno++)
- {
- if (aarch64_register_saved_on_entry (regno))
- {
- rtx mem;
- mem = gen_mem_ref (DFmode,
- plus_constant (Pmode,
- base_rtx,
- start_offset));
-
- for (regno2 = regno + 1;
- regno2 <= V31_REGNUM
- && !aarch64_register_saved_on_entry (regno2);
- regno2++)
- {
- /* Empty loop. */
- }
- if (regno2 <= V31_REGNUM &&
- aarch64_register_saved_on_entry (regno2))
- {
- rtx mem2;
- /* Next highest register to be saved. */
- mem2 = gen_mem_ref (DFmode,
- plus_constant
- (Pmode,
- base_rtx,
- start_offset + increment));
- if (restore == false)
- {
- insn = emit_insn
- ( gen_store_pairdf (mem, gen_rtx_REG (DFmode, regno),
- mem2, gen_rtx_REG (DFmode, regno2)));
-
- }
- else
- {
- insn = emit_insn
- ( gen_load_pairdf (gen_rtx_REG (DFmode, regno), mem,
- gen_rtx_REG (DFmode, regno2), mem2));
-
- add_reg_note (insn, REG_CFA_RESTORE, gen_rtx_REG (DFmode, regno));
- add_reg_note (insn, REG_CFA_RESTORE, gen_rtx_REG (DFmode, regno2));
- }
-
- /* The first part of a frame-related parallel insn
- is always assumed to be relevant to the frame
- calculations; subsequent parts are only
- frame-related if explicitly marked. */
- RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0,
- 1)) = 1;
- regno = regno2;
- start_offset += increment * 2;
- }
- else
- {
- if (restore == false)
- insn = emit_move_insn (mem, gen_rtx_REG (DFmode, regno));
- else
- {
- insn = emit_move_insn (gen_rtx_REG (DFmode, regno), mem);
- add_reg_note (insn, REG_CFA_RESTORE, gen_rtx_REG (DImode, regno));
- }
- start_offset += increment;
- }
- RTX_FRAME_RELATED_P (insn) = 1;
- }
- }
-
-}
-
-
- /* Offset from the stack pointer at which the saves and
- restores have to happen. */
-static void
-aarch64_save_or_restore_callee_save_registers (HOST_WIDE_INT offset,
- bool restore)
-{
- rtx insn;
- rtx base_rtx = stack_pointer_rtx;
- HOST_WIDE_INT start_offset = offset;
- HOST_WIDE_INT increment = UNITS_PER_WORD;
- rtx (*gen_mem_ref)(enum machine_mode, rtx) = (frame_pointer_needed)? gen_frame_mem : gen_rtx_MEM;
- unsigned limit = (frame_pointer_needed)? R28_REGNUM: R30_REGNUM;
- unsigned regno;
- unsigned regno2;
-
- for (regno = R0_REGNUM; regno <= limit; regno++)
- {
- if (aarch64_register_saved_on_entry (regno))
- {
- rtx mem;
- mem = gen_mem_ref (Pmode,
- plus_constant (Pmode,
- base_rtx,
- start_offset));
-
- for (regno2 = regno + 1;
- regno2 <= limit
- && !aarch64_register_saved_on_entry (regno2);
- regno2++)
- {
- /* Empty loop. */
- }
- if (regno2 <= limit &&
- aarch64_register_saved_on_entry (regno2))
- {
- rtx mem2;
- /* Next highest register to be saved. */
- mem2 = gen_mem_ref (Pmode,
- plus_constant
- (Pmode,
- base_rtx,
- start_offset + increment));
- if (restore == false)
- {
- insn = emit_insn
- ( gen_store_pairdi (mem, gen_rtx_REG (DImode, regno),
- mem2, gen_rtx_REG (DImode, regno2)));
-
- }
- else
- {
- insn = emit_insn
- ( gen_load_pairdi (gen_rtx_REG (DImode, regno), mem,
- gen_rtx_REG (DImode, regno2), mem2));
-
- add_reg_note (insn, REG_CFA_RESTORE, gen_rtx_REG (DImode, regno));
- add_reg_note (insn, REG_CFA_RESTORE, gen_rtx_REG (DImode, regno2));
- }
-
- /* The first part of a frame-related parallel insn
- is always assumed to be relevant to the frame
- calculations; subsequent parts are only
- frame-related if explicitly marked. */
- RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0,
- 1)) = 1;
- regno = regno2;
- start_offset += increment * 2;
- }
- else
- {
- if (restore == false)
- insn = emit_move_insn (mem, gen_rtx_REG (DImode, regno));
- else
- {
- insn = emit_move_insn (gen_rtx_REG (DImode, regno), mem);
- add_reg_note (insn, REG_CFA_RESTORE, gen_rtx_REG (DImode, regno));
- }
- start_offset += increment;
- }
- RTX_FRAME_RELATED_P (insn) = 1;
- }
- }
-
- aarch64_save_or_restore_fprs (start_offset, increment, restore, base_rtx);
-
-}
-
-/* AArch64 stack frames generated by this compiler look like:
-
- +-------------------------------+
- | |
- | incoming stack arguments |
- | |
- +-------------------------------+ <-- arg_pointer_rtx
- | |
- | callee-allocated save area |
- | for register varargs |
- | |
- +-------------------------------+
- | |
- | local variables |
- | |
- +-------------------------------+ <-- frame_pointer_rtx
- | |
- | callee-saved registers |
- | |
- +-------------------------------+
- | LR' |
- +-------------------------------+
- | FP' |
- P +-------------------------------+ <-- hard_frame_pointer_rtx
- | dynamic allocation |
- +-------------------------------+
- | |
- | outgoing stack arguments |
- | |
- +-------------------------------+ <-- stack_pointer_rtx
-
- Dynamic stack allocations such as alloca insert data at point P.
- They decrease stack_pointer_rtx but leave frame_pointer_rtx and
- hard_frame_pointer_rtx unchanged. */
-
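
A worked sketch of the frame-size arithmetic used by the prologue and epilogue below (not GCC code; the macro mirrors AARCH64_ROUND_UP under the assumption of a 16-byte stack boundary):

#include <assert.h>

#define SKETCH_ROUND_UP(x, align) (((x) + (align) - 1) & ~((align) - 1))

int
main (void)
{
  /* Local/vararg area plus callee-save area plus outgoing arguments,
     rounded up to the stack boundary.  */
  long locals = 40, saved_regs = 16, outgoing = 0;
  long frame_size = SKETCH_ROUND_UP (locals + saved_regs + outgoing, 16);
  assert (frame_size == 64);   /* 56 bytes rounded up to a multiple of 16.  */
  return 0;
}
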
-/* Generate the prologue instructions for entry into a function.
- Establish the stack frame by decreasing the stack pointer with a
- properly calculated size and, if necessary, create a frame record
- filled with the values of LR and previous frame pointer. The
- current FP is also set up if it is in use. */
-
-void
-aarch64_expand_prologue (void)
-{
- /* sub sp, sp, #<frame_size>
- stp {fp, lr}, [sp, #<frame_size> - 16]
- add fp, sp, #<frame_size> - hardfp_offset
- stp {cs_reg}, [fp, #-16] etc.
-
- sub sp, sp, <final_adjustment_if_any>
- */
- HOST_WIDE_INT original_frame_size; /* local variables + vararg save */
- HOST_WIDE_INT frame_size, offset;
- HOST_WIDE_INT fp_offset; /* FP offset from SP */
- rtx insn;
-
- aarch64_layout_frame ();
- original_frame_size = get_frame_size () + cfun->machine->saved_varargs_size;
- gcc_assert ((!cfun->machine->saved_varargs_size || cfun->stdarg)
- && (cfun->stdarg || !cfun->machine->saved_varargs_size));
- frame_size = (original_frame_size + cfun->machine->frame.saved_regs_size
- + crtl->outgoing_args_size);
- offset = frame_size = AARCH64_ROUND_UP (frame_size,
- STACK_BOUNDARY / BITS_PER_UNIT);
-
- if (flag_stack_usage_info)
- current_function_static_stack_size = frame_size;
-
- fp_offset = (offset
- - original_frame_size
- - cfun->machine->frame.saved_regs_size);
-
- /* Store pairs and load pairs have a range of only -512 to 504. */
- if (offset >= 512)
- {
- /* When the frame has a large size, an initial decrease is done on
- the stack pointer to jump over the callee-allocated save area for
- register varargs, the local variable area and/or the callee-saved
- register area. This will allow the pre-index write-back
- store pair instructions to be used for setting up the stack frame
- efficiently. */
- offset = original_frame_size + cfun->machine->frame.saved_regs_size;
- if (offset >= 512)
- offset = cfun->machine->frame.saved_regs_size;
-
- frame_size -= (offset + crtl->outgoing_args_size);
- fp_offset = 0;
-
- if (frame_size >= 0x1000000)
- {
- rtx op0 = gen_rtx_REG (Pmode, IP0_REGNUM);
- emit_move_insn (op0, GEN_INT (-frame_size));
- emit_insn (gen_add2_insn (stack_pointer_rtx, op0));
- aarch64_set_frame_expr (gen_rtx_SET
- (Pmode, stack_pointer_rtx,
- gen_rtx_PLUS (Pmode,
- stack_pointer_rtx,
- GEN_INT (-frame_size))));
- }
- else if (frame_size > 0)
- {
- if ((frame_size & 0xfff) != frame_size)
- {
- insn = emit_insn (gen_add2_insn
- (stack_pointer_rtx,
- GEN_INT (-(frame_size
- & ~(HOST_WIDE_INT)0xfff))));
- RTX_FRAME_RELATED_P (insn) = 1;
- }
- if ((frame_size & 0xfff) != 0)
- {
- insn = emit_insn (gen_add2_insn
- (stack_pointer_rtx,
- GEN_INT (-(frame_size
- & (HOST_WIDE_INT)0xfff))));
- RTX_FRAME_RELATED_P (insn) = 1;
- }
- }
- }
- else
- frame_size = -1;
-
- if (offset > 0)
- {
- /* If the frame pointer is needed, save it and LR first. Make
- the frame pointer point to the location of the old frame
- pointer on the stack. */
- if (frame_pointer_needed)
- {
- rtx mem_fp, mem_lr;
-
- if (fp_offset)
- {
- insn = emit_insn (gen_add2_insn (stack_pointer_rtx,
- GEN_INT (-offset)));
- RTX_FRAME_RELATED_P (insn) = 1;
- aarch64_set_frame_expr (gen_rtx_SET
- (Pmode, stack_pointer_rtx,
- gen_rtx_MINUS (Pmode,
- stack_pointer_rtx,
- GEN_INT (offset))));
- mem_fp = gen_frame_mem (DImode,
- plus_constant (Pmode,
- stack_pointer_rtx,
- fp_offset));
- mem_lr = gen_frame_mem (DImode,
- plus_constant (Pmode,
- stack_pointer_rtx,
- fp_offset
- + UNITS_PER_WORD));
- insn = emit_insn (gen_store_pairdi (mem_fp,
- hard_frame_pointer_rtx,
- mem_lr,
- gen_rtx_REG (DImode,
- LR_REGNUM)));
- }
- else
- {
- insn = emit_insn (gen_storewb_pairdi_di
- (stack_pointer_rtx, stack_pointer_rtx,
- hard_frame_pointer_rtx,
- gen_rtx_REG (DImode, LR_REGNUM),
- GEN_INT (-offset),
- GEN_INT (GET_MODE_SIZE (DImode) - offset)));
- RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 2)) = 1;
- }
-
- /* The first part of a frame-related parallel insn is always
- assumed to be relevant to the frame calculations;
- subsequent parts are only frame-related if explicitly
- marked. */
- RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
- RTX_FRAME_RELATED_P (insn) = 1;
-
- /* Set up frame pointer to point to the location of the
- previous frame pointer on the stack. */
- insn = emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
- stack_pointer_rtx,
- GEN_INT (fp_offset)));
- aarch64_set_frame_expr (gen_rtx_SET
- (Pmode, hard_frame_pointer_rtx,
- gen_rtx_PLUS (Pmode,
- stack_pointer_rtx,
- GEN_INT (fp_offset))));
- RTX_FRAME_RELATED_P (insn) = 1;
- insn = emit_insn (gen_stack_tie (stack_pointer_rtx,
- hard_frame_pointer_rtx));
- }
- else
- {
- insn = emit_insn (gen_add2_insn (stack_pointer_rtx,
- GEN_INT (-offset)));
- RTX_FRAME_RELATED_P (insn) = 1;
- }
-
- aarch64_save_or_restore_callee_save_registers
- (fp_offset + cfun->machine->frame.hardfp_offset, 0);
- }
-
- /* when offset >= 512,
- sub sp, sp, #<outgoing_args_size> */
- if (frame_size > -1)
- {
- if (crtl->outgoing_args_size > 0)
- {
- insn = emit_insn (gen_add2_insn
- (stack_pointer_rtx,
- GEN_INT (- crtl->outgoing_args_size)));
- RTX_FRAME_RELATED_P (insn) = 1;
- }
- }
-}
-
-/* Generate the epilogue instructions for returning from a function. */
-void
-aarch64_expand_epilogue (bool for_sibcall)
-{
- HOST_WIDE_INT original_frame_size, frame_size, offset;
- HOST_WIDE_INT fp_offset;
- rtx insn;
- rtx cfa_reg;
-
- aarch64_layout_frame ();
- original_frame_size = get_frame_size () + cfun->machine->saved_varargs_size;
- frame_size = (original_frame_size + cfun->machine->frame.saved_regs_size
- + crtl->outgoing_args_size);
- offset = frame_size = AARCH64_ROUND_UP (frame_size,
- STACK_BOUNDARY / BITS_PER_UNIT);
-
- fp_offset = (offset
- - original_frame_size
- - cfun->machine->frame.saved_regs_size);
-
- cfa_reg = frame_pointer_needed ? hard_frame_pointer_rtx : stack_pointer_rtx;
-
- /* Store pairs and load pairs have a range of only -512 to 504. */
- if (offset >= 512)
- {
- offset = original_frame_size + cfun->machine->frame.saved_regs_size;
- if (offset >= 512)
- offset = cfun->machine->frame.saved_regs_size;
-
- frame_size -= (offset + crtl->outgoing_args_size);
- fp_offset = 0;
- if (!frame_pointer_needed && crtl->outgoing_args_size > 0)
- {
- insn = emit_insn (gen_add2_insn
- (stack_pointer_rtx,
- GEN_INT (crtl->outgoing_args_size)));
- RTX_FRAME_RELATED_P (insn) = 1;
- }
- }
- else
- frame_size = -1;
-
- /* If there were outgoing arguments or we've done dynamic stack
- allocation, then restore the stack pointer from the frame
- pointer. This is at most one insn and more efficient than using
- GCC's internal mechanism. */
- if (frame_pointer_needed
- && (crtl->outgoing_args_size || cfun->calls_alloca))
- {
- insn = emit_insn (gen_add3_insn (stack_pointer_rtx,
- hard_frame_pointer_rtx,
- GEN_INT (- fp_offset)));
- RTX_FRAME_RELATED_P (insn) = 1;
- /* As SP is set to (FP - fp_offset), according to the rules in
- dwarf2cfi.c:dwarf2out_frame_debug_expr, CFA should be calculated
- from the value of SP from now on. */
- cfa_reg = stack_pointer_rtx;
- }
-
- aarch64_save_or_restore_callee_save_registers
- (fp_offset + cfun->machine->frame.hardfp_offset, 1);
-
- /* Restore the frame pointer and lr if the frame pointer is needed. */
- if (offset > 0)
- {
- if (frame_pointer_needed)
- {
- rtx mem_fp, mem_lr;
-
- if (fp_offset)
- {
- mem_fp = gen_frame_mem (DImode,
- plus_constant (Pmode,
- stack_pointer_rtx,
- fp_offset));
- mem_lr = gen_frame_mem (DImode,
- plus_constant (Pmode,
- stack_pointer_rtx,
- fp_offset
- + UNITS_PER_WORD));
- insn = emit_insn (gen_load_pairdi (hard_frame_pointer_rtx,
- mem_fp,
- gen_rtx_REG (DImode,
- LR_REGNUM),
- mem_lr));
- }
- else
- {
- insn = emit_insn (gen_loadwb_pairdi_di
- (stack_pointer_rtx,
- stack_pointer_rtx,
- hard_frame_pointer_rtx,
- gen_rtx_REG (DImode, LR_REGNUM),
- GEN_INT (offset),
- GEN_INT (GET_MODE_SIZE (DImode) + offset)));
- RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 2)) = 1;
- add_reg_note (insn, REG_CFA_ADJUST_CFA,
- (gen_rtx_SET (Pmode, stack_pointer_rtx,
- plus_constant (Pmode, cfa_reg,
- offset))));
- }
-
- /* The first part of a frame-related parallel insn
- is always assumed to be relevant to the frame
- calculations; subsequent parts are only
- frame-related if explicitly marked. */
- RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
- RTX_FRAME_RELATED_P (insn) = 1;
- add_reg_note (insn, REG_CFA_RESTORE, hard_frame_pointer_rtx);
- add_reg_note (insn, REG_CFA_RESTORE,
- gen_rtx_REG (DImode, LR_REGNUM));
-
- if (fp_offset)
- {
- insn = emit_insn (gen_add2_insn (stack_pointer_rtx,
- GEN_INT (offset)));
- RTX_FRAME_RELATED_P (insn) = 1;
- }
- }
- else
- {
- insn = emit_insn (gen_add2_insn (stack_pointer_rtx,
- GEN_INT (offset)));
- RTX_FRAME_RELATED_P (insn) = 1;
- }
- }
-
- /* Stack adjustment for exception handler. */
- if (crtl->calls_eh_return)
- {
- /* We need to unwind the stack by the offset computed by
- EH_RETURN_STACKADJ_RTX. However, at this point the CFA is
- based on SP. Ideally we would update the SP and define the
- CFA along the lines of:
-
- SP = SP + EH_RETURN_STACKADJ_RTX
- (regnote CFA = SP - EH_RETURN_STACKADJ_RTX)
-
- However the dwarf emitter only understands a constant
- register offset.
-
- The solution chosen here is to use the otherwise unused IP0
- as a temporary register to hold the current SP value. The
- CFA is described using IP0 then SP is modified. */
-
- rtx ip0 = gen_rtx_REG (DImode, IP0_REGNUM);
-
- insn = emit_move_insn (ip0, stack_pointer_rtx);
- add_reg_note (insn, REG_CFA_DEF_CFA, ip0);
- RTX_FRAME_RELATED_P (insn) = 1;
-
- emit_insn (gen_add2_insn (stack_pointer_rtx, EH_RETURN_STACKADJ_RTX));
-
- /* Ensure the assignment to IP0 does not get optimized away. */
- emit_use (ip0);
- }
-
- if (frame_size > -1)
- {
- if (frame_size >= 0x1000000)
- {
- rtx op0 = gen_rtx_REG (Pmode, IP0_REGNUM);
- emit_move_insn (op0, GEN_INT (frame_size));
- emit_insn (gen_add2_insn (stack_pointer_rtx, op0));
- aarch64_set_frame_expr (gen_rtx_SET
- (Pmode, stack_pointer_rtx,
- gen_rtx_PLUS (Pmode,
- stack_pointer_rtx,
- GEN_INT (frame_size))));
- }
- else if (frame_size > 0)
- {
- if ((frame_size & 0xfff) != 0)
- {
- insn = emit_insn (gen_add2_insn
- (stack_pointer_rtx,
- GEN_INT ((frame_size
- & (HOST_WIDE_INT) 0xfff))));
- RTX_FRAME_RELATED_P (insn) = 1;
- }
- if ((frame_size & 0xfff) != frame_size)
- {
- insn = emit_insn (gen_add2_insn
- (stack_pointer_rtx,
- GEN_INT ((frame_size
- & ~ (HOST_WIDE_INT) 0xfff))));
- RTX_FRAME_RELATED_P (insn) = 1;
- }
- }
-
- aarch64_set_frame_expr (gen_rtx_SET (Pmode, stack_pointer_rtx,
- gen_rtx_PLUS (Pmode,
- stack_pointer_rtx,
- GEN_INT (offset))));
- }
-
- emit_use (gen_rtx_REG (DImode, LR_REGNUM));
- if (!for_sibcall)
- emit_jump_insn (ret_rtx);
-}
-
-/* Return the place to copy the exception unwinding return address to.
- This will probably be a stack slot, but could (in theory) be the
- return register. */
-rtx
-aarch64_final_eh_return_addr (void)
-{
- HOST_WIDE_INT original_frame_size, frame_size, offset, fp_offset;
- aarch64_layout_frame ();
- original_frame_size = get_frame_size () + cfun->machine->saved_varargs_size;
- frame_size = (original_frame_size + cfun->machine->frame.saved_regs_size
- + crtl->outgoing_args_size);
- offset = frame_size = AARCH64_ROUND_UP (frame_size,
- STACK_BOUNDARY / BITS_PER_UNIT);
- fp_offset = offset
- - original_frame_size
- - cfun->machine->frame.saved_regs_size;
-
- if (cfun->machine->frame.reg_offset[LR_REGNUM] < 0)
- return gen_rtx_REG (DImode, LR_REGNUM);
-
- /* DSE and CSELIB do not detect an alias between sp+k1 and fp+k2. This can
- result in a store to save LR introduced by builtin_eh_return () being
- incorrectly deleted because the alias is not detected.
- So in the calculation of the address to copy the exception unwinding
- return address to, we note 2 cases.
- If FP is needed and the fp_offset is 0, it means that SP = FP and hence
- we return a SP-relative location since all the addresses are SP-relative
- in this case. This prevents the store from being optimized away.
- If the fp_offset is not 0, then the addresses will be FP-relative and
- therefore we return a FP-relative location. */
-
- if (frame_pointer_needed)
- {
- if (fp_offset)
- return gen_frame_mem (DImode,
- plus_constant (Pmode, hard_frame_pointer_rtx, UNITS_PER_WORD));
- else
- return gen_frame_mem (DImode,
- plus_constant (Pmode, stack_pointer_rtx, UNITS_PER_WORD));
- }
-
- /* If FP is not needed, we calculate the location of LR, which would be
- at the top of the saved registers block. */
-
- return gen_frame_mem (DImode,
- plus_constant (Pmode,
- stack_pointer_rtx,
- fp_offset
- + cfun->machine->frame.saved_regs_size
- - 2 * UNITS_PER_WORD));
-}
-
-/* Output code to build up a constant in a register. */
-static void
-aarch64_build_constant (int regnum, HOST_WIDE_INT val)
-{
- if (aarch64_bitmask_imm (val, DImode))
- emit_move_insn (gen_rtx_REG (Pmode, regnum), GEN_INT (val));
- else
- {
- int i;
- int ncount = 0;
- int zcount = 0;
- HOST_WIDE_INT valp = val >> 16;
- HOST_WIDE_INT valm;
- HOST_WIDE_INT tval;
-
- for (i = 16; i < 64; i += 16)
- {
- valm = (valp & 0xffff);
-
- if (valm != 0)
- ++ zcount;
-
- if (valm != 0xffff)
- ++ ncount;
-
- valp >>= 16;
- }
-
- /* zcount contains the number of additional MOVK instructions
- required if the constant is built up with an initial MOVZ instruction,
- while ncount is the number of MOVK instructions required if starting
- with a MOVN instruction. Choose the sequence that yields the fewest
- instructions, preferring MOVZ when both counts are the same. */
- if (ncount < zcount)
- {
- emit_move_insn (gen_rtx_REG (Pmode, regnum),
- GEN_INT ((~val) & 0xffff));
- tval = 0xffff;
- }
- else
- {
- emit_move_insn (gen_rtx_REG (Pmode, regnum),
- GEN_INT (val & 0xffff));
- tval = 0;
- }
-
- val >>= 16;
-
- for (i = 16; i < 64; i += 16)
- {
- if ((val & 0xffff) != tval)
- emit_insn (gen_insv_immdi (gen_rtx_REG (Pmode, regnum),
- GEN_INT (i), GEN_INT (val & 0xffff)));
- val >>= 16;
- }
- }
-}
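
A self-contained sketch of the 16-bit chunk counting behind the zcount/ncount choice above (not GCC code; the sketch_* names are hypothetical):

#include <assert.h>
#include <stdint.h>

/* Count the 16-bit chunks of VAL that differ from BACKGROUND, which is
   0x0000 for the MOVZ strategy and 0xffff for the MOVN strategy.  Each
   differing chunk beyond the first needs a MOVK.  */
static int
sketch_differing_chunks (uint64_t val, unsigned background)
{
  int count = 0;
  for (int shift = 0; shift < 64; shift += 16)
    if (((val >> shift) & 0xffff) != background)
      ++count;
  return count;
}

int
main (void)
{
  /* Only one chunk of this value is non-zero, so building it with MOVZ
     is clearly cheaper than starting from MOVN.  */
  assert (sketch_differing_chunks (0x0000000012340000ull, 0x0000) == 1);
  assert (sketch_differing_chunks (0x0000000012340000ull, 0xffff) == 4);
  return 0;
}
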
-
-static void
-aarch64_add_constant (int regnum, int scratchreg, HOST_WIDE_INT delta)
-{
- HOST_WIDE_INT mdelta = delta;
- rtx this_rtx = gen_rtx_REG (Pmode, regnum);
- rtx scratch_rtx = gen_rtx_REG (Pmode, scratchreg);
-
- if (mdelta < 0)
- mdelta = -mdelta;
-
- if (mdelta >= 4096 * 4096)
- {
- aarch64_build_constant (scratchreg, delta);
- emit_insn (gen_add3_insn (this_rtx, this_rtx, scratch_rtx));
- }
- else if (mdelta > 0)
- {
- if (mdelta >= 4096)
- {
- emit_insn (gen_rtx_SET (Pmode, scratch_rtx, GEN_INT (mdelta / 4096)));
- rtx shift = gen_rtx_ASHIFT (Pmode, scratch_rtx, GEN_INT (12));
- if (delta < 0)
- emit_insn (gen_rtx_SET (Pmode, this_rtx,
- gen_rtx_MINUS (Pmode, this_rtx, shift)));
- else
- emit_insn (gen_rtx_SET (Pmode, this_rtx,
- gen_rtx_PLUS (Pmode, this_rtx, shift)));
- }
- if (mdelta % 4096 != 0)
- {
- scratch_rtx = GEN_INT ((delta < 0 ? -1 : 1) * (mdelta % 4096));
- emit_insn (gen_rtx_SET (Pmode, this_rtx,
- gen_rtx_PLUS (Pmode, this_rtx, scratch_rtx)));
- }
- }
-}
-
-/* Output code to add DELTA to the first argument, and then jump
- to FUNCTION. Used for C++ multiple inheritance. */
-static void
-aarch64_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
- HOST_WIDE_INT delta,
- HOST_WIDE_INT vcall_offset,
- tree function)
-{
- /* The this pointer is always in x0. Note that this differs from
- Arm where the this pointer may be bumped to r1 if r0 is required
- to return a pointer to an aggregate. On AArch64 a result value
- pointer will be in x8. */
- int this_regno = R0_REGNUM;
- rtx this_rtx, temp0, temp1, addr, insn, funexp;
-
- reload_completed = 1;
- emit_note (NOTE_INSN_PROLOGUE_END);
-
- if (vcall_offset == 0)
- aarch64_add_constant (this_regno, IP1_REGNUM, delta);
- else
- {
- gcc_assert ((vcall_offset & 0x7) == 0);
-
- this_rtx = gen_rtx_REG (Pmode, this_regno);
- temp0 = gen_rtx_REG (Pmode, IP0_REGNUM);
- temp1 = gen_rtx_REG (Pmode, IP1_REGNUM);
-
- addr = this_rtx;
- if (delta != 0)
- {
- if (delta >= -256 && delta < 256)
- addr = gen_rtx_PRE_MODIFY (Pmode, this_rtx,
- plus_constant (Pmode, this_rtx, delta));
- else
- aarch64_add_constant (this_regno, IP1_REGNUM, delta);
- }
-
- aarch64_emit_move (temp0, gen_rtx_MEM (Pmode, addr));
-
- if (vcall_offset >= -256 && vcall_offset < 32768)
- addr = plus_constant (Pmode, temp0, vcall_offset);
- else
- {
- aarch64_build_constant (IP1_REGNUM, vcall_offset);
- addr = gen_rtx_PLUS (Pmode, temp0, temp1);
- }
-
- aarch64_emit_move (temp1, gen_rtx_MEM (Pmode,addr));
- emit_insn (gen_add2_insn (this_rtx, temp1));
- }
-
- /* Generate a tail call to the target function. */
- if (!TREE_USED (function))
- {
- assemble_external (function);
- TREE_USED (function) = 1;
- }
- funexp = XEXP (DECL_RTL (function), 0);
- funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
- insn = emit_call_insn (gen_sibcall (funexp, const0_rtx, NULL_RTX));
- SIBLING_CALL_P (insn) = 1;
-
- insn = get_insns ();
- shorten_branches (insn);
- final_start_function (insn, file, 1);
- final (insn, file, 1);
- final_end_function ();
-
- /* Stop pretending to be a post-reload pass. */
- reload_completed = 0;
-}
-
-static int
-aarch64_tls_operand_p_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
-{
- if (GET_CODE (*x) == SYMBOL_REF)
- return SYMBOL_REF_TLS_MODEL (*x) != 0;
-
- /* Don't recurse into UNSPEC_TLS looking for TLS symbols; these are
- TLS offsets, not real symbol references. */
- if (GET_CODE (*x) == UNSPEC
- && XINT (*x, 1) == UNSPEC_TLS)
- return -1;
-
- return 0;
-}
-
-static bool
-aarch64_tls_referenced_p (rtx x)
-{
- if (!TARGET_HAVE_TLS)
- return false;
-
- return for_each_rtx (&x, aarch64_tls_operand_p_1, NULL);
-}
-
-
-static int
-aarch64_bitmasks_cmp (const void *i1, const void *i2)
-{
- const unsigned HOST_WIDE_INT *imm1 = (const unsigned HOST_WIDE_INT *) i1;
- const unsigned HOST_WIDE_INT *imm2 = (const unsigned HOST_WIDE_INT *) i2;
-
- if (*imm1 < *imm2)
- return -1;
- if (*imm1 > *imm2)
- return +1;
- return 0;
-}
-
-
-static void
-aarch64_build_bitmask_table (void)
-{
- unsigned HOST_WIDE_INT mask, imm;
- unsigned int log_e, e, s, r;
- unsigned int nimms = 0;
-
- for (log_e = 1; log_e <= 6; log_e++)
- {
- e = 1 << log_e;
- if (e == 64)
- mask = ~(HOST_WIDE_INT) 0;
- else
- mask = ((HOST_WIDE_INT) 1 << e) - 1;
- for (s = 1; s < e; s++)
- {
- for (r = 0; r < e; r++)
- {
- /* set s consecutive bits to 1 (s < 64) */
- imm = ((unsigned HOST_WIDE_INT)1 << s) - 1;
- /* rotate right by r */
- if (r != 0)
- imm = ((imm >> r) | (imm << (e - r))) & mask;
- /* replicate the constant depending on SIMD size */
- switch (log_e) {
- case 1: imm |= (imm << 2);
- case 2: imm |= (imm << 4);
- case 3: imm |= (imm << 8);
- case 4: imm |= (imm << 16);
- case 5: imm |= (imm << 32);
- case 6:
- break;
- default:
- gcc_unreachable ();
- }
- gcc_assert (nimms < AARCH64_NUM_BITMASKS);
- aarch64_bitmasks[nimms++] = imm;
- }
- }
- }
-
- gcc_assert (nimms == AARCH64_NUM_BITMASKS);
- qsort (aarch64_bitmasks, nimms, sizeof (aarch64_bitmasks[0]),
- aarch64_bitmasks_cmp);
-}
-
-
-/* Return true if val can be encoded as a 12-bit unsigned immediate with
- a left shift of 0 or 12 bits. */
-bool
-aarch64_uimm12_shift (HOST_WIDE_INT val)
-{
- return ((val & (((HOST_WIDE_INT) 0xfff) << 0)) == val
- || (val & (((HOST_WIDE_INT) 0xfff) << 12)) == val
- );
-}
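
The same check restated as a standalone sketch over a plain 64-bit type, with a few example values (not GCC code; names hypothetical):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* A value is encodable when all of its set bits fall in bits 0..11 or
   all of them fall in bits 12..23.  */
static bool
sketch_uimm12_shift (uint64_t val)
{
  return (val & 0xfffull) == val || (val & (0xfffull << 12)) == val;
}

int
main (void)
{
  assert (sketch_uimm12_shift (0xabc));     /* Fits in the low 12 bits.  */
  assert (sketch_uimm12_shift (0xabc000));  /* Fits in bits 12..23.  */
  assert (!sketch_uimm12_shift (0x1001));   /* Needs bits from both windows.  */
  return 0;
}
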
-
-
-/* Return true if val is an immediate that can be loaded into a
- register by a MOVZ instruction. */
-static bool
-aarch64_movw_imm (HOST_WIDE_INT val, enum machine_mode mode)
-{
- if (GET_MODE_SIZE (mode) > 4)
- {
- if ((val & (((HOST_WIDE_INT) 0xffff) << 32)) == val
- || (val & (((HOST_WIDE_INT) 0xffff) << 48)) == val)
- return 1;
- }
- else
- {
- /* Ignore sign extension. */
- val &= (HOST_WIDE_INT) 0xffffffff;
- }
- return ((val & (((HOST_WIDE_INT) 0xffff) << 0)) == val
- || (val & (((HOST_WIDE_INT) 0xffff) << 16)) == val);
-}
-
-
-/* Return true if val is a valid bitmask immediate. */
-bool
-aarch64_bitmask_imm (HOST_WIDE_INT val, enum machine_mode mode)
-{
- if (GET_MODE_SIZE (mode) < 8)
- {
- /* Replicate bit pattern. */
- val &= (HOST_WIDE_INT) 0xffffffff;
- val |= val << 32;
- }
- return bsearch (&val, aarch64_bitmasks, AARCH64_NUM_BITMASKS,
- sizeof (aarch64_bitmasks[0]), aarch64_bitmasks_cmp) != NULL;
-}
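
A standalone sketch that builds one such replicated, rotated pattern in the same spirit as the table construction above, checked against two familiar logical immediates (not GCC code; names hypothetical):

#include <assert.h>
#include <stdint.h>

/* S consecutive ones, rotated right by R within an E-bit element, then
   replicated across 64 bits (E a power of two, 0 < S < E, 0 <= R < E).  */
static uint64_t
sketch_bitmask_pattern (unsigned e, unsigned s, unsigned r)
{
  uint64_t mask = (e == 64) ? ~(uint64_t) 0 : (((uint64_t) 1 << e) - 1);
  uint64_t imm = ((uint64_t) 1 << s) - 1;
  if (r != 0)
    imm = ((imm >> r) | (imm << (e - r))) & mask;
  for (unsigned width = e; width < 64; width *= 2)
    imm |= imm << width;
  return imm;
}

int
main (void)
{
  assert (sketch_bitmask_pattern (2, 1, 0) == 0x5555555555555555ull);
  assert (sketch_bitmask_pattern (8, 4, 4) == 0xf0f0f0f0f0f0f0f0ull);
  return 0;
}
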
-
-
-/* Return true if val is an immediate that can be loaded into a
- register in a single instruction. */
-bool
-aarch64_move_imm (HOST_WIDE_INT val, enum machine_mode mode)
-{
- if (aarch64_movw_imm (val, mode) || aarch64_movw_imm (~val, mode))
- return 1;
- return aarch64_bitmask_imm (val, mode);
-}
-
-static bool
-aarch64_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
-{
- rtx base, offset;
- if (GET_CODE (x) == HIGH)
- return true;
-
- split_const (x, &base, &offset);
- if (GET_CODE (base) == SYMBOL_REF || GET_CODE (base) == LABEL_REF)
- return (aarch64_classify_symbol (base, SYMBOL_CONTEXT_ADR) != SYMBOL_FORCE_TO_MEM);
-
- return aarch64_tls_referenced_p (x);
-}
-
-/* Return true if register REGNO is a valid index register.
- STRICT_P is true if REG_OK_STRICT is in effect. */
-
-bool
-aarch64_regno_ok_for_index_p (int regno, bool strict_p)
-{
- if (!HARD_REGISTER_NUM_P (regno))
- {
- if (!strict_p)
- return true;
-
- if (!reg_renumber)
- return false;
-
- regno = reg_renumber[regno];
- }
- return GP_REGNUM_P (regno);
-}
-
-/* Return true if register REGNO is a valid base register for mode MODE.
- STRICT_P is true if REG_OK_STRICT is in effect. */
-
-bool
-aarch64_regno_ok_for_base_p (int regno, bool strict_p)
-{
- if (!HARD_REGISTER_NUM_P (regno))
- {
- if (!strict_p)
- return true;
-
- if (!reg_renumber)
- return false;
-
- regno = reg_renumber[regno];
- }
-
- /* The fake registers will be eliminated to either the stack or
- hard frame pointer, both of which are usually valid base registers.
- Reload deals with the cases where the eliminated form isn't valid. */
- return (GP_REGNUM_P (regno)
- || regno == SP_REGNUM
- || regno == FRAME_POINTER_REGNUM
- || regno == ARG_POINTER_REGNUM);
-}
-
-/* Return true if X is a valid base register for mode MODE.
- STRICT_P is true if REG_OK_STRICT is in effect. */
-
-static bool
-aarch64_base_register_rtx_p (rtx x, bool strict_p)
-{
- if (!strict_p && GET_CODE (x) == SUBREG)
- x = SUBREG_REG (x);
-
- return (REG_P (x) && aarch64_regno_ok_for_base_p (REGNO (x), strict_p));
-}
-
-/* Return true if address offset is a valid index. If it is, fill in INFO
- appropriately. STRICT_P is true if REG_OK_STRICT is in effect. */
-
-static bool
-aarch64_classify_index (struct aarch64_address_info *info, rtx x,
- enum machine_mode mode, bool strict_p)
-{
- enum aarch64_address_type type;
- rtx index;
- int shift;
-
- /* (reg:P) */
- if ((REG_P (x) || GET_CODE (x) == SUBREG)
- && GET_MODE (x) == Pmode)
- {
- type = ADDRESS_REG_REG;
- index = x;
- shift = 0;
- }
- /* (sign_extend:DI (reg:SI)) */
- else if ((GET_CODE (x) == SIGN_EXTEND
- || GET_CODE (x) == ZERO_EXTEND)
- && GET_MODE (x) == DImode
- && GET_MODE (XEXP (x, 0)) == SImode)
- {
- type = (GET_CODE (x) == SIGN_EXTEND)
- ? ADDRESS_REG_SXTW : ADDRESS_REG_UXTW;
- index = XEXP (x, 0);
- shift = 0;
- }
- /* (mult:DI (sign_extend:DI (reg:SI)) (const_int scale)) */
- else if (GET_CODE (x) == MULT
- && (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
- || GET_CODE (XEXP (x, 0)) == ZERO_EXTEND)
- && GET_MODE (XEXP (x, 0)) == DImode
- && GET_MODE (XEXP (XEXP (x, 0), 0)) == SImode
- && CONST_INT_P (XEXP (x, 1)))
- {
- type = (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
- ? ADDRESS_REG_SXTW : ADDRESS_REG_UXTW;
- index = XEXP (XEXP (x, 0), 0);
- shift = exact_log2 (INTVAL (XEXP (x, 1)));
- }
- /* (ashift:DI (sign_extend:DI (reg:SI)) (const_int shift)) */
- else if (GET_CODE (x) == ASHIFT
- && (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
- || GET_CODE (XEXP (x, 0)) == ZERO_EXTEND)
- && GET_MODE (XEXP (x, 0)) == DImode
- && GET_MODE (XEXP (XEXP (x, 0), 0)) == SImode
- && CONST_INT_P (XEXP (x, 1)))
- {
- type = (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
- ? ADDRESS_REG_SXTW : ADDRESS_REG_UXTW;
- index = XEXP (XEXP (x, 0), 0);
- shift = INTVAL (XEXP (x, 1));
- }
- /* (sign_extract:DI (mult:DI (reg:DI) (const_int scale)) 32+shift 0) */
- else if ((GET_CODE (x) == SIGN_EXTRACT
- || GET_CODE (x) == ZERO_EXTRACT)
- && GET_MODE (x) == DImode
- && GET_CODE (XEXP (x, 0)) == MULT
- && GET_MODE (XEXP (XEXP (x, 0), 0)) == DImode
- && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
- {
- type = (GET_CODE (x) == SIGN_EXTRACT)
- ? ADDRESS_REG_SXTW : ADDRESS_REG_UXTW;
- index = XEXP (XEXP (x, 0), 0);
- shift = exact_log2 (INTVAL (XEXP (XEXP (x, 0), 1)));
- if (INTVAL (XEXP (x, 1)) != 32 + shift
- || INTVAL (XEXP (x, 2)) != 0)
- shift = -1;
- }
- /* (and:DI (mult:DI (reg:DI) (const_int scale))
- (const_int 0xffffffff<<shift)) */
- else if (GET_CODE (x) == AND
- && GET_MODE (x) == DImode
- && GET_CODE (XEXP (x, 0)) == MULT
- && GET_MODE (XEXP (XEXP (x, 0), 0)) == DImode
- && CONST_INT_P (XEXP (XEXP (x, 0), 1))
- && CONST_INT_P (XEXP (x, 1)))
- {
- type = ADDRESS_REG_UXTW;
- index = XEXP (XEXP (x, 0), 0);
- shift = exact_log2 (INTVAL (XEXP (XEXP (x, 0), 1)));
- if (INTVAL (XEXP (x, 1)) != (HOST_WIDE_INT)0xffffffff << shift)
- shift = -1;
- }
- /* (sign_extract:DI (ashift:DI (reg:DI) (const_int shift)) 32+shift 0) */
- else if ((GET_CODE (x) == SIGN_EXTRACT
- || GET_CODE (x) == ZERO_EXTRACT)
- && GET_MODE (x) == DImode
- && GET_CODE (XEXP (x, 0)) == ASHIFT
- && GET_MODE (XEXP (XEXP (x, 0), 0)) == DImode
- && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
- {
- type = (GET_CODE (x) == SIGN_EXTRACT)
- ? ADDRESS_REG_SXTW : ADDRESS_REG_UXTW;
- index = XEXP (XEXP (x, 0), 0);
- shift = INTVAL (XEXP (XEXP (x, 0), 1));
- if (INTVAL (XEXP (x, 1)) != 32 + shift
- || INTVAL (XEXP (x, 2)) != 0)
- shift = -1;
- }
- /* (and:DI (ashift:DI (reg:DI) (const_int shift))
- (const_int 0xffffffff<<shift)) */
- else if (GET_CODE (x) == AND
- && GET_MODE (x) == DImode
- && GET_CODE (XEXP (x, 0)) == ASHIFT
- && GET_MODE (XEXP (XEXP (x, 0), 0)) == DImode
- && CONST_INT_P (XEXP (XEXP (x, 0), 1))
- && CONST_INT_P (XEXP (x, 1)))
- {
- type = ADDRESS_REG_UXTW;
- index = XEXP (XEXP (x, 0), 0);
- shift = INTVAL (XEXP (XEXP (x, 0), 1));
- if (INTVAL (XEXP (x, 1)) != (HOST_WIDE_INT)0xffffffff << shift)
- shift = -1;
- }
- /* (mult:P (reg:P) (const_int scale)) */
- else if (GET_CODE (x) == MULT
- && GET_MODE (x) == Pmode
- && GET_MODE (XEXP (x, 0)) == Pmode
- && CONST_INT_P (XEXP (x, 1)))
- {
- type = ADDRESS_REG_REG;
- index = XEXP (x, 0);
- shift = exact_log2 (INTVAL (XEXP (x, 1)));
- }
- /* (ashift:P (reg:P) (const_int shift)) */
- else if (GET_CODE (x) == ASHIFT
- && GET_MODE (x) == Pmode
- && GET_MODE (XEXP (x, 0)) == Pmode
- && CONST_INT_P (XEXP (x, 1)))
- {
- type = ADDRESS_REG_REG;
- index = XEXP (x, 0);
- shift = INTVAL (XEXP (x, 1));
- }
- else
- return false;
-
- if (GET_CODE (index) == SUBREG)
- index = SUBREG_REG (index);
-
- if ((shift == 0 ||
- (shift > 0 && shift <= 3
- && (1 << shift) == GET_MODE_SIZE (mode)))
- && REG_P (index)
- && aarch64_regno_ok_for_index_p (REGNO (index), strict_p))
- {
- info->type = type;
- info->offset = index;
- info->shift = shift;
- return true;
- }
-
- return false;
-}
-
-static inline bool
-offset_7bit_signed_scaled_p (enum machine_mode mode, HOST_WIDE_INT offset)
-{
- return (offset >= -64 * GET_MODE_SIZE (mode)
- && offset < 64 * GET_MODE_SIZE (mode)
- && offset % GET_MODE_SIZE (mode) == 0);
-}
-
-static inline bool
-offset_9bit_signed_unscaled_p (enum machine_mode mode ATTRIBUTE_UNUSED,
- HOST_WIDE_INT offset)
-{
- return offset >= -256 && offset < 256;
-}
-
-static inline bool
-offset_12bit_unsigned_scaled_p (enum machine_mode mode, HOST_WIDE_INT offset)
-{
- return (offset >= 0
- && offset < 4096 * GET_MODE_SIZE (mode)
- && offset % GET_MODE_SIZE (mode) == 0);
-}
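
For an 8-byte (DImode) access the three ranges above work out as follows; a standalone sketch with the mode size folded in (not GCC code; names hypothetical):

#include <assert.h>
#include <stdbool.h>

static bool sketch_7bit_scaled (long ofs)    /* LDP/STP immediate.  */
{ return ofs >= -64 * 8 && ofs < 64 * 8 && ofs % 8 == 0; }

static bool sketch_9bit_unscaled (long ofs)  /* LDUR/STUR immediate.  */
{ return ofs >= -256 && ofs < 256; }

static bool sketch_12bit_scaled (long ofs)   /* LDR/STR unsigned offset.  */
{ return ofs >= 0 && ofs < 4096 * 8 && ofs % 8 == 0; }

int
main (void)
{
  assert (sketch_7bit_scaled (504) && !sketch_7bit_scaled (512));
  assert (sketch_9bit_unscaled (-256) && !sketch_9bit_unscaled (256));
  assert (sketch_12bit_scaled (32760) && !sketch_12bit_scaled (-8));
  return 0;
}
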
-
-/* Return true if X is a valid address for machine mode MODE. If it is,
- fill in INFO appropriately. STRICT_P is true if REG_OK_STRICT is in
- effect. OUTER_CODE is PARALLEL for a load/store pair. */
-
-static bool
-aarch64_classify_address (struct aarch64_address_info *info,
- rtx x, enum machine_mode mode,
- RTX_CODE outer_code, bool strict_p)
-{
- enum rtx_code code = GET_CODE (x);
- rtx op0, op1;
- bool allow_reg_index_p =
- outer_code != PARALLEL && GET_MODE_SIZE(mode) != 16;
-
- /* Don't support anything other than POST_INC or REG addressing for
- AdvSIMD. */
- if (aarch64_vector_mode_p (mode)
- && (code != POST_INC && code != REG))
- return false;
-
- switch (code)
- {
- case REG:
- case SUBREG:
- info->type = ADDRESS_REG_IMM;
- info->base = x;
- info->offset = const0_rtx;
- return aarch64_base_register_rtx_p (x, strict_p);
-
- case PLUS:
- op0 = XEXP (x, 0);
- op1 = XEXP (x, 1);
- if (GET_MODE_SIZE (mode) != 0
- && CONST_INT_P (op1)
- && aarch64_base_register_rtx_p (op0, strict_p))
- {
- HOST_WIDE_INT offset = INTVAL (op1);
-
- info->type = ADDRESS_REG_IMM;
- info->base = op0;
- info->offset = op1;
-
- /* TImode and TFmode values are allowed in both pairs of X
- registers and individual Q registers. The available
- address modes are:
- X,X: 7-bit signed scaled offset
- Q: 9-bit signed offset
- We conservatively require an offset representable in either mode.
- */
- if (mode == TImode || mode == TFmode)
- return (offset_7bit_signed_scaled_p (mode, offset)
- && offset_9bit_signed_unscaled_p (mode, offset));
-
- if (outer_code == PARALLEL)
- return ((GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8)
- && offset_7bit_signed_scaled_p (mode, offset));
- else
- return (offset_9bit_signed_unscaled_p (mode, offset)
- || offset_12bit_unsigned_scaled_p (mode, offset));
- }
-
- if (allow_reg_index_p)
- {
- /* Look for base + (scaled/extended) index register. */
- if (aarch64_base_register_rtx_p (op0, strict_p)
- && aarch64_classify_index (info, op1, mode, strict_p))
- {
- info->base = op0;
- return true;
- }
- if (aarch64_base_register_rtx_p (op1, strict_p)
- && aarch64_classify_index (info, op0, mode, strict_p))
- {
- info->base = op1;
- return true;
- }
- }
-
- return false;
-
- case POST_INC:
- case POST_DEC:
- case PRE_INC:
- case PRE_DEC:
- info->type = ADDRESS_REG_WB;
- info->base = XEXP (x, 0);
- info->offset = NULL_RTX;
- return aarch64_base_register_rtx_p (info->base, strict_p);
-
- case POST_MODIFY:
- case PRE_MODIFY:
- info->type = ADDRESS_REG_WB;
- info->base = XEXP (x, 0);
- if (GET_CODE (XEXP (x, 1)) == PLUS
- && CONST_INT_P (XEXP (XEXP (x, 1), 1))
- && rtx_equal_p (XEXP (XEXP (x, 1), 0), info->base)
- && aarch64_base_register_rtx_p (info->base, strict_p))
- {
- HOST_WIDE_INT offset;
- info->offset = XEXP (XEXP (x, 1), 1);
- offset = INTVAL (info->offset);
-
- /* TImode and TFmode values are allowed in both pairs of X
- registers and individual Q registers. The available
- address modes are:
- X,X: 7-bit signed scaled offset
- Q: 9-bit signed offset
- We conservatively require an offset representable in either mode.
- */
- if (mode == TImode || mode == TFmode)
- return (offset_7bit_signed_scaled_p (mode, offset)
- && offset_9bit_signed_unscaled_p (mode, offset));
-
- if (outer_code == PARALLEL)
- return ((GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8)
- && offset_7bit_signed_scaled_p (mode, offset));
- else
- return offset_9bit_signed_unscaled_p (mode, offset);
- }
- return false;
-
- case CONST:
- case SYMBOL_REF:
- case LABEL_REF:
- /* load literal: pc-relative constant pool entry. Only supported
- for SI mode or larger. */
- info->type = ADDRESS_SYMBOLIC;
- if (outer_code != PARALLEL && GET_MODE_SIZE (mode) >= 4)
- {
- rtx sym, addend;
-
- split_const (x, &sym, &addend);
- return (GET_CODE (sym) == LABEL_REF
- || (GET_CODE (sym) == SYMBOL_REF
- && CONSTANT_POOL_ADDRESS_P (sym)));
- }
- return false;
-
- case LO_SUM:
- info->type = ADDRESS_LO_SUM;
- info->base = XEXP (x, 0);
- info->offset = XEXP (x, 1);
- if (allow_reg_index_p
- && aarch64_base_register_rtx_p (info->base, strict_p))
- {
- rtx sym, offs;
- split_const (info->offset, &sym, &offs);
- if (GET_CODE (sym) == SYMBOL_REF
- && (aarch64_classify_symbol (sym, SYMBOL_CONTEXT_MEM)
- == SYMBOL_SMALL_ABSOLUTE))
- {
- /* The symbol and offset must be aligned to the access size. */
- unsigned int align;
- unsigned int ref_size;
-
- if (CONSTANT_POOL_ADDRESS_P (sym))
- align = GET_MODE_ALIGNMENT (get_pool_mode (sym));
- else if (TREE_CONSTANT_POOL_ADDRESS_P (sym))
- {
- tree exp = SYMBOL_REF_DECL (sym);
- align = TYPE_ALIGN (TREE_TYPE (exp));
- align = CONSTANT_ALIGNMENT (exp, align);
- }
- else if (SYMBOL_REF_DECL (sym))
- align = DECL_ALIGN (SYMBOL_REF_DECL (sym));
- else
- align = BITS_PER_UNIT;
-
- ref_size = GET_MODE_SIZE (mode);
- if (ref_size == 0)
- ref_size = GET_MODE_SIZE (DImode);
-
- return ((INTVAL (offs) & (ref_size - 1)) == 0
- && ((align / BITS_PER_UNIT) & (ref_size - 1)) == 0);
- }
- }
- return false;
-
- default:
- return false;
- }
-}
-
-bool
-aarch64_symbolic_address_p (rtx x)
-{
- rtx offset;
-
- split_const (x, &x, &offset);
- return GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF;
-}
-
-/* Classify the base of symbolic expression X, given that X appears in
- context CONTEXT. */
-static enum aarch64_symbol_type
-aarch64_classify_symbolic_expression (rtx x, enum aarch64_symbol_context context)
-{
- rtx offset;
- split_const (x, &x, &offset);
- return aarch64_classify_symbol (x, context);
-}
-
-
-/* Return TRUE if X is a legitimate address for accessing memory in
- mode MODE. */
-static bool
-aarch64_legitimate_address_hook_p (enum machine_mode mode, rtx x, bool strict_p)
-{
- struct aarch64_address_info addr;
-
- return aarch64_classify_address (&addr, x, mode, MEM, strict_p);
-}
-
-/* Return TRUE if X is a legitimate address for accessing memory in
- mode MODE. OUTER_CODE will be PARALLEL if this is a load/store
- pair operation. */
-bool
-aarch64_legitimate_address_p (enum machine_mode mode, rtx x,
- RTX_CODE outer_code, bool strict_p)
-{
- struct aarch64_address_info addr;
-
- return aarch64_classify_address (&addr, x, mode, outer_code, strict_p);
-}
-
- /* Return TRUE if rtx X is the immediate constant 0.0. */
-bool
-aarch64_float_const_zero_rtx_p (rtx x)
-{
- REAL_VALUE_TYPE r;
-
- if (GET_MODE (x) == VOIDmode)
- return false;
-
- REAL_VALUE_FROM_CONST_DOUBLE (r, x);
- if (REAL_VALUE_MINUS_ZERO (r))
- return !HONOR_SIGNED_ZEROS (GET_MODE (x));
- return REAL_VALUES_EQUAL (r, dconst0);
-}
-
-/* Return the fixed registers used for condition codes. */
-
-static bool
-aarch64_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
-{
- *p1 = CC_REGNUM;
- *p2 = INVALID_REGNUM;
- return true;
-}
-
-enum machine_mode
-aarch64_select_cc_mode (RTX_CODE code, rtx x, rtx y)
-{
- /* All floating point compares return CCFP if it is an equality
- comparison, and CCFPE otherwise. */
- if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
- {
- switch (code)
- {
- case EQ:
- case NE:
- case UNORDERED:
- case ORDERED:
- case UNLT:
- case UNLE:
- case UNGT:
- case UNGE:
- case UNEQ:
- case LTGT:
- return CCFPmode;
-
- case LT:
- case LE:
- case GT:
- case GE:
- return CCFPEmode;
-
- default:
- gcc_unreachable ();
- }
- }
-
- if ((GET_MODE (x) == SImode || GET_MODE (x) == DImode)
- && y == const0_rtx
- && (code == EQ || code == NE || code == LT || code == GE)
- && (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS || GET_CODE (x) == AND))
- return CC_NZmode;
-
- /* A compare with a shifted operand. Because of canonicalization,
- the comparison will have to be swapped when we emit the assembly
- code. */
- if ((GET_MODE (x) == SImode || GET_MODE (x) == DImode)
- && (GET_CODE (y) == REG || GET_CODE (y) == SUBREG)
- && (GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
- || GET_CODE (x) == LSHIFTRT
- || GET_CODE (x) == ZERO_EXTEND || GET_CODE (x) == SIGN_EXTEND))
- return CC_SWPmode;
-
- /* A compare of a mode narrower than SI mode against zero can be done
- by extending the value in the comparison. */
- if ((GET_MODE (x) == QImode || GET_MODE (x) == HImode)
- && y == const0_rtx)
- /* Only use sign-extension if we really need it. */
- return ((code == GT || code == GE || code == LE || code == LT)
- ? CC_SESWPmode : CC_ZESWPmode);
-
- /* For everything else, return CCmode. */
- return CCmode;
-}
-
-static unsigned
-aarch64_get_condition_code (rtx x)
-{
- enum machine_mode mode = GET_MODE (XEXP (x, 0));
- enum rtx_code comp_code = GET_CODE (x);
-
- if (GET_MODE_CLASS (mode) != MODE_CC)
- mode = SELECT_CC_MODE (comp_code, XEXP (x, 0), XEXP (x, 1));
-
- switch (mode)
- {
- case CCFPmode:
- case CCFPEmode:
- switch (comp_code)
- {
- case GE: return AARCH64_GE;
- case GT: return AARCH64_GT;
- case LE: return AARCH64_LS;
- case LT: return AARCH64_MI;
- case NE: return AARCH64_NE;
- case EQ: return AARCH64_EQ;
- case ORDERED: return AARCH64_VC;
- case UNORDERED: return AARCH64_VS;
- case UNLT: return AARCH64_LT;
- case UNLE: return AARCH64_LE;
- case UNGT: return AARCH64_HI;
- case UNGE: return AARCH64_PL;
- default: gcc_unreachable ();
- }
- break;
-
- case CCmode:
- switch (comp_code)
- {
- case NE: return AARCH64_NE;
- case EQ: return AARCH64_EQ;
- case GE: return AARCH64_GE;
- case GT: return AARCH64_GT;
- case LE: return AARCH64_LE;
- case LT: return AARCH64_LT;
- case GEU: return AARCH64_CS;
- case GTU: return AARCH64_HI;
- case LEU: return AARCH64_LS;
- case LTU: return AARCH64_CC;
- default: gcc_unreachable ();
- }
- break;
-
- case CC_SWPmode:
- case CC_ZESWPmode:
- case CC_SESWPmode:
- switch (comp_code)
- {
- case NE: return AARCH64_NE;
- case EQ: return AARCH64_EQ;
- case GE: return AARCH64_LE;
- case GT: return AARCH64_LT;
- case LE: return AARCH64_GE;
- case LT: return AARCH64_GT;
- case GEU: return AARCH64_LS;
- case GTU: return AARCH64_CC;
- case LEU: return AARCH64_CS;
- case LTU: return AARCH64_HI;
- default: gcc_unreachable ();
- }
- break;
-
- case CC_NZmode:
- switch (comp_code)
- {
- case NE: return AARCH64_NE;
- case EQ: return AARCH64_EQ;
- case GE: return AARCH64_PL;
- case LT: return AARCH64_MI;
- default: gcc_unreachable ();
- }
- break;
-
- default:
- gcc_unreachable ();
- break;
- }
-}
-
-static unsigned
-bit_count (unsigned HOST_WIDE_INT value)
-{
- unsigned count = 0;
-
- while (value)
- {
- count++;
- value &= value - 1;
- }
-
- return count;
-}
-
-void
-aarch64_print_operand (FILE *f, rtx x, char code)
-{
- switch (code)
- {
- case 'e':
- /* Print the sign/zero-extend size as a character 8->b, 16->h, 32->w. */
- {
- int n;
-
- if (GET_CODE (x) != CONST_INT
- || (n = exact_log2 (INTVAL (x) & ~7)) <= 0)
- {
- output_operand_lossage ("invalid operand for '%%%c'", code);
- return;
- }
-
- switch (n)
- {
- case 3:
- fputc ('b', f);
- break;
- case 4:
- fputc ('h', f);
- break;
- case 5:
- fputc ('w', f);
- break;
- default:
- output_operand_lossage ("invalid operand for '%%%c'", code);
- return;
- }
- }
- break;
-
- case 'p':
- {
- int n;
-
- /* Print N such that 2^N == X. */
- if (GET_CODE (x) != CONST_INT || (n = exact_log2 (INTVAL (x))) < 0)
- {
- output_operand_lossage ("invalid operand for '%%%c'", code);
- return;
- }
-
- asm_fprintf (f, "%d", n);
- }
- break;
-
- case 'P':
- /* Print the number of non-zero bits in X (a const_int). */
- if (GET_CODE (x) != CONST_INT)
- {
- output_operand_lossage ("invalid operand for '%%%c'", code);
- return;
- }
-
- asm_fprintf (f, "%u", bit_count (INTVAL (x)));
- break;
-
- case 'H':
- /* Print the higher numbered register of a pair (TImode) of regs. */
- if (GET_CODE (x) != REG || !GP_REGNUM_P (REGNO (x) + 1))
- {
- output_operand_lossage ("invalid operand for '%%%c'", code);
- return;
- }
-
- asm_fprintf (f, "%s", reg_names [REGNO (x) + 1]);
- break;
-
- case 'Q':
- /* Print the least significant register of a pair (TImode) of regs. */
- if (GET_CODE (x) != REG || !GP_REGNUM_P (REGNO (x) + 1))
- {
- output_operand_lossage ("invalid operand for '%%%c'", code);
- return;
- }
- asm_fprintf (f, "%s", reg_names [REGNO (x) + (WORDS_BIG_ENDIAN ? 1 : 0)]);
- break;
-
- case 'R':
- /* Print the most significant register of a pair (TImode) of regs. */
- if (GET_CODE (x) != REG || !GP_REGNUM_P (REGNO (x) + 1))
- {
- output_operand_lossage ("invalid operand for '%%%c'", code);
- return;
- }
- asm_fprintf (f, "%s", reg_names [REGNO (x) + (WORDS_BIG_ENDIAN ? 0 : 1)]);
- break;
-
- case 'm':
- /* Print a condition (eq, ne, etc). */
-
- /* CONST_TRUE_RTX means always -- that's the default. */
- if (x == const_true_rtx)
- return;
-
- if (!COMPARISON_P (x))
- {
- output_operand_lossage ("invalid operand for '%%%c'", code);
- return;
- }
-
- fputs (aarch64_condition_codes[aarch64_get_condition_code (x)], f);
- break;
-
- case 'M':
- /* Print the inverse of a condition (eq <-> ne, etc). */
-
- /* CONST_TRUE_RTX means never -- that's the default. */
- if (x == const_true_rtx)
- {
- fputs ("nv", f);
- return;
- }
-
- if (!COMPARISON_P (x))
- {
- output_operand_lossage ("invalid operand for '%%%c'", code);
- return;
- }
-
- fputs (aarch64_condition_codes[AARCH64_INVERSE_CONDITION_CODE
- (aarch64_get_condition_code (x))], f);
- break;
-
- case 'b':
- case 'h':
- case 's':
- case 'd':
- case 'q':
- /* Print a scalar FP/SIMD register name. */
- if (!REG_P (x) || !FP_REGNUM_P (REGNO (x)))
- {
- output_operand_lossage ("incompatible floating point / vector register operand for '%%%c'", code);
- return;
- }
- asm_fprintf (f, "%s%c%d", REGISTER_PREFIX, code, REGNO (x) - V0_REGNUM);
- break;
-
- case 'S':
- case 'T':
- case 'U':
- case 'V':
- /* Print the first FP/SIMD register name in a list. */
- if (!REG_P (x) || !FP_REGNUM_P (REGNO (x)))
- {
- output_operand_lossage ("incompatible floating point / vector register operand for '%%%c'", code);
- return;
- }
- asm_fprintf (f, "%sv%d", REGISTER_PREFIX,
- REGNO (x) - V0_REGNUM + (code - 'S'));
- break;
-
- case 'X':
- /* Print integer constant in hex. */
- if (GET_CODE (x) != CONST_INT)
- {
- output_operand_lossage ("invalid operand for '%%%c'", code);
- return;
- }
- asm_fprintf (f, "0x%x", UINTVAL (x));
- break;
-
- case 'w':
- case 'x':
- /* Print a general register name or the zero register (32-bit or
- 64-bit). */
- if (x == const0_rtx
- || (CONST_DOUBLE_P (x) && aarch64_float_const_zero_rtx_p (x)))
- {
- asm_fprintf (f, "%s%czr", REGISTER_PREFIX, code);
- break;
- }
-
- if (REG_P (x) && GP_REGNUM_P (REGNO (x)))
- {
- asm_fprintf (f, "%s%c%d", REGISTER_PREFIX, code,
- REGNO (x) - R0_REGNUM);
- break;
- }
-
- if (REG_P (x) && REGNO (x) == SP_REGNUM)
- {
- asm_fprintf (f, "%s%ssp", REGISTER_PREFIX, code == 'w' ? "w" : "");
- break;
- }
-
- /* Fall through */
-
- case 0:
-      /* Print a normal operand.  If it's a general register, then we
- assume DImode. */
- if (x == NULL)
- {
- output_operand_lossage ("missing operand");
- return;
- }
-
- switch (GET_CODE (x))
- {
- case REG:
- asm_fprintf (f, "%s", reg_names [REGNO (x)]);
- break;
-
- case MEM:
- aarch64_memory_reference_mode = GET_MODE (x);
- output_address (XEXP (x, 0));
- break;
-
- case LABEL_REF:
- case SYMBOL_REF:
- output_addr_const (asm_out_file, x);
- break;
-
- case CONST_INT:
- asm_fprintf (f, "%wd", INTVAL (x));
- break;
-
- case CONST_VECTOR:
- if (GET_MODE_CLASS (GET_MODE (x)) == MODE_VECTOR_INT)
- {
- gcc_assert (aarch64_const_vec_all_same_int_p (x,
- HOST_WIDE_INT_MIN,
- HOST_WIDE_INT_MAX));
- asm_fprintf (f, "%wd", INTVAL (CONST_VECTOR_ELT (x, 0)));
- }
- else if (aarch64_simd_imm_zero_p (x, GET_MODE (x)))
- {
- fputc ('0', f);
- }
- else
- gcc_unreachable ();
- break;
-
- case CONST_DOUBLE:
- /* CONST_DOUBLE can represent a double-width integer.
- In this case, the mode of x is VOIDmode. */
- if (GET_MODE (x) == VOIDmode)
- ; /* Do Nothing. */
- else if (aarch64_float_const_zero_rtx_p (x))
- {
- fputc ('0', f);
- break;
- }
- else if (aarch64_float_const_representable_p (x))
- {
-#define buf_size 20
- char float_buf[buf_size] = {'\0'};
- REAL_VALUE_TYPE r;
- REAL_VALUE_FROM_CONST_DOUBLE (r, x);
- real_to_decimal_for_mode (float_buf, &r,
- buf_size, buf_size,
- 1, GET_MODE (x));
- asm_fprintf (asm_out_file, "%s", float_buf);
- break;
-#undef buf_size
- }
- output_operand_lossage ("invalid constant");
- return;
- default:
- output_operand_lossage ("invalid operand");
- return;
- }
- break;
-
- case 'A':
- if (GET_CODE (x) == HIGH)
- x = XEXP (x, 0);
-
- switch (aarch64_classify_symbolic_expression (x, SYMBOL_CONTEXT_ADR))
- {
- case SYMBOL_SMALL_GOT:
- asm_fprintf (asm_out_file, ":got:");
- break;
-
- case SYMBOL_SMALL_TLSGD:
- asm_fprintf (asm_out_file, ":tlsgd:");
- break;
-
- case SYMBOL_SMALL_TLSDESC:
- asm_fprintf (asm_out_file, ":tlsdesc:");
- break;
-
- case SYMBOL_SMALL_GOTTPREL:
- asm_fprintf (asm_out_file, ":gottprel:");
- break;
-
- case SYMBOL_SMALL_TPREL:
- asm_fprintf (asm_out_file, ":tprel:");
- break;
-
- default:
- break;
- }
- output_addr_const (asm_out_file, x);
- break;
-
- case 'L':
- switch (aarch64_classify_symbolic_expression (x, SYMBOL_CONTEXT_ADR))
- {
- case SYMBOL_SMALL_GOT:
- asm_fprintf (asm_out_file, ":lo12:");
- break;
-
- case SYMBOL_SMALL_TLSGD:
- asm_fprintf (asm_out_file, ":tlsgd_lo12:");
- break;
-
- case SYMBOL_SMALL_TLSDESC:
- asm_fprintf (asm_out_file, ":tlsdesc_lo12:");
- break;
-
- case SYMBOL_SMALL_GOTTPREL:
- asm_fprintf (asm_out_file, ":gottprel_lo12:");
- break;
-
- case SYMBOL_SMALL_TPREL:
- asm_fprintf (asm_out_file, ":tprel_lo12_nc:");
- break;
-
- default:
- break;
- }
- output_addr_const (asm_out_file, x);
- break;
-
- case 'G':
-
- switch (aarch64_classify_symbolic_expression (x, SYMBOL_CONTEXT_ADR))
- {
- case SYMBOL_SMALL_TPREL:
- asm_fprintf (asm_out_file, ":tprel_hi12:");
- break;
- default:
- break;
- }
- output_addr_const (asm_out_file, x);
- break;
-
- default:
- output_operand_lossage ("invalid operand prefix '%%%c'", code);
- return;
- }
-}
-
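-/* Print the address expression X to the stream F.  The mode of the
-   enclosing MEM was recorded in aarch64_memory_reference_mode by
-   aarch64_print_operand.  */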
-void
-aarch64_print_operand_address (FILE *f, rtx x)
-{
- struct aarch64_address_info addr;
-
- if (aarch64_classify_address (&addr, x, aarch64_memory_reference_mode,
- MEM, true))
- switch (addr.type)
- {
- case ADDRESS_REG_IMM:
- if (addr.offset == const0_rtx)
- asm_fprintf (f, "[%s]", reg_names [REGNO (addr.base)]);
- else
- asm_fprintf (f, "[%s,%wd]", reg_names [REGNO (addr.base)],
- INTVAL (addr.offset));
- return;
-
- case ADDRESS_REG_REG:
- if (addr.shift == 0)
- asm_fprintf (f, "[%s,%s]", reg_names [REGNO (addr.base)],
- reg_names [REGNO (addr.offset)]);
- else
- asm_fprintf (f, "[%s,%s,lsl %u]", reg_names [REGNO (addr.base)],
- reg_names [REGNO (addr.offset)], addr.shift);
- return;
-
- case ADDRESS_REG_UXTW:
- if (addr.shift == 0)
- asm_fprintf (f, "[%s,w%d,uxtw]", reg_names [REGNO (addr.base)],
- REGNO (addr.offset) - R0_REGNUM);
- else
- asm_fprintf (f, "[%s,w%d,uxtw %u]", reg_names [REGNO (addr.base)],
- REGNO (addr.offset) - R0_REGNUM, addr.shift);
- return;
-
- case ADDRESS_REG_SXTW:
- if (addr.shift == 0)
- asm_fprintf (f, "[%s,w%d,sxtw]", reg_names [REGNO (addr.base)],
- REGNO (addr.offset) - R0_REGNUM);
- else
- asm_fprintf (f, "[%s,w%d,sxtw %u]", reg_names [REGNO (addr.base)],
- REGNO (addr.offset) - R0_REGNUM, addr.shift);
- return;
-
- case ADDRESS_REG_WB:
- switch (GET_CODE (x))
- {
- case PRE_INC:
- asm_fprintf (f, "[%s,%d]!", reg_names [REGNO (addr.base)],
- GET_MODE_SIZE (aarch64_memory_reference_mode));
- return;
- case POST_INC:
- asm_fprintf (f, "[%s],%d", reg_names [REGNO (addr.base)],
- GET_MODE_SIZE (aarch64_memory_reference_mode));
- return;
- case PRE_DEC:
- asm_fprintf (f, "[%s,-%d]!", reg_names [REGNO (addr.base)],
- GET_MODE_SIZE (aarch64_memory_reference_mode));
- return;
- case POST_DEC:
- asm_fprintf (f, "[%s],-%d", reg_names [REGNO (addr.base)],
- GET_MODE_SIZE (aarch64_memory_reference_mode));
- return;
- case PRE_MODIFY:
- asm_fprintf (f, "[%s,%wd]!", reg_names [REGNO (addr.base)],
- INTVAL (addr.offset));
- return;
- case POST_MODIFY:
- asm_fprintf (f, "[%s],%wd", reg_names [REGNO (addr.base)],
- INTVAL (addr.offset));
- return;
- default:
- break;
- }
- break;
-
- case ADDRESS_LO_SUM:
- asm_fprintf (f, "[%s,#:lo12:", reg_names [REGNO (addr.base)]);
- output_addr_const (f, addr.offset);
- asm_fprintf (f, "]");
- return;
-
- case ADDRESS_SYMBOLIC:
- break;
- }
-
- output_addr_const (f, x);
-}
-
-void
-aarch64_function_profiler (FILE *f ATTRIBUTE_UNUSED,
- int labelno ATTRIBUTE_UNUSED)
-{
- sorry ("function profiling");
-}
-
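-/* Return true if X mentions a LABEL_REF, ignoring the LABEL_REFs that
-   UNSPEC_TLS entries carry as constant offsets.  */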
-bool
-aarch64_label_mentioned_p (rtx x)
-{
- const char *fmt;
- int i;
-
- if (GET_CODE (x) == LABEL_REF)
- return true;
-
- /* UNSPEC_TLS entries for a symbol include a LABEL_REF for the
- referencing instruction, but they are constant offsets, not
- symbols. */
- if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLS)
- return false;
-
- fmt = GET_RTX_FORMAT (GET_CODE (x));
- for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
- {
- if (fmt[i] == 'E')
- {
- int j;
-
- for (j = XVECLEN (x, i) - 1; j >= 0; j--)
- if (aarch64_label_mentioned_p (XVECEXP (x, i, j)))
- return 1;
- }
- else if (fmt[i] == 'e' && aarch64_label_mentioned_p (XEXP (x, i)))
- return 1;
- }
-
- return 0;
-}
-
-/* Implement REGNO_REG_CLASS. */
-
-enum reg_class
-aarch64_regno_regclass (unsigned regno)
-{
- if (GP_REGNUM_P (regno))
- return CORE_REGS;
-
- if (regno == SP_REGNUM)
- return STACK_REG;
-
- if (regno == FRAME_POINTER_REGNUM
- || regno == ARG_POINTER_REGNUM)
- return CORE_REGS;
-
- if (FP_REGNUM_P (regno))
- return FP_LO_REGNUM_P (regno) ? FP_LO_REGS : FP_REGS;
-
- return NO_REGS;
-}
-
-/* Try a machine-dependent way of reloading an illegitimate address
- operand. If we find one, push the reload and return the new rtx. */
-
-rtx
-aarch64_legitimize_reload_address (rtx *x_p,
- enum machine_mode mode,
- int opnum, int type,
- int ind_levels ATTRIBUTE_UNUSED)
-{
- rtx x = *x_p;
-
- /* Do not allow mem (plus (reg, const)) if vector mode. */
- if (aarch64_vector_mode_p (mode)
- && GET_CODE (x) == PLUS
- && REG_P (XEXP (x, 0))
- && CONST_INT_P (XEXP (x, 1)))
- {
- rtx orig_rtx = x;
- x = copy_rtx (x);
- push_reload (orig_rtx, NULL_RTX, x_p, NULL,
- BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
- opnum, (enum reload_type) type);
- return x;
- }
-
- /* We must recognize output that we have already generated ourselves. */
- if (GET_CODE (x) == PLUS
- && GET_CODE (XEXP (x, 0)) == PLUS
- && REG_P (XEXP (XEXP (x, 0), 0))
- && CONST_INT_P (XEXP (XEXP (x, 0), 1))
- && CONST_INT_P (XEXP (x, 1)))
- {
- push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
- BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
- opnum, (enum reload_type) type);
- return x;
- }
-
- /* We wish to handle large displacements off a base register by splitting
- the addend across an add and the mem insn. This can cut the number of
- extra insns needed from 3 to 1. It is only useful for load/store of a
- single register with 12 bit offset field. */
- if (GET_CODE (x) == PLUS
- && REG_P (XEXP (x, 0))
- && CONST_INT_P (XEXP (x, 1))
- && HARD_REGISTER_P (XEXP (x, 0))
- && mode != TImode
- && mode != TFmode
- && aarch64_regno_ok_for_base_p (REGNO (XEXP (x, 0)), true))
- {
- HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
- HOST_WIDE_INT low = val & 0xfff;
- HOST_WIDE_INT high = val - low;
- HOST_WIDE_INT offs;
- rtx cst;
-
- /* Reload non-zero BLKmode offsets. This is because we cannot ascertain
- BLKmode alignment. */
- if (GET_MODE_SIZE (mode) == 0)
- return NULL_RTX;
-
- offs = low % GET_MODE_SIZE (mode);
-
- /* Align misaligned offset by adjusting high part to compensate. */
- if (offs != 0)
- {
- if (aarch64_uimm12_shift (high + offs))
- {
- /* Align down. */
- low = low - offs;
- high = high + offs;
- }
- else
- {
- /* Align up. */
- offs = GET_MODE_SIZE (mode) - offs;
- low = low + offs;
- high = high + (low & 0x1000) - offs;
- low &= 0xfff;
- }
- }
-
- /* Check for overflow. */
- if (high + low != val)
- return NULL_RTX;
-
- cst = GEN_INT (high);
- if (!aarch64_uimm12_shift (high))
- cst = force_const_mem (Pmode, cst);
-
- /* Reload high part into base reg, leaving the low part
- in the mem instruction. */
- x = gen_rtx_PLUS (Pmode,
- gen_rtx_PLUS (Pmode, XEXP (x, 0), cst),
- GEN_INT (low));
-
- push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
- BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
- opnum, (enum reload_type) type);
- return x;
- }
-
- return NULL_RTX;
-}
-
-
-static reg_class_t
-aarch64_secondary_reload (bool in_p ATTRIBUTE_UNUSED, rtx x,
- reg_class_t rclass,
- enum machine_mode mode,
- secondary_reload_info *sri)
-{
- /* Address expressions of the form PLUS (SP, large_offset) need two
- scratch registers, one for the constant, and one for holding a
- copy of SP, since SP cannot be used on the RHS of an add-reg
- instruction. */
- if (mode == DImode
- && GET_CODE (x) == PLUS
- && XEXP (x, 0) == stack_pointer_rtx
- && CONST_INT_P (XEXP (x, 1))
- && !aarch64_uimm12_shift (INTVAL (XEXP (x, 1))))
- {
- sri->icode = CODE_FOR_reload_sp_immediate;
- return NO_REGS;
- }
-
- /* Without the TARGET_SIMD instructions we cannot move a Q register
- to a Q register directly. We need a scratch. */
- if (REG_P (x) && (mode == TFmode || mode == TImode) && mode == GET_MODE (x)
- && FP_REGNUM_P (REGNO (x)) && !TARGET_SIMD
- && reg_class_subset_p (rclass, FP_REGS))
- {
- if (mode == TFmode)
- sri->icode = CODE_FOR_aarch64_reload_movtf;
- else if (mode == TImode)
- sri->icode = CODE_FOR_aarch64_reload_movti;
- return NO_REGS;
- }
-
- /* A TFmode or TImode memory access should be handled via an FP_REGS
- because AArch64 has richer addressing modes for LDR/STR instructions
- than LDP/STP instructions. */
- if (!TARGET_GENERAL_REGS_ONLY && rclass == CORE_REGS
- && GET_MODE_SIZE (mode) == 16 && MEM_P (x))
- return FP_REGS;
-
- if (rclass == FP_REGS && (mode == TImode || mode == TFmode) && CONSTANT_P(x))
- return CORE_REGS;
-
- return NO_REGS;
-}
-
-static bool
-aarch64_can_eliminate (const int from, const int to)
-{
- /* If we need a frame pointer, we must eliminate FRAME_POINTER_REGNUM into
- HARD_FRAME_POINTER_REGNUM and not into STACK_POINTER_REGNUM. */
-
- if (frame_pointer_needed)
- {
- if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
- return true;
- if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
- return false;
- if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM
- && !cfun->calls_alloca)
- return true;
- if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
- return true;
- return false;
- }
- else
- {
- /* If we decided that we didn't need a leaf frame pointer but then used
- LR in the function, then we'll want a frame pointer after all, so
- prevent this elimination to ensure a frame pointer is used.
-
- NOTE: the original value of flag_omit_frame_pointer gets trashed
- IFF flag_omit_leaf_frame_pointer is true, so we check the value
- of faked_omit_frame_pointer here (which is true when we always
- wish to keep non-leaf frame pointers but only wish to keep leaf frame
- pointers when LR is clobbered). */
- if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM
- && df_regs_ever_live_p (LR_REGNUM)
- && faked_omit_frame_pointer)
- return false;
- }
-
- return true;
-}
-
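-/* Return the offset, in bytes, between elimination registers FROM and TO,
-   based on the frame layout computed by aarch64_layout_frame.  */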
-HOST_WIDE_INT
-aarch64_initial_elimination_offset (unsigned from, unsigned to)
-{
- HOST_WIDE_INT frame_size;
- HOST_WIDE_INT offset;
-
- aarch64_layout_frame ();
- frame_size = (get_frame_size () + cfun->machine->frame.saved_regs_size
- + crtl->outgoing_args_size
- + cfun->machine->saved_varargs_size);
-
- frame_size = AARCH64_ROUND_UP (frame_size, STACK_BOUNDARY / BITS_PER_UNIT);
- offset = frame_size;
-
- if (to == HARD_FRAME_POINTER_REGNUM)
- {
- if (from == ARG_POINTER_REGNUM)
- return offset - crtl->outgoing_args_size;
-
- if (from == FRAME_POINTER_REGNUM)
- return cfun->machine->frame.saved_regs_size;
- }
-
- if (to == STACK_POINTER_REGNUM)
- {
- if (from == FRAME_POINTER_REGNUM)
- {
- HOST_WIDE_INT elim = crtl->outgoing_args_size
- + cfun->machine->frame.saved_regs_size
- - cfun->machine->frame.fp_lr_offset;
- elim = AARCH64_ROUND_UP (elim, STACK_BOUNDARY / BITS_PER_UNIT);
- return elim;
- }
- }
-
- return offset;
-}
-
-
-/* Implement RETURN_ADDR_RTX. We do not support moving back to a
- previous frame. */
-
-rtx
-aarch64_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
-{
- if (count != 0)
- return const0_rtx;
- return get_hard_reg_initial_val (Pmode, LR_REGNUM);
-}
-
-
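-/* Output the static trampoline template: three instructions, a 4-byte pad
-   and two data words.  The PC-relative LDRs pick up the target address and
-   static chain value that aarch64_trampoline_init stores at offsets 16
-   and 24.  */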
-static void
-aarch64_asm_trampoline_template (FILE *f)
-{
- asm_fprintf (f, "\tldr\t%s, .+16\n", reg_names [IP1_REGNUM]);
- asm_fprintf (f, "\tldr\t%s, .+20\n", reg_names [STATIC_CHAIN_REGNUM]);
- asm_fprintf (f, "\tbr\t%s\n", reg_names [IP1_REGNUM]);
- assemble_aligned_integer (4, const0_rtx);
- assemble_aligned_integer (UNITS_PER_WORD, const0_rtx);
- assemble_aligned_integer (UNITS_PER_WORD, const0_rtx);
-}
-
-unsigned
-aarch64_trampoline_size (void)
-{
- return 32; /* 3 insns + padding + 2 dwords. */
-}
-
-static void
-aarch64_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
-{
- rtx fnaddr, mem, a_tramp;
-
-  /* Don't need to copy the trailing D-words; we fill those in below.  */
- emit_block_move (m_tramp, assemble_trampoline_template (),
- GEN_INT (TRAMPOLINE_SIZE - 16), BLOCK_OP_NORMAL);
- mem = adjust_address (m_tramp, DImode, 16);
- fnaddr = XEXP (DECL_RTL (fndecl), 0);
- emit_move_insn (mem, fnaddr);
-
- mem = adjust_address (m_tramp, DImode, 24);
- emit_move_insn (mem, chain_value);
-
- /* XXX We should really define a "clear_cache" pattern and use
- gen_clear_cache(). */
- a_tramp = XEXP (m_tramp, 0);
- emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__clear_cache"),
- LCT_NORMAL, VOIDmode, 2, a_tramp, Pmode,
- plus_constant (Pmode, a_tramp, TRAMPOLINE_SIZE), Pmode);
-}
-
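-/* Return the maximum number of consecutive registers of class REGCLASS
-   needed to hold a value of mode MODE.  */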
-static unsigned char
-aarch64_class_max_nregs (reg_class_t regclass, enum machine_mode mode)
-{
- switch (regclass)
- {
- case CORE_REGS:
- case POINTER_REGS:
- case GENERAL_REGS:
- case ALL_REGS:
- case FP_REGS:
- case FP_LO_REGS:
- return
- aarch64_vector_mode_p (mode) ? (GET_MODE_SIZE (mode) + 15) / 16 :
- (GET_MODE_SIZE (mode) + 7) / 8;
- case STACK_REG:
- return 1;
-
- case NO_REGS:
- return 0;
-
- default:
- break;
- }
- gcc_unreachable ();
-}
-
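-/* Prefer GENERAL_REGS when asked to reload into POINTER_REGS or STACK_REG;
-   otherwise keep the requested class.  */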
-static reg_class_t
-aarch64_preferred_reload_class (rtx x ATTRIBUTE_UNUSED, reg_class_t regclass)
-{
- return ((regclass == POINTER_REGS || regclass == STACK_REG)
- ? GENERAL_REGS : regclass);
-}
-
-void
-aarch64_asm_output_labelref (FILE* f, const char *name)
-{
- asm_fprintf (f, "%U%s", name);
-}
-
-static void
-aarch64_elf_asm_constructor (rtx symbol, int priority)
-{
- if (priority == DEFAULT_INIT_PRIORITY)
- default_ctor_section_asm_out_constructor (symbol, priority);
- else
- {
- section *s;
- char buf[18];
- snprintf (buf, sizeof (buf), ".init_array.%.5u", priority);
- s = get_section (buf, SECTION_WRITE, NULL);
- switch_to_section (s);
- assemble_align (POINTER_SIZE);
- fputs ("\t.dword\t", asm_out_file);
- output_addr_const (asm_out_file, symbol);
- fputc ('\n', asm_out_file);
- }
-}
-
-static void
-aarch64_elf_asm_destructor (rtx symbol, int priority)
-{
- if (priority == DEFAULT_INIT_PRIORITY)
- default_dtor_section_asm_out_destructor (symbol, priority);
- else
- {
- section *s;
- char buf[18];
- snprintf (buf, sizeof (buf), ".fini_array.%.5u", priority);
- s = get_section (buf, SECTION_WRITE, NULL);
- switch_to_section (s);
- assemble_align (POINTER_SIZE);
- fputs ("\t.dword\t", asm_out_file);
- output_addr_const (asm_out_file, symbol);
- fputc ('\n', asm_out_file);
- }
-}
-
-const char*
-aarch64_output_casesi (rtx *operands)
-{
- char buf[100];
- char label[100];
- rtx diff_vec = PATTERN (next_real_insn (operands[2]));
- int index;
- static const char *const patterns[4][2] =
- {
- {
- "ldrb\t%w3, [%0,%w1,uxtw]",
- "add\t%3, %4, %w3, sxtb #2"
- },
- {
- "ldrh\t%w3, [%0,%w1,uxtw #1]",
- "add\t%3, %4, %w3, sxth #2"
- },
- {
- "ldr\t%w3, [%0,%w1,uxtw #2]",
- "add\t%3, %4, %w3, sxtw #2"
- },
- /* We assume that DImode is only generated when not optimizing and
- that we don't really need 64-bit address offsets. That would
- imply an object file with 8GB of code in a single function! */
- {
- "ldr\t%w3, [%0,%w1,uxtw #2]",
- "add\t%3, %4, %w3, sxtw #2"
- }
- };
-
- gcc_assert (GET_CODE (diff_vec) == ADDR_DIFF_VEC);
-
- index = exact_log2 (GET_MODE_SIZE (GET_MODE (diff_vec)));
-
- gcc_assert (index >= 0 && index <= 3);
-
-  /* Need to implement table size reduction, by changing the code below.  */
- output_asm_insn (patterns[index][0], operands);
- ASM_GENERATE_INTERNAL_LABEL (label, "Lrtx", CODE_LABEL_NUMBER (operands[2]));
- snprintf (buf, sizeof (buf),
- "adr\t%%4, %s", targetm.strip_name_encoding (label));
- output_asm_insn (buf, operands);
- output_asm_insn (patterns[index][1], operands);
- output_asm_insn ("br\t%3", operands);
- assemble_label (asm_out_file, label);
- return "";
-}
-
-
-/* Return size in bits of an arithmetic operand which is shifted/scaled and
- masked such that it is suitable for a UXTB, UXTH, or UXTW extend
- operator. */
-
-int
-aarch64_uxt_size (int shift, HOST_WIDE_INT mask)
-{
- if (shift >= 0 && shift <= 3)
- {
- int size;
- for (size = 8; size <= 32; size *= 2)
- {
- HOST_WIDE_INT bits = ((HOST_WIDE_INT)1U << size) - 1;
- if (mask == bits << shift)
- return size;
- }
- }
- return 0;
-}
-
-static bool
-aarch64_use_blocks_for_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED,
- const_rtx x ATTRIBUTE_UNUSED)
-{
- /* We can't use blocks for constants when we're using a per-function
- constant pool. */
- return false;
-}
-
-static section *
-aarch64_select_rtx_section (enum machine_mode mode ATTRIBUTE_UNUSED,
- rtx x ATTRIBUTE_UNUSED,
- unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
-{
- /* Force all constant pool entries into the current function section. */
- return function_section (current_function_decl);
-}
-
-
-/* Costs. */
-
-/* Helper function for rtx cost calculation. Strip a shift expression
- from X. Returns the inner operand if successful, or the original
- expression on failure. */
-static rtx
-aarch64_strip_shift (rtx x)
-{
- rtx op = x;
-
- if ((GET_CODE (op) == ASHIFT
- || GET_CODE (op) == ASHIFTRT
- || GET_CODE (op) == LSHIFTRT)
- && CONST_INT_P (XEXP (op, 1)))
- return XEXP (op, 0);
-
- if (GET_CODE (op) == MULT
- && CONST_INT_P (XEXP (op, 1))
- && ((unsigned) exact_log2 (INTVAL (XEXP (op, 1)))) < 64)
- return XEXP (op, 0);
-
- return x;
-}
-
-/* Helper function for rtx cost calculation. Strip a shift or extend
- expression from X. Returns the inner operand if successful, or the
- original expression on failure. We deal with a number of possible
- canonicalization variations here. */
-static rtx
-aarch64_strip_shift_or_extend (rtx x)
-{
- rtx op = x;
-
- /* Zero and sign extraction of a widened value. */
- if ((GET_CODE (op) == ZERO_EXTRACT || GET_CODE (op) == SIGN_EXTRACT)
- && XEXP (op, 2) == const0_rtx
- && aarch64_is_extend_from_extract (GET_MODE (op), XEXP (XEXP (op, 0), 1),
- XEXP (op, 1)))
- return XEXP (XEXP (op, 0), 0);
-
- /* It can also be represented (for zero-extend) as an AND with an
- immediate. */
- if (GET_CODE (op) == AND
- && GET_CODE (XEXP (op, 0)) == MULT
- && CONST_INT_P (XEXP (XEXP (op, 0), 1))
- && CONST_INT_P (XEXP (op, 1))
- && aarch64_uxt_size (exact_log2 (INTVAL (XEXP (XEXP (op, 0), 1))),
- INTVAL (XEXP (op, 1))) != 0)
- return XEXP (XEXP (op, 0), 0);
-
- /* Now handle extended register, as this may also have an optional
- left shift by 1..4. */
- if (GET_CODE (op) == ASHIFT
- && CONST_INT_P (XEXP (op, 1))
- && ((unsigned HOST_WIDE_INT) INTVAL (XEXP (op, 1))) <= 4)
- op = XEXP (op, 0);
-
- if (GET_CODE (op) == ZERO_EXTEND
- || GET_CODE (op) == SIGN_EXTEND)
- op = XEXP (op, 0);
-
- if (op != x)
- return op;
-
- return aarch64_strip_shift (x);
-}
-
-/* Calculate the cost of calculating X, storing it in *COST. Result
- is true if the total cost of the operation has now been calculated. */
-static bool
-aarch64_rtx_costs (rtx x, int code, int outer ATTRIBUTE_UNUSED,
- int param ATTRIBUTE_UNUSED, int *cost, bool speed)
-{
- rtx op0, op1;
- const struct cpu_rtx_cost_table *extra_cost
- = aarch64_tune_params->insn_extra_cost;
-
- switch (code)
- {
- case SET:
- op0 = SET_DEST (x);
- op1 = SET_SRC (x);
-
- switch (GET_CODE (op0))
- {
- case MEM:
- if (speed)
- *cost += extra_cost->memory_store;
-
- if (op1 != const0_rtx)
- *cost += rtx_cost (op1, SET, 1, speed);
- return true;
-
- case SUBREG:
- if (! REG_P (SUBREG_REG (op0)))
- *cost += rtx_cost (SUBREG_REG (op0), SET, 0, speed);
- /* Fall through. */
- case REG:
- /* Cost is just the cost of the RHS of the set. */
- *cost += rtx_cost (op1, SET, 1, true);
- return true;
-
- case ZERO_EXTRACT: /* Bit-field insertion. */
- case SIGN_EXTRACT:
- /* Strip any redundant widening of the RHS to meet the width of
- the target. */
- if (GET_CODE (op1) == SUBREG)
- op1 = SUBREG_REG (op1);
- if ((GET_CODE (op1) == ZERO_EXTEND
- || GET_CODE (op1) == SIGN_EXTEND)
- && GET_CODE (XEXP (op0, 1)) == CONST_INT
- && (GET_MODE_BITSIZE (GET_MODE (XEXP (op1, 0)))
- >= INTVAL (XEXP (op0, 1))))
- op1 = XEXP (op1, 0);
- *cost += rtx_cost (op1, SET, 1, speed);
- return true;
-
- default:
- break;
- }
- return false;
-
- case MEM:
- if (speed)
- *cost += extra_cost->memory_load;
-
- return true;
-
- case NEG:
- op0 = CONST0_RTX (GET_MODE (x));
- op1 = XEXP (x, 0);
- goto cost_minus;
-
- case COMPARE:
- op0 = XEXP (x, 0);
- op1 = XEXP (x, 1);
-
- if (op1 == const0_rtx
- && GET_CODE (op0) == AND)
- {
- x = op0;
- goto cost_logic;
- }
-
- /* Comparisons can work if the order is swapped.
- Canonicalization puts the more complex operation first, but
- we want it in op1. */
- if (! (REG_P (op0)
- || (GET_CODE (op0) == SUBREG && REG_P (SUBREG_REG (op0)))))
- {
- op0 = XEXP (x, 1);
- op1 = XEXP (x, 0);
- }
- goto cost_minus;
-
- case MINUS:
- op0 = XEXP (x, 0);
- op1 = XEXP (x, 1);
-
- cost_minus:
- if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
- || (GET_MODE_CLASS (GET_MODE (x)) == MODE_CC
- && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT))
- {
- if (op0 != const0_rtx)
- *cost += rtx_cost (op0, MINUS, 0, speed);
-
- if (CONST_INT_P (op1))
- {
- if (!aarch64_uimm12_shift (INTVAL (op1)))
- *cost += rtx_cost (op1, MINUS, 1, speed);
- }
- else
- {
- op1 = aarch64_strip_shift_or_extend (op1);
- *cost += rtx_cost (op1, MINUS, 1, speed);
- }
- return true;
- }
-
- return false;
-
- case PLUS:
- op0 = XEXP (x, 0);
- op1 = XEXP (x, 1);
-
- if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
- {
- if (CONST_INT_P (op1) && aarch64_uimm12_shift (INTVAL (op1)))
- {
- *cost += rtx_cost (op0, PLUS, 0, speed);
- }
- else
- {
- rtx new_op0 = aarch64_strip_shift_or_extend (op0);
-
- if (new_op0 == op0
- && GET_CODE (op0) == MULT)
- {
- if ((GET_CODE (XEXP (op0, 0)) == ZERO_EXTEND
- && GET_CODE (XEXP (op0, 1)) == ZERO_EXTEND)
- || (GET_CODE (XEXP (op0, 0)) == SIGN_EXTEND
- && GET_CODE (XEXP (op0, 1)) == SIGN_EXTEND))
- {
- *cost += (rtx_cost (XEXP (XEXP (op0, 0), 0), MULT, 0,
- speed)
- + rtx_cost (XEXP (XEXP (op0, 1), 0), MULT, 1,
- speed)
- + rtx_cost (op1, PLUS, 1, speed));
- if (speed)
- *cost += extra_cost->int_multiply_extend_add;
- return true;
- }
- *cost += (rtx_cost (XEXP (op0, 0), MULT, 0, speed)
- + rtx_cost (XEXP (op0, 1), MULT, 1, speed)
- + rtx_cost (op1, PLUS, 1, speed));
-
- if (speed)
- *cost += extra_cost->int_multiply_add;
- }
-
- *cost += (rtx_cost (new_op0, PLUS, 0, speed)
- + rtx_cost (op1, PLUS, 1, speed));
- }
- return true;
- }
-
- return false;
-
- case IOR:
- case XOR:
- case AND:
- cost_logic:
- op0 = XEXP (x, 0);
- op1 = XEXP (x, 1);
-
- if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
- {
- if (CONST_INT_P (op1)
- && aarch64_bitmask_imm (INTVAL (op1), GET_MODE (x)))
- {
- *cost += rtx_cost (op0, AND, 0, speed);
- }
- else
- {
- if (GET_CODE (op0) == NOT)
- op0 = XEXP (op0, 0);
- op0 = aarch64_strip_shift (op0);
- *cost += (rtx_cost (op0, AND, 0, speed)
- + rtx_cost (op1, AND, 1, speed));
- }
- return true;
- }
- return false;
-
- case ZERO_EXTEND:
- if ((GET_MODE (x) == DImode
- && GET_MODE (XEXP (x, 0)) == SImode)
- || GET_CODE (XEXP (x, 0)) == MEM)
- {
- *cost += rtx_cost (XEXP (x, 0), ZERO_EXTEND, 0, speed);
- return true;
- }
- return false;
-
- case SIGN_EXTEND:
- if (GET_CODE (XEXP (x, 0)) == MEM)
- {
- *cost += rtx_cost (XEXP (x, 0), SIGN_EXTEND, 0, speed);
- return true;
- }
- return false;
-
- case ROTATE:
- if (!CONST_INT_P (XEXP (x, 1)))
- *cost += COSTS_N_INSNS (2);
- /* Fall through. */
- case ROTATERT:
- case LSHIFTRT:
- case ASHIFT:
- case ASHIFTRT:
-
- /* Shifting by a register often takes an extra cycle. */
- if (speed && !CONST_INT_P (XEXP (x, 1)))
- *cost += extra_cost->register_shift;
-
- *cost += rtx_cost (XEXP (x, 0), ASHIFT, 0, speed);
- return true;
-
- case HIGH:
- if (!CONSTANT_P (XEXP (x, 0)))
- *cost += rtx_cost (XEXP (x, 0), HIGH, 0, speed);
- return true;
-
- case LO_SUM:
- if (!CONSTANT_P (XEXP (x, 1)))
- *cost += rtx_cost (XEXP (x, 1), LO_SUM, 1, speed);
- *cost += rtx_cost (XEXP (x, 0), LO_SUM, 0, speed);
- return true;
-
- case ZERO_EXTRACT:
- case SIGN_EXTRACT:
- *cost += rtx_cost (XEXP (x, 0), ZERO_EXTRACT, 0, speed);
- return true;
-
- case MULT:
- op0 = XEXP (x, 0);
- op1 = XEXP (x, 1);
-
- *cost = COSTS_N_INSNS (1);
- if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
- {
- if (CONST_INT_P (op1)
- && exact_log2 (INTVAL (op1)) > 0)
- {
- *cost += rtx_cost (op0, ASHIFT, 0, speed);
- return true;
- }
-
- if ((GET_CODE (op0) == ZERO_EXTEND
- && GET_CODE (op1) == ZERO_EXTEND)
- || (GET_CODE (op0) == SIGN_EXTEND
- && GET_CODE (op1) == SIGN_EXTEND))
- {
- *cost += (rtx_cost (XEXP (op0, 0), MULT, 0, speed)
- + rtx_cost (XEXP (op1, 0), MULT, 1, speed));
- if (speed)
- *cost += extra_cost->int_multiply_extend;
- return true;
- }
-
- if (speed)
- *cost += extra_cost->int_multiply;
- }
- else if (speed)
- {
- if (GET_MODE (x) == DFmode)
- *cost += extra_cost->double_multiply;
- else if (GET_MODE (x) == SFmode)
- *cost += extra_cost->float_multiply;
- }
-
- return false; /* All arguments need to be in registers. */
-
- case MOD:
- case UMOD:
- *cost = COSTS_N_INSNS (2);
- if (speed)
- {
- if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
- *cost += (extra_cost->int_multiply_add
- + extra_cost->int_divide);
- else if (GET_MODE (x) == DFmode)
- *cost += (extra_cost->double_multiply
- + extra_cost->double_divide);
- else if (GET_MODE (x) == SFmode)
- *cost += (extra_cost->float_multiply
- + extra_cost->float_divide);
- }
- return false; /* All arguments need to be in registers. */
-
- case DIV:
- case UDIV:
- *cost = COSTS_N_INSNS (1);
- if (speed)
- {
- if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
- *cost += extra_cost->int_divide;
- else if (GET_MODE (x) == DFmode)
- *cost += extra_cost->double_divide;
- else if (GET_MODE (x) == SFmode)
- *cost += extra_cost->float_divide;
- }
- return false; /* All arguments need to be in registers. */
-
- default:
- break;
- }
- return false;
-}
-
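-/* Return the cost of the address expression X, using the per-core address
-   cost table.  */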
-static int
-aarch64_address_cost (rtx x ATTRIBUTE_UNUSED,
- enum machine_mode mode ATTRIBUTE_UNUSED,
- addr_space_t as ATTRIBUTE_UNUSED, bool speed ATTRIBUTE_UNUSED)
-{
- enum rtx_code c = GET_CODE (x);
- const struct cpu_addrcost_table *addr_cost = aarch64_tune_params->addr_cost;
-
- if (c == PRE_INC || c == PRE_DEC || c == PRE_MODIFY)
- return addr_cost->pre_modify;
-
- if (c == POST_INC || c == POST_DEC || c == POST_MODIFY)
- return addr_cost->post_modify;
-
- if (c == PLUS)
- {
- if (GET_CODE (XEXP (x, 1)) == CONST_INT)
- return addr_cost->imm_offset;
- else if (GET_CODE (XEXP (x, 0)) == MULT
- || GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
- || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
- return addr_cost->register_extend;
-
- return addr_cost->register_offset;
- }
- else if (c == MEM || c == LABEL_REF || c == SYMBOL_REF)
- return addr_cost->imm_offset;
-
- return 0;
-}
-
-static int
-aarch64_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
- reg_class_t from, reg_class_t to)
-{
- const struct cpu_regmove_cost *regmove_cost
- = aarch64_tune_params->regmove_cost;
-
- if (from == GENERAL_REGS && to == GENERAL_REGS)
- return regmove_cost->GP2GP;
- else if (from == GENERAL_REGS)
- return regmove_cost->GP2FP;
- else if (to == GENERAL_REGS)
- return regmove_cost->FP2GP;
-
- /* When AdvSIMD instructions are disabled it is not possible to move
- a 128-bit value directly between Q registers. This is handled in
- secondary reload. A general register is used as a scratch to move
- the upper DI value and the lower DI value is moved directly,
- hence the cost is the sum of three moves. */
-
- if (! TARGET_SIMD && GET_MODE_SIZE (from) == 128 && GET_MODE_SIZE (to) == 128)
- return regmove_cost->GP2FP + regmove_cost->FP2GP + regmove_cost->FP2FP;
-
- return regmove_cost->FP2FP;
-}
-
-static int
-aarch64_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
- reg_class_t rclass ATTRIBUTE_UNUSED,
- bool in ATTRIBUTE_UNUSED)
-{
- return aarch64_tune_params->memmov_cost;
-}
-
-static void initialize_aarch64_code_model (void);
-
-/* Parse the architecture extension string. */
-
-static void
-aarch64_parse_extension (char *str)
-{
- /* The extension string is parsed left to right. */
- const struct aarch64_option_extension *opt = NULL;
-
- /* Flag to say whether we are adding or removing an extension. */
- int adding_ext = -1;
-
- while (str != NULL && *str != 0)
- {
- char *ext;
- size_t len;
-
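-      /* Skip over the '+' that introduces this extension.  */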
- str++;
- ext = strchr (str, '+');
-
- if (ext != NULL)
- len = ext - str;
- else
- len = strlen (str);
-
- if (len >= 2 && strncmp (str, "no", 2) == 0)
- {
- adding_ext = 0;
- len -= 2;
- str += 2;
- }
- else if (len > 0)
- adding_ext = 1;
-
- if (len == 0)
- {
- error ("missing feature modifier after %qs", "+no");
- return;
- }
-
- /* Scan over the extensions table trying to find an exact match. */
- for (opt = all_extensions; opt->name != NULL; opt++)
- {
- if (strlen (opt->name) == len && strncmp (opt->name, str, len) == 0)
- {
- /* Add or remove the extension. */
- if (adding_ext)
- aarch64_isa_flags |= opt->flags_on;
- else
- aarch64_isa_flags &= ~(opt->flags_off);
- break;
- }
- }
-
- if (opt->name == NULL)
- {
- /* Extension not found in list. */
- error ("unknown feature modifier %qs", str);
- return;
- }
-
- str = ext;
- };
-
- return;
-}
-
-/* Parse the ARCH string. */
-
-static void
-aarch64_parse_arch (void)
-{
- char *ext;
- const struct processor *arch;
- char *str = (char *) alloca (strlen (aarch64_arch_string) + 1);
- size_t len;
-
- strcpy (str, aarch64_arch_string);
-
- ext = strchr (str, '+');
-
- if (ext != NULL)
- len = ext - str;
- else
- len = strlen (str);
-
- if (len == 0)
- {
- error ("missing arch name in -march=%qs", str);
- return;
- }
-
- /* Loop through the list of supported ARCHs to find a match. */
- for (arch = all_architectures; arch->name != NULL; arch++)
- {
- if (strlen (arch->name) == len && strncmp (arch->name, str, len) == 0)
- {
- selected_arch = arch;
- aarch64_isa_flags = selected_arch->flags;
- selected_cpu = &all_cores[selected_arch->core];
-
- if (ext != NULL)
- {
- /* ARCH string contains at least one extension. */
- aarch64_parse_extension (ext);
- }
-
- return;
- }
- }
-
- /* ARCH name not found in list. */
- error ("unknown value %qs for -march", str);
- return;
-}
-
-/* Parse the CPU string. */
-
-static void
-aarch64_parse_cpu (void)
-{
- char *ext;
- const struct processor *cpu;
- char *str = (char *) alloca (strlen (aarch64_cpu_string) + 1);
- size_t len;
-
- strcpy (str, aarch64_cpu_string);
-
- ext = strchr (str, '+');
-
- if (ext != NULL)
- len = ext - str;
- else
- len = strlen (str);
-
- if (len == 0)
- {
- error ("missing cpu name in -mcpu=%qs", str);
- return;
- }
-
- /* Loop through the list of supported CPUs to find a match. */
- for (cpu = all_cores; cpu->name != NULL; cpu++)
- {
- if (strlen (cpu->name) == len && strncmp (cpu->name, str, len) == 0)
- {
- selected_cpu = cpu;
- aarch64_isa_flags = selected_cpu->flags;
-
- if (ext != NULL)
- {
- /* CPU string contains at least one extension. */
- aarch64_parse_extension (ext);
- }
-
- return;
- }
- }
-
- /* CPU name not found in list. */
- error ("unknown value %qs for -mcpu", str);
- return;
-}
-
-/* Parse the TUNE string. */
-
-static void
-aarch64_parse_tune (void)
-{
- const struct processor *cpu;
- char *str = (char *) alloca (strlen (aarch64_tune_string) + 1);
- strcpy (str, aarch64_tune_string);
-
- /* Loop through the list of supported CPUs to find a match. */
- for (cpu = all_cores; cpu->name != NULL; cpu++)
- {
- if (strcmp (cpu->name, str) == 0)
- {
- selected_tune = cpu;
- return;
- }
- }
-
- /* CPU name not found in list. */
- error ("unknown value %qs for -mtune", str);
- return;
-}
-
-
-/* Implement TARGET_OPTION_OVERRIDE. */
-
-static void
-aarch64_override_options (void)
-{
- /* march wins over mcpu, so when march is defined, mcpu takes the same value,
- otherwise march remains undefined. mtune can be used with either march or
- mcpu. */
-
- if (aarch64_arch_string)
- {
- aarch64_parse_arch ();
- aarch64_cpu_string = NULL;
- }
-
- if (aarch64_cpu_string)
- {
- aarch64_parse_cpu ();
- selected_arch = NULL;
- }
-
- if (aarch64_tune_string)
- {
- aarch64_parse_tune ();
- }
-
- initialize_aarch64_code_model ();
-
- aarch64_build_bitmask_table ();
-
- /* This target defaults to strict volatile bitfields. */
- if (flag_strict_volatile_bitfields < 0 && abi_version_at_least (2))
- flag_strict_volatile_bitfields = 1;
-
- /* If the user did not specify a processor, choose the default
- one for them. This will be the CPU set during configuration using
- --with-cpu, otherwise it is "generic". */
- if (!selected_cpu)
- {
- selected_cpu = &all_cores[TARGET_CPU_DEFAULT & 0x3f];
- aarch64_isa_flags = TARGET_CPU_DEFAULT >> 6;
- }
-
- gcc_assert (selected_cpu);
-
-  /* The selected cpu may be an architecture, so look up tuning by core ID.  */
- if (!selected_tune)
- selected_tune = &all_cores[selected_cpu->core];
-
- aarch64_tune_flags = selected_tune->flags;
- aarch64_tune = selected_tune->core;
- aarch64_tune_params = selected_tune->tune;
-
- aarch64_override_options_after_change ();
-}
-
-/* Implement targetm.override_options_after_change. */
-
-static void
-aarch64_override_options_after_change (void)
-{
- faked_omit_frame_pointer = false;
-
- /* To omit leaf frame pointers, we need to turn flag_omit_frame_pointer on so
- that aarch64_frame_pointer_required will be called. We need to remember
- whether flag_omit_frame_pointer was turned on normally or just faked. */
-
- if (flag_omit_leaf_frame_pointer && !flag_omit_frame_pointer)
- {
- flag_omit_frame_pointer = true;
- faked_omit_frame_pointer = true;
- }
-}
-
-static struct machine_function *
-aarch64_init_machine_status (void)
-{
- struct machine_function *machine;
- machine = ggc_alloc_cleared_machine_function ();
- return machine;
-}
-
-void
-aarch64_init_expanders (void)
-{
- init_machine_status = aarch64_init_machine_status;
-}
-
-/* A checking mechanism for the implementation of the various code models. */
-static void
-initialize_aarch64_code_model (void)
-{
- if (flag_pic)
- {
- switch (aarch64_cmodel_var)
- {
- case AARCH64_CMODEL_TINY:
- aarch64_cmodel = AARCH64_CMODEL_TINY_PIC;
- break;
- case AARCH64_CMODEL_SMALL:
- aarch64_cmodel = AARCH64_CMODEL_SMALL_PIC;
- break;
- case AARCH64_CMODEL_LARGE:
- sorry ("code model %qs with -f%s", "large",
- flag_pic > 1 ? "PIC" : "pic");
- default:
- gcc_unreachable ();
- }
- }
- else
- aarch64_cmodel = aarch64_cmodel_var;
-}
-
-/* Return true if SYMBOL_REF X binds locally. */
-
-static bool
-aarch64_symbol_binds_local_p (const_rtx x)
-{
- return (SYMBOL_REF_DECL (x)
- ? targetm.binds_local_p (SYMBOL_REF_DECL (x))
- : SYMBOL_REF_LOCAL_P (x));
-}
-
-/* Return true if SYMBOL_REF X is thread local.  */
-static bool
-aarch64_tls_symbol_p (rtx x)
-{
- if (! TARGET_HAVE_TLS)
- return false;
-
- if (GET_CODE (x) != SYMBOL_REF)
- return false;
-
- return SYMBOL_REF_TLS_MODEL (x) != 0;
-}
-
-/* Classify a TLS symbol into one of the TLS kinds. */
-enum aarch64_symbol_type
-aarch64_classify_tls_symbol (rtx x)
-{
- enum tls_model tls_kind = tls_symbolic_operand_type (x);
-
- switch (tls_kind)
- {
- case TLS_MODEL_GLOBAL_DYNAMIC:
- case TLS_MODEL_LOCAL_DYNAMIC:
- return TARGET_TLS_DESC ? SYMBOL_SMALL_TLSDESC : SYMBOL_SMALL_TLSGD;
-
- case TLS_MODEL_INITIAL_EXEC:
- return SYMBOL_SMALL_GOTTPREL;
-
- case TLS_MODEL_LOCAL_EXEC:
- return SYMBOL_SMALL_TPREL;
-
- case TLS_MODEL_EMULATED:
- case TLS_MODEL_NONE:
- return SYMBOL_FORCE_TO_MEM;
-
- default:
- gcc_unreachable ();
- }
-}
-
-/* Return the method that should be used to access SYMBOL_REF or
- LABEL_REF X in context CONTEXT. */
-enum aarch64_symbol_type
-aarch64_classify_symbol (rtx x,
- enum aarch64_symbol_context context ATTRIBUTE_UNUSED)
-{
- if (GET_CODE (x) == LABEL_REF)
- {
- switch (aarch64_cmodel)
- {
- case AARCH64_CMODEL_LARGE:
- return SYMBOL_FORCE_TO_MEM;
-
- case AARCH64_CMODEL_TINY_PIC:
- case AARCH64_CMODEL_TINY:
- case AARCH64_CMODEL_SMALL_PIC:
- case AARCH64_CMODEL_SMALL:
- return SYMBOL_SMALL_ABSOLUTE;
-
- default:
- gcc_unreachable ();
- }
- }
-
- gcc_assert (GET_CODE (x) == SYMBOL_REF);
-
- switch (aarch64_cmodel)
- {
- case AARCH64_CMODEL_LARGE:
- return SYMBOL_FORCE_TO_MEM;
-
- case AARCH64_CMODEL_TINY:
- case AARCH64_CMODEL_SMALL:
-
-    /* This is needed to get DFmode and TImode constants loaded from
-       the constant pool.  It is necessary to dump TImode values into
-       the constant pool: we don't handle TImode constant loads properly
-       yet and hence need to use the constant pool.  */
- if (CONSTANT_POOL_ADDRESS_P (x))
- return SYMBOL_FORCE_TO_MEM;
-
- if (aarch64_tls_symbol_p (x))
- return aarch64_classify_tls_symbol (x);
-
- if (SYMBOL_REF_WEAK (x))
- return SYMBOL_FORCE_TO_MEM;
-
- return SYMBOL_SMALL_ABSOLUTE;
-
- case AARCH64_CMODEL_TINY_PIC:
- case AARCH64_CMODEL_SMALL_PIC:
-
- if (CONSTANT_POOL_ADDRESS_P (x))
- return SYMBOL_FORCE_TO_MEM;
-
- if (aarch64_tls_symbol_p (x))
- return aarch64_classify_tls_symbol (x);
-
- if (!aarch64_symbol_binds_local_p (x))
- return SYMBOL_SMALL_GOT;
-
- return SYMBOL_SMALL_ABSOLUTE;
-
- default:
- gcc_unreachable ();
- }
- /* By default push everything into the constant pool. */
- return SYMBOL_FORCE_TO_MEM;
-}
-
-/* Return true if X is a symbolic constant that can be used in context
- CONTEXT. If it is, store the type of the symbol in *SYMBOL_TYPE. */
-
-bool
-aarch64_symbolic_constant_p (rtx x, enum aarch64_symbol_context context,
- enum aarch64_symbol_type *symbol_type)
-{
- rtx offset;
- split_const (x, &x, &offset);
- if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
- *symbol_type = aarch64_classify_symbol (x, context);
- else
- return false;
-
- /* No checking of offset at this point. */
- return true;
-}
-
-bool
-aarch64_constant_address_p (rtx x)
-{
- return (CONSTANT_P (x) && memory_address_p (DImode, x));
-}
-
-bool
-aarch64_legitimate_pic_operand_p (rtx x)
-{
- if (GET_CODE (x) == SYMBOL_REF
- || (GET_CODE (x) == CONST
- && GET_CODE (XEXP (x, 0)) == PLUS
- && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF))
- return false;
-
- return true;
-}
-
-/* Return true if X holds either a quarter-precision or
- floating-point +0.0 constant. */
-static bool
-aarch64_valid_floating_const (enum machine_mode mode, rtx x)
-{
- if (!CONST_DOUBLE_P (x))
- return false;
-
- /* TODO: We could handle moving 0.0 to a TFmode register,
- but first we would like to refactor the movtf_aarch64
- to be more amicable to split moves properly and
- correctly gate on TARGET_SIMD. For now - reject all
- constants which are not to SFmode or DFmode registers. */
- if (!(mode == SFmode || mode == DFmode))
- return false;
-
- if (aarch64_float_const_zero_rtx_p (x))
- return true;
- return aarch64_float_const_representable_p (x);
-}
-
-static bool
-aarch64_legitimate_constant_p (enum machine_mode mode, rtx x)
-{
- /* Do not allow vector struct mode constants. We could support
- 0 and -1 easily, but they need support in aarch64-simd.md. */
- if (TARGET_SIMD && aarch64_vect_struct_mode_p (mode))
- return false;
-
- /* This could probably go away because
- we now decompose CONST_INTs according to expand_mov_immediate. */
- if ((GET_CODE (x) == CONST_VECTOR
- && aarch64_simd_valid_immediate (x, mode, false,
- NULL, NULL, NULL, NULL, NULL) != -1)
- || CONST_INT_P (x) || aarch64_valid_floating_const (mode, x))
- return !targetm.cannot_force_const_mem (mode, x);
-
- if (GET_CODE (x) == HIGH
- && aarch64_valid_symref (XEXP (x, 0), GET_MODE (XEXP (x, 0))))
- return true;
-
- return aarch64_constant_address_p (x);
-}
-
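-/* Emit an instruction that loads the thread pointer into TARGET (or into a
-   fresh register if TARGET is not a suitable Pmode register) and return the
-   register used.  */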
-rtx
-aarch64_load_tp (rtx target)
-{
- if (!target
- || GET_MODE (target) != Pmode
- || !register_operand (target, Pmode))
- target = gen_reg_rtx (Pmode);
-
- /* Can return in any reg. */
- emit_insn (gen_aarch64_load_tp_hard (target));
- return target;
-}
-
-/* On AAPCS systems, this is the "struct __va_list". */
-static GTY(()) tree va_list_type;
-
-/* Implement TARGET_BUILD_BUILTIN_VA_LIST.
- Return the type to use as __builtin_va_list.
-
- AAPCS64 \S 7.1.4 requires that va_list be a typedef for a type defined as:
-
- struct __va_list
- {
- void *__stack;
- void *__gr_top;
- void *__vr_top;
- int __gr_offs;
- int __vr_offs;
- }; */
-
-static tree
-aarch64_build_builtin_va_list (void)
-{
- tree va_list_name;
- tree f_stack, f_grtop, f_vrtop, f_groff, f_vroff;
-
- /* Create the type. */
- va_list_type = lang_hooks.types.make_type (RECORD_TYPE);
- /* Give it the required name. */
- va_list_name = build_decl (BUILTINS_LOCATION,
- TYPE_DECL,
- get_identifier ("__va_list"),
- va_list_type);
- DECL_ARTIFICIAL (va_list_name) = 1;
- TYPE_NAME (va_list_type) = va_list_name;
- TYPE_STUB_DECL (va_list_type) = va_list_name;
-
- /* Create the fields. */
- f_stack = build_decl (BUILTINS_LOCATION,
- FIELD_DECL, get_identifier ("__stack"),
- ptr_type_node);
- f_grtop = build_decl (BUILTINS_LOCATION,
- FIELD_DECL, get_identifier ("__gr_top"),
- ptr_type_node);
- f_vrtop = build_decl (BUILTINS_LOCATION,
- FIELD_DECL, get_identifier ("__vr_top"),
- ptr_type_node);
- f_groff = build_decl (BUILTINS_LOCATION,
- FIELD_DECL, get_identifier ("__gr_offs"),
- integer_type_node);
- f_vroff = build_decl (BUILTINS_LOCATION,
- FIELD_DECL, get_identifier ("__vr_offs"),
- integer_type_node);
-
- DECL_ARTIFICIAL (f_stack) = 1;
- DECL_ARTIFICIAL (f_grtop) = 1;
- DECL_ARTIFICIAL (f_vrtop) = 1;
- DECL_ARTIFICIAL (f_groff) = 1;
- DECL_ARTIFICIAL (f_vroff) = 1;
-
- DECL_FIELD_CONTEXT (f_stack) = va_list_type;
- DECL_FIELD_CONTEXT (f_grtop) = va_list_type;
- DECL_FIELD_CONTEXT (f_vrtop) = va_list_type;
- DECL_FIELD_CONTEXT (f_groff) = va_list_type;
- DECL_FIELD_CONTEXT (f_vroff) = va_list_type;
-
- TYPE_FIELDS (va_list_type) = f_stack;
- DECL_CHAIN (f_stack) = f_grtop;
- DECL_CHAIN (f_grtop) = f_vrtop;
- DECL_CHAIN (f_vrtop) = f_groff;
- DECL_CHAIN (f_groff) = f_vroff;
-
- /* Compute its layout. */
- layout_type (va_list_type);
-
- return va_list_type;
-}
-
-/* Implement TARGET_EXPAND_BUILTIN_VA_START. */
-static void
-aarch64_expand_builtin_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
-{
- const CUMULATIVE_ARGS *cum;
- tree f_stack, f_grtop, f_vrtop, f_groff, f_vroff;
- tree stack, grtop, vrtop, groff, vroff;
- tree t;
- int gr_save_area_size;
- int vr_save_area_size;
- int vr_offset;
-
- cum = &crtl->args.info;
- gr_save_area_size
- = (NUM_ARG_REGS - cum->aapcs_ncrn) * UNITS_PER_WORD;
- vr_save_area_size
- = (NUM_FP_ARG_REGS - cum->aapcs_nvrn) * UNITS_PER_VREG;
-
- if (TARGET_GENERAL_REGS_ONLY)
- {
- if (cum->aapcs_nvrn > 0)
- sorry ("%qs and floating point or vector arguments",
- "-mgeneral-regs-only");
- vr_save_area_size = 0;
- }
-
- f_stack = TYPE_FIELDS (va_list_type_node);
- f_grtop = DECL_CHAIN (f_stack);
- f_vrtop = DECL_CHAIN (f_grtop);
- f_groff = DECL_CHAIN (f_vrtop);
- f_vroff = DECL_CHAIN (f_groff);
-
- stack = build3 (COMPONENT_REF, TREE_TYPE (f_stack), valist, f_stack,
- NULL_TREE);
- grtop = build3 (COMPONENT_REF, TREE_TYPE (f_grtop), valist, f_grtop,
- NULL_TREE);
- vrtop = build3 (COMPONENT_REF, TREE_TYPE (f_vrtop), valist, f_vrtop,
- NULL_TREE);
- groff = build3 (COMPONENT_REF, TREE_TYPE (f_groff), valist, f_groff,
- NULL_TREE);
- vroff = build3 (COMPONENT_REF, TREE_TYPE (f_vroff), valist, f_vroff,
- NULL_TREE);
-
- /* Emit code to initialize STACK, which points to the next varargs stack
- argument. CUM->AAPCS_STACK_SIZE gives the number of stack words used
- by named arguments. STACK is 8-byte aligned. */
- t = make_tree (TREE_TYPE (stack), virtual_incoming_args_rtx);
- if (cum->aapcs_stack_size > 0)
- t = fold_build_pointer_plus_hwi (t, cum->aapcs_stack_size * UNITS_PER_WORD);
- t = build2 (MODIFY_EXPR, TREE_TYPE (stack), stack, t);
- expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
-
- /* Emit code to initialize GRTOP, the top of the GR save area.
- virtual_incoming_args_rtx should have been 16 byte aligned. */
- t = make_tree (TREE_TYPE (grtop), virtual_incoming_args_rtx);
- t = build2 (MODIFY_EXPR, TREE_TYPE (grtop), grtop, t);
- expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
-
- /* Emit code to initialize VRTOP, the top of the VR save area.
- This address is gr_save_area_bytes below GRTOP, rounded
- down to the next 16-byte boundary. */
- t = make_tree (TREE_TYPE (vrtop), virtual_incoming_args_rtx);
- vr_offset = AARCH64_ROUND_UP (gr_save_area_size,
- STACK_BOUNDARY / BITS_PER_UNIT);
-
- if (vr_offset)
- t = fold_build_pointer_plus_hwi (t, -vr_offset);
- t = build2 (MODIFY_EXPR, TREE_TYPE (vrtop), vrtop, t);
- expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
-
- /* Emit code to initialize GROFF, the offset from GRTOP of the
- next GPR argument. */
- t = build2 (MODIFY_EXPR, TREE_TYPE (groff), groff,
- build_int_cst (TREE_TYPE (groff), -gr_save_area_size));
- expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
-
- /* Likewise emit code to initialize VROFF, the offset from FTOP
- of the next VR argument. */
- t = build2 (MODIFY_EXPR, TREE_TYPE (vroff), vroff,
- build_int_cst (TREE_TYPE (vroff), -vr_save_area_size));
- expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
-}
-
-/* Implement TARGET_GIMPLIFY_VA_ARG_EXPR. */
-
-static tree
-aarch64_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
- gimple_seq *post_p ATTRIBUTE_UNUSED)
-{
- tree addr;
- bool indirect_p;
- bool is_ha; /* is HFA or HVA. */
- bool dw_align; /* double-word align. */
- enum machine_mode ag_mode = VOIDmode;
- int nregs;
- enum machine_mode mode;
-
- tree f_stack, f_grtop, f_vrtop, f_groff, f_vroff;
- tree stack, f_top, f_off, off, arg, roundup, on_stack;
- HOST_WIDE_INT size, rsize, adjust, align;
- tree t, u, cond1, cond2;
-
- indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, false);
- if (indirect_p)
- type = build_pointer_type (type);
-
- mode = TYPE_MODE (type);
-
- f_stack = TYPE_FIELDS (va_list_type_node);
- f_grtop = DECL_CHAIN (f_stack);
- f_vrtop = DECL_CHAIN (f_grtop);
- f_groff = DECL_CHAIN (f_vrtop);
- f_vroff = DECL_CHAIN (f_groff);
-
- stack = build3 (COMPONENT_REF, TREE_TYPE (f_stack), unshare_expr (valist),
- f_stack, NULL_TREE);
- size = int_size_in_bytes (type);
- align = aarch64_function_arg_alignment (mode, type) / BITS_PER_UNIT;
-
- dw_align = false;
- adjust = 0;
- if (aarch64_vfp_is_call_or_return_candidate (mode,
- type,
- &ag_mode,
- &nregs,
- &is_ha))
- {
- /* TYPE passed in fp/simd registers. */
- if (TARGET_GENERAL_REGS_ONLY)
- sorry ("%qs and floating point or vector arguments",
- "-mgeneral-regs-only");
-
- f_top = build3 (COMPONENT_REF, TREE_TYPE (f_vrtop),
- unshare_expr (valist), f_vrtop, NULL_TREE);
- f_off = build3 (COMPONENT_REF, TREE_TYPE (f_vroff),
- unshare_expr (valist), f_vroff, NULL_TREE);
-
- rsize = nregs * UNITS_PER_VREG;
-
- if (is_ha)
- {
- if (BYTES_BIG_ENDIAN && GET_MODE_SIZE (ag_mode) < UNITS_PER_VREG)
- adjust = UNITS_PER_VREG - GET_MODE_SIZE (ag_mode);
- }
- else if (BLOCK_REG_PADDING (mode, type, 1) == downward
- && size < UNITS_PER_VREG)
- {
- adjust = UNITS_PER_VREG - size;
- }
- }
- else
- {
- /* TYPE passed in general registers. */
- f_top = build3 (COMPONENT_REF, TREE_TYPE (f_grtop),
- unshare_expr (valist), f_grtop, NULL_TREE);
- f_off = build3 (COMPONENT_REF, TREE_TYPE (f_groff),
- unshare_expr (valist), f_groff, NULL_TREE);
- rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
- nregs = rsize / UNITS_PER_WORD;
-
- if (align > 8)
- dw_align = true;
-
- if (BLOCK_REG_PADDING (mode, type, 1) == downward
- && size < UNITS_PER_WORD)
- {
- adjust = UNITS_PER_WORD - size;
- }
- }
-
- /* Get a local temporary for the field value. */
- off = get_initialized_tmp_var (f_off, pre_p, NULL);
-
- /* Emit code to branch if off >= 0. */
- t = build2 (GE_EXPR, boolean_type_node, off,
- build_int_cst (TREE_TYPE (off), 0));
- cond1 = build3 (COND_EXPR, ptr_type_node, t, NULL_TREE, NULL_TREE);
-
- if (dw_align)
- {
- /* Emit: offs = (offs + 15) & -16. */
- t = build2 (PLUS_EXPR, TREE_TYPE (off), off,
- build_int_cst (TREE_TYPE (off), 15));
- t = build2 (BIT_AND_EXPR, TREE_TYPE (off), t,
- build_int_cst (TREE_TYPE (off), -16));
- roundup = build2 (MODIFY_EXPR, TREE_TYPE (off), off, t);
- }
- else
- roundup = NULL;
-
- /* Update ap.__[g|v]r_offs */
- t = build2 (PLUS_EXPR, TREE_TYPE (off), off,
- build_int_cst (TREE_TYPE (off), rsize));
- t = build2 (MODIFY_EXPR, TREE_TYPE (f_off), unshare_expr (f_off), t);
-
- /* String up. */
- if (roundup)
- t = build2 (COMPOUND_EXPR, TREE_TYPE (t), roundup, t);
-
- /* [cond2] if (ap.__[g|v]r_offs > 0) */
- u = build2 (GT_EXPR, boolean_type_node, unshare_expr (f_off),
- build_int_cst (TREE_TYPE (f_off), 0));
- cond2 = build3 (COND_EXPR, ptr_type_node, u, NULL_TREE, NULL_TREE);
-
- /* String up: make sure the assignment happens before the use. */
- t = build2 (COMPOUND_EXPR, TREE_TYPE (cond2), t, cond2);
- COND_EXPR_ELSE (cond1) = t;
-
- /* Prepare the trees handling the argument that is passed on the stack;
- the top level node will store in ON_STACK. */
- arg = get_initialized_tmp_var (stack, pre_p, NULL);
- if (align > 8)
- {
- /* if (alignof(type) > 8) (arg = arg + 15) & -16; */
- t = fold_convert (intDI_type_node, arg);
- t = build2 (PLUS_EXPR, TREE_TYPE (t), t,
- build_int_cst (TREE_TYPE (t), 15));
- t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
- build_int_cst (TREE_TYPE (t), -16));
- t = fold_convert (TREE_TYPE (arg), t);
- roundup = build2 (MODIFY_EXPR, TREE_TYPE (arg), arg, t);
- }
- else
- roundup = NULL;
- /* Advance ap.__stack */
- t = fold_convert (intDI_type_node, arg);
- t = build2 (PLUS_EXPR, TREE_TYPE (t), t,
- build_int_cst (TREE_TYPE (t), size + 7));
- t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
- build_int_cst (TREE_TYPE (t), -8));
- t = fold_convert (TREE_TYPE (arg), t);
- t = build2 (MODIFY_EXPR, TREE_TYPE (stack), unshare_expr (stack), t);
- /* String up roundup and advance. */
- if (roundup)
- t = build2 (COMPOUND_EXPR, TREE_TYPE (t), roundup, t);
- /* String up with arg */
- on_stack = build2 (COMPOUND_EXPR, TREE_TYPE (arg), t, arg);
- /* Big-endianness related address adjustment. */
- if (BLOCK_REG_PADDING (mode, type, 1) == downward
- && size < UNITS_PER_WORD)
- {
- t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (arg), arg,
- size_int (UNITS_PER_WORD - size));
- on_stack = build2 (COMPOUND_EXPR, TREE_TYPE (arg), on_stack, t);
- }
-
- COND_EXPR_THEN (cond1) = unshare_expr (on_stack);
- COND_EXPR_THEN (cond2) = unshare_expr (on_stack);
-
- /* Adjustment to OFFSET in the case of BIG_ENDIAN. */
- t = off;
- if (adjust)
- t = build2 (PREINCREMENT_EXPR, TREE_TYPE (off), off,
- build_int_cst (TREE_TYPE (off), adjust));
-
- t = fold_convert (sizetype, t);
- t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (f_top), f_top, t);
-
- if (is_ha)
- {
- /* type ha; // treat as "struct {ftype field[n];}"
- ... [computing offs]
- for (i = 0; i <nregs; ++i, offs += 16)
- ha.field[i] = *((ftype *)(ap.__vr_top + offs));
- return ha; */
- int i;
- tree tmp_ha, field_t, field_ptr_t;
-
- /* Declare a local variable. */
- tmp_ha = create_tmp_var_raw (type, "ha");
- gimple_add_tmp_var (tmp_ha);
-
- /* Establish the base type. */
- switch (ag_mode)
- {
- case SFmode:
- field_t = float_type_node;
- field_ptr_t = float_ptr_type_node;
- break;
- case DFmode:
- field_t = double_type_node;
- field_ptr_t = double_ptr_type_node;
- break;
- case TFmode:
- field_t = long_double_type_node;
- field_ptr_t = long_double_ptr_type_node;
- break;
-/* The half precision and quad precision are not fully supported yet. Enable
- the following code after the support is complete. Need to find the correct
- type node for __fp16 *. */
-#if 0
- case HFmode:
- field_t = float_type_node;
- field_ptr_t = float_ptr_type_node;
- break;
-#endif
- case V2SImode:
- case V4SImode:
- {
- tree innertype = make_signed_type (GET_MODE_PRECISION (SImode));
- field_t = build_vector_type_for_mode (innertype, ag_mode);
- field_ptr_t = build_pointer_type (field_t);
- }
- break;
- default:
- gcc_assert (0);
- }
-
-      /* *(field_ptr_t)&ha = *((field_ptr_t)vr_saved_area)  */
- tmp_ha = build1 (ADDR_EXPR, field_ptr_t, tmp_ha);
- addr = t;
- t = fold_convert (field_ptr_t, addr);
- t = build2 (MODIFY_EXPR, field_t,
- build1 (INDIRECT_REF, field_t, tmp_ha),
- build1 (INDIRECT_REF, field_t, t));
-
- /* ha.field[i] = *((field_ptr_t)vr_saved_area + i) */
- for (i = 1; i < nregs; ++i)
- {
- addr = fold_build_pointer_plus_hwi (addr, UNITS_PER_VREG);
- u = fold_convert (field_ptr_t, addr);
- u = build2 (MODIFY_EXPR, field_t,
- build2 (MEM_REF, field_t, tmp_ha,
- build_int_cst (field_ptr_t,
- (i *
- int_size_in_bytes (field_t)))),
- build1 (INDIRECT_REF, field_t, u));
- t = build2 (COMPOUND_EXPR, TREE_TYPE (t), t, u);
- }
-
- u = fold_convert (TREE_TYPE (f_top), tmp_ha);
- t = build2 (COMPOUND_EXPR, TREE_TYPE (f_top), t, u);
- }
-
- COND_EXPR_ELSE (cond2) = t;
- addr = fold_convert (build_pointer_type (type), cond1);
- addr = build_va_arg_indirect_ref (addr);
-
- if (indirect_p)
- addr = build_va_arg_indirect_ref (addr);
-
- return addr;
-}
-
-/* Implement TARGET_SETUP_INCOMING_VARARGS. */
-
-static void
-aarch64_setup_incoming_varargs (cumulative_args_t cum_v, enum machine_mode mode,
- tree type, int *pretend_size ATTRIBUTE_UNUSED,
- int no_rtl)
-{
- CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
- CUMULATIVE_ARGS local_cum;
- int gr_saved, vr_saved;
-
- /* The caller has advanced CUM up to, but not beyond, the last named
- argument. Advance a local copy of CUM past the last "real" named
- argument, to find out how many registers are left over. */
- local_cum = *cum;
- aarch64_function_arg_advance (pack_cumulative_args(&local_cum), mode, type, true);
-
-  /* Find out how many registers we need to save.  */
- gr_saved = NUM_ARG_REGS - local_cum.aapcs_ncrn;
- vr_saved = NUM_FP_ARG_REGS - local_cum.aapcs_nvrn;
-
- if (TARGET_GENERAL_REGS_ONLY)
- {
- if (local_cum.aapcs_nvrn > 0)
- sorry ("%qs and floating point or vector arguments",
- "-mgeneral-regs-only");
- vr_saved = 0;
- }
-
- if (!no_rtl)
- {
- if (gr_saved > 0)
- {
- rtx ptr, mem;
-
- /* virtual_incoming_args_rtx should have been 16-byte aligned. */
- ptr = plus_constant (Pmode, virtual_incoming_args_rtx,
- - gr_saved * UNITS_PER_WORD);
- mem = gen_frame_mem (BLKmode, ptr);
- set_mem_alias_set (mem, get_varargs_alias_set ());
-
- move_block_from_reg (local_cum.aapcs_ncrn + R0_REGNUM,
- mem, gr_saved);
- }
- if (vr_saved > 0)
- {
- /* We can't use move_block_from_reg, because it will use
- the wrong mode, storing D regs only. */
- enum machine_mode mode = TImode;
- int off, i;
-
- /* Set OFF to the offset from virtual_incoming_args_rtx of
- the first vector register. The VR save area lies below
- the GR one, and is aligned to 16 bytes. */
- off = -AARCH64_ROUND_UP (gr_saved * UNITS_PER_WORD,
- STACK_BOUNDARY / BITS_PER_UNIT);
- off -= vr_saved * UNITS_PER_VREG;
-
- for (i = local_cum.aapcs_nvrn; i < NUM_FP_ARG_REGS; ++i)
- {
- rtx ptr, mem;
-
- ptr = plus_constant (Pmode, virtual_incoming_args_rtx, off);
- mem = gen_frame_mem (mode, ptr);
- set_mem_alias_set (mem, get_varargs_alias_set ());
- aarch64_emit_move (mem, gen_rtx_REG (mode, V0_REGNUM + i));
- off += UNITS_PER_VREG;
- }
- }
- }
-
- /* We don't save the size into *PRETEND_SIZE because we want to avoid
- any complication of having crtl->args.pretend_args_size changed. */
- cfun->machine->saved_varargs_size
- = (AARCH64_ROUND_UP (gr_saved * UNITS_PER_WORD,
- STACK_BOUNDARY / BITS_PER_UNIT)
- + vr_saved * UNITS_PER_VREG);
-}
-
-static void
-aarch64_conditional_register_usage (void)
-{
- int i;
- if (!TARGET_FLOAT)
- {
- for (i = V0_REGNUM; i <= V31_REGNUM; i++)
- {
- fixed_regs[i] = 1;
- call_used_regs[i] = 1;
- }
- }
-}
-
-/* Walk down the type tree of TYPE counting consecutive base elements.
- If *MODEP is VOIDmode, then set it to the first valid floating point
- type. If a non-floating point type is found, or if a floating point
- type that doesn't match a non-VOIDmode *MODEP is found, then return -1,
- otherwise return the count in the sub-tree. */
-static int
-aapcs_vfp_sub_candidate (const_tree type, enum machine_mode *modep)
-{
- enum machine_mode mode;
- HOST_WIDE_INT size;
-
- switch (TREE_CODE (type))
- {
- case REAL_TYPE:
- mode = TYPE_MODE (type);
- if (mode != DFmode && mode != SFmode && mode != TFmode)
- return -1;
-
- if (*modep == VOIDmode)
- *modep = mode;
-
- if (*modep == mode)
- return 1;
-
- break;
-
- case COMPLEX_TYPE:
- mode = TYPE_MODE (TREE_TYPE (type));
- if (mode != DFmode && mode != SFmode && mode != TFmode)
- return -1;
-
- if (*modep == VOIDmode)
- *modep = mode;
-
- if (*modep == mode)
- return 2;
-
- break;
-
- case VECTOR_TYPE:
- /* Use V2SImode and V4SImode as representatives of all 64-bit
- and 128-bit vector types. */
- size = int_size_in_bytes (type);
- switch (size)
- {
- case 8:
- mode = V2SImode;
- break;
- case 16:
- mode = V4SImode;
- break;
- default:
- return -1;
- }
-
- if (*modep == VOIDmode)
- *modep = mode;
-
- /* Vector modes are considered to be opaque: two vectors are
- equivalent for the purposes of being homogeneous aggregates
- if they are the same size. */
- if (*modep == mode)
- return 1;
-
- break;
-
- case ARRAY_TYPE:
- {
- int count;
- tree index = TYPE_DOMAIN (type);
-
- /* Can't handle incomplete types. */
- if (!COMPLETE_TYPE_P (type))
- return -1;
-
- count = aapcs_vfp_sub_candidate (TREE_TYPE (type), modep);
- if (count == -1
- || !index
- || !TYPE_MAX_VALUE (index)
- || !host_integerp (TYPE_MAX_VALUE (index), 1)
- || !TYPE_MIN_VALUE (index)
- || !host_integerp (TYPE_MIN_VALUE (index), 1)
- || count < 0)
- return -1;
-
- count *= (1 + tree_low_cst (TYPE_MAX_VALUE (index), 1)
- - tree_low_cst (TYPE_MIN_VALUE (index), 1));
-
- /* There must be no padding. */
- if (!host_integerp (TYPE_SIZE (type), 1)
- || (tree_low_cst (TYPE_SIZE (type), 1)
- != count * GET_MODE_BITSIZE (*modep)))
- return -1;
-
- return count;
- }
-
- case RECORD_TYPE:
- {
- int count = 0;
- int sub_count;
- tree field;
-
- /* Can't handle incomplete types. */
- if (!COMPLETE_TYPE_P (type))
- return -1;
-
- for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
- {
- if (TREE_CODE (field) != FIELD_DECL)
- continue;
-
- sub_count = aapcs_vfp_sub_candidate (TREE_TYPE (field), modep);
- if (sub_count < 0)
- return -1;
- count += sub_count;
- }
-
- /* There must be no padding. */
- if (!host_integerp (TYPE_SIZE (type), 1)
- || (tree_low_cst (TYPE_SIZE (type), 1)
- != count * GET_MODE_BITSIZE (*modep)))
- return -1;
-
- return count;
- }
-
- case UNION_TYPE:
- case QUAL_UNION_TYPE:
- {
- /* These aren't very interesting except in a degenerate case. */
- int count = 0;
- int sub_count;
- tree field;
-
- /* Can't handle incomplete types. */
- if (!COMPLETE_TYPE_P (type))
- return -1;
-
- for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
- {
- if (TREE_CODE (field) != FIELD_DECL)
- continue;
-
- sub_count = aapcs_vfp_sub_candidate (TREE_TYPE (field), modep);
- if (sub_count < 0)
- return -1;
- count = count > sub_count ? count : sub_count;
- }
-
- /* There must be no padding. */
- if (!host_integerp (TYPE_SIZE (type), 1)
- || (tree_low_cst (TYPE_SIZE (type), 1)
- != count * GET_MODE_BITSIZE (*modep)))
- return -1;
-
- return count;
- }
-
- default:
- break;
- }
-
- return -1;
-}
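-
-/* Illustrative note (added for exposition; the declaration below is a
-   hypothetical example, not part of the original GCC sources): for a plain
-   homogeneous aggregate such as
-
-     struct s { float x, y, z; };
-
-   the walk above returns 3 and leaves *MODEP set to SFmode, whereas a mixed
-   struct such as { float f; double d; } returns -1 because the second field
-   does not match the base mode already recorded. */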
-
-/* Return TRUE if the type, as described by TYPE and MODE, is a composite
- type as described in AAPCS64 \S 4.3. This includes aggregate, union and
- array types. The C99 floating-point complex types are also considered
- as composite types, according to AAPCS64 \S 7.1.1. The complex integer
- types, which are GCC extensions and out of the scope of AAPCS64, are
- treated as composite types here as well.
-
- Note that MODE itself is not sufficient in determining whether a type
- is such a composite type or not. This is because
- stor-layout.c:compute_record_mode may have already changed the MODE
- (BLKmode) of a RECORD_TYPE TYPE to some other mode. For example, a
- structure with only one field may have its MODE set to the mode of the
- field. Also an integer mode whose size matches the size of the
- RECORD_TYPE type may be used to substitute the original mode
- (i.e. BLKmode) in certain circumstances. In other words, MODE cannot be
- solely relied on. */
-
-static bool
-aarch64_composite_type_p (const_tree type,
- enum machine_mode mode)
-{
- if (type && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == COMPLEX_TYPE))
- return true;
-
- if (mode == BLKmode
- || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
- || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
- return true;
-
- return false;
-}
-
-/* Return TRUE if the type, as described by TYPE and MODE, is a short vector
- type as described in AAPCS64 \S 4.1.2.
-
- See the comment above aarch64_composite_type_p for the notes on MODE. */
-
-static bool
-aarch64_short_vector_p (const_tree type,
- enum machine_mode mode)
-{
- HOST_WIDE_INT size = -1;
-
- if (type && TREE_CODE (type) == VECTOR_TYPE)
- size = int_size_in_bytes (type);
- else if (!aarch64_composite_type_p (type, mode)
- && (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
- || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT))
- size = GET_MODE_SIZE (mode);
-
- return (size == 8 || size == 16) ? true : false;
-}
-
-/* Return TRUE if an argument, whose type is described by TYPE and MODE,
- shall be passed or returned in simd/fp register(s) (providing these
- parameter passing registers are available).
-
- Upon successful return, *COUNT returns the number of needed registers,
- *BASE_MODE returns the mode of the individual register and when IS_HA
- is not NULL, *IS_HA indicates whether or not the argument is a homogeneous
- floating-point aggregate or a homogeneous short-vector aggregate. */
-
-static bool
-aarch64_vfp_is_call_or_return_candidate (enum machine_mode mode,
- const_tree type,
- enum machine_mode *base_mode,
- int *count,
- bool *is_ha)
-{
- enum machine_mode new_mode = VOIDmode;
- bool composite_p = aarch64_composite_type_p (type, mode);
-
- if (is_ha != NULL) *is_ha = false;
-
- if ((!composite_p && GET_MODE_CLASS (mode) == MODE_FLOAT)
- || aarch64_short_vector_p (type, mode))
- {
- *count = 1;
- new_mode = mode;
- }
- else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
- {
- if (is_ha != NULL) *is_ha = true;
- *count = 2;
- new_mode = GET_MODE_INNER (mode);
- }
- else if (type && composite_p)
- {
- int ag_count = aapcs_vfp_sub_candidate (type, &new_mode);
-
- if (ag_count > 0 && ag_count <= HA_MAX_NUM_FLDS)
- {
- if (is_ha != NULL) *is_ha = true;
- *count = ag_count;
- }
- else
- return false;
- }
- else
- return false;
-
- *base_mode = new_mode;
- return true;
-}
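-
-/* Worked example (added for exposition, not part of the original sources):
-   a _Complex double argument takes the MODE_COMPLEX_FLOAT path above, so
-   *COUNT becomes 2, *BASE_MODE becomes DFmode and *IS_HA is set, i.e. it is
-   passed in two consecutive FP/SIMD registers when they are available.  A
-   struct { float a, b, c, d; } instead goes through aapcs_vfp_sub_candidate
-   and yields *COUNT == 4 with *BASE_MODE == SFmode. */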
-
-/* Implement TARGET_STRUCT_VALUE_RTX. */
-
-static rtx
-aarch64_struct_value_rtx (tree fndecl ATTRIBUTE_UNUSED,
- int incoming ATTRIBUTE_UNUSED)
-{
- return gen_rtx_REG (Pmode, AARCH64_STRUCT_VALUE_REGNUM);
-}
-
-/* Implements target hook vector_mode_supported_p. */
-static bool
-aarch64_vector_mode_supported_p (enum machine_mode mode)
-{
- if (TARGET_SIMD
- && (mode == V4SImode || mode == V8HImode
- || mode == V16QImode || mode == V2DImode
- || mode == V2SImode || mode == V4HImode
- || mode == V8QImode || mode == V2SFmode
- || mode == V4SFmode || mode == V2DFmode))
- return true;
-
- return false;
-}
-
-/* Return quad mode as the preferred SIMD mode. */
-static enum machine_mode
-aarch64_preferred_simd_mode (enum machine_mode mode)
-{
- if (TARGET_SIMD)
- switch (mode)
- {
- case DFmode:
- return V2DFmode;
- case SFmode:
- return V4SFmode;
- case SImode:
- return V4SImode;
- case HImode:
- return V8HImode;
- case QImode:
- return V16QImode;
- case DImode:
- return V2DImode;
-
- default:;
- }
- return word_mode;
-}
-
-/* Return the bitmask of possible vector sizes for the vectorizer
- to iterate over. */
-static unsigned int
-aarch64_autovectorize_vector_sizes (void)
-{
- return (16 | 8);
-}
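-
-/* Note added for exposition (not in the original sources): the returned
-   value (16 | 8) is a bitmask of vector sizes in bytes, so the vectorizer
-   may try both 128-bit (16-byte) and 64-bit (8-byte) vectors. */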
-
-/* A table to help perform AArch64-specific name mangling for AdvSIMD
- vector types in order to conform to the AAPCS64 (see "Procedure
- Call Standard for the ARM 64-bit Architecture", Appendix A). To
- qualify for emission with the mangled names defined in that document,
- a vector type must not only be of the correct mode but also be
- composed of AdvSIMD vector element types (e.g.
- __builtin_aarch64_simd_qi); these types are registered by
- aarch64_init_simd_builtins (). In other words, vector types defined
- in other ways e.g. via vector_size attribute will get default
- mangled names. */
-typedef struct
-{
- enum machine_mode mode;
- const char *element_type_name;
- const char *mangled_name;
-} aarch64_simd_mangle_map_entry;
-
-static aarch64_simd_mangle_map_entry aarch64_simd_mangle_map[] = {
- /* 64-bit containerized types. */
- { V8QImode, "__builtin_aarch64_simd_qi", "10__Int8x8_t" },
- { V8QImode, "__builtin_aarch64_simd_uqi", "11__Uint8x8_t" },
- { V4HImode, "__builtin_aarch64_simd_hi", "11__Int16x4_t" },
- { V4HImode, "__builtin_aarch64_simd_uhi", "12__Uint16x4_t" },
- { V2SImode, "__builtin_aarch64_simd_si", "11__Int32x2_t" },
- { V2SImode, "__builtin_aarch64_simd_usi", "12__Uint32x2_t" },
- { V2SFmode, "__builtin_aarch64_simd_sf", "13__Float32x2_t" },
- { V8QImode, "__builtin_aarch64_simd_poly8", "11__Poly8x8_t" },
- { V4HImode, "__builtin_aarch64_simd_poly16", "12__Poly16x4_t" },
- /* 128-bit containerized types. */
- { V16QImode, "__builtin_aarch64_simd_qi", "11__Int8x16_t" },
- { V16QImode, "__builtin_aarch64_simd_uqi", "12__Uint8x16_t" },
- { V8HImode, "__builtin_aarch64_simd_hi", "11__Int16x8_t" },
- { V8HImode, "__builtin_aarch64_simd_uhi", "12__Uint16x8_t" },
- { V4SImode, "__builtin_aarch64_simd_si", "11__Int32x4_t" },
- { V4SImode, "__builtin_aarch64_simd_usi", "12__Uint32x4_t" },
- { V2DImode, "__builtin_aarch64_simd_di", "11__Int64x2_t" },
- { V2DImode, "__builtin_aarch64_simd_udi", "12__Uint64x2_t" },
- { V4SFmode, "__builtin_aarch64_simd_sf", "13__Float32x4_t" },
- { V2DFmode, "__builtin_aarch64_simd_df", "13__Float64x2_t" },
- { V16QImode, "__builtin_aarch64_simd_poly8", "12__Poly8x16_t" },
- { V8HImode, "__builtin_aarch64_simd_poly16", "12__Poly16x8_t" },
- { VOIDmode, NULL, NULL }
-};
-
-/* Implement TARGET_MANGLE_TYPE. */
-
-static const char *
-aarch64_mangle_type (const_tree type)
-{
- /* The AArch64 ABI documents say that "__va_list" has to be
- mangled as if it is in the "std" namespace. */
- if (lang_hooks.types_compatible_p (CONST_CAST_TREE (type), va_list_type))
- return "St9__va_list";
-
- /* Check the mode of the vector type, and the name of the vector
- element type, against the table. */
- if (TREE_CODE (type) == VECTOR_TYPE)
- {
- aarch64_simd_mangle_map_entry *pos = aarch64_simd_mangle_map;
-
- while (pos->mode != VOIDmode)
- {
- tree elt_type = TREE_TYPE (type);
-
- if (pos->mode == TYPE_MODE (type)
- && TREE_CODE (TYPE_NAME (elt_type)) == TYPE_DECL
- && !strcmp (IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (elt_type))),
- pos->element_type_name))
- return pos->mangled_name;
-
- pos++;
- }
- }
-
- /* Use the default mangling. */
- return NULL;
-}
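-
-/* Illustrative example (added for exposition; the declaration below is
-   hypothetical and not part of this file): with the table above, the AdvSIMD
-   type int32x2_t (mode V2SImode, element type __builtin_aarch64_simd_si)
-   maps to the source name "11__Int32x2_t", so a C++ function
-
-     void f (int32x2_t);
-
-   is expected to mangle to _Z1f11__Int32x2_t rather than using the default
-   vector mangling. */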
-
-/* Return the equivalent letter for size. */
-static unsigned char
-sizetochar (int size)
-{
- switch (size)
- {
- case 64: return 'd';
- case 32: return 's';
- case 16: return 'h';
- case 8 : return 'b';
- default: gcc_unreachable ();
- }
-}
-
-/* Return true iff X is a uniform vector of floating-point
- constants, and the constant can be represented in
- quarter-precision form. Note, as aarch64_float_const_representable_p
- rejects both +0.0 and -0.0, we will also reject +0.0 and -0.0. */
-static bool
-aarch64_vect_float_const_representable_p (rtx x)
-{
- int i = 0;
- REAL_VALUE_TYPE r0, ri;
- rtx x0, xi;
-
- if (GET_MODE_CLASS (GET_MODE (x)) != MODE_VECTOR_FLOAT)
- return false;
-
- x0 = CONST_VECTOR_ELT (x, 0);
- if (!CONST_DOUBLE_P (x0))
- return false;
-
- REAL_VALUE_FROM_CONST_DOUBLE (r0, x0);
-
- for (i = 1; i < CONST_VECTOR_NUNITS (x); i++)
- {
- xi = CONST_VECTOR_ELT (x, i);
- if (!CONST_DOUBLE_P (xi))
- return false;
-
- REAL_VALUE_FROM_CONST_DOUBLE (ri, xi);
- if (!REAL_VALUES_EQUAL (r0, ri))
- return false;
- }
-
- return aarch64_float_const_representable_p (x0);
-}
-
-/* TODO: This function returns values similar to those
- returned by neon_valid_immediate in gcc/config/arm/arm.c
- but the API here is different enough that these magic numbers
- are not used. It should be sufficient to return true or false. */
-static int
-aarch64_simd_valid_immediate (rtx op, enum machine_mode mode, int inverse,
- rtx *modconst, int *elementwidth,
- unsigned char *elementchar,
- int *mvn, int *shift)
-{
-#define CHECK(STRIDE, ELSIZE, CLASS, TEST, SHIFT, NEG) \
- matches = 1; \
- for (i = 0; i < idx; i += (STRIDE)) \
- if (!(TEST)) \
- matches = 0; \
- if (matches) \
- { \
- immtype = (CLASS); \
- elsize = (ELSIZE); \
- elchar = sizetochar (elsize); \
- eshift = (SHIFT); \
- emvn = (NEG); \
- break; \
- }
-
- unsigned int i, elsize = 0, idx = 0, n_elts = CONST_VECTOR_NUNITS (op);
- unsigned int innersize = GET_MODE_SIZE (GET_MODE_INNER (mode));
- unsigned char bytes[16];
- unsigned char elchar = 0;
- int immtype = -1, matches;
- unsigned int invmask = inverse ? 0xff : 0;
- int eshift, emvn;
-
- if (GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
- {
- bool simd_imm_zero = aarch64_simd_imm_zero_p (op, mode);
- int elem_width = GET_MODE_BITSIZE (GET_MODE (CONST_VECTOR_ELT (op, 0)));
-
- if (!(simd_imm_zero
- || aarch64_vect_float_const_representable_p (op)))
- return -1;
-
- if (modconst)
- *modconst = CONST_VECTOR_ELT (op, 0);
-
- if (elementwidth)
- *elementwidth = elem_width;
-
- if (elementchar)
- *elementchar = sizetochar (elem_width);
-
- if (shift)
- *shift = 0;
-
- if (simd_imm_zero)
- return 19;
- else
- return 18;
- }
-
- /* Splat vector constant out into a byte vector. */
- for (i = 0; i < n_elts; i++)
- {
- rtx el = CONST_VECTOR_ELT (op, i);
- unsigned HOST_WIDE_INT elpart;
- unsigned int part, parts;
-
- if (GET_CODE (el) == CONST_INT)
- {
- elpart = INTVAL (el);
- parts = 1;
- }
- else if (GET_CODE (el) == CONST_DOUBLE)
- {
- elpart = CONST_DOUBLE_LOW (el);
- parts = 2;
- }
- else
- gcc_unreachable ();
-
- for (part = 0; part < parts; part++)
- {
- unsigned int byte;
- for (byte = 0; byte < innersize; byte++)
- {
- bytes[idx++] = (elpart & 0xff) ^ invmask;
- elpart >>= BITS_PER_UNIT;
- }
- if (GET_CODE (el) == CONST_DOUBLE)
- elpart = CONST_DOUBLE_HIGH (el);
- }
- }
-
- /* Sanity check. */
- gcc_assert (idx == GET_MODE_SIZE (mode));
-
- do
- {
- CHECK (4, 32, 0, bytes[i] == bytes[0] && bytes[i + 1] == 0
- && bytes[i + 2] == 0 && bytes[i + 3] == 0, 0, 0);
-
- CHECK (4, 32, 1, bytes[i] == 0 && bytes[i + 1] == bytes[1]
- && bytes[i + 2] == 0 && bytes[i + 3] == 0, 8, 0);
-
- CHECK (4, 32, 2, bytes[i] == 0 && bytes[i + 1] == 0
- && bytes[i + 2] == bytes[2] && bytes[i + 3] == 0, 16, 0);
-
- CHECK (4, 32, 3, bytes[i] == 0 && bytes[i + 1] == 0
- && bytes[i + 2] == 0 && bytes[i + 3] == bytes[3], 24, 0);
-
- CHECK (2, 16, 4, bytes[i] == bytes[0] && bytes[i + 1] == 0, 0, 0);
-
- CHECK (2, 16, 5, bytes[i] == 0 && bytes[i + 1] == bytes[1], 8, 0);
-
- CHECK (4, 32, 6, bytes[i] == bytes[0] && bytes[i + 1] == 0xff
- && bytes[i + 2] == 0xff && bytes[i + 3] == 0xff, 0, 1);
-
- CHECK (4, 32, 7, bytes[i] == 0xff && bytes[i + 1] == bytes[1]
- && bytes[i + 2] == 0xff && bytes[i + 3] == 0xff, 8, 1);
-
- CHECK (4, 32, 8, bytes[i] == 0xff && bytes[i + 1] == 0xff
- && bytes[i + 2] == bytes[2] && bytes[i + 3] == 0xff, 16, 1);
-
- CHECK (4, 32, 9, bytes[i] == 0xff && bytes[i + 1] == 0xff
- && bytes[i + 2] == 0xff && bytes[i + 3] == bytes[3], 24, 1);
-
- CHECK (2, 16, 10, bytes[i] == bytes[0] && bytes[i + 1] == 0xff, 0, 1);
-
- CHECK (2, 16, 11, bytes[i] == 0xff && bytes[i + 1] == bytes[1], 8, 1);
-
- CHECK (4, 32, 12, bytes[i] == 0xff && bytes[i + 1] == bytes[1]
- && bytes[i + 2] == 0 && bytes[i + 3] == 0, 0, 0);
-
- CHECK (4, 32, 13, bytes[i] == 0 && bytes[i + 1] == bytes[1]
- && bytes[i + 2] == 0xff && bytes[i + 3] == 0xff, 0, 1);
-
- CHECK (4, 32, 14, bytes[i] == 0xff && bytes[i + 1] == 0xff
- && bytes[i + 2] == bytes[2] && bytes[i + 3] == 0, 0, 0);
-
- CHECK (4, 32, 15, bytes[i] == 0 && bytes[i + 1] == 0
- && bytes[i + 2] == bytes[2] && bytes[i + 3] == 0xff, 0, 1);
-
- CHECK (1, 8, 16, bytes[i] == bytes[0], 0, 0);
-
- CHECK (1, 64, 17, (bytes[i] == 0 || bytes[i] == 0xff)
- && bytes[i] == bytes[(i + 8) % idx], 0, 0);
- }
- while (0);
-
- /* TODO: Currently the assembler cannot handle types 12 to 15.
- And there is no way to specify cmode through the compiler.
- Disable them till there is support in the assembler. */
- if (immtype == -1
- || (immtype >= 12 && immtype <= 15)
- || immtype == 18)
- return -1;
-
-
- if (elementwidth)
- *elementwidth = elsize;
-
- if (elementchar)
- *elementchar = elchar;
-
- if (mvn)
- *mvn = emvn;
-
- if (shift)
- *shift = eshift;
-
- if (modconst)
- {
- unsigned HOST_WIDE_INT imm = 0;
-
- /* Un-invert bytes of recognized vector, if necessary. */
- if (invmask != 0)
- for (i = 0; i < idx; i++)
- bytes[i] ^= invmask;
-
- if (immtype == 17)
- {
- /* FIXME: Broken on 32-bit H_W_I hosts. */
- gcc_assert (sizeof (HOST_WIDE_INT) == 8);
-
- for (i = 0; i < 8; i++)
- imm |= (unsigned HOST_WIDE_INT) (bytes[i] ? 0xff : 0)
- << (i * BITS_PER_UNIT);
-
- *modconst = GEN_INT (imm);
- }
- else
- {
- unsigned HOST_WIDE_INT imm = 0;
-
- for (i = 0; i < elsize / BITS_PER_UNIT; i++)
- imm |= (unsigned HOST_WIDE_INT) bytes[i] << (i * BITS_PER_UNIT);
-
- /* Construct 'abcdefgh' because the assembler cannot handle
- generic constants. */
- gcc_assert (shift != NULL && mvn != NULL);
- if (*mvn)
- imm = ~imm;
- imm = (imm >> *shift) & 0xff;
- *modconst = GEN_INT (imm);
- }
- }
-
- return immtype;
-#undef CHECK
-}
-
-/* Return TRUE if rtx X is legal for use as an AdvSIMD MOVI (or, implicitly,
- MVNI) immediate. Write back the width per element
- to *ELEMENTWIDTH, and a modified constant (whatever should be output
- for a MOVI instruction) in *MODCONST. */
-int
-aarch64_simd_immediate_valid_for_move (rtx op, enum machine_mode mode,
- rtx *modconst, int *elementwidth,
- unsigned char *elementchar,
- int *mvn, int *shift)
-{
- rtx tmpconst;
- int tmpwidth;
- unsigned char tmpwidthc;
- int tmpmvn = 0, tmpshift = 0;
- int retval = aarch64_simd_valid_immediate (op, mode, 0, &tmpconst,
- &tmpwidth, &tmpwidthc,
- &tmpmvn, &tmpshift);
-
- if (retval == -1)
- return 0;
-
- if (modconst)
- *modconst = tmpconst;
-
- if (elementwidth)
- *elementwidth = tmpwidth;
-
- if (elementchar)
- *elementchar = tmpwidthc;
-
- if (mvn)
- *mvn = tmpmvn;
-
- if (shift)
- *shift = tmpshift;
-
- return 1;
-}
-
-static bool
-aarch64_const_vec_all_same_int_p (rtx x,
- HOST_WIDE_INT minval,
- HOST_WIDE_INT maxval)
-{
- HOST_WIDE_INT firstval;
- int count, i;
-
- if (GET_CODE (x) != CONST_VECTOR
- || GET_MODE_CLASS (GET_MODE (x)) != MODE_VECTOR_INT)
- return false;
-
- firstval = INTVAL (CONST_VECTOR_ELT (x, 0));
- if (firstval < minval || firstval > maxval)
- return false;
-
- count = CONST_VECTOR_NUNITS (x);
- for (i = 1; i < count; i++)
- if (INTVAL (CONST_VECTOR_ELT (x, i)) != firstval)
- return false;
-
- return true;
-}
-
-/* Check whether immediate shift constants are within range. */
-bool
-aarch64_simd_shift_imm_p (rtx x, enum machine_mode mode, bool left)
-{
- int bit_width = GET_MODE_UNIT_SIZE (mode) * BITS_PER_UNIT;
- if (left)
- return aarch64_const_vec_all_same_int_p (x, 0, bit_width - 1);
- else
- return aarch64_const_vec_all_same_int_p (x, 1, bit_width);
-}
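-
-/* Example (added for exposition, not in the original sources): for V4SImode
-   the element width is 32 bits, so a vector left-shift immediate must be a
-   uniform constant in [0, 31] while a right-shift immediate must lie in
-   [1, 32], matching the AdvSIMD SHL/SSHR immediate ranges. */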
-
-/* Return true if X is a uniform vector where all elements
- are either the floating-point constant 0.0 or the
- integer constant 0. */
-bool
-aarch64_simd_imm_zero_p (rtx x, enum machine_mode mode)
-{
- return x == CONST0_RTX (mode);
-}
-
-bool
-aarch64_simd_imm_scalar_p (rtx x, enum machine_mode mode ATTRIBUTE_UNUSED)
-{
- HOST_WIDE_INT imm = INTVAL (x);
- int i;
-
- for (i = 0; i < 8; i++)
- {
- unsigned int byte = imm & 0xff;
- if (byte != 0xff && byte != 0)
- return false;
- imm >>= 8;
- }
-
- return true;
-}
-
-/* Return a const_int vector of VAL. */
-rtx
-aarch64_simd_gen_const_vector_dup (enum machine_mode mode, int val)
-{
- int nunits = GET_MODE_NUNITS (mode);
- rtvec v = rtvec_alloc (nunits);
- int i;
-
- for (i=0; i < nunits; i++)
- RTVEC_ELT (v, i) = GEN_INT (val);
-
- return gen_rtx_CONST_VECTOR (mode, v);
-}
-
-/* Construct and return a PARALLEL RTX vector. */
-rtx
-aarch64_simd_vect_par_cnst_half (enum machine_mode mode, bool high)
-{
- int nunits = GET_MODE_NUNITS (mode);
- rtvec v = rtvec_alloc (nunits / 2);
- int base = high ? nunits / 2 : 0;
- rtx t1;
- int i;
-
- for (i=0; i < nunits / 2; i++)
- RTVEC_ELT (v, i) = GEN_INT (base + i);
-
- t1 = gen_rtx_PARALLEL (mode, v);
- return t1;
-}
-
-/* Bounds-check lanes. Ensure OPERAND lies between LOW (inclusive) and
- HIGH (exclusive). */
-void
-aarch64_simd_lane_bounds (rtx operand, HOST_WIDE_INT low, HOST_WIDE_INT high)
-{
- HOST_WIDE_INT lane;
- gcc_assert (GET_CODE (operand) == CONST_INT);
- lane = INTVAL (operand);
-
- if (lane < low || lane >= high)
- error ("lane out of range");
-}
-
-void
-aarch64_simd_const_bounds (rtx operand, HOST_WIDE_INT low, HOST_WIDE_INT high)
-{
- gcc_assert (GET_CODE (operand) == CONST_INT);
- HOST_WIDE_INT lane = INTVAL (operand);
-
- if (lane < low || lane >= high)
- error ("constant out of range");
-}
-
-/* Emit code to reinterpret one AdvSIMD type as another,
- without altering bits. */
-void
-aarch64_simd_reinterpret (rtx dest, rtx src)
-{
- emit_move_insn (dest, gen_lowpart (GET_MODE (dest), src));
-}
-
-/* Emit code to place an AdvSIMD pair result in memory locations (with equal
- registers). */
-void
-aarch64_simd_emit_pair_result_insn (enum machine_mode mode,
- rtx (*intfn) (rtx, rtx, rtx), rtx destaddr,
- rtx op1)
-{
- rtx mem = gen_rtx_MEM (mode, destaddr);
- rtx tmp1 = gen_reg_rtx (mode);
- rtx tmp2 = gen_reg_rtx (mode);
-
- emit_insn (intfn (tmp1, op1, tmp2));
-
- emit_move_insn (mem, tmp1);
- mem = adjust_address (mem, mode, GET_MODE_SIZE (mode));
- emit_move_insn (mem, tmp2);
-}
-
-/* Return TRUE if OP is a valid vector addressing mode. */
-bool
-aarch64_simd_mem_operand_p (rtx op)
-{
- return MEM_P (op) && (GET_CODE (XEXP (op, 0)) == POST_INC
- || GET_CODE (XEXP (op, 0)) == REG);
-}
-
-/* Set up OPERANDS for a register copy from SRC to DEST, taking care
- not to early-clobber SRC registers in the process.
-
- We assume that the operands described by SRC and DEST represent a
- decomposed copy of OPERANDS[1] into OPERANDS[0]. COUNT is the
- number of components into which the copy has been decomposed. */
-void
-aarch64_simd_disambiguate_copy (rtx *operands, rtx *dest,
- rtx *src, unsigned int count)
-{
- unsigned int i;
-
- if (!reg_overlap_mentioned_p (operands[0], operands[1])
- || REGNO (operands[0]) < REGNO (operands[1]))
- {
- for (i = 0; i < count; i++)
- {
- operands[2 * i] = dest[i];
- operands[2 * i + 1] = src[i];
- }
- }
- else
- {
- for (i = 0; i < count; i++)
- {
- operands[2 * i] = dest[count - i - 1];
- operands[2 * i + 1] = src[count - i - 1];
- }
- }
-}
-
-/* Compute and return the length of aarch64_simd_mov<mode>, where <mode> is
- one of VSTRUCT modes: OI, CI or XI. */
-int
-aarch64_simd_attr_length_move (rtx insn)
-{
- enum machine_mode mode;
-
- extract_insn_cached (insn);
-
- if (REG_P (recog_data.operand[0]) && REG_P (recog_data.operand[1]))
- {
- mode = GET_MODE (recog_data.operand[0]);
- switch (mode)
- {
- case OImode:
- return 8;
- case CImode:
- return 12;
- case XImode:
- return 16;
- default:
- gcc_unreachable ();
- }
- }
- return 4;
-}
-
-/* Implement target hook TARGET_VECTOR_ALIGNMENT. The AAPCS64 sets the maximum
- alignment of a vector to 128 bits. */
-static HOST_WIDE_INT
-aarch64_simd_vector_alignment (const_tree type)
-{
- HOST_WIDE_INT align = tree_low_cst (TYPE_SIZE (type), 0);
- return MIN (align, 128);
-}
-
-/* Implement target hook TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE. */
-static bool
-aarch64_simd_vector_alignment_reachable (const_tree type, bool is_packed)
-{
- if (is_packed)
- return false;
-
- /* We guarantee alignment for vectors up to 128-bits. */
- if (tree_int_cst_compare (TYPE_SIZE (type),
- bitsize_int (BIGGEST_ALIGNMENT)) > 0)
- return false;
-
- /* Vectors whose size is <= BIGGEST_ALIGNMENT are naturally aligned. */
- return true;
-}
-
-/* If VALS is a vector constant that can be loaded into a register
- using DUP, generate instructions to do so and return an RTX to
- assign to the register. Otherwise return NULL_RTX. */
-static rtx
-aarch64_simd_dup_constant (rtx vals)
-{
- enum machine_mode mode = GET_MODE (vals);
- enum machine_mode inner_mode = GET_MODE_INNER (mode);
- int n_elts = GET_MODE_NUNITS (mode);
- bool all_same = true;
- rtx x;
- int i;
-
- if (GET_CODE (vals) != CONST_VECTOR)
- return NULL_RTX;
-
- for (i = 1; i < n_elts; ++i)
- {
- x = CONST_VECTOR_ELT (vals, i);
- if (!rtx_equal_p (x, CONST_VECTOR_ELT (vals, 0)))
- all_same = false;
- }
-
- if (!all_same)
- return NULL_RTX;
-
- /* We can load this constant by using DUP and a constant in a
- single ARM register. This will be cheaper than a vector
- load. */
- x = copy_to_mode_reg (inner_mode, CONST_VECTOR_ELT (vals, 0));
- return gen_rtx_VEC_DUPLICATE (mode, x);
-}
-
-
-/* Generate code to load VALS, which is a PARALLEL containing only
- constants (for vec_init) or CONST_VECTOR, efficiently into a
- register. Returns an RTX to copy into the register, or NULL_RTX
- for a PARALLEL that cannot be converted into a CONST_VECTOR. */
-static rtx
-aarch64_simd_make_constant (rtx vals)
-{
- enum machine_mode mode = GET_MODE (vals);
- rtx const_dup;
- rtx const_vec = NULL_RTX;
- int n_elts = GET_MODE_NUNITS (mode);
- int n_const = 0;
- int i;
-
- if (GET_CODE (vals) == CONST_VECTOR)
- const_vec = vals;
- else if (GET_CODE (vals) == PARALLEL)
- {
- /* A CONST_VECTOR must contain only CONST_INTs and
- CONST_DOUBLEs, but CONSTANT_P allows more (e.g. SYMBOL_REF).
- Only store valid constants in a CONST_VECTOR. */
- for (i = 0; i < n_elts; ++i)
- {
- rtx x = XVECEXP (vals, 0, i);
- if (CONST_INT_P (x) || CONST_DOUBLE_P (x))
- n_const++;
- }
- if (n_const == n_elts)
- const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
- }
- else
- gcc_unreachable ();
-
- if (const_vec != NULL_RTX
- && aarch64_simd_immediate_valid_for_move (const_vec, mode, NULL, NULL,
- NULL, NULL, NULL))
- /* Load using MOVI/MVNI. */
- return const_vec;
- else if ((const_dup = aarch64_simd_dup_constant (vals)) != NULL_RTX)
- /* Loaded using DUP. */
- return const_dup;
- else if (const_vec != NULL_RTX)
- /* Load from constant pool. We cannot take advantage of single-cycle
- LD1 because we need a PC-relative addressing mode. */
- return const_vec;
- else
- /* A PARALLEL containing something not valid inside CONST_VECTOR.
- We cannot construct an initializer. */
- return NULL_RTX;
-}
-
-void
-aarch64_expand_vector_init (rtx target, rtx vals)
-{
- enum machine_mode mode = GET_MODE (target);
- enum machine_mode inner_mode = GET_MODE_INNER (mode);
- int n_elts = GET_MODE_NUNITS (mode);
- int n_var = 0, one_var = -1;
- bool all_same = true;
- rtx x, mem;
- int i;
-
- x = XVECEXP (vals, 0, 0);
- if (!CONST_INT_P (x) && !CONST_DOUBLE_P (x))
- n_var = 1, one_var = 0;
-
- for (i = 1; i < n_elts; ++i)
- {
- x = XVECEXP (vals, 0, i);
- if (!CONST_INT_P (x) && !CONST_DOUBLE_P (x))
- ++n_var, one_var = i;
-
- if (!rtx_equal_p (x, XVECEXP (vals, 0, 0)))
- all_same = false;
- }
-
- if (n_var == 0)
- {
- rtx constant = aarch64_simd_make_constant (vals);
- if (constant != NULL_RTX)
- {
- emit_move_insn (target, constant);
- return;
- }
- }
-
- /* Splat a single non-constant element if we can. */
- if (all_same)
- {
- x = copy_to_mode_reg (inner_mode, XVECEXP (vals, 0, 0));
- aarch64_emit_move (target, gen_rtx_VEC_DUPLICATE (mode, x));
- return;
- }
-
- /* One field is non-constant. Load constant then overwrite varying
- field. This is more efficient than using the stack. */
- if (n_var == 1)
- {
- rtx copy = copy_rtx (vals);
- rtx index = GEN_INT (one_var);
- enum insn_code icode;
-
- /* Load constant part of vector, substitute neighboring value for
- varying element. */
- XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, one_var ^ 1);
- aarch64_expand_vector_init (target, copy);
-
- /* Insert variable. */
- x = copy_to_mode_reg (inner_mode, XVECEXP (vals, 0, one_var));
- icode = optab_handler (vec_set_optab, mode);
- gcc_assert (icode != CODE_FOR_nothing);
- emit_insn (GEN_FCN (icode) (target, x, index));
- return;
- }
-
- /* Construct the vector in memory one field at a time
- and load the whole vector. */
- mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
- for (i = 0; i < n_elts; i++)
- emit_move_insn (adjust_address_nv (mem, inner_mode,
- i * GET_MODE_SIZE (inner_mode)),
- XVECEXP (vals, 0, i));
- emit_move_insn (target, mem);
-
-}
-
-static unsigned HOST_WIDE_INT
-aarch64_shift_truncation_mask (enum machine_mode mode)
-{
- return
- (aarch64_vector_mode_supported_p (mode)
- || aarch64_vect_struct_mode_p (mode)) ? 0 : (GET_MODE_BITSIZE (mode) - 1);
-}
-
-#ifndef TLS_SECTION_ASM_FLAG
-#define TLS_SECTION_ASM_FLAG 'T'
-#endif
-
-void
-aarch64_elf_asm_named_section (const char *name, unsigned int flags,
- tree decl ATTRIBUTE_UNUSED)
-{
- char flagchars[10], *f = flagchars;
-
- /* If we have already declared this section, we can use an
- abbreviated form to switch back to it -- unless this section is
- part of a COMDAT group, in which case GAS requires the full
- declaration every time. */
- if (!(HAVE_COMDAT_GROUP && (flags & SECTION_LINKONCE))
- && (flags & SECTION_DECLARED))
- {
- fprintf (asm_out_file, "\t.section\t%s\n", name);
- return;
- }
-
- if (!(flags & SECTION_DEBUG))
- *f++ = 'a';
- if (flags & SECTION_WRITE)
- *f++ = 'w';
- if (flags & SECTION_CODE)
- *f++ = 'x';
- if (flags & SECTION_SMALL)
- *f++ = 's';
- if (flags & SECTION_MERGE)
- *f++ = 'M';
- if (flags & SECTION_STRINGS)
- *f++ = 'S';
- if (flags & SECTION_TLS)
- *f++ = TLS_SECTION_ASM_FLAG;
- if (HAVE_COMDAT_GROUP && (flags & SECTION_LINKONCE))
- *f++ = 'G';
- *f = '\0';
-
- fprintf (asm_out_file, "\t.section\t%s,\"%s\"", name, flagchars);
-
- if (!(flags & SECTION_NOTYPE))
- {
- const char *type;
- const char *format;
-
- if (flags & SECTION_BSS)
- type = "nobits";
- else
- type = "progbits";
-
-#ifdef TYPE_OPERAND_FMT
- format = "," TYPE_OPERAND_FMT;
-#else
- format = ",@%s";
-#endif
-
- fprintf (asm_out_file, format, type);
-
- if (flags & SECTION_ENTSIZE)
- fprintf (asm_out_file, ",%d", flags & SECTION_ENTSIZE);
- if (HAVE_COMDAT_GROUP && (flags & SECTION_LINKONCE))
- {
- if (TREE_CODE (decl) == IDENTIFIER_NODE)
- fprintf (asm_out_file, ",%s,comdat", IDENTIFIER_POINTER (decl));
- else
- fprintf (asm_out_file, ",%s,comdat",
- IDENTIFIER_POINTER (DECL_COMDAT_GROUP (decl)));
- }
- }
-
- putc ('\n', asm_out_file);
-}
-
-/* Select a format to encode pointers in exception handling data. */
-int
-aarch64_asm_preferred_eh_data_format (int code ATTRIBUTE_UNUSED, int global)
-{
- int type;
- switch (aarch64_cmodel)
- {
- case AARCH64_CMODEL_TINY:
- case AARCH64_CMODEL_TINY_PIC:
- case AARCH64_CMODEL_SMALL:
- case AARCH64_CMODEL_SMALL_PIC:
- /* text+got+data < 4Gb. 4-byte signed relocs are sufficient
- for everything. */
- type = DW_EH_PE_sdata4;
- break;
- default:
- /* No assumptions here. 8-byte relocs required. */
- type = DW_EH_PE_sdata8;
- break;
- }
- return (global ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | type;
-}
-
-/* Emit load exclusive. */
-
-static void
-aarch64_emit_load_exclusive (enum machine_mode mode, rtx rval,
- rtx mem, rtx model_rtx)
-{
- rtx (*gen) (rtx, rtx, rtx);
-
- switch (mode)
- {
- case QImode: gen = gen_aarch64_load_exclusiveqi; break;
- case HImode: gen = gen_aarch64_load_exclusivehi; break;
- case SImode: gen = gen_aarch64_load_exclusivesi; break;
- case DImode: gen = gen_aarch64_load_exclusivedi; break;
- default:
- gcc_unreachable ();
- }
-
- emit_insn (gen (rval, mem, model_rtx));
-}
-
-/* Emit store exclusive. */
-
-static void
-aarch64_emit_store_exclusive (enum machine_mode mode, rtx bval,
- rtx rval, rtx mem, rtx model_rtx)
-{
- rtx (*gen) (rtx, rtx, rtx, rtx);
-
- switch (mode)
- {
- case QImode: gen = gen_aarch64_store_exclusiveqi; break;
- case HImode: gen = gen_aarch64_store_exclusivehi; break;
- case SImode: gen = gen_aarch64_store_exclusivesi; break;
- case DImode: gen = gen_aarch64_store_exclusivedi; break;
- default:
- gcc_unreachable ();
- }
-
- emit_insn (gen (bval, rval, mem, model_rtx));
-}
-
-/* Mark the previous jump instruction as unlikely. */
-
-static void
-aarch64_emit_unlikely_jump (rtx insn)
-{
- rtx very_unlikely = GEN_INT (REG_BR_PROB_BASE / 100 - 1);
-
- insn = emit_jump_insn (insn);
- add_reg_note (insn, REG_BR_PROB, very_unlikely);
-}
-
-/* Expand a compare and swap pattern. */
-
-void
-aarch64_expand_compare_and_swap (rtx operands[])
-{
- rtx bval, rval, mem, oldval, newval, is_weak, mod_s, mod_f, x;
- enum machine_mode mode, cmp_mode;
- rtx (*gen) (rtx, rtx, rtx, rtx, rtx, rtx, rtx);
-
- bval = operands[0];
- rval = operands[1];
- mem = operands[2];
- oldval = operands[3];
- newval = operands[4];
- is_weak = operands[5];
- mod_s = operands[6];
- mod_f = operands[7];
- mode = GET_MODE (mem);
- cmp_mode = mode;
-
- /* Normally the succ memory model must be stronger than fail, but in the
- unlikely event of fail being ACQUIRE and succ being RELEASE we need to
- promote succ to ACQ_REL so that we don't lose the acquire semantics. */
-
- if (INTVAL (mod_f) == MEMMODEL_ACQUIRE
- && INTVAL (mod_s) == MEMMODEL_RELEASE)
- mod_s = GEN_INT (MEMMODEL_ACQ_REL);
-
- switch (mode)
- {
- case QImode:
- case HImode:
- /* For short modes, we're going to perform the comparison in SImode,
- so do the zero-extension now. */
- cmp_mode = SImode;
- rval = gen_reg_rtx (SImode);
- oldval = convert_modes (SImode, mode, oldval, true);
- /* Fall through. */
-
- case SImode:
- case DImode:
- /* Force the value into a register if needed. */
- if (!aarch64_plus_operand (oldval, mode))
- oldval = force_reg (cmp_mode, oldval);
- break;
-
- default:
- gcc_unreachable ();
- }
-
- switch (mode)
- {
- case QImode: gen = gen_atomic_compare_and_swapqi_1; break;
- case HImode: gen = gen_atomic_compare_and_swaphi_1; break;
- case SImode: gen = gen_atomic_compare_and_swapsi_1; break;
- case DImode: gen = gen_atomic_compare_and_swapdi_1; break;
- default:
- gcc_unreachable ();
- }
-
- emit_insn (gen (rval, mem, oldval, newval, is_weak, mod_s, mod_f));
-
- if (mode == QImode || mode == HImode)
- emit_move_insn (operands[1], gen_lowpart (mode, rval));
-
- x = gen_rtx_REG (CCmode, CC_REGNUM);
- x = gen_rtx_EQ (SImode, x, const0_rtx);
- emit_insn (gen_rtx_SET (VOIDmode, bval, x));
-}
-
-/* Split a compare and swap pattern. */
-
-void
-aarch64_split_compare_and_swap (rtx operands[])
-{
- rtx rval, mem, oldval, newval, scratch;
- enum machine_mode mode;
- bool is_weak;
- rtx label1, label2, x, cond;
-
- rval = operands[0];
- mem = operands[1];
- oldval = operands[2];
- newval = operands[3];
- is_weak = (operands[4] != const0_rtx);
- scratch = operands[7];
- mode = GET_MODE (mem);
-
- label1 = NULL_RTX;
- if (!is_weak)
- {
- label1 = gen_label_rtx ();
- emit_label (label1);
- }
- label2 = gen_label_rtx ();
-
- aarch64_emit_load_exclusive (mode, rval, mem, operands[5]);
-
- cond = aarch64_gen_compare_reg (NE, rval, oldval);
- x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
- x = gen_rtx_IF_THEN_ELSE (VOIDmode, x,
- gen_rtx_LABEL_REF (Pmode, label2), pc_rtx);
- aarch64_emit_unlikely_jump (gen_rtx_SET (VOIDmode, pc_rtx, x));
-
- aarch64_emit_store_exclusive (mode, scratch, mem, newval, operands[5]);
-
- if (!is_weak)
- {
- x = gen_rtx_NE (VOIDmode, scratch, const0_rtx);
- x = gen_rtx_IF_THEN_ELSE (VOIDmode, x,
- gen_rtx_LABEL_REF (Pmode, label1), pc_rtx);
- aarch64_emit_unlikely_jump (gen_rtx_SET (VOIDmode, pc_rtx, x));
- }
- else
- {
- cond = gen_rtx_REG (CCmode, CC_REGNUM);
- x = gen_rtx_COMPARE (CCmode, scratch, const0_rtx);
- emit_insn (gen_rtx_SET (VOIDmode, cond, x));
- }
-
- emit_label (label2);
-}
-
-/* Split an atomic operation. */
-
-void
-aarch64_split_atomic_op (enum rtx_code code, rtx old_out, rtx new_out, rtx mem,
- rtx value, rtx model_rtx, rtx cond)
-{
- enum machine_mode mode = GET_MODE (mem);
- enum machine_mode wmode = (mode == DImode ? DImode : SImode);
- rtx label, x;
-
- label = gen_label_rtx ();
- emit_label (label);
-
- if (new_out)
- new_out = gen_lowpart (wmode, new_out);
- if (old_out)
- old_out = gen_lowpart (wmode, old_out);
- else
- old_out = new_out;
- value = simplify_gen_subreg (wmode, value, mode, 0);
-
- aarch64_emit_load_exclusive (mode, old_out, mem, model_rtx);
-
- switch (code)
- {
- case SET:
- new_out = value;
- break;
-
- case NOT:
- x = gen_rtx_AND (wmode, old_out, value);
- emit_insn (gen_rtx_SET (VOIDmode, new_out, x));
- x = gen_rtx_NOT (wmode, new_out);
- emit_insn (gen_rtx_SET (VOIDmode, new_out, x));
- break;
-
- case MINUS:
- if (CONST_INT_P (value))
- {
- value = GEN_INT (-INTVAL (value));
- code = PLUS;
- }
- /* Fall through. */
-
- default:
- x = gen_rtx_fmt_ee (code, wmode, old_out, value);
- emit_insn (gen_rtx_SET (VOIDmode, new_out, x));
- break;
- }
-
- aarch64_emit_store_exclusive (mode, cond, mem,
- gen_lowpart (mode, new_out), model_rtx);
-
- x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
- x = gen_rtx_IF_THEN_ELSE (VOIDmode, x,
- gen_rtx_LABEL_REF (Pmode, label), pc_rtx);
- aarch64_emit_unlikely_jump (gen_rtx_SET (VOIDmode, pc_rtx, x));
-}
-
-static void
-aarch64_print_extension (void)
-{
- const struct aarch64_option_extension *opt = NULL;
-
- for (opt = all_extensions; opt->name != NULL; opt++)
- if ((aarch64_isa_flags & opt->flags_on) == opt->flags_on)
- asm_fprintf (asm_out_file, "+%s", opt->name);
-
- asm_fprintf (asm_out_file, "\n");
-}
-
-static void
-aarch64_start_file (void)
-{
- if (selected_arch)
- {
- asm_fprintf (asm_out_file, "\t.arch %s", selected_arch->name);
- aarch64_print_extension ();
- }
- else if (selected_cpu)
- {
- asm_fprintf (asm_out_file, "\t.cpu %s", selected_cpu->name);
- aarch64_print_extension ();
- }
- default_file_start();
-}
-
-/* Target hook for c_mode_for_suffix. */
-static enum machine_mode
-aarch64_c_mode_for_suffix (char suffix)
-{
- if (suffix == 'q')
- return TFmode;
-
- return VOIDmode;
-}
-
-/* We can only represent floating point constants which will fit in
- "quarter-precision" values. These values are characterised by
- a sign bit, a 4-bit mantissa and a 3-bit exponent, and are given
- by:
-
- (-1)^s * (n/16) * 2^r
-
- Where:
- 's' is the sign bit.
- 'n' is an integer in the range 16 <= n <= 31.
- 'r' is an integer in the range -3 <= r <= 4. */
-
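-/* A few representative values (added for exposition, not part of the
-   original comment): 1.0 = (16/16) * 2^0, 1.25 = (20/16) * 2^0, the
-   smallest positive value is 0.125 = (16/16) * 2^-3 and the largest is
-   31.0 = (31/16) * 2^4. */
-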
-/* Return true iff X can be represented as a quarter-precision
- floating point immediate operand. Note, we cannot represent 0.0. */
-bool
-aarch64_float_const_representable_p (rtx x)
-{
- /* This represents our current view of how many bits
- make up the mantissa. */
- int point_pos = 2 * HOST_BITS_PER_WIDE_INT - 1;
- int exponent;
- unsigned HOST_WIDE_INT mantissa, mask;
- HOST_WIDE_INT m1, m2;
- REAL_VALUE_TYPE r, m;
-
- if (!CONST_DOUBLE_P (x))
- return false;
-
- REAL_VALUE_FROM_CONST_DOUBLE (r, x);
-
- /* We cannot represent infinities, NaNs or +/-zero. We won't
- know if we have +zero until we analyse the mantissa, but we
- can reject the other invalid values. */
- if (REAL_VALUE_ISINF (r) || REAL_VALUE_ISNAN (r)
- || REAL_VALUE_MINUS_ZERO (r))
- return false;
-
- /* Extract exponent. */
- r = real_value_abs (&r);
- exponent = REAL_EXP (&r);
-
- /* For the mantissa, we expand into two HOST_WIDE_INTS, apart from the
- highest (sign) bit, with a fixed binary point at bit point_pos.
- m1 holds the low part of the mantissa, m2 the high part.
- WARNING: If we ever have a representation using more than 2 * H_W_I - 1
- bits for the mantissa, this can fail (low bits will be lost). */
- real_ldexp (&m, &r, point_pos - exponent);
- REAL_VALUE_TO_INT (&m1, &m2, m);
-
- /* If the low part of the mantissa has bits set we cannot represent
- the value. */
- if (m1 != 0)
- return false;
- /* We have rejected the lower HOST_WIDE_INT, so update our
- understanding of how many bits lie in the mantissa and
- look only at the high HOST_WIDE_INT. */
- mantissa = m2;
- point_pos -= HOST_BITS_PER_WIDE_INT;
-
- /* We can only represent values with a mantissa of the form 1.xxxx. */
- mask = ((unsigned HOST_WIDE_INT)1 << (point_pos - 5)) - 1;
- if ((mantissa & mask) != 0)
- return false;
-
- /* Having filtered unrepresentable values, we may now remove all
- but the highest 5 bits. */
- mantissa >>= point_pos - 5;
-
- /* We cannot represent the value 0.0, so reject it. This is handled
- elsewhere. */
- if (mantissa == 0)
- return false;
-
- /* Then, as bit 4 is always set, we can mask it off, leaving
- the mantissa in the range [0, 15]. */
- mantissa &= ~(1 << 4);
- gcc_assert (mantissa <= 15);
-
- /* GCC internally does not use IEEE754-like encoding (where normalized
- significands are in the range [1, 2). GCC uses [0.5, 1) (see real.c).
- Our mantissa values are shifted 4 places to the left relative to
- normalized IEEE754 so we must modify the exponent returned by REAL_EXP
- by 5 places to correct for GCC's representation. */
- exponent = 5 - exponent;
-
- return (exponent >= 0 && exponent <= 7);
-}
-
-char*
-aarch64_output_simd_mov_immediate (rtx *const_vector,
- enum machine_mode mode,
- unsigned width)
-{
- int is_valid;
- unsigned char widthc;
- int lane_width_bits;
- static char templ[40];
- int shift = 0, mvn = 0;
- const char *mnemonic;
- unsigned int lane_count = 0;
-
- is_valid =
- aarch64_simd_immediate_valid_for_move (*const_vector, mode,
- const_vector, &lane_width_bits,
- &widthc, &mvn, &shift);
- gcc_assert (is_valid);
-
- mode = GET_MODE_INNER (mode);
- if (mode == SFmode || mode == DFmode)
- {
- bool zero_p =
- aarch64_float_const_zero_rtx_p (*const_vector);
- gcc_assert (shift == 0);
- mnemonic = zero_p ? "movi" : "fmov";
- }
- else
- mnemonic = mvn ? "mvni" : "movi";
-
- gcc_assert (lane_width_bits != 0);
- lane_count = width / lane_width_bits;
-
- if (lane_count == 1)
- snprintf (templ, sizeof (templ), "%s\t%%d0, %%1", mnemonic);
- else if (shift)
- snprintf (templ, sizeof (templ), "%s\t%%0.%d%c, %%1, lsl %d",
- mnemonic, lane_count, widthc, shift);
- else
- snprintf (templ, sizeof (templ), "%s\t%%0.%d%c, %%1",
- mnemonic, lane_count, widthc);
- return templ;
-}
-
-/* Split operands into moves from op[1] + op[2] into op[0]. */
-
-void
-aarch64_split_combinev16qi (rtx operands[3])
-{
- unsigned int dest = REGNO (operands[0]);
- unsigned int src1 = REGNO (operands[1]);
- unsigned int src2 = REGNO (operands[2]);
- enum machine_mode halfmode = GET_MODE (operands[1]);
- unsigned int halfregs = HARD_REGNO_NREGS (src1, halfmode);
- rtx destlo, desthi;
-
- gcc_assert (halfmode == V16QImode);
-
- if (src1 == dest && src2 == dest + halfregs)
- {
- /* No-op move. Can't split to nothing; emit something. */
- emit_note (NOTE_INSN_DELETED);
- return;
- }
-
- /* Preserve register attributes for variable tracking. */
- destlo = gen_rtx_REG_offset (operands[0], halfmode, dest, 0);
- desthi = gen_rtx_REG_offset (operands[0], halfmode, dest + halfregs,
- GET_MODE_SIZE (halfmode));
-
- /* Special case of reversed high/low parts. */
- if (reg_overlap_mentioned_p (operands[2], destlo)
- && reg_overlap_mentioned_p (operands[1], desthi))
- {
- emit_insn (gen_xorv16qi3 (operands[1], operands[1], operands[2]));
- emit_insn (gen_xorv16qi3 (operands[2], operands[1], operands[2]));
- emit_insn (gen_xorv16qi3 (operands[1], operands[1], operands[2]));
- }
- else if (!reg_overlap_mentioned_p (operands[2], destlo))
- {
- /* Try to avoid unnecessary moves if part of the result
- is in the right place already. */
- if (src1 != dest)
- emit_move_insn (destlo, operands[1]);
- if (src2 != dest + halfregs)
- emit_move_insn (desthi, operands[2]);
- }
- else
- {
- if (src2 != dest + halfregs)
- emit_move_insn (desthi, operands[2]);
- if (src1 != dest)
- emit_move_insn (destlo, operands[1]);
- }
-}
-
-/* vec_perm support. */
-
-#define MAX_VECT_LEN 16
-
-struct expand_vec_perm_d
-{
- rtx target, op0, op1;
- unsigned char perm[MAX_VECT_LEN];
- enum machine_mode vmode;
- unsigned char nelt;
- bool one_vector_p;
- bool testing_p;
-};
-
-/* Generate a variable permutation. */
-
-static void
-aarch64_expand_vec_perm_1 (rtx target, rtx op0, rtx op1, rtx sel)
-{
- enum machine_mode vmode = GET_MODE (target);
- bool one_vector_p = rtx_equal_p (op0, op1);
-
- gcc_checking_assert (vmode == V8QImode || vmode == V16QImode);
- gcc_checking_assert (GET_MODE (op0) == vmode);
- gcc_checking_assert (GET_MODE (op1) == vmode);
- gcc_checking_assert (GET_MODE (sel) == vmode);
- gcc_checking_assert (TARGET_SIMD);
-
- if (one_vector_p)
- {
- if (vmode == V8QImode)
- {
- /* Expand the argument to a V16QI mode by duplicating it. */
- rtx pair = gen_reg_rtx (V16QImode);
- emit_insn (gen_aarch64_combinev8qi (pair, op0, op0));
- emit_insn (gen_aarch64_tbl1v8qi (target, pair, sel));
- }
- else
- {
- emit_insn (gen_aarch64_tbl1v16qi (target, op0, sel));
- }
- }
- else
- {
- rtx pair;
-
- if (vmode == V8QImode)
- {
- pair = gen_reg_rtx (V16QImode);
- emit_insn (gen_aarch64_combinev8qi (pair, op0, op1));
- emit_insn (gen_aarch64_tbl1v8qi (target, pair, sel));
- }
- else
- {
- pair = gen_reg_rtx (OImode);
- emit_insn (gen_aarch64_combinev16qi (pair, op0, op1));
- emit_insn (gen_aarch64_tbl2v16qi (target, pair, sel));
- }
- }
-}
-
-void
-aarch64_expand_vec_perm (rtx target, rtx op0, rtx op1, rtx sel)
-{
- enum machine_mode vmode = GET_MODE (target);
- unsigned int i, nelt = GET_MODE_NUNITS (vmode);
- bool one_vector_p = rtx_equal_p (op0, op1);
- rtx rmask[MAX_VECT_LEN], mask;
-
- gcc_checking_assert (!BYTES_BIG_ENDIAN);
-
- /* The TBL instruction does not use a modulo index, so we must take care
- of that ourselves. */
- mask = GEN_INT (one_vector_p ? nelt - 1 : 2 * nelt - 1);
- for (i = 0; i < nelt; ++i)
- rmask[i] = mask;
- mask = gen_rtx_CONST_VECTOR (vmode, gen_rtvec_v (nelt, rmask));
- sel = expand_simple_binop (vmode, AND, sel, mask, NULL, 0, OPTAB_LIB_WIDEN);
-
- aarch64_expand_vec_perm_1 (target, op0, op1, sel);
-}
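-
-/* Example (added for exposition, not in the original sources): for a
-   single-vector V8QImode permute the mask above is 7, so a selector element
-   of 9 is reduced to 9 & 7 == 1 and picks element 1, giving the modulo
-   behaviour that TBL itself does not provide (TBL yields 0 for an
-   out-of-range index). */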
-
-/* Recognize patterns suitable for the TRN instructions. */
-static bool
-aarch64_evpc_trn (struct expand_vec_perm_d *d)
-{
- unsigned int i, odd, mask, nelt = d->nelt;
- rtx out, in0, in1, x;
- rtx (*gen) (rtx, rtx, rtx);
- enum machine_mode vmode = d->vmode;
-
- if (GET_MODE_UNIT_SIZE (vmode) > 8)
- return false;
-
- /* Note that these are little-endian tests.
- We correct for big-endian later. */
- if (d->perm[0] == 0)
- odd = 0;
- else if (d->perm[0] == 1)
- odd = 1;
- else
- return false;
- mask = (d->one_vector_p ? nelt - 1 : 2 * nelt - 1);
-
- for (i = 0; i < nelt; i += 2)
- {
- if (d->perm[i] != i + odd)
- return false;
- if (d->perm[i + 1] != ((i + nelt + odd) & mask))
- return false;
- }
-
- /* Success! */
- if (d->testing_p)
- return true;
-
- in0 = d->op0;
- in1 = d->op1;
- if (BYTES_BIG_ENDIAN)
- {
- x = in0, in0 = in1, in1 = x;
- odd = !odd;
- }
- out = d->target;
-
- if (odd)
- {
- switch (vmode)
- {
- case V16QImode: gen = gen_aarch64_trn2v16qi; break;
- case V8QImode: gen = gen_aarch64_trn2v8qi; break;
- case V8HImode: gen = gen_aarch64_trn2v8hi; break;
- case V4HImode: gen = gen_aarch64_trn2v4hi; break;
- case V4SImode: gen = gen_aarch64_trn2v4si; break;
- case V2SImode: gen = gen_aarch64_trn2v2si; break;
- case V2DImode: gen = gen_aarch64_trn2v2di; break;
- case V4SFmode: gen = gen_aarch64_trn2v4sf; break;
- case V2SFmode: gen = gen_aarch64_trn2v2sf; break;
- case V2DFmode: gen = gen_aarch64_trn2v2df; break;
- default:
- return false;
- }
- }
- else
- {
- switch (vmode)
- {
- case V16QImode: gen = gen_aarch64_trn1v16qi; break;
- case V8QImode: gen = gen_aarch64_trn1v8qi; break;
- case V8HImode: gen = gen_aarch64_trn1v8hi; break;
- case V4HImode: gen = gen_aarch64_trn1v4hi; break;
- case V4SImode: gen = gen_aarch64_trn1v4si; break;
- case V2SImode: gen = gen_aarch64_trn1v2si; break;
- case V2DImode: gen = gen_aarch64_trn1v2di; break;
- case V4SFmode: gen = gen_aarch64_trn1v4sf; break;
- case V2SFmode: gen = gen_aarch64_trn1v2sf; break;
- case V2DFmode: gen = gen_aarch64_trn1v2df; break;
- default:
- return false;
- }
- }
-
- emit_insn (gen (out, in0, in1));
- return true;
-}
-
-/* Recognize patterns suitable for the UZP instructions. */
-static bool
-aarch64_evpc_uzp (struct expand_vec_perm_d *d)
-{
- unsigned int i, odd, mask, nelt = d->nelt;
- rtx out, in0, in1, x;
- rtx (*gen) (rtx, rtx, rtx);
- enum machine_mode vmode = d->vmode;
-
- if (GET_MODE_UNIT_SIZE (vmode) > 8)
- return false;
-
- /* Note that these are little-endian tests.
- We correct for big-endian later. */
- if (d->perm[0] == 0)
- odd = 0;
- else if (d->perm[0] == 1)
- odd = 1;
- else
- return false;
- mask = (d->one_vector_p ? nelt - 1 : 2 * nelt - 1);
-
- for (i = 0; i < nelt; i++)
- {
- unsigned elt = (i * 2 + odd) & mask;
- if (d->perm[i] != elt)
- return false;
- }
-
- /* Success! */
- if (d->testing_p)
- return true;
-
- in0 = d->op0;
- in1 = d->op1;
- if (BYTES_BIG_ENDIAN)
- {
- x = in0, in0 = in1, in1 = x;
- odd = !odd;
- }
- out = d->target;
-
- if (odd)
- {
- switch (vmode)
- {
- case V16QImode: gen = gen_aarch64_uzp2v16qi; break;
- case V8QImode: gen = gen_aarch64_uzp2v8qi; break;
- case V8HImode: gen = gen_aarch64_uzp2v8hi; break;
- case V4HImode: gen = gen_aarch64_uzp2v4hi; break;
- case V4SImode: gen = gen_aarch64_uzp2v4si; break;
- case V2SImode: gen = gen_aarch64_uzp2v2si; break;
- case V2DImode: gen = gen_aarch64_uzp2v2di; break;
- case V4SFmode: gen = gen_aarch64_uzp2v4sf; break;
- case V2SFmode: gen = gen_aarch64_uzp2v2sf; break;
- case V2DFmode: gen = gen_aarch64_uzp2v2df; break;
- default:
- return false;
- }
- }
- else
- {
- switch (vmode)
- {
- case V16QImode: gen = gen_aarch64_uzp1v16qi; break;
- case V8QImode: gen = gen_aarch64_uzp1v8qi; break;
- case V8HImode: gen = gen_aarch64_uzp1v8hi; break;
- case V4HImode: gen = gen_aarch64_uzp1v4hi; break;
- case V4SImode: gen = gen_aarch64_uzp1v4si; break;
- case V2SImode: gen = gen_aarch64_uzp1v2si; break;
- case V2DImode: gen = gen_aarch64_uzp1v2di; break;
- case V4SFmode: gen = gen_aarch64_uzp1v4sf; break;
- case V2SFmode: gen = gen_aarch64_uzp1v2sf; break;
- case V2DFmode: gen = gen_aarch64_uzp1v2df; break;
- default:
- return false;
- }
- }
-
- emit_insn (gen (out, in0, in1));
- return true;
-}
-
-/* Recognize patterns suitable for the ZIP instructions. */
-static bool
-aarch64_evpc_zip (struct expand_vec_perm_d *d)
-{
- unsigned int i, high, mask, nelt = d->nelt;
- rtx out, in0, in1, x;
- rtx (*gen) (rtx, rtx, rtx);
- enum machine_mode vmode = d->vmode;
-
- if (GET_MODE_UNIT_SIZE (vmode) > 8)
- return false;
-
- /* Note that these are little-endian tests.
- We correct for big-endian later. */
- high = nelt / 2;
- if (d->perm[0] == high)
- /* Do Nothing. */
- ;
- else if (d->perm[0] == 0)
- high = 0;
- else
- return false;
- mask = (d->one_vector_p ? nelt - 1 : 2 * nelt - 1);
-
- for (i = 0; i < nelt / 2; i++)
- {
- unsigned elt = (i + high) & mask;
- if (d->perm[i * 2] != elt)
- return false;
- elt = (elt + nelt) & mask;
- if (d->perm[i * 2 + 1] != elt)
- return false;
- }
-
- /* Success! */
- if (d->testing_p)
- return true;
-
- in0 = d->op0;
- in1 = d->op1;
- if (BYTES_BIG_ENDIAN)
- {
- x = in0, in0 = in1, in1 = x;
- high = !high;
- }
- out = d->target;
-
- if (high)
- {
- switch (vmode)
- {
- case V16QImode: gen = gen_aarch64_zip2v16qi; break;
- case V8QImode: gen = gen_aarch64_zip2v8qi; break;
- case V8HImode: gen = gen_aarch64_zip2v8hi; break;
- case V4HImode: gen = gen_aarch64_zip2v4hi; break;
- case V4SImode: gen = gen_aarch64_zip2v4si; break;
- case V2SImode: gen = gen_aarch64_zip2v2si; break;
- case V2DImode: gen = gen_aarch64_zip2v2di; break;
- case V4SFmode: gen = gen_aarch64_zip2v4sf; break;
- case V2SFmode: gen = gen_aarch64_zip2v2sf; break;
- case V2DFmode: gen = gen_aarch64_zip2v2df; break;
- default:
- return false;
- }
- }
- else
- {
- switch (vmode)
- {
- case V16QImode: gen = gen_aarch64_zip1v16qi; break;
- case V8QImode: gen = gen_aarch64_zip1v8qi; break;
- case V8HImode: gen = gen_aarch64_zip1v8hi; break;
- case V4HImode: gen = gen_aarch64_zip1v4hi; break;
- case V4SImode: gen = gen_aarch64_zip1v4si; break;
- case V2SImode: gen = gen_aarch64_zip1v2si; break;
- case V2DImode: gen = gen_aarch64_zip1v2di; break;
- case V4SFmode: gen = gen_aarch64_zip1v4sf; break;
- case V2SFmode: gen = gen_aarch64_zip1v2sf; break;
- case V2DFmode: gen = gen_aarch64_zip1v2df; break;
- default:
- return false;
- }
- }
-
- emit_insn (gen (out, in0, in1));
- return true;
-}
-
-static bool
-aarch64_evpc_tbl (struct expand_vec_perm_d *d)
-{
- rtx rperm[MAX_VECT_LEN], sel;
- enum machine_mode vmode = d->vmode;
- unsigned int i, nelt = d->nelt;
-
- /* TODO: ARM's TBL indexing is little-endian. In order to handle GCC's
- numbering of elements for big-endian, we must reverse the order. */
- if (BYTES_BIG_ENDIAN)
- return false;
-
- if (d->testing_p)
- return true;
-
- /* Generic code will try constant permutation twice. Once with the
- original mode and again with the elements lowered to QImode.
- So wait and don't do the selector expansion ourselves. */
- if (vmode != V8QImode && vmode != V16QImode)
- return false;
-
- for (i = 0; i < nelt; ++i)
- rperm[i] = GEN_INT (d->perm[i]);
- sel = gen_rtx_CONST_VECTOR (vmode, gen_rtvec_v (nelt, rperm));
- sel = force_reg (vmode, sel);
-
- aarch64_expand_vec_perm_1 (d->target, d->op0, d->op1, sel);
- return true;
-}
-
-static bool
-aarch64_expand_vec_perm_const_1 (struct expand_vec_perm_d *d)
-{
- /* The pattern matching functions above are written to look for a small
- number to begin the sequence (0, 1, N/2). If we begin with an index
- from the second operand, we can swap the operands. */
- if (d->perm[0] >= d->nelt)
- {
- unsigned i, nelt = d->nelt;
- rtx x;
-
- for (i = 0; i < nelt; ++i)
- d->perm[i] = (d->perm[i] + nelt) & (2 * nelt - 1);
-
- x = d->op0;
- d->op0 = d->op1;
- d->op1 = x;
- }
-
- if (TARGET_SIMD)
- {
- if (aarch64_evpc_zip (d))
- return true;
- else if (aarch64_evpc_uzp (d))
- return true;
- else if (aarch64_evpc_trn (d))
- return true;
- return aarch64_evpc_tbl (d);
- }
- return false;
-}
-
-/* Expand a vec_perm_const pattern. */
-
-bool
-aarch64_expand_vec_perm_const (rtx target, rtx op0, rtx op1, rtx sel)
-{
- struct expand_vec_perm_d d;
- int i, nelt, which;
-
- d.target = target;
- d.op0 = op0;
- d.op1 = op1;
-
- d.vmode = GET_MODE (target);
- gcc_assert (VECTOR_MODE_P (d.vmode));
- d.nelt = nelt = GET_MODE_NUNITS (d.vmode);
- d.testing_p = false;
-
- for (i = which = 0; i < nelt; ++i)
- {
- rtx e = XVECEXP (sel, 0, i);
- int ei = INTVAL (e) & (2 * nelt - 1);
- which |= (ei < nelt ? 1 : 2);
- d.perm[i] = ei;
- }
-
- switch (which)
- {
- default:
- gcc_unreachable ();
-
- case 3:
- d.one_vector_p = false;
- if (!rtx_equal_p (op0, op1))
- break;
-
- /* The elements of PERM do not suggest that only the first operand
- is used, but both operands are identical. Allow easier matching
- of the permutation by folding the permutation into the single
- input vector. */
- /* Fall Through. */
- case 2:
- for (i = 0; i < nelt; ++i)
- d.perm[i] &= nelt - 1;
- d.op0 = op1;
- d.one_vector_p = true;
- break;
-
- case 1:
- d.op1 = op0;
- d.one_vector_p = true;
- break;
- }
-
- return aarch64_expand_vec_perm_const_1 (&d);
-}
-
-static bool
-aarch64_vectorize_vec_perm_const_ok (enum machine_mode vmode,
- const unsigned char *sel)
-{
- struct expand_vec_perm_d d;
- unsigned int i, nelt, which;
- bool ret;
-
- d.vmode = vmode;
- d.nelt = nelt = GET_MODE_NUNITS (d.vmode);
- d.testing_p = true;
- memcpy (d.perm, sel, nelt);
-
- /* Calculate whether all elements are in one vector. */
- for (i = which = 0; i < nelt; ++i)
- {
- unsigned char e = d.perm[i];
- gcc_assert (e < 2 * nelt);
- which |= (e < nelt ? 1 : 2);
- }
-
- /* If all elements are from the second vector, reindex as if from the
- first vector. */
- if (which == 2)
- for (i = 0; i < nelt; ++i)
- d.perm[i] -= nelt;
-
- /* Check whether the mask can be applied to a single vector. */
- d.one_vector_p = (which != 3);
-
- d.target = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 1);
- d.op1 = d.op0 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 2);
- if (!d.one_vector_p)
- d.op1 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 3);
-
- start_sequence ();
- ret = aarch64_expand_vec_perm_const_1 (&d);
- end_sequence ();
-
- return ret;
-}
-
-#undef TARGET_ADDRESS_COST
-#define TARGET_ADDRESS_COST aarch64_address_cost
-
-/* This hook determines whether unnamed bitfields affect the alignment
- of the containing structure. The hook returns true if the structure
- should inherit the alignment requirements of an unnamed bitfield's
- type. */
-#undef TARGET_ALIGN_ANON_BITFIELD
-#define TARGET_ALIGN_ANON_BITFIELD hook_bool_void_true
-
-#undef TARGET_ASM_ALIGNED_DI_OP
-#define TARGET_ASM_ALIGNED_DI_OP "\t.xword\t"
-
-#undef TARGET_ASM_ALIGNED_HI_OP
-#define TARGET_ASM_ALIGNED_HI_OP "\t.hword\t"
-
-#undef TARGET_ASM_ALIGNED_SI_OP
-#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
-
-#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
-#define TARGET_ASM_CAN_OUTPUT_MI_THUNK \
- hook_bool_const_tree_hwi_hwi_const_tree_true
-
-#undef TARGET_ASM_FILE_START
-#define TARGET_ASM_FILE_START aarch64_start_file
-
-#undef TARGET_ASM_OUTPUT_MI_THUNK
-#define TARGET_ASM_OUTPUT_MI_THUNK aarch64_output_mi_thunk
-
-#undef TARGET_ASM_SELECT_RTX_SECTION
-#define TARGET_ASM_SELECT_RTX_SECTION aarch64_select_rtx_section
-
-#undef TARGET_ASM_TRAMPOLINE_TEMPLATE
-#define TARGET_ASM_TRAMPOLINE_TEMPLATE aarch64_asm_trampoline_template
-
-#undef TARGET_BUILD_BUILTIN_VA_LIST
-#define TARGET_BUILD_BUILTIN_VA_LIST aarch64_build_builtin_va_list
-
-#undef TARGET_CALLEE_COPIES
-#define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_false
-
-#undef TARGET_CAN_ELIMINATE
-#define TARGET_CAN_ELIMINATE aarch64_can_eliminate
-
-#undef TARGET_CANNOT_FORCE_CONST_MEM
-#define TARGET_CANNOT_FORCE_CONST_MEM aarch64_cannot_force_const_mem
-
-#undef TARGET_CONDITIONAL_REGISTER_USAGE
-#define TARGET_CONDITIONAL_REGISTER_USAGE aarch64_conditional_register_usage
-
-/* Only the least significant bit is used for initialization guard
- variables. */
-#undef TARGET_CXX_GUARD_MASK_BIT
-#define TARGET_CXX_GUARD_MASK_BIT hook_bool_void_true
-
-#undef TARGET_C_MODE_FOR_SUFFIX
-#define TARGET_C_MODE_FOR_SUFFIX aarch64_c_mode_for_suffix
-
-#ifdef TARGET_BIG_ENDIAN_DEFAULT
-#undef TARGET_DEFAULT_TARGET_FLAGS
-#define TARGET_DEFAULT_TARGET_FLAGS (MASK_BIG_END)
-#endif
-
-#undef TARGET_CLASS_MAX_NREGS
-#define TARGET_CLASS_MAX_NREGS aarch64_class_max_nregs
-
-#undef TARGET_BUILTIN_DECL
-#define TARGET_BUILTIN_DECL aarch64_builtin_decl
-
-#undef TARGET_EXPAND_BUILTIN
-#define TARGET_EXPAND_BUILTIN aarch64_expand_builtin
-
-#undef TARGET_EXPAND_BUILTIN_VA_START
-#define TARGET_EXPAND_BUILTIN_VA_START aarch64_expand_builtin_va_start
-
-#undef TARGET_FUNCTION_ARG
-#define TARGET_FUNCTION_ARG aarch64_function_arg
-
-#undef TARGET_FUNCTION_ARG_ADVANCE
-#define TARGET_FUNCTION_ARG_ADVANCE aarch64_function_arg_advance
-
-#undef TARGET_FUNCTION_ARG_BOUNDARY
-#define TARGET_FUNCTION_ARG_BOUNDARY aarch64_function_arg_boundary
-
-#undef TARGET_FUNCTION_OK_FOR_SIBCALL
-#define TARGET_FUNCTION_OK_FOR_SIBCALL aarch64_function_ok_for_sibcall
-
-#undef TARGET_FUNCTION_VALUE
-#define TARGET_FUNCTION_VALUE aarch64_function_value
-
-#undef TARGET_FUNCTION_VALUE_REGNO_P
-#define TARGET_FUNCTION_VALUE_REGNO_P aarch64_function_value_regno_p
-
-#undef TARGET_FRAME_POINTER_REQUIRED
-#define TARGET_FRAME_POINTER_REQUIRED aarch64_frame_pointer_required
-
-#undef TARGET_GIMPLIFY_VA_ARG_EXPR
-#define TARGET_GIMPLIFY_VA_ARG_EXPR aarch64_gimplify_va_arg_expr
-
-#undef TARGET_INIT_BUILTINS
-#define TARGET_INIT_BUILTINS aarch64_init_builtins
-
-#undef TARGET_LEGITIMATE_ADDRESS_P
-#define TARGET_LEGITIMATE_ADDRESS_P aarch64_legitimate_address_hook_p
-
-#undef TARGET_LEGITIMATE_CONSTANT_P
-#define TARGET_LEGITIMATE_CONSTANT_P aarch64_legitimate_constant_p
-
-#undef TARGET_LIBGCC_CMP_RETURN_MODE
-#define TARGET_LIBGCC_CMP_RETURN_MODE aarch64_libgcc_cmp_return_mode
-
-#undef TARGET_MANGLE_TYPE
-#define TARGET_MANGLE_TYPE aarch64_mangle_type
-
-#undef TARGET_MEMORY_MOVE_COST
-#define TARGET_MEMORY_MOVE_COST aarch64_memory_move_cost
-
-#undef TARGET_MUST_PASS_IN_STACK
-#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
-
-/* This target hook should return true if accesses to volatile bitfields
- should use the narrowest mode possible. It should return false if these
- accesses should use the bitfield container type. */
-#undef TARGET_NARROW_VOLATILE_BITFIELD
-#define TARGET_NARROW_VOLATILE_BITFIELD hook_bool_void_false
-
-#undef TARGET_OPTION_OVERRIDE
-#define TARGET_OPTION_OVERRIDE aarch64_override_options
-
-#undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
-#define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE \
- aarch64_override_options_after_change
-
-#undef TARGET_PASS_BY_REFERENCE
-#define TARGET_PASS_BY_REFERENCE aarch64_pass_by_reference
-
-#undef TARGET_PREFERRED_RELOAD_CLASS
-#define TARGET_PREFERRED_RELOAD_CLASS aarch64_preferred_reload_class
-
-#undef TARGET_SECONDARY_RELOAD
-#define TARGET_SECONDARY_RELOAD aarch64_secondary_reload
-
-#undef TARGET_SHIFT_TRUNCATION_MASK
-#define TARGET_SHIFT_TRUNCATION_MASK aarch64_shift_truncation_mask
-
-#undef TARGET_SETUP_INCOMING_VARARGS
-#define TARGET_SETUP_INCOMING_VARARGS aarch64_setup_incoming_varargs
-
-#undef TARGET_STRUCT_VALUE_RTX
-#define TARGET_STRUCT_VALUE_RTX aarch64_struct_value_rtx
-
-#undef TARGET_REGISTER_MOVE_COST
-#define TARGET_REGISTER_MOVE_COST aarch64_register_move_cost
-
-#undef TARGET_RETURN_IN_MEMORY
-#define TARGET_RETURN_IN_MEMORY aarch64_return_in_memory
-
-#undef TARGET_RETURN_IN_MSB
-#define TARGET_RETURN_IN_MSB aarch64_return_in_msb
-
-#undef TARGET_RTX_COSTS
-#define TARGET_RTX_COSTS aarch64_rtx_costs
-
-#undef TARGET_TRAMPOLINE_INIT
-#define TARGET_TRAMPOLINE_INIT aarch64_trampoline_init
-
-#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
-#define TARGET_USE_BLOCKS_FOR_CONSTANT_P aarch64_use_blocks_for_constant_p
-
-#undef TARGET_VECTOR_MODE_SUPPORTED_P
-#define TARGET_VECTOR_MODE_SUPPORTED_P aarch64_vector_mode_supported_p
-
-#undef TARGET_ARRAY_MODE_SUPPORTED_P
-#define TARGET_ARRAY_MODE_SUPPORTED_P aarch64_array_mode_supported_p
-
-#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
-#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE aarch64_preferred_simd_mode
-
-#undef TARGET_VECTORIZE_BUILTINS
-#define TARGET_VECTORIZE_BUILTINS
-
-#undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
-#define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
- aarch64_builtin_vectorized_function
-
-#undef TARGET_VECTORIZE_AUTOVECTORIZE_VECTOR_SIZES
-#define TARGET_VECTORIZE_AUTOVECTORIZE_VECTOR_SIZES \
- aarch64_autovectorize_vector_sizes
-
-/* Section anchor support. */
-
-#undef TARGET_MIN_ANCHOR_OFFSET
-#define TARGET_MIN_ANCHOR_OFFSET -256
-
-/* Limit the maximum anchor offset to 4k-1, since that's the limit for a
- byte offset; we can do much more for larger data types, but have no way
- to determine the size of the access. We assume accesses are aligned. */
-#undef TARGET_MAX_ANCHOR_OFFSET
-#define TARGET_MAX_ANCHOR_OFFSET 4095
-
-#undef TARGET_VECTOR_ALIGNMENT
-#define TARGET_VECTOR_ALIGNMENT aarch64_simd_vector_alignment
-
-#undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
-#define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE \
- aarch64_simd_vector_alignment_reachable
-
-/* vec_perm support. */
-
-#undef TARGET_VECTORIZE_VEC_PERM_CONST_OK
-#define TARGET_VECTORIZE_VEC_PERM_CONST_OK \
- aarch64_vectorize_vec_perm_const_ok
-
-
-#undef TARGET_FIXED_CONDITION_CODE_REGS
-#define TARGET_FIXED_CONDITION_CODE_REGS aarch64_fixed_condition_code_regs
-
-struct gcc_target targetm = TARGET_INITIALIZER;
-
-#include "gt-aarch64.h"
diff --git a/gcc-4.8.1/gcc/config/aarch64/aarch64.h b/gcc-4.8.1/gcc/config/aarch64/aarch64.h
deleted file mode 100644
index c3efd2a88..000000000
--- a/gcc-4.8.1/gcc/config/aarch64/aarch64.h
+++ /dev/null
@@ -1,818 +0,0 @@
-/* Machine description for AArch64 architecture.
- Copyright (C) 2009-2013 Free Software Foundation, Inc.
- Contributed by ARM Ltd.
-
- This file is part of GCC.
-
- GCC is free software; you can redistribute it and/or modify it
- under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 3, or (at your option)
- any later version.
-
- GCC is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with GCC; see the file COPYING3. If not see
- <http://www.gnu.org/licenses/>. */
-
-
-#ifndef GCC_AARCH64_H
-#define GCC_AARCH64_H
-
-/* Target CPU builtins. */
-#define TARGET_CPU_CPP_BUILTINS() \
- do \
- { \
- builtin_define ("__aarch64__"); \
- if (TARGET_BIG_END) \
- builtin_define ("__AARCH64EB__"); \
- else \
- builtin_define ("__AARCH64EL__"); \
- \
- switch (aarch64_cmodel) \
- { \
- case AARCH64_CMODEL_TINY: \
- case AARCH64_CMODEL_TINY_PIC: \
- builtin_define ("__AARCH64_CMODEL_TINY__"); \
- break; \
- case AARCH64_CMODEL_SMALL: \
- case AARCH64_CMODEL_SMALL_PIC: \
- builtin_define ("__AARCH64_CMODEL_SMALL__");\
- break; \
- case AARCH64_CMODEL_LARGE: \
- builtin_define ("__AARCH64_CMODEL_LARGE__"); \
- break; \
- default: \
- break; \
- } \
- \
- } while (0)
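For orientation, a short, hypothetical example of how the predefined macros set up by TARGET_CPU_CPP_BUILTINS are typically consumed in user code; the macro name ENDIAN_NAME below is invented for the example:

/* Illustrative sketch only: user code keying off the AArch64 predefines.  */
#ifdef __aarch64__
# ifdef __AARCH64EB__
#  define ENDIAN_NAME "aarch64 big-endian"
# else
#  define ENDIAN_NAME "aarch64 little-endian"
# endif
# ifdef __AARCH64_CMODEL_SMALL__
   /* Small code model specific configuration could go here.  */
# endif
#endif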
-
-
-
-/* Target machine storage layout. */
-
-#define PROMOTE_MODE(MODE, UNSIGNEDP, TYPE) \
- if (GET_MODE_CLASS (MODE) == MODE_INT \
- && GET_MODE_SIZE (MODE) < 4) \
- { \
- if (MODE == QImode || MODE == HImode) \
- { \
- MODE = SImode; \
- } \
- }
-
-/* Bits are always numbered from the LSBit. */
-#define BITS_BIG_ENDIAN 0
-
-/* Big/little-endian flavour. */
-#define BYTES_BIG_ENDIAN (TARGET_BIG_END != 0)
-#define WORDS_BIG_ENDIAN (BYTES_BIG_ENDIAN)
-
-/* AdvSIMD is supported in the default configuration, unless disabled by
- -mgeneral-regs-only. */
-#define TARGET_SIMD !TARGET_GENERAL_REGS_ONLY
-#define TARGET_FLOAT !TARGET_GENERAL_REGS_ONLY
-
-#define UNITS_PER_WORD 8
-
-#define UNITS_PER_VREG 16
-
-#define PARM_BOUNDARY 64
-
-#define STACK_BOUNDARY 128
-
-#define FUNCTION_BOUNDARY 32
-
-#define EMPTY_FIELD_BOUNDARY 32
-
-#define BIGGEST_ALIGNMENT 128
-
-#define SHORT_TYPE_SIZE 16
-
-#define INT_TYPE_SIZE 32
-
-#define LONG_TYPE_SIZE 64 /* XXX This should be an option */
-
-#define LONG_LONG_TYPE_SIZE 64
-
-#define FLOAT_TYPE_SIZE 32
-
-#define DOUBLE_TYPE_SIZE 64
-
-#define LONG_DOUBLE_TYPE_SIZE 128
-
-/* The architecture reserves all bits of the address for hardware use,
- so the vbit must go into the delta field of pointers to member
- functions. This is the same config as that in the AArch32
- port. */
-#define TARGET_PTRMEMFUNC_VBIT_LOCATION ptrmemfunc_vbit_in_delta
-
-/* Make strings word-aligned so that strcpy from constants will be
- faster. */
-#define CONSTANT_ALIGNMENT(EXP, ALIGN) \
- ((TREE_CODE (EXP) == STRING_CST \
- && !optimize_size \
- && (ALIGN) < BITS_PER_WORD) \
- ? BITS_PER_WORD : ALIGN)
-
-#define DATA_ALIGNMENT(EXP, ALIGN) \
- ((((ALIGN) < BITS_PER_WORD) \
- && (TREE_CODE (EXP) == ARRAY_TYPE \
- || TREE_CODE (EXP) == UNION_TYPE \
- || TREE_CODE (EXP) == RECORD_TYPE)) \
- ? BITS_PER_WORD : (ALIGN))
-
-#define LOCAL_ALIGNMENT(EXP, ALIGN) DATA_ALIGNMENT(EXP, ALIGN)
-
-#define STRUCTURE_SIZE_BOUNDARY 8
-
-/* Defined by the ABI */
-#define WCHAR_TYPE "unsigned int"
-#define WCHAR_TYPE_SIZE 32
-
-/* Using long long breaks -ansi and -std=c90, so these will need to be
- made conditional for an LLP64 ABI. */
-
-#define SIZE_TYPE "long unsigned int"
-
-#define PTRDIFF_TYPE "long int"
-
-#define PCC_BITFIELD_TYPE_MATTERS 1
-
-
-/* Instruction tuning/selection flags. */
-
-/* Bit values used to identify processor capabilities. */
-#define AARCH64_FL_SIMD (1 << 0) /* Has SIMD instructions. */
-#define AARCH64_FL_FP (1 << 1) /* Has FP. */
-#define AARCH64_FL_CRYPTO (1 << 2) /* Has crypto. */
-#define AARCH64_FL_SLOWMUL (1 << 3) /* A slow multiply core. */
-
-/* Has FP and SIMD. */
-#define AARCH64_FL_FPSIMD (AARCH64_FL_FP | AARCH64_FL_SIMD)
-
-/* Has FP without SIMD. */
-#define AARCH64_FL_FPQ16 (AARCH64_FL_FP & ~AARCH64_FL_SIMD)
-
-/* Architecture flags that affect instruction selection. */
-#define AARCH64_FL_FOR_ARCH8 (AARCH64_FL_FPSIMD)
-
-/* Macros to test ISA flags. */
-extern unsigned long aarch64_isa_flags;
-#define AARCH64_ISA_CRYPTO (aarch64_isa_flags & AARCH64_FL_CRYPTO)
-#define AARCH64_ISA_FP (aarch64_isa_flags & AARCH64_FL_FP)
-#define AARCH64_ISA_SIMD (aarch64_isa_flags & AARCH64_FL_SIMD)
-
-/* Macros to test tuning flags. */
-extern unsigned long aarch64_tune_flags;
-#define AARCH64_TUNE_SLOWMUL (aarch64_tune_flags & AARCH64_FL_SLOWMUL)
-
-
-/* Standard register usage. */
-
-/* 31 64-bit general purpose registers R0-R30:
- R30 LR (link register)
- R29 FP (frame pointer)
- R19-R28 Callee-saved registers
- R18 The platform register; use as temporary register.
- R17 IP1 The second intra-procedure-call temporary register
- (can be used by call veneers and PLT code); otherwise use
- as a temporary register
- R16 IP0 The first intra-procedure-call temporary register (can
- be used by call veneers and PLT code); otherwise use as a
- temporary register
- R9-R15 Temporary registers
- R8 Structure value parameter / temporary register
- R0-R7 Parameter/result registers
-
- SP stack pointer, encoded as X/R31 where permitted.
- ZR zero register, encoded as X/R31 elsewhere
-
- 32 x 128-bit floating-point/vector registers
- V16-V31 Caller-saved (temporary) registers
- V8-V15 Callee-saved registers
- V0-V7 Parameter/result registers
-
- The vector register V0 holds scalar B0, H0, S0 and D0 in its least
- significant bits. Unlike AArch32 S1 is not packed into D0,
- etc. */
-
-/* Note that we don't mark X30 as a call-clobbered register. The idea is
- that it's really the call instructions themselves which clobber X30.
- We don't care what the called function does with it afterwards.
-
- This approach makes it easier to implement sibcalls. Unlike normal
- calls, sibcalls don't clobber X30, so the register reaches the
- called function intact. EPILOGUE_USES says that X30 is useful
- to the called function. */
-
-#define FIXED_REGISTERS \
- { \
- 0, 0, 0, 0, 0, 0, 0, 0, /* R0 - R7 */ \
- 0, 0, 0, 0, 0, 0, 0, 0, /* R8 - R15 */ \
- 0, 0, 0, 0, 0, 0, 0, 0, /* R16 - R23 */ \
- 0, 0, 0, 0, 0, 1, 0, 1, /* R24 - R30, SP */ \
- 0, 0, 0, 0, 0, 0, 0, 0, /* V0 - V7 */ \
- 0, 0, 0, 0, 0, 0, 0, 0, /* V8 - V15 */ \
- 0, 0, 0, 0, 0, 0, 0, 0, /* V16 - V23 */ \
- 0, 0, 0, 0, 0, 0, 0, 0, /* V24 - V31 */ \
- 1, 1, 1, /* SFP, AP, CC */ \
- }
-
-#define CALL_USED_REGISTERS \
- { \
- 1, 1, 1, 1, 1, 1, 1, 1, /* R0 - R7 */ \
- 1, 1, 1, 1, 1, 1, 1, 1, /* R8 - R15 */ \
- 1, 1, 1, 0, 0, 0, 0, 0, /* R16 - R23 */ \
- 0, 0, 0, 0, 0, 1, 0, 1, /* R24 - R30, SP */ \
- 1, 1, 1, 1, 1, 1, 1, 1, /* V0 - V7 */ \
- 0, 0, 0, 0, 0, 0, 0, 0, /* V8 - V15 */ \
- 1, 1, 1, 1, 1, 1, 1, 1, /* V16 - V23 */ \
- 1, 1, 1, 1, 1, 1, 1, 1, /* V24 - V31 */ \
- 1, 1, 1, /* SFP, AP, CC */ \
- }
-
-#define REGISTER_NAMES \
- { \
- "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", \
- "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", \
- "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23", \
- "x24", "x25", "x26", "x27", "x28", "x29", "x30", "sp", \
- "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", \
- "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", \
- "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", \
- "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", \
- "sfp", "ap", "cc", \
- }
-
-/* Generate the register aliases for core register N */
-#define R_ALIASES(N) {"r" # N, R0_REGNUM + (N)}, \
- {"w" # N, R0_REGNUM + (N)}
-
-#define V_ALIASES(N) {"q" # N, V0_REGNUM + (N)}, \
- {"d" # N, V0_REGNUM + (N)}, \
- {"s" # N, V0_REGNUM + (N)}, \
- {"h" # N, V0_REGNUM + (N)}, \
- {"b" # N, V0_REGNUM + (N)}
-
-/* Provide aliases for all of the ISA defined register name forms.
- These aliases are convenient for use in the clobber lists of inline
- asm statements. */
-
-#define ADDITIONAL_REGISTER_NAMES \
- { R_ALIASES(0), R_ALIASES(1), R_ALIASES(2), R_ALIASES(3), \
- R_ALIASES(4), R_ALIASES(5), R_ALIASES(6), R_ALIASES(7), \
- R_ALIASES(8), R_ALIASES(9), R_ALIASES(10), R_ALIASES(11), \
- R_ALIASES(12), R_ALIASES(13), R_ALIASES(14), R_ALIASES(15), \
- R_ALIASES(16), R_ALIASES(17), R_ALIASES(18), R_ALIASES(19), \
- R_ALIASES(20), R_ALIASES(21), R_ALIASES(22), R_ALIASES(23), \
- R_ALIASES(24), R_ALIASES(25), R_ALIASES(26), R_ALIASES(27), \
- R_ALIASES(28), R_ALIASES(29), R_ALIASES(30), /* 31 omitted */ \
- V_ALIASES(0), V_ALIASES(1), V_ALIASES(2), V_ALIASES(3), \
- V_ALIASES(4), V_ALIASES(5), V_ALIASES(6), V_ALIASES(7), \
- V_ALIASES(8), V_ALIASES(9), V_ALIASES(10), V_ALIASES(11), \
- V_ALIASES(12), V_ALIASES(13), V_ALIASES(14), V_ALIASES(15), \
- V_ALIASES(16), V_ALIASES(17), V_ALIASES(18), V_ALIASES(19), \
- V_ALIASES(20), V_ALIASES(21), V_ALIASES(22), V_ALIASES(23), \
- V_ALIASES(24), V_ALIASES(25), V_ALIASES(26), V_ALIASES(27), \
- V_ALIASES(28), V_ALIASES(29), V_ALIASES(30), V_ALIASES(31) \
- }
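The comment above mentions inline asm clobber lists; a brief, hypothetical example of why the aliases matter, using the "w"-form name of a core register as a scratch:

/* Illustrative sketch only: the alias table lets asm statements name a
   register by whichever form they actually use ("w9"/"x9", "d0"/"q0", ...).  */
static inline unsigned int
clobber_example (unsigned int x)
{
  unsigned int r;
  asm ("add %w0, %w1, #1\n\t"
       "mov w9, %w0"               /* w9 used purely as a scratch.  */
       : "=r" (r)
       : "r" (x)
       : "w9");
  return r;
}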
-
-/* Say that the epilogue uses the return address register. Note that
- in the case of sibcalls, the values "used by the epilogue" are
- considered live at the start of the called function. */
-
-#define EPILOGUE_USES(REGNO) \
- ((REGNO) == LR_REGNUM)
-
-/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function,
- the stack pointer does not matter. The value is tested only in
- functions that have frame pointers. */
-#define EXIT_IGNORE_STACK 1
-
-#define STATIC_CHAIN_REGNUM R18_REGNUM
-#define HARD_FRAME_POINTER_REGNUM R29_REGNUM
-#define FRAME_POINTER_REGNUM SFP_REGNUM
-#define STACK_POINTER_REGNUM SP_REGNUM
-#define ARG_POINTER_REGNUM AP_REGNUM
-#define FIRST_PSEUDO_REGISTER 67
-
-/* The number of (integer) argument registers available. */
-#define NUM_ARG_REGS 8
-#define NUM_FP_ARG_REGS 8
-
-/* A Homogeneous Floating-Point or Short-Vector Aggregate may have at most
- four members. */
-#define HA_MAX_NUM_FLDS 4
-
-/* External dwarf register number scheme.  These numbers are used to
- identify registers in dwarf debug information, the values are
- defined by the AArch64 ABI. The numbering scheme is independent of
- GCC's internal register numbering scheme. */
-
-#define AARCH64_DWARF_R0 0
-
-/* The number of R registers, note 31! not 32. */
-#define AARCH64_DWARF_NUMBER_R 31
-
-#define AARCH64_DWARF_SP 31
-#define AARCH64_DWARF_V0 64
-
-/* The number of V registers. */
-#define AARCH64_DWARF_NUMBER_V 32
-
-/* For signal frames we need to use an alternative return column. This
- value must not correspond to a hard register and must be out of the
- range of DWARF_FRAME_REGNUM(). */
-#define DWARF_ALT_FRAME_RETURN_COLUMN \
- (AARCH64_DWARF_V0 + AARCH64_DWARF_NUMBER_V)
-
-/* We add 1 extra frame register for use as the
- DWARF_ALT_FRAME_RETURN_COLUMN. */
-#define DWARF_FRAME_REGISTERS (DWARF_ALT_FRAME_RETURN_COLUMN + 1)
-
-
-#define DBX_REGISTER_NUMBER(REGNO) aarch64_dbx_register_number (REGNO)
-/* Provide a definition of DWARF_FRAME_REGNUM here so that fallback unwinders
-   can use DWARF_ALT_FRAME_RETURN_COLUMN defined above.  This is just the same
- as the default definition in dwarf2out.c. */
-#undef DWARF_FRAME_REGNUM
-#define DWARF_FRAME_REGNUM(REGNO) DBX_REGISTER_NUMBER (REGNO)
-
-#define DWARF_FRAME_RETURN_COLUMN DWARF_FRAME_REGNUM (LR_REGNUM)
-
-#define HARD_REGNO_NREGS(REGNO, MODE) aarch64_hard_regno_nregs (REGNO, MODE)
-
-#define HARD_REGNO_MODE_OK(REGNO, MODE) aarch64_hard_regno_mode_ok (REGNO, MODE)
-
-#define MODES_TIEABLE_P(MODE1, MODE2) \
- (GET_MODE_CLASS (MODE1) == GET_MODE_CLASS (MODE2))
-
-#define DWARF2_UNWIND_INFO 1
-
-/* Use R0 through R3 to pass exception handling information. */
-#define EH_RETURN_DATA_REGNO(N) \
- ((N) < 4 ? ((unsigned int) R0_REGNUM + (N)) : INVALID_REGNUM)
-
-/* Select a format to encode pointers in exception handling data. */
-#define ASM_PREFERRED_EH_DATA_FORMAT(CODE, GLOBAL) \
- aarch64_asm_preferred_eh_data_format ((CODE), (GLOBAL))
-
-/* The register that holds the return address in exception handlers. */
-#define AARCH64_EH_STACKADJ_REGNUM (R0_REGNUM + 4)
-#define EH_RETURN_STACKADJ_RTX gen_rtx_REG (Pmode, AARCH64_EH_STACKADJ_REGNUM)
-
-/* Don't use __builtin_setjmp until we've defined it. */
-#undef DONT_USE_BUILTIN_SETJMP
-#define DONT_USE_BUILTIN_SETJMP 1
-
-/* Register in which the structure value is to be returned. */
-#define AARCH64_STRUCT_VALUE_REGNUM R8_REGNUM
-
-/* Non-zero if REGNO is part of the Core register set.
-
- The rather unusual way of expressing this check is to avoid
- warnings when building the compiler when R0_REGNUM is 0 and REGNO
- is unsigned. */
-#define GP_REGNUM_P(REGNO) \
- (((unsigned) (REGNO - R0_REGNUM)) <= (R30_REGNUM - R0_REGNUM))
-
-#define FP_REGNUM_P(REGNO) \
- (((unsigned) (REGNO - V0_REGNUM)) <= (V31_REGNUM - V0_REGNUM))
-
-#define FP_LO_REGNUM_P(REGNO) \
- (((unsigned) (REGNO - V0_REGNUM)) <= (V15_REGNUM - V0_REGNUM))
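The "rather unusual" form mentioned in the comment above is the classic unsigned range-check idiom; a small, hypothetical stand-alone version of the same trick:

/* Illustrative sketch only: subtracting the lower bound first lets one
   unsigned comparison test both bounds, and avoids "comparison is always
   true" warnings when the lower bound is 0 and the value is unsigned
   (values below FIRST wrap around to a huge number and fail the test).  */
static int
regno_in_range (unsigned int regno, unsigned int first, unsigned int last)
{
  return (regno - first) <= (last - first);
}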
-
-
-/* Register and constant classes. */
-
-enum reg_class
-{
- NO_REGS,
- CORE_REGS,
- GENERAL_REGS,
- STACK_REG,
- POINTER_REGS,
- FP_LO_REGS,
- FP_REGS,
- ALL_REGS,
- LIM_REG_CLASSES /* Last */
-};
-
-#define N_REG_CLASSES ((int) LIM_REG_CLASSES)
-
-#define REG_CLASS_NAMES \
-{ \
- "NO_REGS", \
- "CORE_REGS", \
- "GENERAL_REGS", \
- "STACK_REG", \
- "POINTER_REGS", \
- "FP_LO_REGS", \
- "FP_REGS", \
- "ALL_REGS" \
-}
-
-#define REG_CLASS_CONTENTS \
-{ \
- { 0x00000000, 0x00000000, 0x00000000 }, /* NO_REGS */ \
- { 0x7fffffff, 0x00000000, 0x00000003 }, /* CORE_REGS */ \
- { 0x7fffffff, 0x00000000, 0x00000003 }, /* GENERAL_REGS */ \
- { 0x80000000, 0x00000000, 0x00000000 }, /* STACK_REG */ \
- { 0xffffffff, 0x00000000, 0x00000003 }, /* POINTER_REGS */ \
- { 0x00000000, 0x0000ffff, 0x00000000 }, /* FP_LO_REGS */ \
- { 0x00000000, 0xffffffff, 0x00000000 }, /* FP_REGS */ \
- { 0xffffffff, 0xffffffff, 0x00000007 } /* ALL_REGS */ \
-}
-
-#define REGNO_REG_CLASS(REGNO) aarch64_regno_regclass (REGNO)
-
-#define INDEX_REG_CLASS CORE_REGS
-#define BASE_REG_CLASS POINTER_REGS
-
-/* Register pairs used to eliminate unneeded registers that point into
- the stack frame. */
-#define ELIMINABLE_REGS \
-{ \
- { ARG_POINTER_REGNUM, STACK_POINTER_REGNUM }, \
- { ARG_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM }, \
- { FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM }, \
- { FRAME_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM }, \
-}
-
-#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
- (OFFSET) = aarch64_initial_elimination_offset (FROM, TO)
-
-/* CPU/ARCH option handling. */
-#include "config/aarch64/aarch64-opts.h"
-
-enum target_cpus
-{
-#define AARCH64_CORE(NAME, IDENT, ARCH, FLAGS, COSTS) \
- TARGET_CPU_##IDENT,
-#include "aarch64-cores.def"
-#undef AARCH64_CORE
- TARGET_CPU_generic
-};
-
-/* If there is no CPU defined at configure time, use "generic" as the default. */
-#ifndef TARGET_CPU_DEFAULT
-#define TARGET_CPU_DEFAULT \
- (TARGET_CPU_generic | (AARCH64_CPU_DEFAULT_FLAGS << 6))
-#endif
-
-/* The processor for which instructions should be scheduled. */
-extern enum aarch64_processor aarch64_tune;
-
-/* RTL generation support. */
-#define INIT_EXPANDERS aarch64_init_expanders ()
-
-
-/* Stack layout; function entry, exit and calling. */
-#define STACK_GROWS_DOWNWARD 1
-
-#define FRAME_GROWS_DOWNWARD 0
-
-#define STARTING_FRAME_OFFSET 0
-
-#define ACCUMULATE_OUTGOING_ARGS 1
-
-#define FIRST_PARM_OFFSET(FNDECL) 0
-
-/* Fix for VFP */
-#define LIBCALL_VALUE(MODE) \
- gen_rtx_REG (MODE, FLOAT_MODE_P (MODE) ? V0_REGNUM : R0_REGNUM)
-
-#define DEFAULT_PCC_STRUCT_RETURN 0
-
-#define AARCH64_ROUND_UP(X, ALIGNMENT) \
- (((X) + ((ALIGNMENT) - 1)) & ~((ALIGNMENT) - 1))
-
-#define AARCH64_ROUND_DOWN(X, ALIGNMENT) \
- ((X) & ~((ALIGNMENT) - 1))
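Both macros rely on ALIGNMENT being a power of two; a quick worked example of the arithmetic, checked at compile time (illustrative only, not part of the port):

/* Illustrative sketch only: the rounding arithmetic.  With a
   non-power-of-two alignment the mask ~(ALIGNMENT - 1) would not clear
   the right bits, so the macros must not be used that way.  */
#define EXAMPLE_ROUND_UP(X, A)   (((X) + ((A) - 1)) & ~((A) - 1))
#define EXAMPLE_ROUND_DOWN(X, A) ((X) & ~((A) - 1))

/* (24 + 15) & ~15 == 32,  24 & ~15 == 16.  */
typedef char round_up_check[EXAMPLE_ROUND_UP (24, 16) == 32 ? 1 : -1];
typedef char round_down_check[EXAMPLE_ROUND_DOWN (24, 16) == 16 ? 1 : -1];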
-
-#ifdef HOST_WIDE_INT
-struct GTY (()) aarch64_frame
-{
- HOST_WIDE_INT reg_offset[FIRST_PSEUDO_REGISTER];
- HOST_WIDE_INT saved_regs_size;
-  /* Padding, if needed, after all the callee-saved registers have
- been saved. */
- HOST_WIDE_INT padding0;
- HOST_WIDE_INT hardfp_offset; /* HARD_FRAME_POINTER_REGNUM */
- HOST_WIDE_INT fp_lr_offset; /* Space needed for saving fp and/or lr */
-
- bool laid_out;
-};
-
-typedef struct GTY (()) machine_function
-{
- struct aarch64_frame frame;
-
- /* The number of extra stack bytes taken up by register varargs.
- This area is allocated by the callee at the very top of the frame. */
- HOST_WIDE_INT saved_varargs_size;
-
-} machine_function;
-#endif
-
-
-/* Which ABI to use. */
-enum arm_abi_type
-{
- ARM_ABI_AAPCS64
-};
-
-enum arm_pcs
-{
- ARM_PCS_AAPCS64, /* Base standard AAPCS for 64 bit. */
- ARM_PCS_UNKNOWN
-};
-
-
-extern enum arm_abi_type arm_abi;
-extern enum arm_pcs arm_pcs_variant;
-#ifndef ARM_DEFAULT_ABI
-#define ARM_DEFAULT_ABI ARM_ABI_AAPCS64
-#endif
-
-#ifndef ARM_DEFAULT_PCS
-#define ARM_DEFAULT_PCS ARM_PCS_AAPCS64
-#endif
-
-/* We can't use enum machine_mode inside a generator file because it
- hasn't been created yet; we shouldn't be using any code that
- needs the real definition though, so this ought to be safe. */
-#ifdef GENERATOR_FILE
-#define MACHMODE int
-#else
-#include "insn-modes.h"
-#define MACHMODE enum machine_mode
-#endif
-
-
-/* AAPCS related state tracking. */
-typedef struct
-{
- enum arm_pcs pcs_variant;
- int aapcs_arg_processed; /* No need to lay out this argument again. */
- int aapcs_ncrn; /* Next Core register number. */
- int aapcs_nextncrn; /* Next next core register number. */
- int aapcs_nvrn; /* Next Vector register number. */
- int aapcs_nextnvrn; /* Next Next Vector register number. */
- rtx aapcs_reg; /* Register assigned to this argument. This
- is NULL_RTX if this parameter goes on
- the stack. */
- MACHMODE aapcs_vfp_rmode;
- int aapcs_stack_words; /* If the argument is passed on the stack, this
- is the number of words needed, after rounding
- up. Only meaningful when
- aapcs_reg == NULL_RTX. */
-  int aapcs_stack_size;		/* The total size (in 8-byte words) of the
- stack arg area so far. */
-} CUMULATIVE_ARGS;
-
-#define FUNCTION_ARG_PADDING(MODE, TYPE) \
- (aarch64_pad_arg_upward (MODE, TYPE) ? upward : downward)
-
-#define BLOCK_REG_PADDING(MODE, TYPE, FIRST) \
- (aarch64_pad_reg_upward (MODE, TYPE, FIRST) ? upward : downward)
-
-#define PAD_VARARGS_DOWN 0
-
-#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, FNDECL, N_NAMED_ARGS) \
- aarch64_init_cumulative_args (&(CUM), FNTYPE, LIBNAME, FNDECL, N_NAMED_ARGS)
-
-#define FUNCTION_ARG_REGNO_P(REGNO) \
- aarch64_function_arg_regno_p(REGNO)
-
-
-/* ISA Features. */
-
-/* Addressing modes, etc. */
-#define HAVE_POST_INCREMENT 1
-#define HAVE_PRE_INCREMENT 1
-#define HAVE_POST_DECREMENT 1
-#define HAVE_PRE_DECREMENT 1
-#define HAVE_POST_MODIFY_DISP 1
-#define HAVE_PRE_MODIFY_DISP 1
-
-#define MAX_REGS_PER_ADDRESS 2
-
-#define CONSTANT_ADDRESS_P(X) aarch64_constant_address_p(X)
-
-/* Try a machine-dependent way of reloading an illegitimate address
- operand. If we find one, push the reload and jump to WIN. This
- macro is used in only one place: `find_reloads_address' in reload.c. */
-
-#define LEGITIMIZE_RELOAD_ADDRESS(X, MODE, OPNUM, TYPE, IND_L, WIN) \
-do { \
- rtx new_x = aarch64_legitimize_reload_address (&(X), MODE, OPNUM, TYPE, \
- IND_L); \
- if (new_x) \
- { \
- X = new_x; \
- goto WIN; \
- } \
-} while (0)
-
-#define REGNO_OK_FOR_BASE_P(REGNO) \
- aarch64_regno_ok_for_base_p (REGNO, true)
-
-#define REGNO_OK_FOR_INDEX_P(REGNO) \
- aarch64_regno_ok_for_index_p (REGNO, true)
-
-#define LEGITIMATE_PIC_OPERAND_P(X) \
- aarch64_legitimate_pic_operand_p (X)
-
-#define CASE_VECTOR_MODE Pmode
-
-#define DEFAULT_SIGNED_CHAR 0
-
-/* An integer expression for the size in bits of the largest integer machine
- mode that should actually be used. We allow pairs of registers. */
-#define MAX_FIXED_MODE_SIZE GET_MODE_BITSIZE (TImode)
-
-/* Maximum bytes moved by a single instruction (load/store pair). */
-#define MOVE_MAX (UNITS_PER_WORD * 2)
-
-/* The base cost overhead of a memcpy call, for MOVE_RATIO and friends. */
-#define AARCH64_CALL_RATIO 8
-
-/* When optimizing for size, give a better estimate of the length of a memcpy
- call, but use the default otherwise. But move_by_pieces_ninsns() counts
- memory-to-memory moves, and we'll have to generate a load & store for each,
- so halve the value to take that into account. */
-#define MOVE_RATIO(speed) \
- (((speed) ? 15 : AARCH64_CALL_RATIO) / 2)
-
-/* For CLEAR_RATIO, when optimizing for size, give a better estimate
- of the length of a memset call, but use the default otherwise. */
-#define CLEAR_RATIO(speed) \
- ((speed) ? 15 : AARCH64_CALL_RATIO)
-
-/* SET_RATIO is similar to CLEAR_RATIO, but for a non-zero constant, so when
- optimizing for size adjust the ratio to account for the overhead of loading
- the constant. */
-#define SET_RATIO(speed) \
- ((speed) ? 15 : AARCH64_CALL_RATIO - 2)
-
-/* STORE_BY_PIECES_P can be used when copying a constant string, but
- in that case each 64-bit chunk takes 5 insns instead of 2 (LDR/STR).
- For now we always fail this and let the move_by_pieces code copy
- the string from read-only memory. */
-#define STORE_BY_PIECES_P(SIZE, ALIGN) 0
-
-/* Disable auto-increment in move_by_pieces et al. Use of auto-increment is
- rarely a good idea in straight-line code since it adds an extra address
- dependency between each instruction. Better to use incrementing offsets. */
-#define USE_LOAD_POST_INCREMENT(MODE) 0
-#define USE_LOAD_POST_DECREMENT(MODE) 0
-#define USE_LOAD_PRE_INCREMENT(MODE) 0
-#define USE_LOAD_PRE_DECREMENT(MODE) 0
-#define USE_STORE_POST_INCREMENT(MODE) 0
-#define USE_STORE_POST_DECREMENT(MODE) 0
-#define USE_STORE_PRE_INCREMENT(MODE) 0
-#define USE_STORE_PRE_DECREMENT(MODE) 0
-
-/* ?? #define WORD_REGISTER_OPERATIONS */
-
-/* Define if loading from memory in MODE, an integral mode narrower than
- BITS_PER_WORD will either zero-extend or sign-extend. The value of this
- macro should be the code that says which one of the two operations is
- implicitly done, or UNKNOWN if none. */
-#define LOAD_EXTEND_OP(MODE) ZERO_EXTEND
-
-/* Define this macro to be non-zero if instructions will fail to work
- if given data not on the nominal alignment. */
-#define STRICT_ALIGNMENT TARGET_STRICT_ALIGN
-
-/* Define this macro to be non-zero if accessing less than a word of
- memory is no faster than accessing a word of memory, i.e., if such
- accesses require more than one instruction or if there is no
- difference in cost.
- Although there's no difference in instruction count or cycles,
-   in AArch64 we don't want to expand a sub-word access to a 64-bit access
- if we don't have to, for power-saving reasons. */
-#define SLOW_BYTE_ACCESS 0
-
-#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) 1
-
-#define NO_FUNCTION_CSE 1
-
-#define Pmode DImode
-#define FUNCTION_MODE Pmode
-
-#define SELECT_CC_MODE(OP, X, Y) aarch64_select_cc_mode (OP, X, Y)
-
-#define REVERSE_CONDITION(CODE, MODE) \
- (((MODE) == CCFPmode || (MODE) == CCFPEmode) \
- ? reverse_condition_maybe_unordered (CODE) \
- : reverse_condition (CODE))
-
-#define CLZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) \
- ((VALUE) = ((MODE) == SImode ? 32 : 64), 2)
-#define CTZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) \
- ((VALUE) = ((MODE) == SImode ? 32 : 64), 2)
-
-#define INCOMING_RETURN_ADDR_RTX gen_rtx_REG (Pmode, LR_REGNUM)
-
-#define RETURN_ADDR_RTX aarch64_return_addr
-
-#define TRAMPOLINE_SIZE aarch64_trampoline_size ()
-
-/* Trampolines contain dwords, so must be dword aligned. */
-#define TRAMPOLINE_ALIGNMENT 64
-
-/* Put trampolines in the text section so that mapping symbols work
- correctly. */
-#define TRAMPOLINE_SECTION text_section
-
-/* Costs, etc. */
-#define MEMORY_MOVE_COST(M, CLASS, IN) \
- (GET_MODE_SIZE (M) < 8 ? 8 : GET_MODE_SIZE (M))
-
-/* To start with. */
-#define BRANCH_COST(SPEED_P, PREDICTABLE_P) 2
-
-
-/* Assembly output. */
-
-/* For now we'll make all jump tables pc-relative. */
-#define CASE_VECTOR_PC_RELATIVE 1
-
-#define CASE_VECTOR_SHORTEN_MODE(min, max, body) \
- ((min < -0x1fff0 || max > 0x1fff0) ? SImode \
- : (min < -0x1f0 || max > 0x1f0) ? HImode \
- : QImode)
-
-/* Jump table alignment is explicit in ASM_OUTPUT_CASE_LABEL. */
-#define ADDR_VEC_ALIGN(JUMPTABLE) 0
-
-#define PRINT_OPERAND(STREAM, X, CODE) aarch64_print_operand (STREAM, X, CODE)
-
-#define PRINT_OPERAND_ADDRESS(STREAM, X) \
- aarch64_print_operand_address (STREAM, X)
-
-#define FUNCTION_PROFILER(STREAM, LABELNO) \
- aarch64_function_profiler (STREAM, LABELNO)
-
-/* For some reason, the Linux headers think they know how to define
- these macros. They don't!!! */
-#undef ASM_APP_ON
-#undef ASM_APP_OFF
-#define ASM_APP_ON "\t" ASM_COMMENT_START " Start of user assembly\n"
-#define ASM_APP_OFF "\t" ASM_COMMENT_START " End of user assembly\n"
-
-#define CONSTANT_POOL_BEFORE_FUNCTION 0
-
-/* This definition should be relocated to aarch64-elf-raw.h. This macro
- should be undefined in aarch64-linux.h and a clear_cache pattern
-   implemented to emit either the call to __aarch64_sync_cache_range()
-   directly or, preferably, the appropriate syscall or cache clear
- instructions inline. */
-#define CLEAR_INSN_CACHE(beg, end) \
- extern void __aarch64_sync_cache_range (void *, void *); \
- __aarch64_sync_cache_range (beg, end)
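A hedged illustration of the same cache maintenance done from user code: GCC exposes __builtin___clear_cache, which performs the equivalent range synchronisation after instructions have been written to memory (for example, into a trampoline or JIT buffer). Sketch only; the function and buffer below are hypothetical:

/* Illustrative sketch only.  */
static void
flush_code_buffer (void *buf, unsigned long size)
{
  __builtin___clear_cache ((char *) buf, (char *) buf + size);
}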
-
-/* VFP registers may only be accessed in the mode they
- were set. */
-#define CANNOT_CHANGE_MODE_CLASS(FROM, TO, CLASS) \
- (GET_MODE_SIZE (FROM) != GET_MODE_SIZE (TO) \
- ? reg_classes_intersect_p (FP_REGS, (CLASS)) \
- : 0)
-
-
-#define SHIFT_COUNT_TRUNCATED !TARGET_SIMD
-
-/* Callee only saves lower 64-bits of a 128-bit register. Tell the
- compiler the callee clobbers the top 64-bits when restoring the
- bottom 64-bits. */
-#define HARD_REGNO_CALL_PART_CLOBBERED(REGNO, MODE) \
- (FP_REGNUM_P (REGNO) && GET_MODE_SIZE (MODE) > 8)
-
-/* Check whether the TLS Descriptors mechanism is selected. */
-#define TARGET_TLS_DESC (aarch64_tls_dialect == TLS_DESCRIPTORS)
-
-extern enum aarch64_code_model aarch64_cmodel;
-
-/* When using the tiny addressing model, conditional and unconditional branches
- can span the whole of the available address space (1MB). */
-#define HAS_LONG_COND_BRANCH \
- (aarch64_cmodel == AARCH64_CMODEL_TINY \
- || aarch64_cmodel == AARCH64_CMODEL_TINY_PIC)
-
-#define HAS_LONG_UNCOND_BRANCH \
- (aarch64_cmodel == AARCH64_CMODEL_TINY \
- || aarch64_cmodel == AARCH64_CMODEL_TINY_PIC)
-
-/* Modes valid for AdvSIMD Q registers. */
-#define AARCH64_VALID_SIMD_QREG_MODE(MODE) \
- ((MODE) == V4SImode || (MODE) == V8HImode || (MODE) == V16QImode \
-   || (MODE) == V4SFmode || (MODE) == V2DImode || (MODE) == V2DFmode)
-
-#endif /* GCC_AARCH64_H */
diff --git a/gcc-4.8.1/gcc/config/aarch64/aarch64.md b/gcc-4.8.1/gcc/config/aarch64/aarch64.md
deleted file mode 100644
index 37e9f05ce..000000000
--- a/gcc-4.8.1/gcc/config/aarch64/aarch64.md
+++ /dev/null
@@ -1,3325 +0,0 @@
-;; Machine description for AArch64 architecture.
-;; Copyright (C) 2009-2013 Free Software Foundation, Inc.
-;; Contributed by ARM Ltd.
-;;
-;; This file is part of GCC.
-;;
-;; GCC is free software; you can redistribute it and/or modify it
-;; under the terms of the GNU General Public License as published by
-;; the Free Software Foundation; either version 3, or (at your option)
-;; any later version.
-;;
-;; GCC is distributed in the hope that it will be useful, but
-;; WITHOUT ANY WARRANTY; without even the implied warranty of
-;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-;; General Public License for more details.
-;;
-;; You should have received a copy of the GNU General Public License
-;; along with GCC; see the file COPYING3. If not see
-;; <http://www.gnu.org/licenses/>.
-
-;; Register numbers
-(define_constants
- [
- (R0_REGNUM 0)
- (R1_REGNUM 1)
- (R2_REGNUM 2)
- (R3_REGNUM 3)
- (R4_REGNUM 4)
- (R5_REGNUM 5)
- (R6_REGNUM 6)
- (R7_REGNUM 7)
- (R8_REGNUM 8)
- (R9_REGNUM 9)
- (R10_REGNUM 10)
- (R11_REGNUM 11)
- (R12_REGNUM 12)
- (R13_REGNUM 13)
- (R14_REGNUM 14)
- (R15_REGNUM 15)
- (R16_REGNUM 16)
- (IP0_REGNUM 16)
- (R17_REGNUM 17)
- (IP1_REGNUM 17)
- (R18_REGNUM 18)
- (R19_REGNUM 19)
- (R20_REGNUM 20)
- (R21_REGNUM 21)
- (R22_REGNUM 22)
- (R23_REGNUM 23)
- (R24_REGNUM 24)
- (R25_REGNUM 25)
- (R26_REGNUM 26)
- (R27_REGNUM 27)
- (R28_REGNUM 28)
- (R29_REGNUM 29)
- (R30_REGNUM 30)
- (LR_REGNUM 30)
- (SP_REGNUM 31)
- (V0_REGNUM 32)
- (V15_REGNUM 47)
- (V31_REGNUM 63)
- (SFP_REGNUM 64)
- (AP_REGNUM 65)
- (CC_REGNUM 66)
- ]
-)
-
-(define_c_enum "unspec" [
- UNSPEC_CASESI
- UNSPEC_CLS
- UNSPEC_FRINTA
- UNSPEC_FRINTI
- UNSPEC_FRINTM
- UNSPEC_FRINTP
- UNSPEC_FRINTX
- UNSPEC_FRINTZ
- UNSPEC_GOTSMALLPIC
- UNSPEC_GOTSMALLTLS
- UNSPEC_LD2
- UNSPEC_LD3
- UNSPEC_LD4
- UNSPEC_MB
- UNSPEC_NOP
- UNSPEC_PRLG_STK
- UNSPEC_RBIT
- UNSPEC_ST2
- UNSPEC_ST3
- UNSPEC_ST4
- UNSPEC_TLS
- UNSPEC_TLSDESC
- UNSPEC_VSTRUCTDUMMY
-])
-
-(define_c_enum "unspecv" [
- UNSPECV_EH_RETURN ; Represent EH_RETURN
- ]
-)
-
-;; If further include files are added, the definition of MD_INCLUDES
-;; must be updated.
-
-(include "constraints.md")
-(include "predicates.md")
-(include "iterators.md")
-
-;; -------------------------------------------------------------------
-;; Instruction types and attributes
-;; -------------------------------------------------------------------
-
-;; Main data types used by the instructions
-
-(define_attr "mode" "unknown,none,QI,HI,SI,DI,TI,SF,DF,TF"
- (const_string "unknown"))
-
-(define_attr "mode2" "unknown,none,QI,HI,SI,DI,TI,SF,DF,TF"
- (const_string "unknown"))
-
-; The "v8type" attribute is used to for fine grained classification of
-; AArch64 instructions. This table briefly explains the meaning of each type.
-
-; adc add/subtract with carry.
-; adcs add/subtract with carry (setting condition flags).
-; adr calculate address.
-; alu simple alu instruction (no memory or fp regs access).
-; alu_ext simple alu instruction (sign/zero-extended register).
-; alu_shift simple alu instruction, with a source operand shifted by a constant.
-; alus simple alu instruction (setting condition flags).
-; alus_ext simple alu instruction (sign/zero-extended register, setting condition flags).
-; alus_shift simple alu instruction, with a source operand shifted by a constant (setting condition flags).
-; bfm bitfield move operation.
-; branch branch.
-; call subroutine call.
-; ccmp conditional compare.
-; clz count leading zeros/sign bits.
-; csel conditional select.
-; dmb data memory barrier.
-; extend sign/zero-extend (specialised bitfield move).
-; extr extract register-sized bitfield encoding.
-; fpsimd_load load single floating point / simd scalar register from memory.
-; fpsimd_load2 load pair of floating point / simd scalar registers from memory.
-; fpsimd_store store single floating point / simd scalar register to memory.
-; fpsimd_store2 store pair floating point / simd scalar registers to memory.
-; fadd floating point add/sub.
-; fccmp floating point conditional compare.
-; fcmp floating point comparison.
-; fconst floating point load immediate.
-; fcsel floating point conditional select.
-; fcvt floating point convert (float to float).
-; fcvtf2i floating point convert (float to integer).
-; fcvti2f floating point convert (integer to float).
-; fdiv floating point division operation.
-; ffarith floating point abs, neg or cpy.
-; fmadd floating point multiply-add/sub.
-; fminmax floating point min/max.
-; fmov floating point move (float to float).
-; fmovf2i floating point move (float to integer).
-; fmovi2f floating point move (integer to float).
-; fmul floating point multiply.
-; frint floating point round to integral.
-; fsqrt floating point square root.
-; load_acq load-acquire.
-; load load single general register from memory
-; load2 load pair of general registers from memory
-; logic logical operation (register).
-; logic_imm and/or/xor operation (immediate).
-; logic_shift logical operation with shift.
-; logics logical operation (register, setting condition flags).
-; logics_imm and/or/xor operation (immediate, setting condition flags).
-; logics_shift logical operation with shift (setting condition flags).
-; madd integer multiply-add/sub.
-; maddl widening integer multiply-add/sub.
-; misc miscellaneous - any type that doesn't fit into the rest.
-; move integer move operation.
-; move2 double integer move operation.
-; movk move 16-bit immediate with keep.
-; movz              move 16-bit immediate with zero/one.
-; mrs system/special register move.
-; mulh 64x64 to 128-bit multiply (high part).
-; mull widening multiply.
-; mult integer multiply instruction.
-; prefetch memory prefetch.
-; rbit reverse bits.
-; rev reverse bytes.
-; sdiv integer division operation (signed).
-; shift variable shift operation.
-; shift_imm immediate shift operation (specialised bitfield move).
-; store_rel store-release.
-; store store single general register to memory.
-; store2 store pair of general registers to memory.
-; udiv integer division operation (unsigned).
-
-(define_attr "v8type"
- "adc,\
- adcs,\
- adr,\
- alu,\
- alu_ext,\
- alu_shift,\
- alus,\
- alus_ext,\
- alus_shift,\
- bfm,\
- branch,\
- call,\
- ccmp,\
- clz,\
- csel,\
- dmb,\
- div,\
- div64,\
- extend,\
- extr,\
- fpsimd_load,\
- fpsimd_load2,\
- fpsimd_store2,\
- fpsimd_store,\
- fadd,\
- fccmp,\
- fcvt,\
- fcvtf2i,\
- fcvti2f,\
- fcmp,\
- fconst,\
- fcsel,\
- fdiv,\
- ffarith,\
- fmadd,\
- fminmax,\
- fmov,\
- fmovf2i,\
- fmovi2f,\
- fmul,\
- frint,\
- fsqrt,\
- load_acq,\
- load1,\
- load2,\
- logic,\
- logic_imm,\
- logic_shift,\
- logics,\
- logics_imm,\
- logics_shift,\
- madd,\
- maddl,\
- misc,\
- move,\
- move2,\
- movk,\
- movz,\
- mrs,\
- mulh,\
- mull,\
- mult,\
- prefetch,\
- rbit,\
- rev,\
- sdiv,\
- shift,\
- shift_imm,\
- store_rel,\
- store1,\
- store2,\
- udiv"
- (const_string "alu"))
-
-
-; The "type" attribute is used by the AArch32 backend. Below is a mapping
-; from "v8type" to "type".
-
-(define_attr "type"
- "alu,alu_shift,block,branch,call,f_2_r,f_cvt,f_flag,f_loads,
- f_loadd,f_stored,f_stores,faddd,fadds,fcmpd,fcmps,fconstd,fconsts,
- fcpys,fdivd,fdivs,ffarithd,ffariths,fmacd,fmacs,fmuld,fmuls,load_byte,
- load1,load2,mult,r_2_f,store1,store2"
- (cond [
- (eq_attr "v8type" "alu_shift,alus_shift,logic_shift,logics_shift") (const_string "alu_shift")
- (eq_attr "v8type" "branch") (const_string "branch")
- (eq_attr "v8type" "call") (const_string "call")
- (eq_attr "v8type" "fmovf2i") (const_string "f_2_r")
- (eq_attr "v8type" "fcvt,fcvtf2i,fcvti2f") (const_string "f_cvt")
- (and (eq_attr "v8type" "fpsimd_load") (eq_attr "mode" "SF")) (const_string "f_loads")
- (and (eq_attr "v8type" "fpsimd_load") (eq_attr "mode" "DF")) (const_string "f_loadd")
- (and (eq_attr "v8type" "fpsimd_store") (eq_attr "mode" "SF")) (const_string "f_stores")
- (and (eq_attr "v8type" "fpsimd_store") (eq_attr "mode" "DF")) (const_string "f_stored")
- (and (eq_attr "v8type" "fadd,fminmax") (eq_attr "mode" "DF")) (const_string "faddd")
- (and (eq_attr "v8type" "fadd,fminmax") (eq_attr "mode" "SF")) (const_string "fadds")
- (and (eq_attr "v8type" "fcmp,fccmp") (eq_attr "mode" "DF")) (const_string "fcmpd")
- (and (eq_attr "v8type" "fcmp,fccmp") (eq_attr "mode" "SF")) (const_string "fcmps")
- (and (eq_attr "v8type" "fconst") (eq_attr "mode" "DF")) (const_string "fconstd")
- (and (eq_attr "v8type" "fconst") (eq_attr "mode" "SF")) (const_string "fconsts")
- (and (eq_attr "v8type" "fdiv,fsqrt") (eq_attr "mode" "DF")) (const_string "fdivd")
- (and (eq_attr "v8type" "fdiv,fsqrt") (eq_attr "mode" "SF")) (const_string "fdivs")
- (and (eq_attr "v8type" "ffarith") (eq_attr "mode" "DF")) (const_string "ffarithd")
- (and (eq_attr "v8type" "ffarith") (eq_attr "mode" "SF")) (const_string "ffariths")
- (and (eq_attr "v8type" "fmadd") (eq_attr "mode" "DF")) (const_string "fmacd")
- (and (eq_attr "v8type" "fmadd") (eq_attr "mode" "SF")) (const_string "fmacs")
- (and (eq_attr "v8type" "fmul") (eq_attr "mode" "DF")) (const_string "fmuld")
- (and (eq_attr "v8type" "fmul") (eq_attr "mode" "SF")) (const_string "fmuls")
- (and (eq_attr "v8type" "load1") (eq_attr "mode" "QI,HI")) (const_string "load_byte")
- (and (eq_attr "v8type" "load1") (eq_attr "mode" "SI,DI,TI")) (const_string "load1")
- (eq_attr "v8type" "load2") (const_string "load2")
- (and (eq_attr "v8type" "mulh,mult,mull,madd,sdiv,udiv") (eq_attr "mode" "SI")) (const_string "mult")
- (eq_attr "v8type" "fmovi2f") (const_string "r_2_f")
- (eq_attr "v8type" "store1") (const_string "store1")
- (eq_attr "v8type" "store2") (const_string "store2")
- ]
- (const_string "alu")))
-
-;; Attribute that specifies whether or not the instruction touches fp
-;; registers.
-(define_attr "fp" "no,yes" (const_string "no"))
-
-;; Attribute that specifies whether or not the instruction touches simd
-;; registers.
-(define_attr "simd" "no,yes" (const_string "no"))
-
-(define_attr "length" ""
- (const_int 4))
-
-;; Attribute that controls whether an alternative is enabled or not.
-;; Currently it is only used to disable alternatives which touch fp or simd
-;; registers when -mgeneral-regs-only is specified.
-(define_attr "enabled" "no,yes"
- (cond [(ior
- (and (eq_attr "fp" "yes")
- (eq (symbol_ref "TARGET_FLOAT") (const_int 0)))
- (and (eq_attr "simd" "yes")
- (eq (symbol_ref "TARGET_SIMD") (const_int 0))))
- (const_string "no")
- ] (const_string "yes")))
-
-;; -------------------------------------------------------------------
-;; Pipeline descriptions and scheduling
-;; -------------------------------------------------------------------
-
-;; Processor types.
-(include "aarch64-tune.md")
-
-;; Scheduling
-(include "aarch64-generic.md")
-(include "large.md")
-(include "small.md")
-
-;; -------------------------------------------------------------------
-;; Jumps and other miscellaneous insns
-;; -------------------------------------------------------------------
-
-(define_insn "indirect_jump"
- [(set (pc) (match_operand:DI 0 "register_operand" "r"))]
- ""
- "br\\t%0"
- [(set_attr "v8type" "branch")]
-)
-
-(define_insn "jump"
- [(set (pc) (label_ref (match_operand 0 "" "")))]
- ""
- "b\\t%l0"
- [(set_attr "v8type" "branch")]
-)
-
-(define_expand "cbranch<mode>4"
- [(set (pc) (if_then_else (match_operator 0 "aarch64_comparison_operator"
- [(match_operand:GPI 1 "register_operand" "")
- (match_operand:GPI 2 "aarch64_plus_operand" "")])
- (label_ref (match_operand 3 "" ""))
- (pc)))]
- ""
- "
- operands[1] = aarch64_gen_compare_reg (GET_CODE (operands[0]), operands[1],
- operands[2]);
- operands[2] = const0_rtx;
- "
-)
-
-(define_expand "cbranch<mode>4"
- [(set (pc) (if_then_else (match_operator 0 "aarch64_comparison_operator"
- [(match_operand:GPF 1 "register_operand" "")
- (match_operand:GPF 2 "aarch64_reg_or_zero" "")])
- (label_ref (match_operand 3 "" ""))
- (pc)))]
- ""
- "
- operands[1] = aarch64_gen_compare_reg (GET_CODE (operands[0]), operands[1],
- operands[2]);
- operands[2] = const0_rtx;
- "
-)
-
-(define_insn "*condjump"
- [(set (pc) (if_then_else (match_operator 0 "aarch64_comparison_operator"
- [(match_operand 1 "cc_register" "") (const_int 0)])
- (label_ref (match_operand 2 "" ""))
- (pc)))]
- ""
- "b%m0\\t%l2"
- [(set_attr "v8type" "branch")]
-)
-
-(define_expand "casesi"
- [(match_operand:SI 0 "register_operand" "") ; Index
- (match_operand:SI 1 "const_int_operand" "") ; Lower bound
- (match_operand:SI 2 "const_int_operand" "") ; Total range
- (match_operand:DI 3 "" "") ; Table label
- (match_operand:DI 4 "" "")] ; Out of range label
- ""
- {
- if (operands[1] != const0_rtx)
- {
- rtx reg = gen_reg_rtx (SImode);
-
- /* Canonical RTL says that if you have:
-
- (minus (X) (CONST))
-
- then this should be emitted as:
-
- (plus (X) (-CONST))
-
- The use of trunc_int_for_mode ensures that the resulting
-      constant can be represented in SImode; this is important
- for the corner case where operand[1] is INT_MIN. */
-
- operands[1] = GEN_INT (trunc_int_for_mode (-INTVAL (operands[1]), SImode));
-
- if (!(*insn_data[CODE_FOR_addsi3].operand[2].predicate)
- (operands[1], SImode))
- operands[1] = force_reg (SImode, operands[1]);
- emit_insn (gen_addsi3 (reg, operands[0], operands[1]));
- operands[0] = reg;
- }
-
- if (!aarch64_plus_operand (operands[2], SImode))
- operands[2] = force_reg (SImode, operands[2]);
- emit_jump_insn (gen_cbranchsi4 (gen_rtx_GTU (SImode, const0_rtx,
- const0_rtx),
- operands[0], operands[2], operands[4]));
-
- operands[2] = force_reg (DImode, gen_rtx_LABEL_REF (VOIDmode, operands[3]));
- emit_jump_insn (gen_casesi_dispatch (operands[2], operands[0],
- operands[3]));
- DONE;
- }
-)
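The trunc_int_for_mode call in the expander above guards the INT_MIN corner case called out in the comment; a small, hypothetical demonstration of the arithmetic:

/* Illustrative sketch only: negating the SImode lower bound INT_MIN gives
   +0x80000000, which is not a valid signed 32-bit value; truncating back
   to 32 bits (what trunc_int_for_mode does) yields INT_MIN again, which is
   representable and safe to hand to addsi3.  */
#include <stdio.h>

int
main (void)
{
  long long lower_bound = -0x80000000LL;  /* operands[1] == INT_MIN.  */
  long long negated = -lower_bound;       /* 0x80000000: out of SImode range.  */
  int truncated = (int) negated;          /* Low 32 bits, sign-extended.  */
  printf ("%lld truncates to %d\n", negated, truncated);
  return 0;
}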
-
-(define_insn "casesi_dispatch"
- [(parallel
- [(set (pc)
- (mem:DI (unspec [(match_operand:DI 0 "register_operand" "r")
- (match_operand:SI 1 "register_operand" "r")]
- UNSPEC_CASESI)))
- (clobber (reg:CC CC_REGNUM))
- (clobber (match_scratch:DI 3 "=r"))
- (clobber (match_scratch:DI 4 "=r"))
- (use (label_ref (match_operand 2 "" "")))])]
- ""
- "*
- return aarch64_output_casesi (operands);
- "
- [(set_attr "length" "16")
- (set_attr "v8type" "branch")]
-)
-
-(define_insn "nop"
- [(unspec[(const_int 0)] UNSPEC_NOP)]
- ""
- "nop"
- [(set_attr "v8type" "misc")]
-)
-
-(define_expand "prologue"
- [(clobber (const_int 0))]
- ""
- "
- aarch64_expand_prologue ();
- DONE;
- "
-)
-
-(define_expand "epilogue"
- [(clobber (const_int 0))]
- ""
- "
- aarch64_expand_epilogue (false);
- DONE;
- "
-)
-
-(define_expand "sibcall_epilogue"
- [(clobber (const_int 0))]
- ""
- "
- aarch64_expand_epilogue (true);
- DONE;
- "
-)
-
-(define_insn "*do_return"
- [(return)]
- ""
- "ret"
- [(set_attr "v8type" "branch")]
-)
-
-(define_insn "eh_return"
- [(unspec_volatile [(match_operand:DI 0 "register_operand" "r")]
- UNSPECV_EH_RETURN)]
- ""
- "#"
- [(set_attr "v8type" "branch")]
-)
-
-(define_split
- [(unspec_volatile [(match_operand:DI 0 "register_operand" "")]
- UNSPECV_EH_RETURN)]
- "reload_completed"
- [(set (match_dup 1) (match_dup 0))]
- {
- operands[1] = aarch64_final_eh_return_addr ();
- }
-)
-
-(define_insn "*cb<optab><mode>1"
- [(set (pc) (if_then_else (EQL (match_operand:GPI 0 "register_operand" "r")
- (const_int 0))
- (label_ref (match_operand 1 "" ""))
- (pc)))]
- ""
- "<cbz>\\t%<w>0, %l1"
- [(set_attr "v8type" "branch")]
-)
-
-(define_insn "*tb<optab><mode>1"
- [(set (pc) (if_then_else
- (EQL (zero_extract:DI (match_operand:GPI 0 "register_operand" "r")
- (const_int 1)
- (match_operand 1 "const_int_operand" "n"))
- (const_int 0))
- (label_ref (match_operand 2 "" ""))
- (pc)))
- (clobber (match_scratch:DI 3 "=r"))]
- ""
- "*
- if (get_attr_length (insn) == 8)
- return \"ubfx\\t%<w>3, %<w>0, %1, #1\;<cbz>\\t%<w>3, %l2\";
- return \"<tbz>\\t%<w>0, %1, %l2\";
- "
- [(set_attr "v8type" "branch")
- (set_attr "mode" "<MODE>")
- (set (attr "length")
- (if_then_else (and (ge (minus (match_dup 2) (pc)) (const_int -32768))
- (lt (minus (match_dup 2) (pc)) (const_int 32764)))
- (const_int 4)
- (const_int 8)))]
-)
-
-(define_insn "*cb<optab><mode>1"
- [(set (pc) (if_then_else (LTGE (match_operand:ALLI 0 "register_operand" "r")
- (const_int 0))
- (label_ref (match_operand 1 "" ""))
- (pc)))
- (clobber (match_scratch:DI 2 "=r"))]
- ""
- "*
- if (get_attr_length (insn) == 8)
- return \"ubfx\\t%<w>2, %<w>0, <sizem1>, #1\;<cbz>\\t%<w>2, %l1\";
- return \"<tbz>\\t%<w>0, <sizem1>, %l1\";
- "
- [(set_attr "v8type" "branch")
- (set_attr "mode" "<MODE>")
- (set (attr "length")
- (if_then_else (and (ge (minus (match_dup 1) (pc)) (const_int -32768))
- (lt (minus (match_dup 1) (pc)) (const_int 32764)))
- (const_int 4)
- (const_int 8)))]
-)
-
-;; -------------------------------------------------------------------
-;; Subroutine calls and sibcalls
-;; -------------------------------------------------------------------
-
-(define_expand "call"
- [(parallel [(call (match_operand 0 "memory_operand" "")
- (match_operand 1 "general_operand" ""))
- (use (match_operand 2 "" ""))
- (clobber (reg:DI LR_REGNUM))])]
- ""
- "
- {
- rtx callee;
-
- /* In an untyped call, we can get NULL for operand 2. */
- if (operands[2] == NULL)
- operands[2] = const0_rtx;
-
- /* Decide if we should generate indirect calls by loading the
- 64-bit address of the callee into a register before performing
- the branch-and-link. */
- callee = XEXP (operands[0], 0);
- if (GET_CODE (callee) == SYMBOL_REF
- ? aarch64_is_long_call_p (callee)
- : !REG_P (callee))
- XEXP (operands[0], 0) = force_reg (Pmode, callee);
- }"
-)
-
-(define_insn "*call_reg"
- [(call (mem:DI (match_operand:DI 0 "register_operand" "r"))
- (match_operand 1 "" ""))
- (use (match_operand 2 "" ""))
- (clobber (reg:DI LR_REGNUM))]
- ""
- "blr\\t%0"
- [(set_attr "v8type" "call")]
-)
-
-(define_insn "*call_symbol"
- [(call (mem:DI (match_operand:DI 0 "" ""))
- (match_operand 1 "" ""))
- (use (match_operand 2 "" ""))
- (clobber (reg:DI LR_REGNUM))]
- "GET_CODE (operands[0]) == SYMBOL_REF
- && !aarch64_is_long_call_p (operands[0])"
- "bl\\t%a0"
- [(set_attr "v8type" "call")]
-)
-
-(define_expand "call_value"
- [(parallel [(set (match_operand 0 "" "")
- (call (match_operand 1 "memory_operand" "")
- (match_operand 2 "general_operand" "")))
- (use (match_operand 3 "" ""))
- (clobber (reg:DI LR_REGNUM))])]
- ""
- "
- {
- rtx callee;
-
- /* In an untyped call, we can get NULL for operand 3. */
- if (operands[3] == NULL)
- operands[3] = const0_rtx;
-
- /* Decide if we should generate indirect calls by loading the
- 64-bit address of the callee into a register before performing
- the branch-and-link. */
- callee = XEXP (operands[1], 0);
- if (GET_CODE (callee) == SYMBOL_REF
- ? aarch64_is_long_call_p (callee)
- : !REG_P (callee))
- XEXP (operands[1], 0) = force_reg (Pmode, callee);
- }"
-)
-
-(define_insn "*call_value_reg"
- [(set (match_operand 0 "" "")
- (call (mem:DI (match_operand:DI 1 "register_operand" "r"))
- (match_operand 2 "" "")))
- (use (match_operand 3 "" ""))
- (clobber (reg:DI LR_REGNUM))]
- ""
- "blr\\t%1"
- [(set_attr "v8type" "call")]
-)
-
-(define_insn "*call_value_symbol"
- [(set (match_operand 0 "" "")
- (call (mem:DI (match_operand:DI 1 "" ""))
- (match_operand 2 "" "")))
- (use (match_operand 3 "" ""))
- (clobber (reg:DI LR_REGNUM))]
- "GET_CODE (operands[1]) == SYMBOL_REF
- && !aarch64_is_long_call_p (operands[1])"
- "bl\\t%a1"
- [(set_attr "v8type" "call")]
-)
-
-(define_expand "sibcall"
- [(parallel [(call (match_operand 0 "memory_operand" "")
- (match_operand 1 "general_operand" ""))
- (return)
- (use (match_operand 2 "" ""))])]
- ""
- {
- if (operands[2] == NULL_RTX)
- operands[2] = const0_rtx;
- }
-)
-
-(define_expand "sibcall_value"
- [(parallel [(set (match_operand 0 "" "")
- (call (match_operand 1 "memory_operand" "")
- (match_operand 2 "general_operand" "")))
- (return)
- (use (match_operand 3 "" ""))])]
- ""
- {
- if (operands[3] == NULL_RTX)
- operands[3] = const0_rtx;
- }
-)
-
-(define_insn "*sibcall_insn"
- [(call (mem:DI (match_operand:DI 0 "" "X"))
- (match_operand 1 "" ""))
- (return)
- (use (match_operand 2 "" ""))]
- "GET_CODE (operands[0]) == SYMBOL_REF"
- "b\\t%a0"
- [(set_attr "v8type" "branch")]
-)
-
-(define_insn "*sibcall_value_insn"
- [(set (match_operand 0 "" "")
- (call (mem:DI (match_operand 1 "" "X"))
- (match_operand 2 "" "")))
- (return)
- (use (match_operand 3 "" ""))]
- "GET_CODE (operands[1]) == SYMBOL_REF"
- "b\\t%a1"
- [(set_attr "v8type" "branch")]
-)
-
-;; Call subroutine returning any type.
-
-(define_expand "untyped_call"
- [(parallel [(call (match_operand 0 "")
- (const_int 0))
- (match_operand 1 "")
- (match_operand 2 "")])]
- ""
-{
- int i;
-
- emit_call_insn (GEN_CALL (operands[0], const0_rtx, NULL, const0_rtx));
-
- for (i = 0; i < XVECLEN (operands[2], 0); i++)
- {
- rtx set = XVECEXP (operands[2], 0, i);
- emit_move_insn (SET_DEST (set), SET_SRC (set));
- }
-
- /* The optimizer does not know that the call sets the function value
- registers we stored in the result block. We avoid problems by
- claiming that all hard registers are used and clobbered at this
- point. */
- emit_insn (gen_blockage ());
- DONE;
-})
-
-;; -------------------------------------------------------------------
-;; Moves
-;; -------------------------------------------------------------------
-
-(define_expand "mov<mode>"
- [(set (match_operand:SHORT 0 "nonimmediate_operand" "")
- (match_operand:SHORT 1 "general_operand" ""))]
- ""
- "
- if (GET_CODE (operands[0]) == MEM && operands[1] != const0_rtx)
- operands[1] = force_reg (<MODE>mode, operands[1]);
- "
-)
-
-(define_insn "*mov<mode>_aarch64"
- [(set (match_operand:SHORT 0 "nonimmediate_operand" "=r,r,r,m, r,*w")
- (match_operand:SHORT 1 "general_operand" " r,M,m,rZ,*w,r"))]
- "(register_operand (operands[0], <MODE>mode)
- || aarch64_reg_or_zero (operands[1], <MODE>mode))"
- "@
- mov\\t%w0, %w1
- mov\\t%w0, %1
- ldr<size>\\t%w0, %1
- str<size>\\t%w1, %0
- umov\\t%w0, %1.<v>[0]
- dup\\t%0.<Vallxd>, %w1"
- [(set_attr "v8type" "move,alu,load1,store1,*,*")
- (set_attr "simd_type" "*,*,*,*,simd_movgp,simd_dupgp")
- (set_attr "mode" "<MODE>")
- (set_attr "simd_mode" "<MODE>")]
-)
-
-(define_expand "mov<mode>"
- [(set (match_operand:GPI 0 "nonimmediate_operand" "")
- (match_operand:GPI 1 "general_operand" ""))]
- ""
- "
- if (GET_CODE (operands[0]) == MEM && operands[1] != const0_rtx)
- operands[1] = force_reg (<MODE>mode, operands[1]);
-
- if (CONSTANT_P (operands[1]))
- {
- aarch64_expand_mov_immediate (operands[0], operands[1]);
- DONE;
- }
- "
-)
-
-(define_insn "*movsi_aarch64"
- [(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,r,m, *w, r,*w")
- (match_operand:SI 1 "aarch64_mov_operand" " r,M,m,rZ,rZ,*w,*w"))]
- "(register_operand (operands[0], SImode)
- || aarch64_reg_or_zero (operands[1], SImode))"
- "@
- mov\\t%w0, %w1
- mov\\t%w0, %1
- ldr\\t%w0, %1
- str\\t%w1, %0
- fmov\\t%s0, %w1
- fmov\\t%w0, %s1
- fmov\\t%s0, %s1"
- [(set_attr "v8type" "move,alu,load1,store1,fmov,fmov,fmov")
- (set_attr "mode" "SI")
- (set_attr "fp" "*,*,*,*,yes,yes,yes")]
-)
-
-(define_insn "*movdi_aarch64"
- [(set (match_operand:DI 0 "nonimmediate_operand" "=r,k,r,r,r,m, r, r, *w, r,*w,w")
- (match_operand:DI 1 "aarch64_mov_operand" " r,r,k,N,m,rZ,Usa,Ush,rZ,*w,*w,Dd"))]
- "(register_operand (operands[0], DImode)
- || aarch64_reg_or_zero (operands[1], DImode))"
- "@
- mov\\t%x0, %x1
- mov\\t%0, %x1
- mov\\t%x0, %1
- mov\\t%x0, %1
- ldr\\t%x0, %1
- str\\t%x1, %0
- adr\\t%x0, %a1
- adrp\\t%x0, %A1
- fmov\\t%d0, %x1
- fmov\\t%x0, %d1
- fmov\\t%d0, %d1
- movi\\t%d0, %1"
- [(set_attr "v8type" "move,move,move,alu,load1,store1,adr,adr,fmov,fmov,fmov,fmov")
- (set_attr "mode" "DI")
- (set_attr "fp" "*,*,*,*,*,*,*,*,yes,yes,yes,yes")]
-)
-
-(define_insn "insv_imm<mode>"
- [(set (zero_extract:GPI (match_operand:GPI 0 "register_operand" "+r")
- (const_int 16)
- (match_operand:GPI 1 "const_int_operand" "n"))
- (match_operand:GPI 2 "const_int_operand" "n"))]
- "INTVAL (operands[1]) < GET_MODE_BITSIZE (<MODE>mode)
- && INTVAL (operands[1]) % 16 == 0
- && UINTVAL (operands[2]) <= 0xffff"
- "movk\\t%<w>0, %X2, lsl %1"
- [(set_attr "v8type" "movk")
- (set_attr "mode" "<MODE>")]
-)
-
-(define_expand "movti"
- [(set (match_operand:TI 0 "nonimmediate_operand" "")
- (match_operand:TI 1 "general_operand" ""))]
- ""
- "
- if (GET_CODE (operands[0]) == MEM && operands[1] != const0_rtx)
- operands[1] = force_reg (TImode, operands[1]);
- "
-)
-
-(define_insn "*movti_aarch64"
- [(set (match_operand:TI 0
- "nonimmediate_operand" "=r, *w,r ,*w,r ,Ump,Ump,*w,m")
- (match_operand:TI 1
- "aarch64_movti_operand" " rn,r ,*w,*w,Ump,r ,Z , m,*w"))]
- "(register_operand (operands[0], TImode)
- || aarch64_reg_or_zero (operands[1], TImode))"
- "@
- #
- #
- #
- orr\\t%0.16b, %1.16b, %1.16b
- ldp\\t%0, %H0, %1
- stp\\t%1, %H1, %0
- stp\\txzr, xzr, %0
- ldr\\t%q0, %1
- str\\t%q1, %0"
- [(set_attr "v8type" "move2,fmovi2f,fmovf2i,*, \
- load2,store2,store2,fpsimd_load,fpsimd_store")
- (set_attr "simd_type" "*,*,*,simd_move,*,*,*,*,*")
- (set_attr "mode" "DI,DI,DI,TI,DI,DI,DI,TI,TI")
- (set_attr "length" "8,8,8,4,4,4,4,4,4")
- (set_attr "fp" "*,*,*,*,*,*,*,yes,yes")
- (set_attr "simd" "*,*,*,yes,*,*,*,*,*")])
-
-;; Split a TImode register-register or register-immediate move into
-;; its component DImode pieces, taking care to handle overlapping
-;; source and dest registers.
-(define_split
- [(set (match_operand:TI 0 "register_operand" "")
- (match_operand:TI 1 "aarch64_reg_or_imm" ""))]
- "reload_completed && aarch64_split_128bit_move_p (operands[0], operands[1])"
- [(const_int 0)]
-{
- aarch64_split_128bit_move (operands[0], operands[1]);
- DONE;
-})
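
The splitter above defers to aarch64_split_128bit_move, whose main subtlety is ordering the two 64-bit sub-moves so that an overlapping source half is read before it is overwritten. The C below is only a sketch of that ordering rule, under the assumption that the halves behave like two named registers; it is not the actual implementation.

#include <stdint.h>

/* Copy a 128-bit value held as two 64-bit halves, allowing the destination
   low half to alias the source high half (or the destination high half to
   alias the source low half), as happens when a TImode register pair
   overlaps itself.  */
void
copy_ti_halves (uint64_t *dst_lo, uint64_t *dst_hi,
                uint64_t *src_lo, uint64_t *src_hi)
{
  if (dst_lo == src_hi)
    {
      *dst_hi = *src_hi;        /* consume src_hi before dst_lo clobbers it */
      *dst_lo = *src_lo;
    }
  else
    {
      *dst_lo = *src_lo;        /* covers dst_hi == src_lo and no overlap */
      *dst_hi = *src_hi;
    }
}
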
-
-(define_expand "mov<mode>"
- [(set (match_operand:GPF 0 "nonimmediate_operand" "")
- (match_operand:GPF 1 "general_operand" ""))]
- ""
- "
- if (!TARGET_FLOAT)
- {
- sorry (\"%qs and floating point code\", \"-mgeneral-regs-only\");
- FAIL;
- }
-
- if (GET_CODE (operands[0]) == MEM)
- operands[1] = force_reg (<MODE>mode, operands[1]);
- "
-)
-
-(define_insn "*movsf_aarch64"
- [(set (match_operand:SF 0 "nonimmediate_operand" "=w, ?r,w,w ,w,m,r,m ,r")
- (match_operand:SF 1 "general_operand" "?rY, w,w,Ufc,m,w,m,rY,r"))]
- "TARGET_FLOAT && (register_operand (operands[0], SFmode)
- || register_operand (operands[1], SFmode))"
- "@
- fmov\\t%s0, %w1
- fmov\\t%w0, %s1
- fmov\\t%s0, %s1
- fmov\\t%s0, %1
- ldr\\t%s0, %1
- str\\t%s1, %0
- ldr\\t%w0, %1
- str\\t%w1, %0
- mov\\t%w0, %w1"
- [(set_attr "v8type" "fmovi2f,fmovf2i,\
- fmov,fconst,fpsimd_load,\
- fpsimd_store,fpsimd_load,fpsimd_store,fmov")
- (set_attr "mode" "SF")]
-)
-
-(define_insn "*movdf_aarch64"
- [(set (match_operand:DF 0 "nonimmediate_operand" "=w, ?r,w,w ,w,m,r,m ,r")
- (match_operand:DF 1 "general_operand" "?rY, w,w,Ufc,m,w,m,rY,r"))]
- "TARGET_FLOAT && (register_operand (operands[0], DFmode)
- || register_operand (operands[1], DFmode))"
- "@
- fmov\\t%d0, %x1
- fmov\\t%x0, %d1
- fmov\\t%d0, %d1
- fmov\\t%d0, %1
- ldr\\t%d0, %1
- str\\t%d1, %0
- ldr\\t%x0, %1
- str\\t%x1, %0
- mov\\t%x0, %x1"
- [(set_attr "v8type" "fmovi2f,fmovf2i,\
- fmov,fconst,fpsimd_load,\
- fpsimd_store,fpsimd_load,fpsimd_store,move")
- (set_attr "mode" "DF")]
-)
-
-(define_expand "movtf"
- [(set (match_operand:TF 0 "nonimmediate_operand" "")
- (match_operand:TF 1 "general_operand" ""))]
- ""
- "
- if (!TARGET_FLOAT)
- {
- sorry (\"%qs and floating point code\", \"-mgeneral-regs-only\");
- FAIL;
- }
-
- if (GET_CODE (operands[0]) == MEM)
- operands[1] = force_reg (TFmode, operands[1]);
- "
-)
-
-(define_insn "*movtf_aarch64"
- [(set (match_operand:TF 0
- "nonimmediate_operand" "=w,?&r,w ,?r,w,?w,w,m,?r ,Ump")
- (match_operand:TF 1
- "general_operand" " w,?r, ?r,w ,Y,Y ,m,w,Ump,?rY"))]
- "TARGET_FLOAT && (register_operand (operands[0], TFmode)
- || register_operand (operands[1], TFmode))"
- "@
- orr\\t%0.16b, %1.16b, %1.16b
- mov\\t%0, %1\;mov\\t%H0, %H1
- fmov\\t%d0, %Q1\;fmov\\t%0.d[1], %R1
- fmov\\t%Q0, %d1\;fmov\\t%R0, %1.d[1]
- movi\\t%0.2d, #0
- fmov\\t%s0, wzr
- ldr\\t%q0, %1
- str\\t%q1, %0
- ldp\\t%0, %H0, %1
- stp\\t%1, %H1, %0"
- [(set_attr "v8type" "logic,move2,fmovi2f,fmovf2i,fconst,fconst,fpsimd_load,fpsimd_store,fpsimd_load2,fpsimd_store2")
- (set_attr "mode" "DF,DF,DF,DF,DF,DF,TF,TF,DF,DF")
- (set_attr "length" "4,8,8,8,4,4,4,4,4,4")
- (set_attr "fp" "*,*,yes,yes,*,yes,yes,yes,*,*")
- (set_attr "simd" "yes,*,*,*,yes,*,*,*,*,*")]
-)
-
-;; Operands 1 and 3 are tied together by the final condition; so we allow
-;; fairly lax checking on the second memory operation.
-(define_insn "load_pair<mode>"
- [(set (match_operand:GPI 0 "register_operand" "=r")
- (match_operand:GPI 1 "aarch64_mem_pair_operand" "Ump"))
- (set (match_operand:GPI 2 "register_operand" "=r")
- (match_operand:GPI 3 "memory_operand" "m"))]
- "rtx_equal_p (XEXP (operands[3], 0),
- plus_constant (Pmode,
- XEXP (operands[1], 0),
- GET_MODE_SIZE (<MODE>mode)))"
- "ldp\\t%<w>0, %<w>2, %1"
- [(set_attr "v8type" "load2")
- (set_attr "mode" "<MODE>")]
-)
-
-;; Operands 0 and 2 are tied together by the final condition; so we allow
-;; fairly lax checking on the second memory operation.
-(define_insn "store_pair<mode>"
- [(set (match_operand:GPI 0 "aarch64_mem_pair_operand" "=Ump")
- (match_operand:GPI 1 "register_operand" "r"))
- (set (match_operand:GPI 2 "memory_operand" "=m")
- (match_operand:GPI 3 "register_operand" "r"))]
- "rtx_equal_p (XEXP (operands[2], 0),
- plus_constant (Pmode,
- XEXP (operands[0], 0),
- GET_MODE_SIZE (<MODE>mode)))"
- "stp\\t%<w>1, %<w>3, %0"
- [(set_attr "v8type" "store2")
- (set_attr "mode" "<MODE>")]
-)
-
-;; Operands 1 and 3 are tied together by the final condition; so we allow
-;; fairly lax checking on the second memory operation.
-(define_insn "load_pair<mode>"
- [(set (match_operand:GPF 0 "register_operand" "=w")
- (match_operand:GPF 1 "aarch64_mem_pair_operand" "Ump"))
- (set (match_operand:GPF 2 "register_operand" "=w")
- (match_operand:GPF 3 "memory_operand" "m"))]
- "rtx_equal_p (XEXP (operands[3], 0),
- plus_constant (Pmode,
- XEXP (operands[1], 0),
- GET_MODE_SIZE (<MODE>mode)))"
- "ldp\\t%<w>0, %<w>2, %1"
- [(set_attr "v8type" "fpsimd_load2")
- (set_attr "mode" "<MODE>")]
-)
-
-;; Operands 0 and 2 are tied together by the final condition; so we allow
-;; fairly lax checking on the second memory operation.
-(define_insn "store_pair<mode>"
- [(set (match_operand:GPF 0 "aarch64_mem_pair_operand" "=Ump")
- (match_operand:GPF 1 "register_operand" "w"))
- (set (match_operand:GPF 2 "memory_operand" "=m")
- (match_operand:GPF 3 "register_operand" "w"))]
- "rtx_equal_p (XEXP (operands[2], 0),
- plus_constant (Pmode,
- XEXP (operands[0], 0),
- GET_MODE_SIZE (<MODE>mode)))"
- "stp\\t%<w>1, %<w>3, %0"
- [(set_attr "v8type" "fpsimd_load2")
- (set_attr "mode" "<MODE>")]
-)
-
-;; Load pair with writeback. This is primarily used in function epilogues
-;; when restoring [fp,lr]
-(define_insn "loadwb_pair<GPI:mode>_<PTR:mode>"
- [(parallel
- [(set (match_operand:PTR 0 "register_operand" "=k")
- (plus:PTR (match_operand:PTR 1 "register_operand" "0")
- (match_operand:PTR 4 "const_int_operand" "n")))
- (set (match_operand:GPI 2 "register_operand" "=r")
- (mem:GPI (plus:PTR (match_dup 1)
- (match_dup 4))))
- (set (match_operand:GPI 3 "register_operand" "=r")
- (mem:GPI (plus:PTR (match_dup 1)
- (match_operand:PTR 5 "const_int_operand" "n"))))])]
- "INTVAL (operands[5]) == INTVAL (operands[4]) + GET_MODE_SIZE (<GPI:MODE>mode)"
- "ldp\\t%<w>2, %<w>3, [%1], %4"
- [(set_attr "v8type" "load2")
- (set_attr "mode" "<GPI:MODE>")]
-)
-
-;; Store pair with writeback. This is primarily used in function prologues
-;; when saving [fp,lr]
-(define_insn "storewb_pair<GPI:mode>_<PTR:mode>"
- [(parallel
- [(set (match_operand:PTR 0 "register_operand" "=&k")
- (plus:PTR (match_operand:PTR 1 "register_operand" "0")
- (match_operand:PTR 4 "const_int_operand" "n")))
- (set (mem:GPI (plus:PTR (match_dup 0)
- (match_dup 4)))
- (match_operand:GPI 2 "register_operand" "r"))
- (set (mem:GPI (plus:PTR (match_dup 0)
- (match_operand:PTR 5 "const_int_operand" "n")))
- (match_operand:GPI 3 "register_operand" "r"))])]
- "INTVAL (operands[5]) == INTVAL (operands[4]) + GET_MODE_SIZE (<GPI:MODE>mode)"
- "stp\\t%<w>2, %<w>3, [%0, %4]!"
- [(set_attr "v8type" "store2")
- (set_attr "mode" "<GPI:MODE>")]
-)
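
These writeback forms are what a typical AArch64 prologue and epilogue use to save and restore the frame pointer and link register while adjusting the stack pointer in the same instruction. A hedged illustration follows; helper is an invented name and the exact offsets depend on the frame layout.

/* Any non-leaf function needs x29/x30 saved and restored; the expected
   frame code is roughly:
       stp x29, x30, [sp, -16]!     (storewb_pair, prologue)
       ...
       ldp x29, x30, [sp], 16       (loadwb_pair, epilogue)  */
extern void helper (void);

void
non_leaf (void)
{
  helper ();
}
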
-
-;; -------------------------------------------------------------------
-;; Sign/Zero extension
-;; -------------------------------------------------------------------
-
-(define_expand "<optab>sidi2"
- [(set (match_operand:DI 0 "register_operand")
- (ANY_EXTEND:DI (match_operand:SI 1 "nonimmediate_operand")))]
- ""
-)
-
-(define_insn "*extendsidi2_aarch64"
- [(set (match_operand:DI 0 "register_operand" "=r,r")
- (sign_extend:DI (match_operand:SI 1 "nonimmediate_operand" "r,m")))]
- ""
- "@
- sxtw\t%0, %w1
- ldrsw\t%0, %1"
- [(set_attr "v8type" "extend,load1")
- (set_attr "mode" "DI")]
-)
-
-(define_insn "*zero_extendsidi2_aarch64"
- [(set (match_operand:DI 0 "register_operand" "=r,r")
- (zero_extend:DI (match_operand:SI 1 "nonimmediate_operand" "r,m")))]
- ""
- "@
- uxtw\t%0, %w1
- ldr\t%w0, %1"
- [(set_attr "v8type" "extend,load1")
- (set_attr "mode" "DI")]
-)
-
-(define_expand "<ANY_EXTEND:optab><SHORT:mode><GPI:mode>2"
- [(set (match_operand:GPI 0 "register_operand")
- (ANY_EXTEND:GPI (match_operand:SHORT 1 "nonimmediate_operand")))]
- ""
-)
-
-(define_insn "*extend<SHORT:mode><GPI:mode>2_aarch64"
- [(set (match_operand:GPI 0 "register_operand" "=r,r")
- (sign_extend:GPI (match_operand:SHORT 1 "nonimmediate_operand" "r,m")))]
- ""
- "@
- sxt<SHORT:size>\t%<GPI:w>0, %w1
- ldrs<SHORT:size>\t%<GPI:w>0, %1"
- [(set_attr "v8type" "extend,load1")
- (set_attr "mode" "<GPI:MODE>")]
-)
-
-(define_insn "*zero_extend<SHORT:mode><GPI:mode>2_aarch64"
- [(set (match_operand:GPI 0 "register_operand" "=r,r")
- (zero_extend:GPI (match_operand:SHORT 1 "nonimmediate_operand" "r,m")))]
- ""
- "@
- uxt<SHORT:size>\t%<GPI:w>0, %w1
- ldr<SHORT:size>\t%w0, %1"
- [(set_attr "v8type" "extend,load1")
- (set_attr "mode" "<GPI:MODE>")]
-)
-
-(define_expand "<optab>qihi2"
- [(set (match_operand:HI 0 "register_operand")
- (ANY_EXTEND:HI (match_operand:QI 1 "nonimmediate_operand")))]
- ""
-)
-
-(define_insn "*<optab>qihi2_aarch64"
- [(set (match_operand:HI 0 "register_operand" "=r,r")
- (ANY_EXTEND:HI (match_operand:QI 1 "nonimmediate_operand" "r,m")))]
- ""
- "@
- <su>xtb\t%w0, %w1
- <ldrxt>b\t%w0, %1"
- [(set_attr "v8type" "extend,load1")
- (set_attr "mode" "HI")]
-)
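
The extension patterns above are what ordinary C integer widening maps onto: a narrow value already in a register typically becomes an sxt*/uxt* instruction, while a widening load typically folds the extension into the load itself. A small illustration; the instructions noted in the comments are the expected code, not a guarantee.

int
widen_from_reg (signed char c)
{
  return c;                     /* expected: sxtb w0, w0 */
}

unsigned int
widen_from_mem (const unsigned short *p)
{
  return *p;                    /* expected: ldrh w0, [x0] */
}
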
-
-;; -------------------------------------------------------------------
-;; Simple arithmetic
-;; -------------------------------------------------------------------
-
-(define_expand "add<mode>3"
- [(set
- (match_operand:GPI 0 "register_operand" "")
- (plus:GPI (match_operand:GPI 1 "register_operand" "")
- (match_operand:GPI 2 "aarch64_pluslong_operand" "")))]
- ""
- "
- if (! aarch64_plus_operand (operands[2], VOIDmode))
- {
- rtx subtarget = ((optimize && can_create_pseudo_p ())
- ? gen_reg_rtx (<MODE>mode) : operands[0]);
- HOST_WIDE_INT imm = INTVAL (operands[2]);
-
- if (imm < 0)
- imm = -(-imm & ~0xfff);
- else
- imm &= ~0xfff;
-
- emit_insn (gen_add<mode>3 (subtarget, operands[1], GEN_INT (imm)));
- operands[1] = subtarget;
- operands[2] = GEN_INT (INTVAL (operands[2]) - imm);
- }
- "
-)
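
When the addend is not directly encodable, the expander above splits it into a part with the low 12 bits cleared plus a small remainder, each of which fits an add/sub immediate for the values the aarch64_pluslong_operand predicate accepts. A stand-alone sketch of that arithmetic follows; split_add_imm is an invented name.

#include <stdio.h>

/* Mirror the masking done in the add<mode>3 expander: for example
   0x123456 splits into 0x123000 + 0x456.  */
static void
split_add_imm (long long imm, long long *high, long long *low)
{
  long long h = imm;

  if (h < 0)
    h = -(-h & ~0xfffLL);       /* clear the low 12 bits of the magnitude */
  else
    h &= ~0xfffLL;

  *high = h;
  *low = imm - h;
}

int
main (void)
{
  long long hi, lo;
  split_add_imm (0x123456, &hi, &lo);
  printf ("0x%llx + 0x%llx\n", hi, lo);   /* prints 0x123000 + 0x456 */
  return 0;
}
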
-
-(define_insn "*addsi3_aarch64"
- [(set
- (match_operand:SI 0 "register_operand" "=rk,rk,rk")
- (plus:SI
- (match_operand:SI 1 "register_operand" "%rk,rk,rk")
- (match_operand:SI 2 "aarch64_plus_operand" "I,r,J")))]
- ""
- "@
- add\\t%w0, %w1, %2
- add\\t%w0, %w1, %w2
- sub\\t%w0, %w1, #%n2"
- [(set_attr "v8type" "alu")
- (set_attr "mode" "SI")]
-)
-
-;; zero_extend version of above
-(define_insn "*addsi3_aarch64_uxtw"
- [(set
- (match_operand:DI 0 "register_operand" "=rk,rk,rk")
- (zero_extend:DI
- (plus:SI (match_operand:SI 1 "register_operand" "%rk,rk,rk")
- (match_operand:SI 2 "aarch64_plus_operand" "I,r,J"))))]
- ""
- "@
- add\\t%w0, %w1, %2
- add\\t%w0, %w1, %w2
- sub\\t%w0, %w1, #%n2"
- [(set_attr "v8type" "alu")
- (set_attr "mode" "SI")]
-)
-
-(define_insn "*adddi3_aarch64"
- [(set
- (match_operand:DI 0 "register_operand" "=rk,rk,rk,!w")
- (plus:DI
- (match_operand:DI 1 "register_operand" "%rk,rk,rk,!w")
- (match_operand:DI 2 "aarch64_plus_operand" "I,r,J,!w")))]
- ""
- "@
- add\\t%x0, %x1, %2
- add\\t%x0, %x1, %x2
- sub\\t%x0, %x1, #%n2
- add\\t%d0, %d1, %d2"
- [(set_attr "v8type" "alu")
- (set_attr "mode" "DI")
- (set_attr "simd" "*,*,*,yes")]
-)
-
-(define_insn "*add<mode>3_compare0"
- [(set (reg:CC_NZ CC_REGNUM)
- (compare:CC_NZ
- (plus:GPI (match_operand:GPI 1 "register_operand" "%r,r")
- (match_operand:GPI 2 "aarch64_plus_operand" "rI,J"))
- (const_int 0)))
- (set (match_operand:GPI 0 "register_operand" "=r,r")
- (plus:GPI (match_dup 1) (match_dup 2)))]
- ""
- "@
- adds\\t%<w>0, %<w>1, %<w>2
- subs\\t%<w>0, %<w>1, #%n2"
- [(set_attr "v8type" "alus")
- (set_attr "mode" "<MODE>")]
-)
-
-;; zero_extend version of above
-(define_insn "*addsi3_compare0_uxtw"
- [(set (reg:CC_NZ CC_REGNUM)
- (compare:CC_NZ
- (plus:SI (match_operand:SI 1 "register_operand" "%r,r")
- (match_operand:SI 2 "aarch64_plus_operand" "rI,J"))
- (const_int 0)))
- (set (match_operand:DI 0 "register_operand" "=r,r")
- (zero_extend:DI (plus:SI (match_dup 1) (match_dup 2))))]
- ""
- "@
- adds\\t%w0, %w1, %w2
- subs\\t%w0, %w1, #%n2"
- [(set_attr "v8type" "alus")
- (set_attr "mode" "SI")]
-)
-
-(define_insn "*add<mode>3nr_compare0"
- [(set (reg:CC_NZ CC_REGNUM)
- (compare:CC_NZ
- (plus:GPI (match_operand:GPI 0 "register_operand" "%r,r")
- (match_operand:GPI 1 "aarch64_plus_operand" "rI,J"))
- (const_int 0)))]
- ""
- "@
- cmn\\t%<w>0, %<w>1
- cmp\\t%<w>0, #%n1"
- [(set_attr "v8type" "alus")
- (set_attr "mode" "<MODE>")]
-)
-
-(define_insn "*compare_neg<mode>"
- [(set (reg:CC CC_REGNUM)
- (compare:CC
- (match_operand:GPI 0 "register_operand" "r")
- (neg:GPI (match_operand:GPI 1 "register_operand" "r"))))]
- ""
- "cmn\\t%<w>0, %<w>1"
- [(set_attr "v8type" "alus")
- (set_attr "mode" "<MODE>")]
-)
-
-(define_insn "*add_<shift>_<mode>"
- [(set (match_operand:GPI 0 "register_operand" "=rk")
- (plus:GPI (ASHIFT:GPI (match_operand:GPI 1 "register_operand" "r")
- (match_operand:QI 2 "aarch64_shift_imm_<mode>" "n"))
- (match_operand:GPI 3 "register_operand" "r")))]
- ""
- "add\\t%<w>0, %<w>3, %<w>1, <shift> %2"
- [(set_attr "v8type" "alu_shift")
- (set_attr "mode" "<MODE>")]
-)
-
-;; zero_extend version of above
-(define_insn "*add_<shift>_si_uxtw"
- [(set (match_operand:DI 0 "register_operand" "=rk")
- (zero_extend:DI
- (plus:SI (ASHIFT:SI (match_operand:SI 1 "register_operand" "r")
- (match_operand:QI 2 "aarch64_shift_imm_si" "n"))
- (match_operand:SI 3 "register_operand" "r"))))]
- ""
- "add\\t%w0, %w3, %w1, <shift> %2"
- [(set_attr "v8type" "alu_shift")
- (set_attr "mode" "SI")]
-)
-
-(define_insn "*add_mul_imm_<mode>"
- [(set (match_operand:GPI 0 "register_operand" "=rk")
- (plus:GPI (mult:GPI (match_operand:GPI 1 "register_operand" "r")
- (match_operand:QI 2 "aarch64_pwr_2_<mode>" "n"))
- (match_operand:GPI 3 "register_operand" "r")))]
- ""
- "add\\t%<w>0, %<w>3, %<w>1, lsl %p2"
- [(set_attr "v8type" "alu_shift")
- (set_attr "mode" "<MODE>")]
-)
-
-(define_insn "*add_<optab><ALLX:mode>_<GPI:mode>"
- [(set (match_operand:GPI 0 "register_operand" "=rk")
- (plus:GPI (ANY_EXTEND:GPI (match_operand:ALLX 1 "register_operand" "r"))
- (match_operand:GPI 2 "register_operand" "r")))]
- ""
- "add\\t%<GPI:w>0, %<GPI:w>2, %<GPI:w>1, <su>xt<ALLX:size>"
- [(set_attr "v8type" "alu_ext")
- (set_attr "mode" "<GPI:MODE>")]
-)
-
-;; zero_extend version of above
-(define_insn "*add_<optab><SHORT:mode>_si_uxtw"
- [(set (match_operand:DI 0 "register_operand" "=rk")
- (zero_extend:DI
- (plus:SI (ANY_EXTEND:SI (match_operand:SHORT 1 "register_operand" "r"))
- (match_operand:GPI 2 "register_operand" "r"))))]
- ""
- "add\\t%w0, %w2, %w1, <su>xt<SHORT:size>"
- [(set_attr "v8type" "alu_ext")
- (set_attr "mode" "SI")]
-)
-
-(define_insn "*add_<optab><ALLX:mode>_shft_<GPI:mode>"
- [(set (match_operand:GPI 0 "register_operand" "=rk")
- (plus:GPI (ashift:GPI (ANY_EXTEND:GPI
- (match_operand:ALLX 1 "register_operand" "r"))
- (match_operand 2 "aarch64_imm3" "Ui3"))
- (match_operand:GPI 3 "register_operand" "r")))]
- ""
- "add\\t%<GPI:w>0, %<GPI:w>3, %<GPI:w>1, <su>xt<ALLX:size> %2"
- [(set_attr "v8type" "alu_ext")
- (set_attr "mode" "<GPI:MODE>")]
-)
-
-;; zero_extend version of above
-(define_insn "*add_<optab><SHORT:mode>_shft_si_uxtw"
- [(set (match_operand:DI 0 "register_operand" "=rk")
- (zero_extend:DI
- (plus:SI (ashift:SI (ANY_EXTEND:SI
- (match_operand:SHORT 1 "register_operand" "r"))
- (match_operand 2 "aarch64_imm3" "Ui3"))
- (match_operand:SI 3 "register_operand" "r"))))]
- ""
- "add\\t%w0, %w3, %w1, <su>xt<SHORT:size> %2"
- [(set_attr "v8type" "alu_ext")
- (set_attr "mode" "SI")]
-)
-
-(define_insn "*add_<optab><ALLX:mode>_mult_<GPI:mode>"
- [(set (match_operand:GPI 0 "register_operand" "=rk")
- (plus:GPI (mult:GPI (ANY_EXTEND:GPI
- (match_operand:ALLX 1 "register_operand" "r"))
- (match_operand 2 "aarch64_pwr_imm3" "Up3"))
- (match_operand:GPI 3 "register_operand" "r")))]
- ""
- "add\\t%<GPI:w>0, %<GPI:w>3, %<GPI:w>1, <su>xt<ALLX:size> %p2"
- [(set_attr "v8type" "alu_ext")
- (set_attr "mode" "<GPI:MODE>")]
-)
-
-;; zero_extend version of above
-(define_insn "*add_<optab><SHORT:mode>_mult_si_uxtw"
- [(set (match_operand:DI 0 "register_operand" "=rk")
- (zero_extend:DI (plus:SI (mult:SI (ANY_EXTEND:SI
- (match_operand:SHORT 1 "register_operand" "r"))
- (match_operand 2 "aarch64_pwr_imm3" "Up3"))
- (match_operand:SI 3 "register_operand" "r"))))]
- ""
- "add\\t%w0, %w3, %w1, <su>xt<SHORT:size> %p2"
- [(set_attr "v8type" "alu_ext")
- (set_attr "mode" "SI")]
-)
-
-(define_insn "*add_<optab><mode>_multp2"
- [(set (match_operand:GPI 0 "register_operand" "=rk")
- (plus:GPI (ANY_EXTRACT:GPI
- (mult:GPI (match_operand:GPI 1 "register_operand" "r")
- (match_operand 2 "aarch64_pwr_imm3" "Up3"))
- (match_operand 3 "const_int_operand" "n")
- (const_int 0))
- (match_operand:GPI 4 "register_operand" "r")))]
- "aarch64_is_extend_from_extract (<MODE>mode, operands[2], operands[3])"
- "add\\t%<w>0, %<w>4, %<w>1, <su>xt%e3 %p2"
- [(set_attr "v8type" "alu_ext")
- (set_attr "mode" "<MODE>")]
-)
-
-;; zero_extend version of above
-(define_insn "*add_<optab>si_multp2_uxtw"
- [(set (match_operand:DI 0 "register_operand" "=rk")
- (zero_extend:DI
- (plus:SI (ANY_EXTRACT:SI
- (mult:SI (match_operand:SI 1 "register_operand" "r")
- (match_operand 2 "aarch64_pwr_imm3" "Up3"))
- (match_operand 3 "const_int_operand" "n")
- (const_int 0))
- (match_operand:SI 4 "register_operand" "r"))))]
- "aarch64_is_extend_from_extract (SImode, operands[2], operands[3])"
- "add\\t%w0, %w4, %w1, <su>xt%e3 %p2"
- [(set_attr "v8type" "alu_ext")
- (set_attr "mode" "SI")]
-)
-
-(define_insn "*add<mode>3_carryin"
- [(set
- (match_operand:GPI 0 "register_operand" "=r")
- (plus:GPI (geu:GPI (reg:CC CC_REGNUM) (const_int 0))
- (plus:GPI
- (match_operand:GPI 1 "register_operand" "r")
- (match_operand:GPI 2 "register_operand" "r"))))]
- ""
- "adc\\t%<w>0, %<w>1, %<w>2"
- [(set_attr "v8type" "adc")
- (set_attr "mode" "<MODE>")]
-)
-
-;; zero_extend version of above
-(define_insn "*addsi3_carryin_uxtw"
- [(set
- (match_operand:DI 0 "register_operand" "=r")
- (zero_extend:DI
- (plus:SI (geu:SI (reg:CC CC_REGNUM) (const_int 0))
- (plus:SI
- (match_operand:SI 1 "register_operand" "r")
- (match_operand:SI 2 "register_operand" "r")))))]
- ""
- "adc\\t%w0, %w1, %w2"
- [(set_attr "v8type" "adc")
- (set_attr "mode" "SI")]
-)
-
-(define_insn "*add<mode>3_carryin_alt1"
- [(set
- (match_operand:GPI 0 "register_operand" "=r")
- (plus:GPI (plus:GPI
- (match_operand:GPI 1 "register_operand" "r")
- (match_operand:GPI 2 "register_operand" "r"))
- (geu:GPI (reg:CC CC_REGNUM) (const_int 0))))]
- ""
- "adc\\t%<w>0, %<w>1, %<w>2"
- [(set_attr "v8type" "adc")
- (set_attr "mode" "<MODE>")]
-)
-
-;; zero_extend version of above
-(define_insn "*addsi3_carryin_alt1_uxtw"
- [(set
- (match_operand:DI 0 "register_operand" "=r")
- (zero_extend:DI
- (plus:SI (plus:SI
- (match_operand:SI 1 "register_operand" "r")
- (match_operand:SI 2 "register_operand" "r"))
- (geu:SI (reg:CC CC_REGNUM) (const_int 0)))))]
- ""
- "adc\\t%w0, %w1, %w2"
- [(set_attr "v8type" "adc")
- (set_attr "mode" "SI")]
-)
-
-(define_insn "*add<mode>3_carryin_alt2"
- [(set
- (match_operand:GPI 0 "register_operand" "=r")
- (plus:GPI (plus:GPI
- (geu:GPI (reg:CC CC_REGNUM) (const_int 0))
- (match_operand:GPI 1 "register_operand" "r"))
- (match_operand:GPI 2 "register_operand" "r")))]
- ""
- "adc\\t%<w>0, %<w>1, %<w>2"
- [(set_attr "v8type" "adc")
- (set_attr "mode" "<MODE>")]
-)
-
-;; zero_extend version of above
-(define_insn "*addsi3_carryin_alt2_uxtw"
- [(set
- (match_operand:DI 0 "register_operand" "=r")
- (zero_extend:DI
- (plus:SI (plus:SI
- (geu:SI (reg:CC CC_REGNUM) (const_int 0))
- (match_operand:SI 1 "register_operand" "r"))
- (match_operand:SI 2 "register_operand" "r"))))]
- ""
- "adc\\t%w0, %w1, %w2"
- [(set_attr "v8type" "adc")
- (set_attr "mode" "SI")]
-)
-
-(define_insn "*add<mode>3_carryin_alt3"
- [(set
- (match_operand:GPI 0 "register_operand" "=r")
- (plus:GPI (plus:GPI
- (geu:GPI (reg:CC CC_REGNUM) (const_int 0))
- (match_operand:GPI 2 "register_operand" "r"))
- (match_operand:GPI 1 "register_operand" "r")))]
- ""
- "adc\\t%<w>0, %<w>1, %<w>2"
- [(set_attr "v8type" "adc")
- (set_attr "mode" "<MODE>")]
-)
-
-;; zero_extend version of above
-(define_insn "*addsi3_carryin_alt3_uxtw"
- [(set
- (match_operand:DI 0 "register_operand" "=r")
- (zero_extend:DI
- (plus:SI (plus:SI
- (geu:SI (reg:CC CC_REGNUM) (const_int 0))
- (match_operand:SI 2 "register_operand" "r"))
- (match_operand:SI 1 "register_operand" "r"))))]
- ""
- "adc\\t%w0, %w1, %w2"
- [(set_attr "v8type" "adc")
- (set_attr "mode" "SI")]
-)
-
-(define_insn "*add_uxt<mode>_multp2"
- [(set (match_operand:GPI 0 "register_operand" "=rk")
- (plus:GPI (and:GPI
- (mult:GPI (match_operand:GPI 1 "register_operand" "r")
- (match_operand 2 "aarch64_pwr_imm3" "Up3"))
- (match_operand 3 "const_int_operand" "n"))
- (match_operand:GPI 4 "register_operand" "r")))]
- "aarch64_uxt_size (exact_log2 (INTVAL (operands[2])), INTVAL (operands[3])) != 0"
- "*
- operands[3] = GEN_INT (aarch64_uxt_size (exact_log2 (INTVAL (operands[2])),
- INTVAL (operands[3])));
- return \"add\t%<w>0, %<w>4, %<w>1, uxt%e3 %p2\";"
- [(set_attr "v8type" "alu_ext")
- (set_attr "mode" "<MODE>")]
-)
-
-;; zero_extend version of above
-(define_insn "*add_uxtsi_multp2_uxtw"
- [(set (match_operand:DI 0 "register_operand" "=rk")
- (zero_extend:DI
- (plus:SI (and:SI
- (mult:SI (match_operand:SI 1 "register_operand" "r")
- (match_operand 2 "aarch64_pwr_imm3" "Up3"))
- (match_operand 3 "const_int_operand" "n"))
- (match_operand:SI 4 "register_operand" "r"))))]
- "aarch64_uxt_size (exact_log2 (INTVAL (operands[2])), INTVAL (operands[3])) != 0"
- "*
- operands[3] = GEN_INT (aarch64_uxt_size (exact_log2 (INTVAL (operands[2])),
- INTVAL (operands[3])));
- return \"add\t%w0, %w4, %w1, uxt%e3 %p2\";"
- [(set_attr "v8type" "alu_ext")
- (set_attr "mode" "SI")]
-)
-
-(define_insn "subsi3"
- [(set (match_operand:SI 0 "register_operand" "=rk")
- (minus:SI (match_operand:SI 1 "register_operand" "r")
- (match_operand:SI 2 "register_operand" "r")))]
- ""
- "sub\\t%w0, %w1, %w2"
- [(set_attr "v8type" "alu")
- (set_attr "mode" "SI")]
-)
-
-;; zero_extend version of above
-(define_insn "*subsi3_uxtw"
- [(set (match_operand:DI 0 "register_operand" "=rk")
- (zero_extend:DI
- (minus:SI (match_operand:SI 1 "register_operand" "r")
- (match_operand:SI 2 "register_operand" "r"))))]
- ""
- "sub\\t%w0, %w1, %w2"
- [(set_attr "v8type" "alu")
- (set_attr "mode" "SI")]
-)
-
-(define_insn "subdi3"
- [(set (match_operand:DI 0 "register_operand" "=rk,!w")
- (minus:DI (match_operand:DI 1 "register_operand" "r,!w")
- (match_operand:DI 2 "register_operand" "r,!w")))]
- ""
- "@
- sub\\t%x0, %x1, %x2
- sub\\t%d0, %d1, %d2"
- [(set_attr "v8type" "alu")
- (set_attr "mode" "DI")
- (set_attr "simd" "*,yes")]
-)
-
-
-(define_insn "*sub<mode>3_compare0"
- [(set (reg:CC_NZ CC_REGNUM)
- (compare:CC_NZ (minus:GPI (match_operand:GPI 1 "register_operand" "r")
- (match_operand:GPI 2 "register_operand" "r"))
- (const_int 0)))
- (set (match_operand:GPI 0 "register_operand" "=r")
- (minus:GPI (match_dup 1) (match_dup 2)))]
- ""
- "subs\\t%<w>0, %<w>1, %<w>2"
- [(set_attr "v8type" "alus")
- (set_attr "mode" "<MODE>")]
-)
-
-;; zero_extend version of above
-(define_insn "*subsi3_compare0_uxtw"
- [(set (reg:CC_NZ CC_REGNUM)
- (compare:CC_NZ (minus:SI (match_operand:SI 1 "register_operand" "r")
- (match_operand:SI 2 "register_operand" "r"))
- (const_int 0)))
- (set (match_operand:DI 0 "register_operand" "=r")
- (zero_extend:DI (minus:SI (match_dup 1) (match_dup 2))))]
- ""
- "subs\\t%w0, %w1, %w2"
- [(set_attr "v8type" "alus")
- (set_attr "mode" "SI")]
-)
-
-(define_insn "*sub_<shift>_<mode>"
- [(set (match_operand:GPI 0 "register_operand" "=rk")
- (minus:GPI (match_operand:GPI 3 "register_operand" "r")
- (ASHIFT:GPI
- (match_operand:GPI 1 "register_operand" "r")
- (match_operand:QI 2 "aarch64_shift_imm_<mode>" "n"))))]
- ""
- "sub\\t%<w>0, %<w>3, %<w>1, <shift> %2"
- [(set_attr "v8type" "alu_shift")
- (set_attr "mode" "<MODE>")]
-)
-
-;; zero_extend version of above
-(define_insn "*sub_<shift>_si_uxtw"
- [(set (match_operand:DI 0 "register_operand" "=rk")
- (zero_extend:DI
- (minus:SI (match_operand:SI 3 "register_operand" "r")
- (ASHIFT:SI
- (match_operand:SI 1 "register_operand" "r")
- (match_operand:QI 2 "aarch64_shift_imm_si" "n")))))]
- ""
- "sub\\t%w0, %w3, %w1, <shift> %2"
- [(set_attr "v8type" "alu_shift")
- (set_attr "mode" "SI")]
-)
-
-(define_insn "*sub_mul_imm_<mode>"
- [(set (match_operand:GPI 0 "register_operand" "=rk")
- (minus:GPI (match_operand:GPI 3 "register_operand" "r")
- (mult:GPI
- (match_operand:GPI 1 "register_operand" "r")
- (match_operand:QI 2 "aarch64_pwr_2_<mode>" "n"))))]
- ""
- "sub\\t%<w>0, %<w>3, %<w>1, lsl %p2"
- [(set_attr "v8type" "alu_shift")
- (set_attr "mode" "<MODE>")]
-)
-
-;; zero_extend version of above
-(define_insn "*sub_mul_imm_si_uxtw"
- [(set (match_operand:DI 0 "register_operand" "=rk")
- (zero_extend:DI
- (minus:SI (match_operand:SI 3 "register_operand" "r")
- (mult:SI
- (match_operand:SI 1 "register_operand" "r")
- (match_operand:QI 2 "aarch64_pwr_2_si" "n")))))]
- ""
- "sub\\t%w0, %w3, %w1, lsl %p2"
- [(set_attr "v8type" "alu_shift")
- (set_attr "mode" "SI")]
-)
-
-(define_insn "*sub_<optab><ALLX:mode>_<GPI:mode>"
- [(set (match_operand:GPI 0 "register_operand" "=rk")
- (minus:GPI (match_operand:GPI 1 "register_operand" "r")
- (ANY_EXTEND:GPI
- (match_operand:ALLX 2 "register_operand" "r"))))]
- ""
- "sub\\t%<GPI:w>0, %<GPI:w>1, %<GPI:w>2, <su>xt<ALLX:size>"
- [(set_attr "v8type" "alu_ext")
- (set_attr "mode" "<GPI:MODE>")]
-)
-
-;; zero_extend version of above
-(define_insn "*sub_<optab><SHORT:mode>_si_uxtw"
- [(set (match_operand:DI 0 "register_operand" "=rk")
- (zero_extend:DI
- (minus:SI (match_operand:SI 1 "register_operand" "r")
- (ANY_EXTEND:SI
- (match_operand:SHORT 2 "register_operand" "r")))))]
- ""
- "sub\\t%w0, %w1, %w2, <su>xt<SHORT:size>"
- [(set_attr "v8type" "alu_ext")
- (set_attr "mode" "SI")]
-)
-
-(define_insn "*sub_<optab><ALLX:mode>_shft_<GPI:mode>"
- [(set (match_operand:GPI 0 "register_operand" "=rk")
- (minus:GPI (match_operand:GPI 1 "register_operand" "r")
- (ashift:GPI (ANY_EXTEND:GPI
- (match_operand:ALLX 2 "register_operand" "r"))
- (match_operand 3 "aarch64_imm3" "Ui3"))))]
- ""
- "sub\\t%<GPI:w>0, %<GPI:w>1, %<GPI:w>2, <su>xt<ALLX:size> %3"
- [(set_attr "v8type" "alu_ext")
- (set_attr "mode" "<GPI:MODE>")]
-)
-
-;; zero_extend version of above
-(define_insn "*sub_<optab><SHORT:mode>_shft_si_uxtw"
- [(set (match_operand:DI 0 "register_operand" "=rk")
- (zero_extend:DI
- (minus:SI (match_operand:SI 1 "register_operand" "r")
- (ashift:SI (ANY_EXTEND:SI
- (match_operand:SHORT 2 "register_operand" "r"))
- (match_operand 3 "aarch64_imm3" "Ui3")))))]
- ""
- "sub\\t%w0, %w1, %w2, <su>xt<SHORT:size> %3"
- [(set_attr "v8type" "alu_ext")
- (set_attr "mode" "SI")]
-)
-
-(define_insn "*sub_<optab><mode>_multp2"
- [(set (match_operand:GPI 0 "register_operand" "=rk")
- (minus:GPI (match_operand:GPI 4 "register_operand" "r")
- (ANY_EXTRACT:GPI
- (mult:GPI (match_operand:GPI 1 "register_operand" "r")
- (match_operand 2 "aarch64_pwr_imm3" "Up3"))
- (match_operand 3 "const_int_operand" "n")
- (const_int 0))))]
- "aarch64_is_extend_from_extract (<MODE>mode, operands[2], operands[3])"
- "sub\\t%<w>0, %<w>4, %<w>1, <su>xt%e3 %p2"
- [(set_attr "v8type" "alu_ext")
- (set_attr "mode" "<MODE>")]
-)
-
-;; zero_extend version of above
-(define_insn "*sub_<optab>si_multp2_uxtw"
- [(set (match_operand:DI 0 "register_operand" "=rk")
- (zero_extend:DI
- (minus:SI (match_operand:SI 4 "register_operand" "r")
- (ANY_EXTRACT:SI
- (mult:SI (match_operand:SI 1 "register_operand" "r")
- (match_operand 2 "aarch64_pwr_imm3" "Up3"))
- (match_operand 3 "const_int_operand" "n")
- (const_int 0)))))]
- "aarch64_is_extend_from_extract (SImode, operands[2], operands[3])"
- "sub\\t%w0, %w4, %w1, <su>xt%e3 %p2"
- [(set_attr "v8type" "alu_ext")
- (set_attr "mode" "SI")]
-)
-
-(define_insn "*sub_uxt<mode>_multp2"
- [(set (match_operand:GPI 0 "register_operand" "=rk")
- (minus:GPI (match_operand:GPI 4 "register_operand" "r")
- (and:GPI
- (mult:GPI (match_operand:GPI 1 "register_operand" "r")
- (match_operand 2 "aarch64_pwr_imm3" "Up3"))
- (match_operand 3 "const_int_operand" "n"))))]
- "aarch64_uxt_size (exact_log2 (INTVAL (operands[2])),INTVAL (operands[3])) != 0"
- "*
- operands[3] = GEN_INT (aarch64_uxt_size (exact_log2 (INTVAL (operands[2])),
- INTVAL (operands[3])));
- return \"sub\t%<w>0, %<w>4, %<w>1, uxt%e3 %p2\";"
- [(set_attr "v8type" "alu_ext")
- (set_attr "mode" "<MODE>")]
-)
-
-;; zero_extend version of above
-(define_insn "*sub_uxtsi_multp2_uxtw"
- [(set (match_operand:DI 0 "register_operand" "=rk")
- (zero_extend:DI
- (minus:SI (match_operand:SI 4 "register_operand" "r")
- (and:SI
- (mult:SI (match_operand:SI 1 "register_operand" "r")
- (match_operand 2 "aarch64_pwr_imm3" "Up3"))
- (match_operand 3 "const_int_operand" "n")))))]
- "aarch64_uxt_size (exact_log2 (INTVAL (operands[2])),INTVAL (operands[3])) != 0"
- "*
- operands[3] = GEN_INT (aarch64_uxt_size (exact_log2 (INTVAL (operands[2])),
- INTVAL (operands[3])));
- return \"sub\t%w0, %w4, %w1, uxt%e3 %p2\";"
- [(set_attr "v8type" "alu_ext")
- (set_attr "mode" "SI")]
-)
-
-(define_insn "neg<mode>2"
- [(set (match_operand:GPI 0 "register_operand" "=r")
- (neg:GPI (match_operand:GPI 1 "register_operand" "r")))]
- ""
- "neg\\t%<w>0, %<w>1"
- [(set_attr "v8type" "alu")
- (set_attr "mode" "<MODE>")]
-)
-
-;; zero_extend version of above
-(define_insn "*negsi2_uxtw"
- [(set (match_operand:DI 0 "register_operand" "=r")
- (zero_extend:DI (neg:SI (match_operand:SI 1 "register_operand" "r"))))]
- ""
- "neg\\t%w0, %w1"
- [(set_attr "v8type" "alu")
- (set_attr "mode" "SI")]
-)
-
-(define_insn "*neg<mode>2_compare0"
- [(set (reg:CC_NZ CC_REGNUM)
- (compare:CC_NZ (neg:GPI (match_operand:GPI 1 "register_operand" "r"))
- (const_int 0)))
- (set (match_operand:GPI 0 "register_operand" "=r")
- (neg:GPI (match_dup 1)))]
- ""
- "negs\\t%<w>0, %<w>1"
- [(set_attr "v8type" "alus")
- (set_attr "mode" "<MODE>")]
-)
-
-;; zero_extend version of above
-(define_insn "*negsi2_compare0_uxtw"
- [(set (reg:CC_NZ CC_REGNUM)
- (compare:CC_NZ (neg:SI (match_operand:SI 1 "register_operand" "r"))
- (const_int 0)))
- (set (match_operand:DI 0 "register_operand" "=r")
- (zero_extend:DI (neg:SI (match_dup 1))))]
- ""
- "negs\\t%w0, %w1"
- [(set_attr "v8type" "alus")
- (set_attr "mode" "SI")]
-)
-
-(define_insn "*neg_<shift>_<mode>2"
- [(set (match_operand:GPI 0 "register_operand" "=r")
- (neg:GPI (ASHIFT:GPI
- (match_operand:GPI 1 "register_operand" "r")
- (match_operand:QI 2 "aarch64_shift_imm_<mode>" "n"))))]
- ""
- "neg\\t%<w>0, %<w>1, <shift> %2"
- [(set_attr "v8type" "alu_shift")
- (set_attr "mode" "<MODE>")]
-)
-
-;; zero_extend version of above
-(define_insn "*neg_<shift>_si2_uxtw"
- [(set (match_operand:DI 0 "register_operand" "=r")
- (zero_extend:DI
- (neg:SI (ASHIFT:SI
- (match_operand:SI 1 "register_operand" "r")
- (match_operand:QI 2 "aarch64_shift_imm_si" "n")))))]
- ""
- "neg\\t%w0, %w1, <shift> %2"
- [(set_attr "v8type" "alu_shift")
- (set_attr "mode" "SI")]
-)
-
-(define_insn "*neg_mul_imm_<mode>2"
- [(set (match_operand:GPI 0 "register_operand" "=r")
- (neg:GPI (mult:GPI
- (match_operand:GPI 1 "register_operand" "r")
- (match_operand:QI 2 "aarch64_pwr_2_<mode>" "n"))))]
- ""
- "neg\\t%<w>0, %<w>1, lsl %p2"
- [(set_attr "v8type" "alu_shift")
- (set_attr "mode" "<MODE>")]
-)
-
-;; zero_extend version of above
-(define_insn "*neg_mul_imm_si2_uxtw"
- [(set (match_operand:DI 0 "register_operand" "=r")
- (zero_extend:DI
- (neg:SI (mult:SI
- (match_operand:SI 1 "register_operand" "r")
- (match_operand:QI 2 "aarch64_pwr_2_si" "n")))))]
- ""
- "neg\\t%w0, %w1, lsl %p2"
- [(set_attr "v8type" "alu_shift")
- (set_attr "mode" "SI")]
-)
-
-(define_insn "mul<mode>3"
- [(set (match_operand:GPI 0 "register_operand" "=r")
- (mult:GPI (match_operand:GPI 1 "register_operand" "r")
- (match_operand:GPI 2 "register_operand" "r")))]
- ""
- "mul\\t%<w>0, %<w>1, %<w>2"
- [(set_attr "v8type" "mult")
- (set_attr "mode" "<MODE>")]
-)
-
-;; zero_extend version of above
-(define_insn "*mulsi3_uxtw"
- [(set (match_operand:DI 0 "register_operand" "=r")
- (zero_extend:DI
- (mult:SI (match_operand:SI 1 "register_operand" "r")
- (match_operand:SI 2 "register_operand" "r"))))]
- ""
- "mul\\t%w0, %w1, %w2"
- [(set_attr "v8type" "mult")
- (set_attr "mode" "SI")]
-)
-
-(define_insn "*madd<mode>"
- [(set (match_operand:GPI 0 "register_operand" "=r")
- (plus:GPI (mult:GPI (match_operand:GPI 1 "register_operand" "r")
- (match_operand:GPI 2 "register_operand" "r"))
- (match_operand:GPI 3 "register_operand" "r")))]
- ""
- "madd\\t%<w>0, %<w>1, %<w>2, %<w>3"
- [(set_attr "v8type" "madd")
- (set_attr "mode" "<MODE>")]
-)
-
-;; zero_extend version of above
-(define_insn "*maddsi_uxtw"
- [(set (match_operand:DI 0 "register_operand" "=r")
- (zero_extend:DI
- (plus:SI (mult:SI (match_operand:SI 1 "register_operand" "r")
- (match_operand:SI 2 "register_operand" "r"))
- (match_operand:SI 3 "register_operand" "r"))))]
- ""
- "madd\\t%w0, %w1, %w2, %w3"
- [(set_attr "v8type" "madd")
- (set_attr "mode" "SI")]
-)
-
-(define_insn "*msub<mode>"
- [(set (match_operand:GPI 0 "register_operand" "=r")
- (minus:GPI (match_operand:GPI 3 "register_operand" "r")
- (mult:GPI (match_operand:GPI 1 "register_operand" "r")
- (match_operand:GPI 2 "register_operand" "r"))))]
-
- ""
- "msub\\t%<w>0, %<w>1, %<w>2, %<w>3"
- [(set_attr "v8type" "madd")
- (set_attr "mode" "<MODE>")]
-)
-
-;; zero_extend version of above
-(define_insn "*msubsi_uxtw"
- [(set (match_operand:DI 0 "register_operand" "=r")
- (zero_extend:DI
- (minus:SI (match_operand:SI 3 "register_operand" "r")
- (mult:SI (match_operand:SI 1 "register_operand" "r")
- (match_operand:SI 2 "register_operand" "r")))))]
-
- ""
- "msub\\t%w0, %w1, %w2, %w3"
- [(set_attr "v8type" "madd")
- (set_attr "mode" "SI")]
-)
-
-(define_insn "*mul<mode>_neg"
- [(set (match_operand:GPI 0 "register_operand" "=r")
- (mult:GPI (neg:GPI (match_operand:GPI 1 "register_operand" "r"))
- (match_operand:GPI 2 "register_operand" "r")))]
-
- ""
- "mneg\\t%<w>0, %<w>1, %<w>2"
- [(set_attr "v8type" "mult")
- (set_attr "mode" "<MODE>")]
-)
-
-;; zero_extend version of above
-(define_insn "*mulsi_neg_uxtw"
- [(set (match_operand:DI 0 "register_operand" "=r")
- (zero_extend:DI
- (mult:SI (neg:SI (match_operand:SI 1 "register_operand" "r"))
- (match_operand:SI 2 "register_operand" "r"))))]
-
- ""
- "mneg\\t%w0, %w1, %w2"
- [(set_attr "v8type" "mult")
- (set_attr "mode" "SI")]
-)
-
-(define_insn "<su_optab>mulsidi3"
- [(set (match_operand:DI 0 "register_operand" "=r")
- (mult:DI (ANY_EXTEND:DI (match_operand:SI 1 "register_operand" "r"))
- (ANY_EXTEND:DI (match_operand:SI 2 "register_operand" "r"))))]
- ""
- "<su>mull\\t%0, %w1, %w2"
- [(set_attr "v8type" "mull")
- (set_attr "mode" "DI")]
-)
-
-(define_insn "<su_optab>maddsidi4"
- [(set (match_operand:DI 0 "register_operand" "=r")
- (plus:DI (mult:DI
- (ANY_EXTEND:DI (match_operand:SI 1 "register_operand" "r"))
- (ANY_EXTEND:DI (match_operand:SI 2 "register_operand" "r")))
- (match_operand:DI 3 "register_operand" "r")))]
- ""
- "<su>maddl\\t%0, %w1, %w2, %3"
- [(set_attr "v8type" "maddl")
- (set_attr "mode" "DI")]
-)
-
-(define_insn "<su_optab>msubsidi4"
- [(set (match_operand:DI 0 "register_operand" "=r")
- (minus:DI
- (match_operand:DI 3 "register_operand" "r")
- (mult:DI (ANY_EXTEND:DI (match_operand:SI 1 "register_operand" "r"))
- (ANY_EXTEND:DI
- (match_operand:SI 2 "register_operand" "r")))))]
- ""
- "<su>msubl\\t%0, %w1, %w2, %3"
- [(set_attr "v8type" "maddl")
- (set_attr "mode" "DI")]
-)
-
-(define_insn "*<su_optab>mulsidi_neg"
- [(set (match_operand:DI 0 "register_operand" "=r")
- (mult:DI (neg:DI
- (ANY_EXTEND:DI (match_operand:SI 1 "register_operand" "r")))
- (ANY_EXTEND:DI (match_operand:SI 2 "register_operand" "r"))))]
- ""
- "<su>mnegl\\t%0, %w1, %w2"
- [(set_attr "v8type" "mull")
- (set_attr "mode" "DI")]
-)
-
-(define_insn "<su>muldi3_highpart"
- [(set (match_operand:DI 0 "register_operand" "=r")
- (truncate:DI
- (lshiftrt:TI
- (mult:TI
- (ANY_EXTEND:TI (match_operand:DI 1 "register_operand" "r"))
- (ANY_EXTEND:TI (match_operand:DI 2 "register_operand" "r")))
- (const_int 64))))]
- ""
- "<su>mulh\\t%0, %1, %2"
- [(set_attr "v8type" "mulh")
- (set_attr "mode" "DI")]
-)
-
-(define_insn "<su_optab>div<mode>3"
- [(set (match_operand:GPI 0 "register_operand" "=r")
- (ANY_DIV:GPI (match_operand:GPI 1 "register_operand" "r")
- (match_operand:GPI 2 "register_operand" "r")))]
- ""
- "<su>div\\t%<w>0, %<w>1, %<w>2"
- [(set_attr "v8type" "<su>div")
- (set_attr "mode" "<MODE>")]
-)
-
-;; zero_extend version of above
-(define_insn "*<su_optab>divsi3_uxtw"
- [(set (match_operand:DI 0 "register_operand" "=r")
- (zero_extend:DI
- (ANY_DIV:SI (match_operand:SI 1 "register_operand" "r")
- (match_operand:SI 2 "register_operand" "r"))))]
- ""
- "<su>div\\t%w0, %w1, %w2"
- [(set_attr "v8type" "<su>div")
- (set_attr "mode" "SI")]
-)
-
-;; -------------------------------------------------------------------
-;; Comparison insns
-;; -------------------------------------------------------------------
-
-(define_insn "*cmp<mode>"
- [(set (reg:CC CC_REGNUM)
- (compare:CC (match_operand:GPI 0 "register_operand" "r,r")
- (match_operand:GPI 1 "aarch64_plus_operand" "rI,J")))]
- ""
- "@
- cmp\\t%<w>0, %<w>1
- cmn\\t%<w>0, #%n1"
- [(set_attr "v8type" "alus")
- (set_attr "mode" "<MODE>")]
-)
-
-(define_insn "*cmp<mode>"
- [(set (reg:CCFP CC_REGNUM)
- (compare:CCFP (match_operand:GPF 0 "register_operand" "w,w")
- (match_operand:GPF 1 "aarch64_fp_compare_operand" "Y,w")))]
- "TARGET_FLOAT"
- "@
- fcmp\\t%<s>0, #0.0
- fcmp\\t%<s>0, %<s>1"
- [(set_attr "v8type" "fcmp")
- (set_attr "mode" "<MODE>")]
-)
-
-(define_insn "*cmpe<mode>"
- [(set (reg:CCFPE CC_REGNUM)
- (compare:CCFPE (match_operand:GPF 0 "register_operand" "w,w")
- (match_operand:GPF 1 "aarch64_fp_compare_operand" "Y,w")))]
- "TARGET_FLOAT"
- "@
- fcmpe\\t%<s>0, #0.0
- fcmpe\\t%<s>0, %<s>1"
- [(set_attr "v8type" "fcmp")
- (set_attr "mode" "<MODE>")]
-)
-
-(define_insn "*cmp_swp_<shift>_reg<mode>"
- [(set (reg:CC_SWP CC_REGNUM)
- (compare:CC_SWP (ASHIFT:GPI
- (match_operand:GPI 0 "register_operand" "r")
- (match_operand:QI 1 "aarch64_shift_imm_<mode>" "n"))
- (match_operand:GPI 2 "aarch64_reg_or_zero" "rZ")))]
- ""
- "cmp\\t%<w>2, %<w>0, <shift> %1"
- [(set_attr "v8type" "alus_shift")
- (set_attr "mode" "<MODE>")]
-)
-
-(define_insn "*cmp_swp_<optab><ALLX:mode>_reg<GPI:mode>"
- [(set (reg:CC_SWP CC_REGNUM)
- (compare:CC_SWP (ANY_EXTEND:GPI
- (match_operand:ALLX 0 "register_operand" "r"))
- (match_operand:GPI 1 "register_operand" "r")))]
- ""
- "cmp\\t%<GPI:w>1, %<GPI:w>0, <su>xt<ALLX:size>"
- [(set_attr "v8type" "alus_ext")
- (set_attr "mode" "<GPI:MODE>")]
-)
-
-
-;; -------------------------------------------------------------------
-;; Store-flag and conditional select insns
-;; -------------------------------------------------------------------
-
-(define_expand "cstore<mode>4"
- [(set (match_operand:SI 0 "register_operand" "")
- (match_operator:SI 1 "aarch64_comparison_operator"
- [(match_operand:GPI 2 "register_operand" "")
- (match_operand:GPI 3 "aarch64_plus_operand" "")]))]
- ""
- "
- operands[2] = aarch64_gen_compare_reg (GET_CODE (operands[1]), operands[2],
- operands[3]);
- operands[3] = const0_rtx;
- "
-)
-
-(define_expand "cstore<mode>4"
- [(set (match_operand:SI 0 "register_operand" "")
- (match_operator:SI 1 "aarch64_comparison_operator"
- [(match_operand:GPF 2 "register_operand" "")
- (match_operand:GPF 3 "register_operand" "")]))]
- ""
- "
- operands[2] = aarch64_gen_compare_reg (GET_CODE (operands[1]), operands[2],
- operands[3]);
- operands[3] = const0_rtx;
- "
-)
-
-(define_insn "*cstore<mode>_insn"
- [(set (match_operand:ALLI 0 "register_operand" "=r")
- (match_operator:ALLI 1 "aarch64_comparison_operator"
- [(match_operand 2 "cc_register" "") (const_int 0)]))]
- ""
- "cset\\t%<w>0, %m1"
- [(set_attr "v8type" "csel")
- (set_attr "mode" "<MODE>")]
-)
-
-;; zero_extend version of the above
-(define_insn "*cstoresi_insn_uxtw"
- [(set (match_operand:DI 0 "register_operand" "=r")
- (zero_extend:DI
- (match_operator:SI 1 "aarch64_comparison_operator"
- [(match_operand 2 "cc_register" "") (const_int 0)])))]
- ""
- "cset\\t%w0, %m1"
- [(set_attr "v8type" "csel")
- (set_attr "mode" "SI")]
-)
-
-(define_insn "*cstore<mode>_neg"
- [(set (match_operand:ALLI 0 "register_operand" "=r")
- (neg:ALLI (match_operator:ALLI 1 "aarch64_comparison_operator"
- [(match_operand 2 "cc_register" "") (const_int 0)])))]
- ""
- "csetm\\t%<w>0, %m1"
- [(set_attr "v8type" "csel")
- (set_attr "mode" "<MODE>")]
-)
-
-;; zero_extend version of the above
-(define_insn "*cstoresi_neg_uxtw"
- [(set (match_operand:DI 0 "register_operand" "=r")
- (zero_extend:DI
- (neg:SI (match_operator:SI 1 "aarch64_comparison_operator"
- [(match_operand 2 "cc_register" "") (const_int 0)]))))]
- ""
- "csetm\\t%w0, %m1"
- [(set_attr "v8type" "csel")
- (set_attr "mode" "SI")]
-)
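
The cstore expanders and insns above materialise a comparison result as an integer: the comparison itself only sets the flags, and cset or csetm then turns the flag into 0/1 or 0/-1. A hedged illustration of the source shapes this targets; the sequences in the comments are the usual -O2 output, not a guarantee.

int
is_greater (int a, int b)
{
  return a > b;                 /* expected: cmp w0, w1 ; cset w0, gt */
}

long
mask_if_greater (long a, long b)
{
  return -(long) (a > b);       /* the csetm (*cstore<mode>_neg) shape */
}
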
-
-(define_expand "cmov<mode>6"
- [(set (match_operand:GPI 0 "register_operand" "")
- (if_then_else:GPI
- (match_operator 1 "aarch64_comparison_operator"
- [(match_operand:GPI 2 "register_operand" "")
- (match_operand:GPI 3 "aarch64_plus_operand" "")])
- (match_operand:GPI 4 "register_operand" "")
- (match_operand:GPI 5 "register_operand" "")))]
- ""
- "
- operands[2] = aarch64_gen_compare_reg (GET_CODE (operands[1]), operands[2],
- operands[3]);
- operands[3] = const0_rtx;
- "
-)
-
-(define_expand "cmov<mode>6"
- [(set (match_operand:GPF 0 "register_operand" "")
- (if_then_else:GPF
- (match_operator 1 "aarch64_comparison_operator"
- [(match_operand:GPF 2 "register_operand" "")
- (match_operand:GPF 3 "register_operand" "")])
- (match_operand:GPF 4 "register_operand" "")
- (match_operand:GPF 5 "register_operand" "")))]
- ""
- "
- operands[2] = aarch64_gen_compare_reg (GET_CODE (operands[1]), operands[2],
- operands[3]);
- operands[3] = const0_rtx;
- "
-)
-
-(define_insn "*cmov<mode>_insn"
- [(set (match_operand:ALLI 0 "register_operand" "=r,r,r,r,r,r,r")
- (if_then_else:ALLI
- (match_operator 1 "aarch64_comparison_operator"
- [(match_operand 2 "cc_register" "") (const_int 0)])
- (match_operand:ALLI 3 "aarch64_reg_zero_or_m1_or_1" "rZ,rZ,UsM,rZ,Ui1,UsM,Ui1")
- (match_operand:ALLI 4 "aarch64_reg_zero_or_m1_or_1" "rZ,UsM,rZ,Ui1,rZ,UsM,Ui1")))]
- "!((operands[3] == const1_rtx && operands[4] == constm1_rtx)
- || (operands[3] == constm1_rtx && operands[4] == const1_rtx))"
- ;; Final two alternatives should be unreachable, but included for completeness
- "@
- csel\\t%<w>0, %<w>3, %<w>4, %m1
- csinv\\t%<w>0, %<w>3, <w>zr, %m1
- csinv\\t%<w>0, %<w>4, <w>zr, %M1
- csinc\\t%<w>0, %<w>3, <w>zr, %m1
- csinc\\t%<w>0, %<w>4, <w>zr, %M1
- mov\\t%<w>0, -1
- mov\\t%<w>0, 1"
- [(set_attr "v8type" "csel")
- (set_attr "mode" "<MODE>")]
-)
-
-;; zero_extend version of above
-(define_insn "*cmovsi_insn_uxtw"
- [(set (match_operand:DI 0 "register_operand" "=r,r,r,r,r,r,r")
- (zero_extend:DI
- (if_then_else:SI
- (match_operator 1 "aarch64_comparison_operator"
- [(match_operand 2 "cc_register" "") (const_int 0)])
- (match_operand:SI 3 "aarch64_reg_zero_or_m1_or_1" "rZ,rZ,UsM,rZ,Ui1,UsM,Ui1")
- (match_operand:SI 4 "aarch64_reg_zero_or_m1_or_1" "rZ,UsM,rZ,Ui1,rZ,UsM,Ui1"))))]
- "!((operands[3] == const1_rtx && operands[4] == constm1_rtx)
- || (operands[3] == constm1_rtx && operands[4] == const1_rtx))"
- ;; Final two alternatives should be unreachable, but included for completeness
- "@
- csel\\t%w0, %w3, %w4, %m1
- csinv\\t%w0, %w3, wzr, %m1
- csinv\\t%w0, %w4, wzr, %M1
- csinc\\t%w0, %w3, wzr, %m1
- csinc\\t%w0, %w4, wzr, %M1
- mov\\t%w0, -1
- mov\\t%w0, 1"
- [(set_attr "v8type" "csel")
- (set_attr "mode" "SI")]
-)
-
-(define_insn "*cmov<mode>_insn"
- [(set (match_operand:GPF 0 "register_operand" "=w")
- (if_then_else:GPF
- (match_operator 1 "aarch64_comparison_operator"
- [(match_operand 2 "cc_register" "") (const_int 0)])
- (match_operand:GPF 3 "register_operand" "w")
- (match_operand:GPF 4 "register_operand" "w")))]
- "TARGET_FLOAT"
- "fcsel\\t%<s>0, %<s>3, %<s>4, %m1"
- [(set_attr "v8type" "fcsel")
- (set_attr "mode" "<MODE>")]
-)
-
-(define_expand "mov<mode>cc"
- [(set (match_operand:ALLI 0 "register_operand" "")
- (if_then_else:ALLI (match_operand 1 "aarch64_comparison_operator" "")
- (match_operand:ALLI 2 "register_operand" "")
- (match_operand:ALLI 3 "register_operand" "")))]
- ""
- {
- rtx ccreg;
- enum rtx_code code = GET_CODE (operands[1]);
-
- if (code == UNEQ || code == LTGT)
- FAIL;
-
- ccreg = aarch64_gen_compare_reg (code, XEXP (operands[1], 0),
- XEXP (operands[1], 1));
- operands[1] = gen_rtx_fmt_ee (code, VOIDmode, ccreg, const0_rtx);
- }
-)
-
-(define_expand "mov<GPF:mode><GPI:mode>cc"
- [(set (match_operand:GPI 0 "register_operand" "")
- (if_then_else:GPI (match_operand 1 "aarch64_comparison_operator" "")
- (match_operand:GPF 2 "register_operand" "")
- (match_operand:GPF 3 "register_operand" "")))]
- ""
- {
- rtx ccreg;
- enum rtx_code code = GET_CODE (operands[1]);
-
- if (code == UNEQ || code == LTGT)
- FAIL;
-
- ccreg = aarch64_gen_compare_reg (code, XEXP (operands[1], 0),
- XEXP (operands[1], 1));
- operands[1] = gen_rtx_fmt_ee (code, VOIDmode, ccreg, const0_rtx);
- }
-)
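
The mov<mode>cc expanders above implement branchless conditional selection: the comparison becomes a flag-setting compare and the selection itself becomes csel (or fcsel for floating-point data). A hedged illustration of the source shapes this targets; the sequences in the comments are the expected -O2 output, not a guarantee.

int
select_int (int c, int a, int b)
{
  return c > 0 ? a : b;         /* expected: cmp w0, 0 ; csel w0, w1, w2, gt */
}

double
select_double (int c, double a, double b)
{
  return c ? a : b;             /* expected to use fcsel */
}
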
-
-(define_insn "*csinc2<mode>_insn"
- [(set (match_operand:GPI 0 "register_operand" "=r")
- (plus:GPI (match_operator:GPI 2 "aarch64_comparison_operator"
- [(match_operand:CC 3 "cc_register" "") (const_int 0)])
- (match_operand:GPI 1 "register_operand" "r")))]
- ""
- "csinc\\t%<w>0, %<w>1, %<w>1, %M2"
- [(set_attr "v8type" "csel")
- (set_attr "mode" "<MODE>")])
-
-(define_insn "csinc3<mode>_insn"
- [(set (match_operand:GPI 0 "register_operand" "=r")
- (if_then_else:GPI
- (match_operator:GPI 1 "aarch64_comparison_operator"
- [(match_operand:CC 2 "cc_register" "") (const_int 0)])
- (plus:GPI (match_operand:GPI 3 "register_operand" "r")
- (const_int 1))
- (match_operand:GPI 4 "aarch64_reg_or_zero" "rZ")))]
- ""
- "csinc\\t%<w>0, %<w>4, %<w>3, %M1"
- [(set_attr "v8type" "csel")
- (set_attr "mode" "<MODE>")]
-)
-
-(define_insn "*csinv3<mode>_insn"
- [(set (match_operand:GPI 0 "register_operand" "=r")
- (if_then_else:GPI
- (match_operator:GPI 1 "aarch64_comparison_operator"
- [(match_operand:CC 2 "cc_register" "") (const_int 0)])
- (not:GPI (match_operand:GPI 3 "register_operand" "r"))
- (match_operand:GPI 4 "aarch64_reg_or_zero" "rZ")))]
- ""
- "csinv\\t%<w>0, %<w>4, %<w>3, %M1"
- [(set_attr "v8type" "csel")
- (set_attr "mode" "<MODE>")])
-
-(define_insn "*csneg3<mode>_insn"
- [(set (match_operand:GPI 0 "register_operand" "=r")
- (if_then_else:GPI
- (match_operator:GPI 1 "aarch64_comparison_operator"
- [(match_operand:CC 2 "cc_register" "") (const_int 0)])
- (neg:GPI (match_operand:GPI 3 "register_operand" "r"))
- (match_operand:GPI 4 "aarch64_reg_or_zero" "rZ")))]
- ""
- "csneg\\t%<w>0, %<w>4, %<w>3, %M1"
- [(set_attr "v8type" "csel")
- (set_attr "mode" "<MODE>")])
-
-;; -------------------------------------------------------------------
-;; Logical operations
-;; -------------------------------------------------------------------
-
-(define_insn "<optab><mode>3"
- [(set (match_operand:GPI 0 "register_operand" "=r,rk")
- (LOGICAL:GPI (match_operand:GPI 1 "register_operand" "%r,r")
- (match_operand:GPI 2 "aarch64_logical_operand" "r,<lconst>")))]
- ""
- "<logical>\\t%<w>0, %<w>1, %<w>2"
- [(set_attr "v8type" "logic,logic_imm")
- (set_attr "mode" "<MODE>")])
-
-;; zero_extend version of above
-(define_insn "*<optab>si3_uxtw"
- [(set (match_operand:DI 0 "register_operand" "=r,rk")
- (zero_extend:DI
- (LOGICAL:SI (match_operand:SI 1 "register_operand" "%r,r")
- (match_operand:SI 2 "aarch64_logical_operand" "r,K"))))]
- ""
- "<logical>\\t%w0, %w1, %w2"
- [(set_attr "v8type" "logic,logic_imm")
- (set_attr "mode" "SI")])
-
-(define_insn "*<LOGICAL:optab>_<SHIFT:optab><mode>3"
- [(set (match_operand:GPI 0 "register_operand" "=r")
- (LOGICAL:GPI (SHIFT:GPI
- (match_operand:GPI 1 "register_operand" "r")
- (match_operand:QI 2 "aarch64_shift_imm_<mode>" "n"))
- (match_operand:GPI 3 "register_operand" "r")))]
- ""
- "<LOGICAL:logical>\\t%<w>0, %<w>3, %<w>1, <SHIFT:shift> %2"
- [(set_attr "v8type" "logic_shift")
- (set_attr "mode" "<MODE>")])
-
-;; zero_extend version of above
-(define_insn "*<LOGICAL:optab>_<SHIFT:optab>si3_uxtw"
- [(set (match_operand:DI 0 "register_operand" "=r")
- (zero_extend:DI
- (LOGICAL:SI (SHIFT:SI
- (match_operand:SI 1 "register_operand" "r")
- (match_operand:QI 2 "aarch64_shift_imm_si" "n"))
- (match_operand:SI 3 "register_operand" "r"))))]
- ""
- "<LOGICAL:logical>\\t%w0, %w3, %w1, <SHIFT:shift> %2"
- [(set_attr "v8type" "logic_shift")
- (set_attr "mode" "SI")])
-
-(define_insn "one_cmpl<mode>2"
- [(set (match_operand:GPI 0 "register_operand" "=r")
- (not:GPI (match_operand:GPI 1 "register_operand" "r")))]
- ""
- "mvn\\t%<w>0, %<w>1"
- [(set_attr "v8type" "logic")
- (set_attr "mode" "<MODE>")])
-
-(define_insn "*one_cmpl_<optab><mode>2"
- [(set (match_operand:GPI 0 "register_operand" "=r")
- (not:GPI (SHIFT:GPI (match_operand:GPI 1 "register_operand" "r")
- (match_operand:QI 2 "aarch64_shift_imm_<mode>" "n"))))]
- ""
- "mvn\\t%<w>0, %<w>1, <shift> %2"
- [(set_attr "v8type" "logic_shift")
- (set_attr "mode" "<MODE>")])
-
-(define_insn "*<LOGICAL:optab>_one_cmpl<mode>3"
- [(set (match_operand:GPI 0 "register_operand" "=r")
- (LOGICAL:GPI (not:GPI
- (match_operand:GPI 1 "register_operand" "r"))
- (match_operand:GPI 2 "register_operand" "r")))]
- ""
- "<LOGICAL:nlogical>\\t%<w>0, %<w>2, %<w>1"
- [(set_attr "v8type" "logic")
- (set_attr "mode" "<MODE>")])
-
-(define_insn "*<LOGICAL:optab>_one_cmpl_<SHIFT:optab><mode>3"
- [(set (match_operand:GPI 0 "register_operand" "=r")
- (LOGICAL:GPI (not:GPI
- (SHIFT:GPI
- (match_operand:GPI 1 "register_operand" "r")
- (match_operand:QI 2 "aarch64_shift_imm_<mode>" "n")))
- (match_operand:GPI 3 "register_operand" "r")))]
- ""
- "<LOGICAL:nlogical>\\t%<w>0, %<w>3, %<w>1, <SHIFT:shift> %2"
- [(set_attr "v8type" "logic_shift")
- (set_attr "mode" "<MODE>")])
-
-(define_insn "clz<mode>2"
- [(set (match_operand:GPI 0 "register_operand" "=r")
- (clz:GPI (match_operand:GPI 1 "register_operand" "r")))]
- ""
- "clz\\t%<w>0, %<w>1"
- [(set_attr "v8type" "clz")
- (set_attr "mode" "<MODE>")])
-
-(define_expand "ffs<mode>2"
- [(match_operand:GPI 0 "register_operand")
- (match_operand:GPI 1 "register_operand")]
- ""
- {
- rtx ccreg = aarch64_gen_compare_reg (EQ, operands[1], const0_rtx);
- rtx x = gen_rtx_NE (VOIDmode, ccreg, const0_rtx);
-
- emit_insn (gen_rbit<mode>2 (operands[0], operands[1]));
- emit_insn (gen_clz<mode>2 (operands[0], operands[0]));
- emit_insn (gen_csinc3<mode>_insn (operands[0], x, ccreg, operands[0], const0_rtx));
- DONE;
- }
-)
-
-(define_insn "clrsb<mode>2"
- [(set (match_operand:GPI 0 "register_operand" "=r")
- (unspec:GPI [(match_operand:GPI 1 "register_operand" "r")] UNSPEC_CLS))]
- ""
- "cls\\t%<w>0, %<w>1"
- [(set_attr "v8type" "clz")
- (set_attr "mode" "<MODE>")])
-
-(define_insn "rbit<mode>2"
- [(set (match_operand:GPI 0 "register_operand" "=r")
- (unspec:GPI [(match_operand:GPI 1 "register_operand" "r")] UNSPEC_RBIT))]
- ""
- "rbit\\t%<w>0, %<w>1"
- [(set_attr "v8type" "rbit")
- (set_attr "mode" "<MODE>")])
-
-(define_expand "ctz<mode>2"
- [(match_operand:GPI 0 "register_operand")
- (match_operand:GPI 1 "register_operand")]
- ""
- {
- emit_insn (gen_rbit<mode>2 (operands[0], operands[1]));
- emit_insn (gen_clz<mode>2 (operands[0], operands[0]));
- DONE;
- }
-)
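
Neither ffs nor ctz has a single AArch64 instruction, so both expanders above build the result from rbit (bit reverse) and clz, with ffs adding a compare and csinc so that a non-zero input yields ctz + 1 and a zero input yields 0. A C sketch of the identity follows; the helper names are invented, and because __builtin_clz (0) is undefined in C the zero case is handled separately here, whereas the ffs expander handles it with the flags and csinc.

#include <stdint.h>

static uint32_t
bit_reverse32 (uint32_t x)
{
  uint32_t r = 0;
  for (int i = 0; i < 32; i++)
    r |= ((x >> i) & 1u) << (31 - i);
  return r;
}

int
ctz32 (uint32_t x)              /* caller must ensure x != 0 */
{
  return __builtin_clz (bit_reverse32 (x));
}

int
ffs32 (uint32_t x)
{
  return x ? ctz32 (x) + 1 : 0;
}
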
-
-(define_insn "*and<mode>3nr_compare0"
- [(set (reg:CC_NZ CC_REGNUM)
- (compare:CC_NZ
- (and:GPI (match_operand:GPI 0 "register_operand" "%r,r")
- (match_operand:GPI 1 "aarch64_logical_operand" "r,<lconst>"))
- (const_int 0)))]
- ""
- "tst\\t%<w>0, %<w>1"
- [(set_attr "v8type" "logics")
- (set_attr "mode" "<MODE>")])
-
-(define_insn "*and_<SHIFT:optab><mode>3nr_compare0"
- [(set (reg:CC_NZ CC_REGNUM)
- (compare:CC_NZ
- (and:GPI (SHIFT:GPI
- (match_operand:GPI 0 "register_operand" "r")
- (match_operand:QI 1 "aarch64_shift_imm_<mode>" "n"))
- (match_operand:GPI 2 "register_operand" "r"))
- (const_int 0)))]
- ""
- "tst\\t%<w>2, %<w>0, <SHIFT:shift> %1"
- [(set_attr "v8type" "logics_shift")
- (set_attr "mode" "<MODE>")])
-
-;; -------------------------------------------------------------------
-;; Shifts
-;; -------------------------------------------------------------------
-
-(define_expand "<optab><mode>3"
- [(set (match_operand:GPI 0 "register_operand")
- (ASHIFT:GPI (match_operand:GPI 1 "register_operand")
- (match_operand:QI 2 "nonmemory_operand")))]
- ""
- {
- if (CONST_INT_P (operands[2]))
- {
- operands[2] = GEN_INT (INTVAL (operands[2])
- & (GET_MODE_BITSIZE (<MODE>mode) - 1));
-
- if (operands[2] == const0_rtx)
- {
- emit_insn (gen_mov<mode> (operands[0], operands[1]));
- DONE;
- }
- }
- }
-)
-
-(define_expand "ashl<mode>3"
- [(set (match_operand:SHORT 0 "register_operand")
- (ashift:SHORT (match_operand:SHORT 1 "register_operand")
- (match_operand:QI 2 "nonmemory_operand")))]
- ""
- {
- if (CONST_INT_P (operands[2]))
- {
- operands[2] = GEN_INT (INTVAL (operands[2])
- & (GET_MODE_BITSIZE (<MODE>mode) - 1));
-
- if (operands[2] == const0_rtx)
- {
- emit_insn (gen_mov<mode> (operands[0], operands[1]));
- DONE;
- }
- }
- }
-)
-
-(define_expand "rotr<mode>3"
- [(set (match_operand:GPI 0 "register_operand")
- (rotatert:GPI (match_operand:GPI 1 "register_operand")
- (match_operand:QI 2 "nonmemory_operand")))]
- ""
- {
- if (CONST_INT_P (operands[2]))
- {
- operands[2] = GEN_INT (INTVAL (operands[2])
- & (GET_MODE_BITSIZE (<MODE>mode) - 1));
-
- if (operands[2] == const0_rtx)
- {
- emit_insn (gen_mov<mode> (operands[0], operands[1]));
- DONE;
- }
- }
- }
-)
-
-(define_expand "rotl<mode>3"
- [(set (match_operand:GPI 0 "register_operand")
- (rotatert:GPI (match_operand:GPI 1 "register_operand")
- (match_operand:QI 2 "nonmemory_operand")))]
- ""
- {
- /* (SZ - cnt) % SZ == -cnt % SZ */
- if (CONST_INT_P (operands[2]))
- {
- operands[2] = GEN_INT ((-INTVAL (operands[2]))
- & (GET_MODE_BITSIZE (<MODE>mode) - 1));
- if (operands[2] == const0_rtx)
- {
- emit_insn (gen_mov<mode> (operands[0], operands[1]));
- DONE;
- }
- }
- else
- operands[2] = expand_simple_unop (QImode, NEG, operands[2],
- NULL_RTX, 1);
- }
-)
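
The rotl expander above rewrites a rotate-left as a rotate-right of the negated count, relying on (SZ - cnt) % SZ == -cnt % SZ. A short C sketch of the same identity for 32-bit values (illustrative only):

    /* rotl (x, cnt) expressed as a right rotation by the negated count.  */
    unsigned int rotl32 (unsigned int x, unsigned int cnt)
    {
      unsigned int r = (0u - cnt) & 31;   /* right-rotate amount, mod 32 */
      return r == 0 ? x : (x >> r) | (x << (32 - r));
    }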
-
-(define_insn "*<optab><mode>3_insn"
- [(set (match_operand:GPI 0 "register_operand" "=r")
- (SHIFT:GPI
- (match_operand:GPI 1 "register_operand" "r")
- (match_operand:QI 2 "aarch64_reg_or_shift_imm_<mode>" "rUs<cmode>")))]
- ""
- "<shift>\\t%<w>0, %<w>1, %<w>2"
- [(set_attr "v8type" "shift")
- (set_attr "mode" "<MODE>")]
-)
-
-;; zero_extend version of above
-(define_insn "*<optab>si3_insn_uxtw"
- [(set (match_operand:DI 0 "register_operand" "=r")
- (zero_extend:DI (SHIFT:SI
- (match_operand:SI 1 "register_operand" "r")
- (match_operand:QI 2 "aarch64_reg_or_shift_imm_si" "rUss"))))]
- ""
- "<shift>\\t%w0, %w1, %w2"
- [(set_attr "v8type" "shift")
- (set_attr "mode" "SI")]
-)
-
-(define_insn "*ashl<mode>3_insn"
- [(set (match_operand:SHORT 0 "register_operand" "=r")
- (ashift:SHORT (match_operand:SHORT 1 "register_operand" "r")
- (match_operand:QI 2 "aarch64_reg_or_shift_imm_si" "rUss")))]
- ""
- "lsl\\t%<w>0, %<w>1, %<w>2"
- [(set_attr "v8type" "shift")
- (set_attr "mode" "<MODE>")]
-)
-
-(define_insn "*<optab><mode>3_insn"
- [(set (match_operand:SHORT 0 "register_operand" "=r")
- (ASHIFT:SHORT (match_operand:SHORT 1 "register_operand" "r")
- (match_operand 2 "const_int_operand" "n")))]
- "UINTVAL (operands[2]) < GET_MODE_BITSIZE (<MODE>mode)"
-{
- operands[3] = GEN_INT (<sizen> - UINTVAL (operands[2]));
- return "<bfshift>\t%w0, %w1, %2, %3";
-}
- [(set_attr "v8type" "bfm")
- (set_attr "mode" "<MODE>")]
-)
-
-(define_insn "*<ANY_EXTEND:optab><GPI:mode>_ashl<SHORT:mode>"
- [(set (match_operand:GPI 0 "register_operand" "=r")
- (ANY_EXTEND:GPI
- (ashift:SHORT (match_operand:SHORT 1 "register_operand" "r")
- (match_operand 2 "const_int_operand" "n"))))]
- "UINTVAL (operands[2]) < GET_MODE_BITSIZE (<SHORT:MODE>mode)"
-{
- operands[3] = GEN_INT (<SHORT:sizen> - UINTVAL (operands[2]));
- return "<su>bfiz\t%<GPI:w>0, %<GPI:w>1, %2, %3";
-}
- [(set_attr "v8type" "bfm")
- (set_attr "mode" "<GPI:MODE>")]
-)
-
-(define_insn "*zero_extend<GPI:mode>_lshr<SHORT:mode>"
- [(set (match_operand:GPI 0 "register_operand" "=r")
- (zero_extend:GPI
- (lshiftrt:SHORT (match_operand:SHORT 1 "register_operand" "r")
- (match_operand 2 "const_int_operand" "n"))))]
- "UINTVAL (operands[2]) < GET_MODE_BITSIZE (<SHORT:MODE>mode)"
-{
- operands[3] = GEN_INT (<SHORT:sizen> - UINTVAL (operands[2]));
- return "ubfx\t%<GPI:w>0, %<GPI:w>1, %2, %3";
-}
- [(set_attr "v8type" "bfm")
- (set_attr "mode" "<GPI:MODE>")]
-)
-
-(define_insn "*extend<GPI:mode>_ashr<SHORT:mode>"
- [(set (match_operand:GPI 0 "register_operand" "=r")
- (sign_extend:GPI
- (ashiftrt:SHORT (match_operand:SHORT 1 "register_operand" "r")
- (match_operand 2 "const_int_operand" "n"))))]
- "UINTVAL (operands[2]) < GET_MODE_BITSIZE (<SHORT:MODE>mode)"
-{
- operands[3] = GEN_INT (<SHORT:sizen> - UINTVAL (operands[2]));
- return "sbfx\\t%<GPI:w>0, %<GPI:w>1, %2, %3";
-}
- [(set_attr "v8type" "bfm")
- (set_attr "mode" "<GPI:MODE>")]
-)
-
-;; -------------------------------------------------------------------
-;; Bitfields
-;; -------------------------------------------------------------------
-
-(define_expand "<optab>"
- [(set (match_operand:DI 0 "register_operand" "=r")
- (ANY_EXTRACT:DI (match_operand:DI 1 "register_operand" "r")
- (match_operand 2 "const_int_operand" "n")
- (match_operand 3 "const_int_operand" "n")))]
- ""
- ""
-)
-
-(define_insn "*<optab><mode>"
- [(set (match_operand:GPI 0 "register_operand" "=r")
- (ANY_EXTRACT:GPI (match_operand:GPI 1 "register_operand" "r")
- (match_operand 2 "const_int_operand" "n")
- (match_operand 3 "const_int_operand" "n")))]
- ""
- "<su>bfx\\t%<w>0, %<w>1, %3, %2"
- [(set_attr "v8type" "bfm")
- (set_attr "mode" "<MODE>")]
-)
-
-(define_insn "*<optab><ALLX:mode>_shft_<GPI:mode>"
- [(set (match_operand:GPI 0 "register_operand" "=r")
- (ashift:GPI (ANY_EXTEND:GPI
- (match_operand:ALLX 1 "register_operand" "r"))
- (match_operand 2 "const_int_operand" "n")))]
- "UINTVAL (operands[2]) < <GPI:sizen>"
-{
- operands[3] = (<ALLX:sizen> <= (<GPI:sizen> - UINTVAL (operands[2])))
- ? GEN_INT (<ALLX:sizen>)
- : GEN_INT (<GPI:sizen> - UINTVAL (operands[2]));
- return "<su>bfiz\t%<GPI:w>0, %<GPI:w>1, %2, %3";
-}
- [(set_attr "v8type" "bfm")
- (set_attr "mode" "<GPI:MODE>")]
-)
-
-;; XXX We should match (any_extend (ashift)) here, like (and (ashift)) below
-
-(define_insn "*andim_ashift<mode>_bfiz"
- [(set (match_operand:GPI 0 "register_operand" "=r")
- (and:GPI (ashift:GPI (match_operand:GPI 1 "register_operand" "r")
- (match_operand 2 "const_int_operand" "n"))
- (match_operand 3 "const_int_operand" "n")))]
- "exact_log2 ((INTVAL (operands[3]) >> INTVAL (operands[2])) + 1) >= 0
- && (INTVAL (operands[3]) & ((1 << INTVAL (operands[2])) - 1)) == 0"
- "ubfiz\\t%<w>0, %<w>1, %2, %P3"
- [(set_attr "v8type" "bfm")
- (set_attr "mode" "<MODE>")]
-)
-
-(define_insn "bswap<mode>2"
- [(set (match_operand:GPI 0 "register_operand" "=r")
- (bswap:GPI (match_operand:GPI 1 "register_operand" "r")))]
- ""
- "rev\\t%<w>0, %<w>1"
- [(set_attr "v8type" "rev")
- (set_attr "mode" "<MODE>")]
-)
-
-(define_insn "bswaphi2"
- [(set (match_operand:HI 0 "register_operand" "=r")
- (bswap:HI (match_operand:HI 1 "register_operand" "r")))]
- ""
- "rev16\\t%w0, %w1"
- [(set_attr "v8type" "rev")
- (set_attr "mode" "HI")]
-)
-
-;; zero_extend version of above
-(define_insn "*bswapsi2_uxtw"
- [(set (match_operand:DI 0 "register_operand" "=r")
- (zero_extend:DI (bswap:SI (match_operand:SI 1 "register_operand" "r"))))]
- ""
- "rev\\t%w0, %w1"
- [(set_attr "v8type" "rev")
- (set_attr "mode" "SI")]
-)
-
-;; -------------------------------------------------------------------
-;; Floating-point intrinsics
-;; -------------------------------------------------------------------
-
-;; frint floating-point round to integral standard patterns.
-;; Expands to btrunc, ceil, floor, nearbyint, rint, round.
-
-(define_insn "<frint_pattern><mode>2"
- [(set (match_operand:GPF 0 "register_operand" "=w")
- (unspec:GPF [(match_operand:GPF 1 "register_operand" "w")]
- FRINT))]
- "TARGET_FLOAT"
- "frint<frint_suffix>\\t%<s>0, %<s>1"
- [(set_attr "v8type" "frint")
- (set_attr "mode" "<MODE>")]
-)
-
-;; frcvt floating-point round to integer and convert standard patterns.
-;; Expands to lbtrunc, lceil, lfloor, lround.
-(define_insn "l<fcvt_pattern><su_optab><GPF:mode><GPI:mode>2"
- [(set (match_operand:GPI 0 "register_operand" "=r")
- (FIXUORS:GPI (unspec:GPF [(match_operand:GPF 1 "register_operand" "w")]
- FCVT)))]
- "TARGET_FLOAT"
- "fcvt<frint_suffix><su>\\t%<GPI:w>0, %<GPF:s>1"
- [(set_attr "v8type" "fcvtf2i")
- (set_attr "mode" "<GPF:MODE>")
- (set_attr "mode2" "<GPI:MODE>")]
-)
-
-;; fma - no throw
-
-(define_insn "fma<mode>4"
- [(set (match_operand:GPF 0 "register_operand" "=w")
- (fma:GPF (match_operand:GPF 1 "register_operand" "w")
- (match_operand:GPF 2 "register_operand" "w")
- (match_operand:GPF 3 "register_operand" "w")))]
- "TARGET_FLOAT"
- "fmadd\\t%<s>0, %<s>1, %<s>2, %<s>3"
- [(set_attr "v8type" "fmadd")
- (set_attr "mode" "<MODE>")]
-)
-
-(define_insn "fnma<mode>4"
- [(set (match_operand:GPF 0 "register_operand" "=w")
- (fma:GPF (neg:GPF (match_operand:GPF 1 "register_operand" "w"))
- (match_operand:GPF 2 "register_operand" "w")
- (match_operand:GPF 3 "register_operand" "w")))]
- "TARGET_FLOAT"
- "fmsub\\t%<s>0, %<s>1, %<s>2, %<s>3"
- [(set_attr "v8type" "fmadd")
- (set_attr "mode" "<MODE>")]
-)
-
-(define_insn "fms<mode>4"
- [(set (match_operand:GPF 0 "register_operand" "=w")
- (fma:GPF (match_operand:GPF 1 "register_operand" "w")
- (match_operand:GPF 2 "register_operand" "w")
- (neg:GPF (match_operand:GPF 3 "register_operand" "w"))))]
- "TARGET_FLOAT"
- "fnmsub\\t%<s>0, %<s>1, %<s>2, %<s>3"
- [(set_attr "v8type" "fmadd")
- (set_attr "mode" "<MODE>")]
-)
-
-(define_insn "fnms<mode>4"
- [(set (match_operand:GPF 0 "register_operand" "=w")
- (fma:GPF (neg:GPF (match_operand:GPF 1 "register_operand" "w"))
- (match_operand:GPF 2 "register_operand" "w")
- (neg:GPF (match_operand:GPF 3 "register_operand" "w"))))]
- "TARGET_FLOAT"
- "fnmadd\\t%<s>0, %<s>1, %<s>2, %<s>3"
- [(set_attr "v8type" "fmadd")
- (set_attr "mode" "<MODE>")]
-)
-
-;; If signed zeros are ignored, -(a * b + c) = -a * b - c.
-(define_insn "*fnmadd<mode>4"
- [(set (match_operand:GPF 0 "register_operand" "=w")
- (neg:GPF (fma:GPF (match_operand:GPF 1 "register_operand" "w")
- (match_operand:GPF 2 "register_operand" "w")
- (match_operand:GPF 3 "register_operand" "w"))))]
- "!HONOR_SIGNED_ZEROS (<MODE>mode) && TARGET_FLOAT"
- "fnmadd\\t%<s>0, %<s>1, %<s>2, %<s>3"
- [(set_attr "v8type" "fmadd")
- (set_attr "mode" "<MODE>")]
-)
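
A worked example of why the pattern above is gated on !HONOR_SIGNED_ZEROS: with a = 1.0, b = 0.0 and c = -0.0 the two forms disagree in the sign of the zero they produce (illustrative C sketch, not from the original sources; the same argument applies to the fused forms):

    /* -(a*b + c) and -a*b - c can yield zeros of opposite sign.  */
    #include <stdio.h>
    int main (void)
    {
      double a = 1.0, b = 0.0, c = -0.0;
      double lhs = -(a * b + c);      /* -(0.0 + -0.0) == -(+0.0) == -0.0 */
      double rhs = -a * b - c;        /* -0.0 - (-0.0) == +0.0            */
      printf ("%g %g\n", lhs, rhs);   /* typically prints "-0 0" */
      return 0;
    }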
-
-;; -------------------------------------------------------------------
-;; Floating-point conversions
-;; -------------------------------------------------------------------
-
-(define_insn "extendsfdf2"
- [(set (match_operand:DF 0 "register_operand" "=w")
- (float_extend:DF (match_operand:SF 1 "register_operand" "w")))]
- "TARGET_FLOAT"
- "fcvt\\t%d0, %s1"
- [(set_attr "v8type" "fcvt")
- (set_attr "mode" "DF")
- (set_attr "mode2" "SF")]
-)
-
-(define_insn "truncdfsf2"
- [(set (match_operand:SF 0 "register_operand" "=w")
- (float_truncate:SF (match_operand:DF 1 "register_operand" "w")))]
- "TARGET_FLOAT"
- "fcvt\\t%s0, %d1"
- [(set_attr "v8type" "fcvt")
- (set_attr "mode" "SF")
- (set_attr "mode2" "DF")]
-)
-
-(define_insn "fix_trunc<GPF:mode><GPI:mode>2"
- [(set (match_operand:GPI 0 "register_operand" "=r")
- (fix:GPI (match_operand:GPF 1 "register_operand" "w")))]
- "TARGET_FLOAT"
- "fcvtzs\\t%<GPI:w>0, %<GPF:s>1"
- [(set_attr "v8type" "fcvtf2i")
- (set_attr "mode" "<GPF:MODE>")
- (set_attr "mode2" "<GPI:MODE>")]
-)
-
-(define_insn "fixuns_trunc<GPF:mode><GPI:mode>2"
- [(set (match_operand:GPI 0 "register_operand" "=r")
- (unsigned_fix:GPI (match_operand:GPF 1 "register_operand" "w")))]
- "TARGET_FLOAT"
- "fcvtzu\\t%<GPI:w>0, %<GPF:s>1"
- [(set_attr "v8type" "fcvtf2i")
- (set_attr "mode" "<GPF:MODE>")
- (set_attr "mode2" "<GPI:MODE>")]
-)
-
-(define_insn "float<GPI:mode><GPF:mode>2"
- [(set (match_operand:GPF 0 "register_operand" "=w")
- (float:GPF (match_operand:GPI 1 "register_operand" "r")))]
- "TARGET_FLOAT"
- "scvtf\\t%<GPF:s>0, %<GPI:w>1"
- [(set_attr "v8type" "fcvti2f")
- (set_attr "mode" "<GPF:MODE>")
- (set_attr "mode2" "<GPI:MODE>")]
-)
-
-(define_insn "floatuns<GPI:mode><GPF:mode>2"
- [(set (match_operand:GPF 0 "register_operand" "=w")
- (unsigned_float:GPF (match_operand:GPI 1 "register_operand" "r")))]
- "TARGET_FLOAT"
- "ucvtf\\t%<GPF:s>0, %<GPI:w>1"
- [(set_attr "v8type" "fcvt")
- (set_attr "mode" "<GPF:MODE>")
- (set_attr "mode2" "<GPI:MODE>")]
-)
-
-;; -------------------------------------------------------------------
-;; Floating-point arithmetic
-;; -------------------------------------------------------------------
-
-(define_insn "add<mode>3"
- [(set (match_operand:GPF 0 "register_operand" "=w")
- (plus:GPF
- (match_operand:GPF 1 "register_operand" "w")
- (match_operand:GPF 2 "register_operand" "w")))]
- "TARGET_FLOAT"
- "fadd\\t%<s>0, %<s>1, %<s>2"
- [(set_attr "v8type" "fadd")
- (set_attr "mode" "<MODE>")]
-)
-
-(define_insn "sub<mode>3"
- [(set (match_operand:GPF 0 "register_operand" "=w")
- (minus:GPF
- (match_operand:GPF 1 "register_operand" "w")
- (match_operand:GPF 2 "register_operand" "w")))]
- "TARGET_FLOAT"
- "fsub\\t%<s>0, %<s>1, %<s>2"
- [(set_attr "v8type" "fadd")
- (set_attr "mode" "<MODE>")]
-)
-
-(define_insn "mul<mode>3"
- [(set (match_operand:GPF 0 "register_operand" "=w")
- (mult:GPF
- (match_operand:GPF 1 "register_operand" "w")
- (match_operand:GPF 2 "register_operand" "w")))]
- "TARGET_FLOAT"
- "fmul\\t%<s>0, %<s>1, %<s>2"
- [(set_attr "v8type" "fmul")
- (set_attr "mode" "<MODE>")]
-)
-
-(define_insn "*fnmul<mode>3"
- [(set (match_operand:GPF 0 "register_operand" "=w")
- (mult:GPF
- (neg:GPF (match_operand:GPF 1 "register_operand" "w"))
- (match_operand:GPF 2 "register_operand" "w")))]
- "TARGET_FLOAT"
- "fnmul\\t%<s>0, %<s>1, %<s>2"
- [(set_attr "v8type" "fmul")
- (set_attr "mode" "<MODE>")]
-)
-
-(define_insn "div<mode>3"
- [(set (match_operand:GPF 0 "register_operand" "=w")
- (div:GPF
- (match_operand:GPF 1 "register_operand" "w")
- (match_operand:GPF 2 "register_operand" "w")))]
- "TARGET_FLOAT"
- "fdiv\\t%<s>0, %<s>1, %<s>2"
- [(set_attr "v8type" "fdiv")
- (set_attr "mode" "<MODE>")]
-)
-
-(define_insn "neg<mode>2"
- [(set (match_operand:GPF 0 "register_operand" "=w")
- (neg:GPF (match_operand:GPF 1 "register_operand" "w")))]
- "TARGET_FLOAT"
- "fneg\\t%<s>0, %<s>1"
- [(set_attr "v8type" "ffarith")
- (set_attr "mode" "<MODE>")]
-)
-
-(define_insn "sqrt<mode>2"
- [(set (match_operand:GPF 0 "register_operand" "=w")
- (sqrt:GPF (match_operand:GPF 1 "register_operand" "w")))]
- "TARGET_FLOAT"
- "fsqrt\\t%<s>0, %<s>1"
- [(set_attr "v8type" "fsqrt")
- (set_attr "mode" "<MODE>")]
-)
-
-(define_insn "abs<mode>2"
- [(set (match_operand:GPF 0 "register_operand" "=w")
- (abs:GPF (match_operand:GPF 1 "register_operand" "w")))]
- "TARGET_FLOAT"
- "fabs\\t%<s>0, %<s>1"
- [(set_attr "v8type" "ffarith")
- (set_attr "mode" "<MODE>")]
-)
-
-;; Given that smax/smin do not specify the result when either input is NaN,
-;; we could use either FMAXNM or FMAX for smax, and either FMINNM or FMIN
-;; for smin.
-
-(define_insn "smax<mode>3"
- [(set (match_operand:GPF 0 "register_operand" "=w")
- (smax:GPF (match_operand:GPF 1 "register_operand" "w")
- (match_operand:GPF 2 "register_operand" "w")))]
- "TARGET_FLOAT"
- "fmaxnm\\t%<s>0, %<s>1, %<s>2"
- [(set_attr "v8type" "fminmax")
- (set_attr "mode" "<MODE>")]
-)
-
-(define_insn "smin<mode>3"
- [(set (match_operand:GPF 0 "register_operand" "=w")
- (smin:GPF (match_operand:GPF 1 "register_operand" "w")
- (match_operand:GPF 2 "register_operand" "w")))]
- "TARGET_FLOAT"
- "fminnm\\t%<s>0, %<s>1, %<s>2"
- [(set_attr "v8type" "fminmax")
- (set_attr "mode" "<MODE>")]
-)
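
The two candidate instructions mentioned above differ only when a NaN is involved: FMAXNM/FMINNM return the non-NaN operand when exactly one input is a quiet NaN, while FMAX/FMIN propagate the NaN. A hedged C sketch of that distinction, using fmax() from <math.h>, which follows the "prefer the number" rule (illustrative only, not from the original sources):

    /* NaN handling that separates the FMAXNM-style and FMAX-style choices.  */
    #include <math.h>
    #include <stdio.h>
    int main (void)
    {
      double x = 2.0, n = NAN;
      printf ("%g\n", fmax (x, n));    /* 2   -- the non-NaN operand wins    */
      printf ("%g\n", x > n ? x : n);  /* nan -- a naive select keeps the NaN */
      return 0;
    }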
-
-;; -------------------------------------------------------------------
-;; Reload support
-;; -------------------------------------------------------------------
-
-;; Reload SP+imm where imm cannot be handled by a single ADD instruction.
-;; Must load imm into a scratch register and copy SP to the dest reg before
-;; adding, since SP cannot be used as a source register in an ADD
-;; instruction.
-(define_expand "reload_sp_immediate"
- [(parallel [(set (match_operand:DI 0 "register_operand" "=r")
- (match_operand:DI 1 "" ""))
- (clobber (match_operand:TI 2 "register_operand" "=&r"))])]
- ""
- {
- rtx sp = XEXP (operands[1], 0);
- rtx val = XEXP (operands[1], 1);
- unsigned regno = REGNO (operands[2]);
- rtx scratch = operands[1];
- gcc_assert (GET_CODE (operands[1]) == PLUS);
- gcc_assert (sp == stack_pointer_rtx);
- gcc_assert (CONST_INT_P (val));
-
- /* It is possible that one of the registers we got for operands[2]
- might coincide with that of operands[0] (which is why we made
- it TImode). Pick the other one to use as our scratch. */
- if (regno == REGNO (operands[0]))
- regno++;
- scratch = gen_rtx_REG (DImode, regno);
-
- emit_move_insn (scratch, val);
- emit_move_insn (operands[0], sp);
- emit_insn (gen_adddi3 (operands[0], operands[0], scratch));
- DONE;
- }
-)
-
-(define_expand "aarch64_reload_mov<mode>"
- [(set (match_operand:TX 0 "register_operand" "=w")
- (match_operand:TX 1 "register_operand" "w"))
- (clobber (match_operand:DI 2 "register_operand" "=&r"))
- ]
- ""
- {
- rtx op0 = simplify_gen_subreg (TImode, operands[0], <MODE>mode, 0);
- rtx op1 = simplify_gen_subreg (TImode, operands[1], <MODE>mode, 0);
- emit_insn (gen_aarch64_movtilow_tilow (op0, op1));
- emit_insn (gen_aarch64_movdi_tihigh (operands[2], op1));
- emit_insn (gen_aarch64_movtihigh_di (op0, operands[2]));
- DONE;
- }
-)
-
-;; The following secondary reload helper patterns are invoked only during
-;; or after reload, since we do not want them to start kicking in during
-;; the combine pass.
-
-(define_insn "aarch64_movdi_tilow"
- [(set (match_operand:DI 0 "register_operand" "=r")
- (truncate:DI (match_operand:TI 1 "register_operand" "w")))]
- "reload_completed || reload_in_progress"
- "fmov\\t%x0, %d1"
- [(set_attr "v8type" "fmovf2i")
- (set_attr "mode" "DI")
- (set_attr "length" "4")
- ])
-
-(define_insn "aarch64_movdi_tihigh"
- [(set (match_operand:DI 0 "register_operand" "=r")
- (truncate:DI
- (lshiftrt:TI (match_operand:TI 1 "register_operand" "w")
- (const_int 64))))]
- "reload_completed || reload_in_progress"
- "fmov\\t%x0, %1.d[1]"
- [(set_attr "v8type" "fmovf2i")
- (set_attr "mode" "DI")
- (set_attr "length" "4")
- ])
-
-(define_insn "aarch64_movtihigh_di"
- [(set (zero_extract:TI (match_operand:TI 0 "register_operand" "+w")
- (const_int 64) (const_int 64))
- (zero_extend:TI (match_operand:DI 1 "register_operand" "r")))]
- "reload_completed || reload_in_progress"
- "fmov\\t%0.d[1], %x1"
-
- [(set_attr "v8type" "fmovi2f")
- (set_attr "mode" "DI")
- (set_attr "length" "4")
- ])
-
-(define_insn "aarch64_movtilow_di"
- [(set (match_operand:TI 0 "register_operand" "=w")
- (zero_extend:TI (match_operand:DI 1 "register_operand" "r")))]
- "reload_completed || reload_in_progress"
- "fmov\\t%d0, %x1"
-
- [(set_attr "v8type" "fmovi2f")
- (set_attr "mode" "DI")
- (set_attr "length" "4")
- ])
-
-(define_insn "aarch64_movtilow_tilow"
- [(set (match_operand:TI 0 "register_operand" "=w")
- (zero_extend:TI
- (truncate:DI (match_operand:TI 1 "register_operand" "w"))))]
- "reload_completed || reload_in_progress"
- "fmov\\t%d0, %d1"
-
- [(set_attr "v8type" "fmovi2f")
- (set_attr "mode" "DI")
- (set_attr "length" "4")
- ])
-
-;; The operands of the high and lo_sum expressions used for the ADRP and
-;; ADD instructions deliberately have no modes.  This allows high and
-;; lo_sum to be used with the labels that define the jump tables in the
-;; rodata section.
-
-(define_insn "add_losym"
- [(set (match_operand:DI 0 "register_operand" "=r")
- (lo_sum:DI (match_operand:DI 1 "register_operand" "r")
- (match_operand 2 "aarch64_valid_symref" "S")))]
- ""
- "add\\t%0, %1, :lo12:%a2"
- [(set_attr "v8type" "alu")
- (set_attr "mode" "DI")]
-
-)
-
-(define_insn "ldr_got_small"
- [(set (match_operand:DI 0 "register_operand" "=r")
- (unspec:DI [(mem:DI (lo_sum:DI
- (match_operand:DI 1 "register_operand" "r")
- (match_operand:DI 2 "aarch64_valid_symref" "S")))]
- UNSPEC_GOTSMALLPIC))]
- ""
- "ldr\\t%0, [%1, #:got_lo12:%a2]"
- [(set_attr "v8type" "load1")
- (set_attr "mode" "DI")]
-)
-
-(define_insn "aarch64_load_tp_hard"
- [(set (match_operand:DI 0 "register_operand" "=r")
- (unspec:DI [(const_int 0)] UNSPEC_TLS))]
- ""
- "mrs\\t%0, tpidr_el0"
- [(set_attr "v8type" "mrs")
- (set_attr "mode" "DI")]
-)
-
-;; The TLS ABI specifically requires that the compiler does not schedule
-;; instructions in the TLS stubs, in order to enable linker relaxation.
-;; Therefore we treat the stubs as an atomic sequence.
-(define_expand "tlsgd_small"
- [(parallel [(set (match_operand 0 "register_operand" "")
- (call (mem:DI (match_dup 2)) (const_int 1)))
- (unspec:DI [(match_operand:DI 1 "aarch64_valid_symref" "")] UNSPEC_GOTSMALLTLS)
- (clobber (reg:DI LR_REGNUM))])]
- ""
-{
- operands[2] = aarch64_tls_get_addr ();
-})
-
-(define_insn "*tlsgd_small"
- [(set (match_operand 0 "register_operand" "")
- (call (mem:DI (match_operand:DI 2 "" "")) (const_int 1)))
- (unspec:DI [(match_operand:DI 1 "aarch64_valid_symref" "S")] UNSPEC_GOTSMALLTLS)
- (clobber (reg:DI LR_REGNUM))
- ]
- ""
- "adrp\\tx0, %A1\;add\\tx0, x0, %L1\;bl\\t%2\;nop"
- [(set_attr "v8type" "call")
- (set_attr "length" "16")])
-
-(define_insn "tlsie_small"
- [(set (match_operand:DI 0 "register_operand" "=r")
- (unspec:DI [(match_operand:DI 1 "aarch64_tls_ie_symref" "S")]
- UNSPEC_GOTSMALLTLS))]
- ""
- "adrp\\t%0, %A1\;ldr\\t%0, [%0, #%L1]"
- [(set_attr "v8type" "load1")
- (set_attr "mode" "DI")
- (set_attr "length" "8")]
-)
-
-(define_insn "tlsle_small"
- [(set (match_operand:DI 0 "register_operand" "=r")
- (unspec:DI [(match_operand:DI 1 "register_operand" "r")
- (match_operand:DI 2 "aarch64_tls_le_symref" "S")]
- UNSPEC_GOTSMALLTLS))]
- ""
- "add\\t%0, %1, #%G2\;add\\t%0, %0, #%L2"
- [(set_attr "v8type" "alu")
- (set_attr "mode" "DI")
- (set_attr "length" "8")]
-)
-
-(define_insn "tlsdesc_small"
- [(set (reg:DI R0_REGNUM)
- (unspec:DI [(match_operand:DI 0 "aarch64_valid_symref" "S")]
- UNSPEC_TLSDESC))
- (clobber (reg:DI LR_REGNUM))
- (clobber (match_scratch:DI 1 "=r"))]
- "TARGET_TLS_DESC"
- "adrp\\tx0, %A0\;ldr\\t%1, [x0, #%L0]\;add\\tx0, x0, %L0\;.tlsdesccall\\t%0\;blr\\t%1"
- [(set_attr "v8type" "call")
- (set_attr "length" "16")])
-
-(define_insn "stack_tie"
- [(set (mem:BLK (scratch))
- (unspec:BLK [(match_operand:DI 0 "register_operand" "rk")
- (match_operand:DI 1 "register_operand" "rk")]
- UNSPEC_PRLG_STK))]
- ""
- ""
- [(set_attr "length" "0")]
-)
-
-;; Named pattern for expanding thread pointer reference.
-(define_expand "get_thread_pointerdi"
- [(match_operand:DI 0 "register_operand" "=r")]
- ""
-{
- rtx tmp = aarch64_load_tp (operands[0]);
- if (tmp != operands[0])
- emit_move_insn (operands[0], tmp);
- DONE;
-})
-
-;; AdvSIMD Stuff
-(include "aarch64-simd.md")
-
-;; Atomic Operations
-(include "atomics.md")
diff --git a/gcc-4.8.1/gcc/config/aarch64/aarch64.opt b/gcc-4.8.1/gcc/config/aarch64/aarch64.opt
deleted file mode 100644
index 35182489d..000000000
--- a/gcc-4.8.1/gcc/config/aarch64/aarch64.opt
+++ /dev/null
@@ -1,100 +0,0 @@
-; Machine description for AArch64 architecture.
-; Copyright (C) 2009-2013 Free Software Foundation, Inc.
-; Contributed by ARM Ltd.
-;
-; This file is part of GCC.
-;
-; GCC is free software; you can redistribute it and/or modify it
-; under the terms of the GNU General Public License as published by
-; the Free Software Foundation; either version 3, or (at your option)
-; any later version.
-;
-; GCC is distributed in the hope that it will be useful, but
-; WITHOUT ANY WARRANTY; without even the implied warranty of
-; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-; General Public License for more details.
-;
-; You should have received a copy of the GNU General Public License
-; along with GCC; see the file COPYING3. If not see
-; <http://www.gnu.org/licenses/>.
-
-HeaderInclude
-config/aarch64/aarch64-opts.h
-
-; The TLS dialect names to use with -mtls-dialect.
-
-Enum
-Name(tls_type) Type(enum aarch64_tls_type)
-The possible TLS dialects:
-
-EnumValue
-Enum(tls_type) String(trad) Value(TLS_TRADITIONAL)
-
-EnumValue
-Enum(tls_type) String(desc) Value(TLS_DESCRIPTORS)
-
-; The code model option names for -mcmodel.
-
-Enum
-Name(cmodel) Type(enum aarch64_code_model)
-The code model option names for -mcmodel:
-
-EnumValue
-Enum(cmodel) String(tiny) Value(AARCH64_CMODEL_TINY)
-
-EnumValue
-Enum(cmodel) String(small) Value(AARCH64_CMODEL_SMALL)
-
-EnumValue
-Enum(cmodel) String(large) Value(AARCH64_CMODEL_LARGE)
-
-; The cpu/arch option names to use in cpu/arch selection.
-
-Variable
-const char *aarch64_arch_string
-
-Variable
-const char *aarch64_cpu_string
-
-Variable
-const char *aarch64_tune_string
-
-mbig-endian
-Target Report RejectNegative Mask(BIG_END)
-Assume target CPU is configured as big endian
-
-mgeneral-regs-only
-Target Report RejectNegative Mask(GENERAL_REGS_ONLY)
-Generate code which uses only the general registers
-
-mlittle-endian
-Target Report RejectNegative InverseMask(BIG_END)
-Assume target CPU is configured as little endian
-
-mcmodel=
-Target RejectNegative Joined Enum(cmodel) Var(aarch64_cmodel_var) Init(AARCH64_CMODEL_SMALL)
-Specify the code model
-
-mstrict-align
-Target Report RejectNegative Mask(STRICT_ALIGN)
-Don't assume that unaligned accesses are handled by the system
-
-momit-leaf-frame-pointer
-Target Report Save Var(flag_omit_leaf_frame_pointer) Init(1)
-Omit the frame pointer in leaf functions
-
-mtls-dialect=
-Target RejectNegative Joined Enum(tls_type) Var(aarch64_tls_dialect) Init(TLS_DESCRIPTORS)
-Specify TLS dialect
-
-march=
-Target RejectNegative Joined Var(aarch64_arch_string)
--march=ARCH Use features of architecture ARCH
-
-mcpu=
-Target RejectNegative Joined Var(aarch64_cpu_string)
--mcpu=CPU Use features of and optimize for CPU
-
-mtune=
-Target RejectNegative Joined Var(aarch64_tune_string)
--mtune=CPU Optimize for CPU
diff --git a/gcc-4.8.1/gcc/config/aarch64/arm_neon.h b/gcc-4.8.1/gcc/config/aarch64/arm_neon.h
deleted file mode 100644
index 669217e27..000000000
--- a/gcc-4.8.1/gcc/config/aarch64/arm_neon.h
+++ /dev/null
@@ -1,25535 +0,0 @@
-/* ARM NEON intrinsics include file.
-
- Copyright (C) 2011-2013 Free Software Foundation, Inc.
- Contributed by ARM Ltd.
-
- This file is part of GCC.
-
- GCC is free software; you can redistribute it and/or modify it
- under the terms of the GNU General Public License as published
- by the Free Software Foundation; either version 3, or (at your
- option) any later version.
-
- GCC is distributed in the hope that it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
- License for more details.
-
- Under Section 7 of GPL version 3, you are granted additional
- permissions described in the GCC Runtime Library Exception, version
- 3.1, as published by the Free Software Foundation.
-
- You should have received a copy of the GNU General Public License and
- a copy of the GCC Runtime Library Exception along with this program;
- see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
- <http://www.gnu.org/licenses/>. */
-
-#ifndef _AARCH64_NEON_H_
-#define _AARCH64_NEON_H_
-
-#include <stdint.h>
-
-typedef __builtin_aarch64_simd_qi int8x8_t
- __attribute__ ((__vector_size__ (8)));
-typedef __builtin_aarch64_simd_hi int16x4_t
- __attribute__ ((__vector_size__ (8)));
-typedef __builtin_aarch64_simd_si int32x2_t
- __attribute__ ((__vector_size__ (8)));
-typedef int64_t int64x1_t;
-typedef int32_t int32x1_t;
-typedef int16_t int16x1_t;
-typedef int8_t int8x1_t;
-typedef double float64x1_t;
-typedef __builtin_aarch64_simd_sf float32x2_t
- __attribute__ ((__vector_size__ (8)));
-typedef __builtin_aarch64_simd_poly8 poly8x8_t
- __attribute__ ((__vector_size__ (8)));
-typedef __builtin_aarch64_simd_poly16 poly16x4_t
- __attribute__ ((__vector_size__ (8)));
-typedef __builtin_aarch64_simd_uqi uint8x8_t
- __attribute__ ((__vector_size__ (8)));
-typedef __builtin_aarch64_simd_uhi uint16x4_t
- __attribute__ ((__vector_size__ (8)));
-typedef __builtin_aarch64_simd_usi uint32x2_t
- __attribute__ ((__vector_size__ (8)));
-typedef uint64_t uint64x1_t;
-typedef uint32_t uint32x1_t;
-typedef uint16_t uint16x1_t;
-typedef uint8_t uint8x1_t;
-typedef __builtin_aarch64_simd_qi int8x16_t
- __attribute__ ((__vector_size__ (16)));
-typedef __builtin_aarch64_simd_hi int16x8_t
- __attribute__ ((__vector_size__ (16)));
-typedef __builtin_aarch64_simd_si int32x4_t
- __attribute__ ((__vector_size__ (16)));
-typedef __builtin_aarch64_simd_di int64x2_t
- __attribute__ ((__vector_size__ (16)));
-typedef __builtin_aarch64_simd_sf float32x4_t
- __attribute__ ((__vector_size__ (16)));
-typedef __builtin_aarch64_simd_df float64x2_t
- __attribute__ ((__vector_size__ (16)));
-typedef __builtin_aarch64_simd_poly8 poly8x16_t
- __attribute__ ((__vector_size__ (16)));
-typedef __builtin_aarch64_simd_poly16 poly16x8_t
- __attribute__ ((__vector_size__ (16)));
-typedef __builtin_aarch64_simd_uqi uint8x16_t
- __attribute__ ((__vector_size__ (16)));
-typedef __builtin_aarch64_simd_uhi uint16x8_t
- __attribute__ ((__vector_size__ (16)));
-typedef __builtin_aarch64_simd_usi uint32x4_t
- __attribute__ ((__vector_size__ (16)));
-typedef __builtin_aarch64_simd_udi uint64x2_t
- __attribute__ ((__vector_size__ (16)));
-
-typedef float float32_t;
-typedef double float64_t;
-typedef __builtin_aarch64_simd_poly8 poly8_t;
-typedef __builtin_aarch64_simd_poly16 poly16_t;
-
-typedef struct int8x8x2_t
-{
- int8x8_t val[2];
-} int8x8x2_t;
-
-typedef struct int8x16x2_t
-{
- int8x16_t val[2];
-} int8x16x2_t;
-
-typedef struct int16x4x2_t
-{
- int16x4_t val[2];
-} int16x4x2_t;
-
-typedef struct int16x8x2_t
-{
- int16x8_t val[2];
-} int16x8x2_t;
-
-typedef struct int32x2x2_t
-{
- int32x2_t val[2];
-} int32x2x2_t;
-
-typedef struct int32x4x2_t
-{
- int32x4_t val[2];
-} int32x4x2_t;
-
-typedef struct int64x1x2_t
-{
- int64x1_t val[2];
-} int64x1x2_t;
-
-typedef struct int64x2x2_t
-{
- int64x2_t val[2];
-} int64x2x2_t;
-
-typedef struct uint8x8x2_t
-{
- uint8x8_t val[2];
-} uint8x8x2_t;
-
-typedef struct uint8x16x2_t
-{
- uint8x16_t val[2];
-} uint8x16x2_t;
-
-typedef struct uint16x4x2_t
-{
- uint16x4_t val[2];
-} uint16x4x2_t;
-
-typedef struct uint16x8x2_t
-{
- uint16x8_t val[2];
-} uint16x8x2_t;
-
-typedef struct uint32x2x2_t
-{
- uint32x2_t val[2];
-} uint32x2x2_t;
-
-typedef struct uint32x4x2_t
-{
- uint32x4_t val[2];
-} uint32x4x2_t;
-
-typedef struct uint64x1x2_t
-{
- uint64x1_t val[2];
-} uint64x1x2_t;
-
-typedef struct uint64x2x2_t
-{
- uint64x2_t val[2];
-} uint64x2x2_t;
-
-typedef struct float32x2x2_t
-{
- float32x2_t val[2];
-} float32x2x2_t;
-
-typedef struct float32x4x2_t
-{
- float32x4_t val[2];
-} float32x4x2_t;
-
-typedef struct float64x2x2_t
-{
- float64x2_t val[2];
-} float64x2x2_t;
-
-typedef struct float64x1x2_t
-{
- float64x1_t val[2];
-} float64x1x2_t;
-
-typedef struct poly8x8x2_t
-{
- poly8x8_t val[2];
-} poly8x8x2_t;
-
-typedef struct poly8x16x2_t
-{
- poly8x16_t val[2];
-} poly8x16x2_t;
-
-typedef struct poly16x4x2_t
-{
- poly16x4_t val[2];
-} poly16x4x2_t;
-
-typedef struct poly16x8x2_t
-{
- poly16x8_t val[2];
-} poly16x8x2_t;
-
-typedef struct int8x8x3_t
-{
- int8x8_t val[3];
-} int8x8x3_t;
-
-typedef struct int8x16x3_t
-{
- int8x16_t val[3];
-} int8x16x3_t;
-
-typedef struct int16x4x3_t
-{
- int16x4_t val[3];
-} int16x4x3_t;
-
-typedef struct int16x8x3_t
-{
- int16x8_t val[3];
-} int16x8x3_t;
-
-typedef struct int32x2x3_t
-{
- int32x2_t val[3];
-} int32x2x3_t;
-
-typedef struct int32x4x3_t
-{
- int32x4_t val[3];
-} int32x4x3_t;
-
-typedef struct int64x1x3_t
-{
- int64x1_t val[3];
-} int64x1x3_t;
-
-typedef struct int64x2x3_t
-{
- int64x2_t val[3];
-} int64x2x3_t;
-
-typedef struct uint8x8x3_t
-{
- uint8x8_t val[3];
-} uint8x8x3_t;
-
-typedef struct uint8x16x3_t
-{
- uint8x16_t val[3];
-} uint8x16x3_t;
-
-typedef struct uint16x4x3_t
-{
- uint16x4_t val[3];
-} uint16x4x3_t;
-
-typedef struct uint16x8x3_t
-{
- uint16x8_t val[3];
-} uint16x8x3_t;
-
-typedef struct uint32x2x3_t
-{
- uint32x2_t val[3];
-} uint32x2x3_t;
-
-typedef struct uint32x4x3_t
-{
- uint32x4_t val[3];
-} uint32x4x3_t;
-
-typedef struct uint64x1x3_t
-{
- uint64x1_t val[3];
-} uint64x1x3_t;
-
-typedef struct uint64x2x3_t
-{
- uint64x2_t val[3];
-} uint64x2x3_t;
-
-typedef struct float32x2x3_t
-{
- float32x2_t val[3];
-} float32x2x3_t;
-
-typedef struct float32x4x3_t
-{
- float32x4_t val[3];
-} float32x4x3_t;
-
-typedef struct float64x2x3_t
-{
- float64x2_t val[3];
-} float64x2x3_t;
-
-typedef struct float64x1x3_t
-{
- float64x1_t val[3];
-} float64x1x3_t;
-
-typedef struct poly8x8x3_t
-{
- poly8x8_t val[3];
-} poly8x8x3_t;
-
-typedef struct poly8x16x3_t
-{
- poly8x16_t val[3];
-} poly8x16x3_t;
-
-typedef struct poly16x4x3_t
-{
- poly16x4_t val[3];
-} poly16x4x3_t;
-
-typedef struct poly16x8x3_t
-{
- poly16x8_t val[3];
-} poly16x8x3_t;
-
-typedef struct int8x8x4_t
-{
- int8x8_t val[4];
-} int8x8x4_t;
-
-typedef struct int8x16x4_t
-{
- int8x16_t val[4];
-} int8x16x4_t;
-
-typedef struct int16x4x4_t
-{
- int16x4_t val[4];
-} int16x4x4_t;
-
-typedef struct int16x8x4_t
-{
- int16x8_t val[4];
-} int16x8x4_t;
-
-typedef struct int32x2x4_t
-{
- int32x2_t val[4];
-} int32x2x4_t;
-
-typedef struct int32x4x4_t
-{
- int32x4_t val[4];
-} int32x4x4_t;
-
-typedef struct int64x1x4_t
-{
- int64x1_t val[4];
-} int64x1x4_t;
-
-typedef struct int64x2x4_t
-{
- int64x2_t val[4];
-} int64x2x4_t;
-
-typedef struct uint8x8x4_t
-{
- uint8x8_t val[4];
-} uint8x8x4_t;
-
-typedef struct uint8x16x4_t
-{
- uint8x16_t val[4];
-} uint8x16x4_t;
-
-typedef struct uint16x4x4_t
-{
- uint16x4_t val[4];
-} uint16x4x4_t;
-
-typedef struct uint16x8x4_t
-{
- uint16x8_t val[4];
-} uint16x8x4_t;
-
-typedef struct uint32x2x4_t
-{
- uint32x2_t val[4];
-} uint32x2x4_t;
-
-typedef struct uint32x4x4_t
-{
- uint32x4_t val[4];
-} uint32x4x4_t;
-
-typedef struct uint64x1x4_t
-{
- uint64x1_t val[4];
-} uint64x1x4_t;
-
-typedef struct uint64x2x4_t
-{
- uint64x2_t val[4];
-} uint64x2x4_t;
-
-typedef struct float32x2x4_t
-{
- float32x2_t val[4];
-} float32x2x4_t;
-
-typedef struct float32x4x4_t
-{
- float32x4_t val[4];
-} float32x4x4_t;
-
-typedef struct float64x2x4_t
-{
- float64x2_t val[4];
-} float64x2x4_t;
-
-typedef struct float64x1x4_t
-{
- float64x1_t val[4];
-} float64x1x4_t;
-
-typedef struct poly8x8x4_t
-{
- poly8x8_t val[4];
-} poly8x8x4_t;
-
-typedef struct poly8x16x4_t
-{
- poly8x16_t val[4];
-} poly8x16x4_t;
-
-typedef struct poly16x4x4_t
-{
- poly16x4_t val[4];
-} poly16x4x4_t;
-
-typedef struct poly16x8x4_t
-{
- poly16x8_t val[4];
-} poly16x8x4_t;
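
The xN struct types above are simply arrays of the basic vector types, accessed through the val[] member. A minimal, hypothetical usage sketch (not part of arm_neon.h; the function name pair_s8 is an assumption for illustration):

    /* Illustrative only: building a two-vector result from two D-register
       vectors using the plain vector operators this header relies on.  */
    static inline int8x8x2_t pair_s8 (int8x8_t a, int8x8_t b)
    {
      int8x8x2_t r;
      r.val[0] = a + b;
      r.val[1] = a - b;
      return r;
    }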
-
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vadd_s8 (int8x8_t __a, int8x8_t __b)
-{
- return __a + __b;
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vadd_s16 (int16x4_t __a, int16x4_t __b)
-{
- return __a + __b;
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vadd_s32 (int32x2_t __a, int32x2_t __b)
-{
- return __a + __b;
-}
-
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vadd_f32 (float32x2_t __a, float32x2_t __b)
-{
- return __a + __b;
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vadd_u8 (uint8x8_t __a, uint8x8_t __b)
-{
- return __a + __b;
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vadd_u16 (uint16x4_t __a, uint16x4_t __b)
-{
- return __a + __b;
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vadd_u32 (uint32x2_t __a, uint32x2_t __b)
-{
- return __a + __b;
-}
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vadd_s64 (int64x1_t __a, int64x1_t __b)
-{
- return __a + __b;
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vadd_u64 (uint64x1_t __a, uint64x1_t __b)
-{
- return __a + __b;
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vaddq_s8 (int8x16_t __a, int8x16_t __b)
-{
- return __a + __b;
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vaddq_s16 (int16x8_t __a, int16x8_t __b)
-{
- return __a + __b;
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vaddq_s32 (int32x4_t __a, int32x4_t __b)
-{
- return __a + __b;
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vaddq_s64 (int64x2_t __a, int64x2_t __b)
-{
- return __a + __b;
-}
-
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vaddq_f32 (float32x4_t __a, float32x4_t __b)
-{
- return __a + __b;
-}
-
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
-vaddq_f64 (float64x2_t __a, float64x2_t __b)
-{
- return __a + __b;
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vaddq_u8 (uint8x16_t __a, uint8x16_t __b)
-{
- return __a + __b;
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vaddq_u16 (uint16x8_t __a, uint16x8_t __b)
-{
- return __a + __b;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vaddq_u32 (uint32x4_t __a, uint32x4_t __b)
-{
- return __a + __b;
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vaddq_u64 (uint64x2_t __a, uint64x2_t __b)
-{
- return __a + __b;
-}
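
A short, hypothetical usage example for the plain addition intrinsics defined above (the function name add4_f32 is an assumption for illustration, not part of this header):

    /* Illustrative only: element-wise addition of two quadword float vectors;
       vaddq_f32 typically lowers to a single FADD Vd.4S, Vn.4S, Vm.4S.  */
    static inline float32x4_t add4_f32 (float32x4_t a, float32x4_t b)
    {
      return vaddq_f32 (a, b);
    }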
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vaddl_s8 (int8x8_t __a, int8x8_t __b)
-{
- return (int16x8_t) __builtin_aarch64_saddlv8qi (__a, __b);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vaddl_s16 (int16x4_t __a, int16x4_t __b)
-{
- return (int32x4_t) __builtin_aarch64_saddlv4hi (__a, __b);
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vaddl_s32 (int32x2_t __a, int32x2_t __b)
-{
- return (int64x2_t) __builtin_aarch64_saddlv2si (__a, __b);
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vaddl_u8 (uint8x8_t __a, uint8x8_t __b)
-{
- return (uint16x8_t) __builtin_aarch64_uaddlv8qi ((int8x8_t) __a,
- (int8x8_t) __b);
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vaddl_u16 (uint16x4_t __a, uint16x4_t __b)
-{
- return (uint32x4_t) __builtin_aarch64_uaddlv4hi ((int16x4_t) __a,
- (int16x4_t) __b);
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vaddl_u32 (uint32x2_t __a, uint32x2_t __b)
-{
- return (uint64x2_t) __builtin_aarch64_uaddlv2si ((int32x2_t) __a,
- (int32x2_t) __b);
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vaddl_high_s8 (int8x16_t __a, int8x16_t __b)
-{
- return (int16x8_t) __builtin_aarch64_saddl2v16qi (__a, __b);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vaddl_high_s16 (int16x8_t __a, int16x8_t __b)
-{
- return (int32x4_t) __builtin_aarch64_saddl2v8hi (__a, __b);
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vaddl_high_s32 (int32x4_t __a, int32x4_t __b)
-{
- return (int64x2_t) __builtin_aarch64_saddl2v4si (__a, __b);
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vaddl_high_u8 (uint8x16_t __a, uint8x16_t __b)
-{
- return (uint16x8_t) __builtin_aarch64_uaddl2v16qi ((int8x16_t) __a,
- (int8x16_t) __b);
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vaddl_high_u16 (uint16x8_t __a, uint16x8_t __b)
-{
- return (uint32x4_t) __builtin_aarch64_uaddl2v8hi ((int16x8_t) __a,
- (int16x8_t) __b);
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vaddl_high_u32 (uint32x4_t __a, uint32x4_t __b)
-{
- return (uint64x2_t) __builtin_aarch64_uaddl2v4si ((int32x4_t) __a,
- (int32x4_t) __b);
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vaddw_s8 (int16x8_t __a, int8x8_t __b)
-{
- return (int16x8_t) __builtin_aarch64_saddwv8qi (__a, __b);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vaddw_s16 (int32x4_t __a, int16x4_t __b)
-{
- return (int32x4_t) __builtin_aarch64_saddwv4hi (__a, __b);
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vaddw_s32 (int64x2_t __a, int32x2_t __b)
-{
- return (int64x2_t) __builtin_aarch64_saddwv2si (__a, __b);
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vaddw_u8 (uint16x8_t __a, uint8x8_t __b)
-{
- return (uint16x8_t) __builtin_aarch64_uaddwv8qi ((int16x8_t) __a,
- (int8x8_t) __b);
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vaddw_u16 (uint32x4_t __a, uint16x4_t __b)
-{
- return (uint32x4_t) __builtin_aarch64_uaddwv4hi ((int32x4_t) __a,
- (int16x4_t) __b);
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vaddw_u32 (uint64x2_t __a, uint32x2_t __b)
-{
- return (uint64x2_t) __builtin_aarch64_uaddwv2si ((int64x2_t) __a,
- (int32x2_t) __b);
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vaddw_high_s8 (int16x8_t __a, int8x16_t __b)
-{
- return (int16x8_t) __builtin_aarch64_saddw2v16qi (__a, __b);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vaddw_high_s16 (int32x4_t __a, int16x8_t __b)
-{
- return (int32x4_t) __builtin_aarch64_saddw2v8hi (__a, __b);
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vaddw_high_s32 (int64x2_t __a, int32x4_t __b)
-{
- return (int64x2_t) __builtin_aarch64_saddw2v4si (__a, __b);
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vaddw_high_u8 (uint16x8_t __a, uint8x16_t __b)
-{
- return (uint16x8_t) __builtin_aarch64_uaddw2v16qi ((int16x8_t) __a,
- (int8x16_t) __b);
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vaddw_high_u16 (uint32x4_t __a, uint16x8_t __b)
-{
- return (uint32x4_t) __builtin_aarch64_uaddw2v8hi ((int32x4_t) __a,
- (int16x8_t) __b);
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vaddw_high_u32 (uint64x2_t __a, uint32x4_t __b)
-{
- return (uint64x2_t) __builtin_aarch64_uaddw2v4si ((int64x2_t) __a,
- (int32x4_t) __b);
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vhadd_s8 (int8x8_t __a, int8x8_t __b)
-{
- return (int8x8_t) __builtin_aarch64_shaddv8qi (__a, __b);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vhadd_s16 (int16x4_t __a, int16x4_t __b)
-{
- return (int16x4_t) __builtin_aarch64_shaddv4hi (__a, __b);
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vhadd_s32 (int32x2_t __a, int32x2_t __b)
-{
- return (int32x2_t) __builtin_aarch64_shaddv2si (__a, __b);
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vhadd_u8 (uint8x8_t __a, uint8x8_t __b)
-{
- return (uint8x8_t) __builtin_aarch64_uhaddv8qi ((int8x8_t) __a,
- (int8x8_t) __b);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vhadd_u16 (uint16x4_t __a, uint16x4_t __b)
-{
- return (uint16x4_t) __builtin_aarch64_uhaddv4hi ((int16x4_t) __a,
- (int16x4_t) __b);
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vhadd_u32 (uint32x2_t __a, uint32x2_t __b)
-{
- return (uint32x2_t) __builtin_aarch64_uhaddv2si ((int32x2_t) __a,
- (int32x2_t) __b);
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vhaddq_s8 (int8x16_t __a, int8x16_t __b)
-{
- return (int8x16_t) __builtin_aarch64_shaddv16qi (__a, __b);
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vhaddq_s16 (int16x8_t __a, int16x8_t __b)
-{
- return (int16x8_t) __builtin_aarch64_shaddv8hi (__a, __b);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vhaddq_s32 (int32x4_t __a, int32x4_t __b)
-{
- return (int32x4_t) __builtin_aarch64_shaddv4si (__a, __b);
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vhaddq_u8 (uint8x16_t __a, uint8x16_t __b)
-{
- return (uint8x16_t) __builtin_aarch64_uhaddv16qi ((int8x16_t) __a,
- (int8x16_t) __b);
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vhaddq_u16 (uint16x8_t __a, uint16x8_t __b)
-{
- return (uint16x8_t) __builtin_aarch64_uhaddv8hi ((int16x8_t) __a,
- (int16x8_t) __b);
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vhaddq_u32 (uint32x4_t __a, uint32x4_t __b)
-{
- return (uint32x4_t) __builtin_aarch64_uhaddv4si ((int32x4_t) __a,
- (int32x4_t) __b);
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vrhadd_s8 (int8x8_t __a, int8x8_t __b)
-{
- return (int8x8_t) __builtin_aarch64_srhaddv8qi (__a, __b);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vrhadd_s16 (int16x4_t __a, int16x4_t __b)
-{
- return (int16x4_t) __builtin_aarch64_srhaddv4hi (__a, __b);
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vrhadd_s32 (int32x2_t __a, int32x2_t __b)
-{
- return (int32x2_t) __builtin_aarch64_srhaddv2si (__a, __b);
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vrhadd_u8 (uint8x8_t __a, uint8x8_t __b)
-{
- return (uint8x8_t) __builtin_aarch64_urhaddv8qi ((int8x8_t) __a,
- (int8x8_t) __b);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vrhadd_u16 (uint16x4_t __a, uint16x4_t __b)
-{
- return (uint16x4_t) __builtin_aarch64_urhaddv4hi ((int16x4_t) __a,
- (int16x4_t) __b);
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vrhadd_u32 (uint32x2_t __a, uint32x2_t __b)
-{
- return (uint32x2_t) __builtin_aarch64_urhaddv2si ((int32x2_t) __a,
- (int32x2_t) __b);
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vrhaddq_s8 (int8x16_t __a, int8x16_t __b)
-{
- return (int8x16_t) __builtin_aarch64_srhaddv16qi (__a, __b);
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vrhaddq_s16 (int16x8_t __a, int16x8_t __b)
-{
- return (int16x8_t) __builtin_aarch64_srhaddv8hi (__a, __b);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vrhaddq_s32 (int32x4_t __a, int32x4_t __b)
-{
- return (int32x4_t) __builtin_aarch64_srhaddv4si (__a, __b);
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vrhaddq_u8 (uint8x16_t __a, uint8x16_t __b)
-{
- return (uint8x16_t) __builtin_aarch64_urhaddv16qi ((int8x16_t) __a,
- (int8x16_t) __b);
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vrhaddq_u16 (uint16x8_t __a, uint16x8_t __b)
-{
- return (uint16x8_t) __builtin_aarch64_urhaddv8hi ((int16x8_t) __a,
- (int16x8_t) __b);
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vrhaddq_u32 (uint32x4_t __a, uint32x4_t __b)
-{
- return (uint32x4_t) __builtin_aarch64_urhaddv4si ((int32x4_t) __a,
- (int32x4_t) __b);
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vaddhn_s16 (int16x8_t __a, int16x8_t __b)
-{
- return (int8x8_t) __builtin_aarch64_addhnv8hi (__a, __b);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vaddhn_s32 (int32x4_t __a, int32x4_t __b)
-{
- return (int16x4_t) __builtin_aarch64_addhnv4si (__a, __b);
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vaddhn_s64 (int64x2_t __a, int64x2_t __b)
-{
- return (int32x2_t) __builtin_aarch64_addhnv2di (__a, __b);
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vaddhn_u16 (uint16x8_t __a, uint16x8_t __b)
-{
- return (uint8x8_t) __builtin_aarch64_addhnv8hi ((int16x8_t) __a,
- (int16x8_t) __b);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vaddhn_u32 (uint32x4_t __a, uint32x4_t __b)
-{
- return (uint16x4_t) __builtin_aarch64_addhnv4si ((int32x4_t) __a,
- (int32x4_t) __b);
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vaddhn_u64 (uint64x2_t __a, uint64x2_t __b)
-{
- return (uint32x2_t) __builtin_aarch64_addhnv2di ((int64x2_t) __a,
- (int64x2_t) __b);
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vraddhn_s16 (int16x8_t __a, int16x8_t __b)
-{
- return (int8x8_t) __builtin_aarch64_raddhnv8hi (__a, __b);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vraddhn_s32 (int32x4_t __a, int32x4_t __b)
-{
- return (int16x4_t) __builtin_aarch64_raddhnv4si (__a, __b);
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vraddhn_s64 (int64x2_t __a, int64x2_t __b)
-{
- return (int32x2_t) __builtin_aarch64_raddhnv2di (__a, __b);
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vraddhn_u16 (uint16x8_t __a, uint16x8_t __b)
-{
- return (uint8x8_t) __builtin_aarch64_raddhnv8hi ((int16x8_t) __a,
- (int16x8_t) __b);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vraddhn_u32 (uint32x4_t __a, uint32x4_t __b)
-{
- return (uint16x4_t) __builtin_aarch64_raddhnv4si ((int32x4_t) __a,
- (int32x4_t) __b);
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vraddhn_u64 (uint64x2_t __a, uint64x2_t __b)
-{
- return (uint32x2_t) __builtin_aarch64_raddhnv2di ((int64x2_t) __a,
- (int64x2_t) __b);
-}
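
Per lane, the "add and take the high half" intrinsics above behave like the scalar model below; the rounding (vraddhn) variants add a rounding constant of half the discarded range before shifting (illustrative only, not part of this header):

    /* Scalar per-lane model of vaddhn_s16 and vraddhn_s16.  */
    static inline int8_t addhn_lane_s16 (int16_t a, int16_t b)
    {
      return (int8_t) (((int32_t) a + b) >> 8);             /* truncating */
    }
    static inline int8_t raddhn_lane_s16 (int16_t a, int16_t b)
    {
      return (int8_t) (((int32_t) a + b + (1 << 7)) >> 8);  /* rounding   */
    }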
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vaddhn_high_s16 (int8x8_t __a, int16x8_t __b, int16x8_t __c)
-{
- return (int8x16_t) __builtin_aarch64_addhn2v8hi (__a, __b, __c);
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vaddhn_high_s32 (int16x4_t __a, int32x4_t __b, int32x4_t __c)
-{
- return (int16x8_t) __builtin_aarch64_addhn2v4si (__a, __b, __c);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vaddhn_high_s64 (int32x2_t __a, int64x2_t __b, int64x2_t __c)
-{
- return (int32x4_t) __builtin_aarch64_addhn2v2di (__a, __b, __c);
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vaddhn_high_u16 (uint8x8_t __a, uint16x8_t __b, uint16x8_t __c)
-{
- return (uint8x16_t) __builtin_aarch64_addhn2v8hi ((int8x8_t) __a,
- (int16x8_t) __b,
- (int16x8_t) __c);
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vaddhn_high_u32 (uint16x4_t __a, uint32x4_t __b, uint32x4_t __c)
-{
- return (uint16x8_t) __builtin_aarch64_addhn2v4si ((int16x4_t) __a,
- (int32x4_t) __b,
- (int32x4_t) __c);
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vaddhn_high_u64 (uint32x2_t __a, uint64x2_t __b, uint64x2_t __c)
-{
- return (uint32x4_t) __builtin_aarch64_addhn2v2di ((int32x2_t) __a,
- (int64x2_t) __b,
- (int64x2_t) __c);
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vraddhn_high_s16 (int8x8_t __a, int16x8_t __b, int16x8_t __c)
-{
- return (int8x16_t) __builtin_aarch64_raddhn2v8hi (__a, __b, __c);
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vraddhn_high_s32 (int16x4_t __a, int32x4_t __b, int32x4_t __c)
-{
- return (int16x8_t) __builtin_aarch64_raddhn2v4si (__a, __b, __c);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vraddhn_high_s64 (int32x2_t __a, int64x2_t __b, int64x2_t __c)
-{
- return (int32x4_t) __builtin_aarch64_raddhn2v2di (__a, __b, __c);
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vraddhn_high_u16 (uint8x8_t __a, uint16x8_t __b, uint16x8_t __c)
-{
- return (uint8x16_t) __builtin_aarch64_raddhn2v8hi ((int8x8_t) __a,
- (int16x8_t) __b,
- (int16x8_t) __c);
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vraddhn_high_u32 (uint16x4_t __a, uint32x4_t __b, uint32x4_t __c)
-{
- return (uint16x8_t) __builtin_aarch64_raddhn2v4si ((int16x4_t) __a,
- (int32x4_t) __b,
- (int32x4_t) __c);
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vraddhn_high_u64 (uint32x2_t __a, uint64x2_t __b, uint64x2_t __c)
-{
- return (uint32x4_t) __builtin_aarch64_raddhn2v2di ((int32x2_t) __a,
- (int64x2_t) __b,
- (int64x2_t) __c);
-}
-
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vdiv_f32 (float32x2_t __a, float32x2_t __b)
-{
- return __a / __b;
-}
-
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vdivq_f32 (float32x4_t __a, float32x4_t __b)
-{
- return __a / __b;
-}
-
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
-vdivq_f64 (float64x2_t __a, float64x2_t __b)
-{
- return __a / __b;
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vmul_s8 (int8x8_t __a, int8x8_t __b)
-{
- return __a * __b;
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vmul_s16 (int16x4_t __a, int16x4_t __b)
-{
- return __a * __b;
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vmul_s32 (int32x2_t __a, int32x2_t __b)
-{
- return __a * __b;
-}
-
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vmul_f32 (float32x2_t __a, float32x2_t __b)
-{
- return __a * __b;
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vmul_u8 (uint8x8_t __a, uint8x8_t __b)
-{
- return __a * __b;
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vmul_u16 (uint16x4_t __a, uint16x4_t __b)
-{
- return __a * __b;
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vmul_u32 (uint32x2_t __a, uint32x2_t __b)
-{
- return __a * __b;
-}
-
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vmul_p8 (poly8x8_t __a, poly8x8_t __b)
-{
- return (poly8x8_t) __builtin_aarch64_pmulv8qi ((int8x8_t) __a,
- (int8x8_t) __b);
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vmulq_s8 (int8x16_t __a, int8x16_t __b)
-{
- return __a * __b;
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vmulq_s16 (int16x8_t __a, int16x8_t __b)
-{
- return __a * __b;
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vmulq_s32 (int32x4_t __a, int32x4_t __b)
-{
- return __a * __b;
-}
-
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vmulq_f32 (float32x4_t __a, float32x4_t __b)
-{
- return __a * __b;
-}
-
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
-vmulq_f64 (float64x2_t __a, float64x2_t __b)
-{
- return __a * __b;
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vmulq_u8 (uint8x16_t __a, uint8x16_t __b)
-{
- return __a * __b;
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vmulq_u16 (uint16x8_t __a, uint16x8_t __b)
-{
- return __a * __b;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vmulq_u32 (uint32x4_t __a, uint32x4_t __b)
-{
- return __a * __b;
-}
-
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
-vmulq_p8 (poly8x16_t __a, poly8x16_t __b)
-{
- return (poly8x16_t) __builtin_aarch64_pmulv16qi ((int8x16_t) __a,
- (int8x16_t) __b);
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vand_s8 (int8x8_t __a, int8x8_t __b)
-{
- return __a & __b;
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vand_s16 (int16x4_t __a, int16x4_t __b)
-{
- return __a & __b;
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vand_s32 (int32x2_t __a, int32x2_t __b)
-{
- return __a & __b;
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vand_u8 (uint8x8_t __a, uint8x8_t __b)
-{
- return __a & __b;
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vand_u16 (uint16x4_t __a, uint16x4_t __b)
-{
- return __a & __b;
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vand_u32 (uint32x2_t __a, uint32x2_t __b)
-{
- return __a & __b;
-}
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vand_s64 (int64x1_t __a, int64x1_t __b)
-{
- return __a & __b;
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vand_u64 (uint64x1_t __a, uint64x1_t __b)
-{
- return __a & __b;
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vandq_s8 (int8x16_t __a, int8x16_t __b)
-{
- return __a & __b;
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vandq_s16 (int16x8_t __a, int16x8_t __b)
-{
- return __a & __b;
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vandq_s32 (int32x4_t __a, int32x4_t __b)
-{
- return __a & __b;
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vandq_s64 (int64x2_t __a, int64x2_t __b)
-{
- return __a & __b;
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vandq_u8 (uint8x16_t __a, uint8x16_t __b)
-{
- return __a & __b;
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vandq_u16 (uint16x8_t __a, uint16x8_t __b)
-{
- return __a & __b;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vandq_u32 (uint32x4_t __a, uint32x4_t __b)
-{
- return __a & __b;
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vandq_u64 (uint64x2_t __a, uint64x2_t __b)
-{
- return __a & __b;
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vorr_s8 (int8x8_t __a, int8x8_t __b)
-{
- return __a | __b;
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vorr_s16 (int16x4_t __a, int16x4_t __b)
-{
- return __a | __b;
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vorr_s32 (int32x2_t __a, int32x2_t __b)
-{
- return __a | __b;
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vorr_u8 (uint8x8_t __a, uint8x8_t __b)
-{
- return __a | __b;
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vorr_u16 (uint16x4_t __a, uint16x4_t __b)
-{
- return __a | __b;
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vorr_u32 (uint32x2_t __a, uint32x2_t __b)
-{
- return __a | __b;
-}
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vorr_s64 (int64x1_t __a, int64x1_t __b)
-{
- return __a | __b;
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vorr_u64 (uint64x1_t __a, uint64x1_t __b)
-{
- return __a | __b;
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vorrq_s8 (int8x16_t __a, int8x16_t __b)
-{
- return __a | __b;
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vorrq_s16 (int16x8_t __a, int16x8_t __b)
-{
- return __a | __b;
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vorrq_s32 (int32x4_t __a, int32x4_t __b)
-{
- return __a | __b;
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vorrq_s64 (int64x2_t __a, int64x2_t __b)
-{
- return __a | __b;
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vorrq_u8 (uint8x16_t __a, uint8x16_t __b)
-{
- return __a | __b;
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vorrq_u16 (uint16x8_t __a, uint16x8_t __b)
-{
- return __a | __b;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vorrq_u32 (uint32x4_t __a, uint32x4_t __b)
-{
- return __a | __b;
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vorrq_u64 (uint64x2_t __a, uint64x2_t __b)
-{
- return __a | __b;
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-veor_s8 (int8x8_t __a, int8x8_t __b)
-{
- return __a ^ __b;
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-veor_s16 (int16x4_t __a, int16x4_t __b)
-{
- return __a ^ __b;
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-veor_s32 (int32x2_t __a, int32x2_t __b)
-{
- return __a ^ __b;
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-veor_u8 (uint8x8_t __a, uint8x8_t __b)
-{
- return __a ^ __b;
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-veor_u16 (uint16x4_t __a, uint16x4_t __b)
-{
- return __a ^ __b;
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-veor_u32 (uint32x2_t __a, uint32x2_t __b)
-{
- return __a ^ __b;
-}
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-veor_s64 (int64x1_t __a, int64x1_t __b)
-{
- return __a ^ __b;
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-veor_u64 (uint64x1_t __a, uint64x1_t __b)
-{
- return __a ^ __b;
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-veorq_s8 (int8x16_t __a, int8x16_t __b)
-{
- return __a ^ __b;
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-veorq_s16 (int16x8_t __a, int16x8_t __b)
-{
- return __a ^ __b;
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-veorq_s32 (int32x4_t __a, int32x4_t __b)
-{
- return __a ^ __b;
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-veorq_s64 (int64x2_t __a, int64x2_t __b)
-{
- return __a ^ __b;
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-veorq_u8 (uint8x16_t __a, uint8x16_t __b)
-{
- return __a ^ __b;
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-veorq_u16 (uint16x8_t __a, uint16x8_t __b)
-{
- return __a ^ __b;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-veorq_u32 (uint32x4_t __a, uint32x4_t __b)
-{
- return __a ^ __b;
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-veorq_u64 (uint64x2_t __a, uint64x2_t __b)
-{
- return __a ^ __b;
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vbic_s8 (int8x8_t __a, int8x8_t __b)
-{
- return __a & ~__b;
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vbic_s16 (int16x4_t __a, int16x4_t __b)
-{
- return __a & ~__b;
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vbic_s32 (int32x2_t __a, int32x2_t __b)
-{
- return __a & ~__b;
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vbic_u8 (uint8x8_t __a, uint8x8_t __b)
-{
- return __a & ~__b;
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vbic_u16 (uint16x4_t __a, uint16x4_t __b)
-{
- return __a & ~__b;
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vbic_u32 (uint32x2_t __a, uint32x2_t __b)
-{
- return __a & ~__b;
-}
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vbic_s64 (int64x1_t __a, int64x1_t __b)
-{
- return __a & ~__b;
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vbic_u64 (uint64x1_t __a, uint64x1_t __b)
-{
- return __a & ~__b;
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vbicq_s8 (int8x16_t __a, int8x16_t __b)
-{
- return __a & ~__b;
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vbicq_s16 (int16x8_t __a, int16x8_t __b)
-{
- return __a & ~__b;
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vbicq_s32 (int32x4_t __a, int32x4_t __b)
-{
- return __a & ~__b;
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vbicq_s64 (int64x2_t __a, int64x2_t __b)
-{
- return __a & ~__b;
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vbicq_u8 (uint8x16_t __a, uint8x16_t __b)
-{
- return __a & ~__b;
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vbicq_u16 (uint16x8_t __a, uint16x8_t __b)
-{
- return __a & ~__b;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vbicq_u32 (uint32x4_t __a, uint32x4_t __b)
-{
- return __a & ~__b;
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vbicq_u64 (uint64x2_t __a, uint64x2_t __b)
-{
- return __a & ~__b;
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vorn_s8 (int8x8_t __a, int8x8_t __b)
-{
- return __a | ~__b;
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vorn_s16 (int16x4_t __a, int16x4_t __b)
-{
- return __a | ~__b;
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vorn_s32 (int32x2_t __a, int32x2_t __b)
-{
- return __a | ~__b;
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vorn_u8 (uint8x8_t __a, uint8x8_t __b)
-{
- return __a | ~__b;
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vorn_u16 (uint16x4_t __a, uint16x4_t __b)
-{
- return __a | ~__b;
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vorn_u32 (uint32x2_t __a, uint32x2_t __b)
-{
- return __a | ~__b;
-}
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vorn_s64 (int64x1_t __a, int64x1_t __b)
-{
- return __a | ~__b;
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vorn_u64 (uint64x1_t __a, uint64x1_t __b)
-{
- return __a | ~__b;
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vornq_s8 (int8x16_t __a, int8x16_t __b)
-{
- return __a | ~__b;
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vornq_s16 (int16x8_t __a, int16x8_t __b)
-{
- return __a | ~__b;
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vornq_s32 (int32x4_t __a, int32x4_t __b)
-{
- return __a | ~__b;
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vornq_s64 (int64x2_t __a, int64x2_t __b)
-{
- return __a | ~__b;
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vornq_u8 (uint8x16_t __a, uint8x16_t __b)
-{
- return __a | ~__b;
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vornq_u16 (uint16x8_t __a, uint16x8_t __b)
-{
- return __a | ~__b;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vornq_u32 (uint32x4_t __a, uint32x4_t __b)
-{
- return __a | ~__b;
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vornq_u64 (uint64x2_t __a, uint64x2_t __b)
-{
- return __a | ~__b;
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vsub_s8 (int8x8_t __a, int8x8_t __b)
-{
- return __a - __b;
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vsub_s16 (int16x4_t __a, int16x4_t __b)
-{
- return __a - __b;
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vsub_s32 (int32x2_t __a, int32x2_t __b)
-{
- return __a - __b;
-}
-
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vsub_f32 (float32x2_t __a, float32x2_t __b)
-{
- return __a - __b;
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vsub_u8 (uint8x8_t __a, uint8x8_t __b)
-{
- return __a - __b;
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vsub_u16 (uint16x4_t __a, uint16x4_t __b)
-{
- return __a - __b;
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vsub_u32 (uint32x2_t __a, uint32x2_t __b)
-{
- return __a - __b;
-}
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vsub_s64 (int64x1_t __a, int64x1_t __b)
-{
- return __a - __b;
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vsub_u64 (uint64x1_t __a, uint64x1_t __b)
-{
- return __a - __b;
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vsubq_s8 (int8x16_t __a, int8x16_t __b)
-{
- return __a - __b;
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vsubq_s16 (int16x8_t __a, int16x8_t __b)
-{
- return __a - __b;
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vsubq_s32 (int32x4_t __a, int32x4_t __b)
-{
- return __a - __b;
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vsubq_s64 (int64x2_t __a, int64x2_t __b)
-{
- return __a - __b;
-}
-
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vsubq_f32 (float32x4_t __a, float32x4_t __b)
-{
- return __a - __b;
-}
-
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
-vsubq_f64 (float64x2_t __a, float64x2_t __b)
-{
- return __a - __b;
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vsubq_u8 (uint8x16_t __a, uint8x16_t __b)
-{
- return __a - __b;
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vsubq_u16 (uint16x8_t __a, uint16x8_t __b)
-{
- return __a - __b;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vsubq_u32 (uint32x4_t __a, uint32x4_t __b)
-{
- return __a - __b;
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vsubq_u64 (uint64x2_t __a, uint64x2_t __b)
-{
- return __a - __b;
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vsubl_s8 (int8x8_t __a, int8x8_t __b)
-{
- return (int16x8_t) __builtin_aarch64_ssublv8qi (__a, __b);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vsubl_s16 (int16x4_t __a, int16x4_t __b)
-{
- return (int32x4_t) __builtin_aarch64_ssublv4hi (__a, __b);
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vsubl_s32 (int32x2_t __a, int32x2_t __b)
-{
- return (int64x2_t) __builtin_aarch64_ssublv2si (__a, __b);
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vsubl_u8 (uint8x8_t __a, uint8x8_t __b)
-{
- return (uint16x8_t) __builtin_aarch64_usublv8qi ((int8x8_t) __a,
- (int8x8_t) __b);
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vsubl_u16 (uint16x4_t __a, uint16x4_t __b)
-{
- return (uint32x4_t) __builtin_aarch64_usublv4hi ((int16x4_t) __a,
- (int16x4_t) __b);
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vsubl_u32 (uint32x2_t __a, uint32x2_t __b)
-{
- return (uint64x2_t) __builtin_aarch64_usublv2si ((int32x2_t) __a,
- (int32x2_t) __b);
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vsubl_high_s8 (int8x16_t __a, int8x16_t __b)
-{
- return (int16x8_t) __builtin_aarch64_ssubl2v16qi (__a, __b);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vsubl_high_s16 (int16x8_t __a, int16x8_t __b)
-{
- return (int32x4_t) __builtin_aarch64_ssubl2v8hi (__a, __b);
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vsubl_high_s32 (int32x4_t __a, int32x4_t __b)
-{
- return (int64x2_t) __builtin_aarch64_ssubl2v4si (__a, __b);
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vsubl_high_u8 (uint8x16_t __a, uint8x16_t __b)
-{
- return (uint16x8_t) __builtin_aarch64_usubl2v16qi ((int8x16_t) __a,
- (int8x16_t) __b);
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vsubl_high_u16 (uint16x8_t __a, uint16x8_t __b)
-{
- return (uint32x4_t) __builtin_aarch64_usubl2v8hi ((int16x8_t) __a,
- (int16x8_t) __b);
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vsubl_high_u32 (uint32x4_t __a, uint32x4_t __b)
-{
- return (uint64x2_t) __builtin_aarch64_usubl2v4si ((int32x4_t) __a,
- (int32x4_t) __b);
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vsubw_s8 (int16x8_t __a, int8x8_t __b)
-{
- return (int16x8_t) __builtin_aarch64_ssubwv8qi (__a, __b);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vsubw_s16 (int32x4_t __a, int16x4_t __b)
-{
- return (int32x4_t) __builtin_aarch64_ssubwv4hi (__a, __b);
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vsubw_s32 (int64x2_t __a, int32x2_t __b)
-{
- return (int64x2_t) __builtin_aarch64_ssubwv2si (__a, __b);
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vsubw_u8 (uint16x8_t __a, uint8x8_t __b)
-{
- return (uint16x8_t) __builtin_aarch64_usubwv8qi ((int16x8_t) __a,
- (int8x8_t) __b);
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vsubw_u16 (uint32x4_t __a, uint16x4_t __b)
-{
- return (uint32x4_t) __builtin_aarch64_usubwv4hi ((int32x4_t) __a,
- (int16x4_t) __b);
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vsubw_u32 (uint64x2_t __a, uint32x2_t __b)
-{
- return (uint64x2_t) __builtin_aarch64_usubwv2si ((int64x2_t) __a,
- (int32x2_t) __b);
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vsubw_high_s8 (int16x8_t __a, int8x16_t __b)
-{
- return (int16x8_t) __builtin_aarch64_ssubw2v16qi (__a, __b);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vsubw_high_s16 (int32x4_t __a, int16x8_t __b)
-{
- return (int32x4_t) __builtin_aarch64_ssubw2v8hi (__a, __b);
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vsubw_high_s32 (int64x2_t __a, int32x4_t __b)
-{
- return (int64x2_t) __builtin_aarch64_ssubw2v4si (__a, __b);
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vsubw_high_u8 (uint16x8_t __a, uint8x16_t __b)
-{
- return (uint16x8_t) __builtin_aarch64_usubw2v16qi ((int16x8_t) __a,
- (int8x16_t) __b);
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vsubw_high_u16 (uint32x4_t __a, uint16x8_t __b)
-{
- return (uint32x4_t) __builtin_aarch64_usubw2v8hi ((int32x4_t) __a,
- (int16x8_t) __b);
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vsubw_high_u32 (uint64x2_t __a, uint32x4_t __b)
-{
- return (uint64x2_t) __builtin_aarch64_usubw2v4si ((int64x2_t) __a,
- (int32x4_t) __b);
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vqadd_s8 (int8x8_t __a, int8x8_t __b)
-{
- return (int8x8_t) __builtin_aarch64_sqaddv8qi (__a, __b);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vqadd_s16 (int16x4_t __a, int16x4_t __b)
-{
- return (int16x4_t) __builtin_aarch64_sqaddv4hi (__a, __b);
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vqadd_s32 (int32x2_t __a, int32x2_t __b)
-{
- return (int32x2_t) __builtin_aarch64_sqaddv2si (__a, __b);
-}
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vqadd_s64 (int64x1_t __a, int64x1_t __b)
-{
- return (int64x1_t) __builtin_aarch64_sqadddi (__a, __b);
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vqadd_u8 (uint8x8_t __a, uint8x8_t __b)
-{
- return (uint8x8_t) __builtin_aarch64_uqaddv8qi ((int8x8_t) __a,
- (int8x8_t) __b);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vqadd_u16 (uint16x4_t __a, uint16x4_t __b)
-{
- return (uint16x4_t) __builtin_aarch64_uqaddv4hi ((int16x4_t) __a,
- (int16x4_t) __b);
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vqadd_u32 (uint32x2_t __a, uint32x2_t __b)
-{
- return (uint32x2_t) __builtin_aarch64_uqaddv2si ((int32x2_t) __a,
- (int32x2_t) __b);
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vqadd_u64 (uint64x1_t __a, uint64x1_t __b)
-{
- return (uint64x1_t) __builtin_aarch64_uqadddi ((int64x1_t) __a,
- (int64x1_t) __b);
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vqaddq_s8 (int8x16_t __a, int8x16_t __b)
-{
- return (int8x16_t) __builtin_aarch64_sqaddv16qi (__a, __b);
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vqaddq_s16 (int16x8_t __a, int16x8_t __b)
-{
- return (int16x8_t) __builtin_aarch64_sqaddv8hi (__a, __b);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vqaddq_s32 (int32x4_t __a, int32x4_t __b)
-{
- return (int32x4_t) __builtin_aarch64_sqaddv4si (__a, __b);
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vqaddq_s64 (int64x2_t __a, int64x2_t __b)
-{
- return (int64x2_t) __builtin_aarch64_sqaddv2di (__a, __b);
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vqaddq_u8 (uint8x16_t __a, uint8x16_t __b)
-{
- return (uint8x16_t) __builtin_aarch64_uqaddv16qi ((int8x16_t) __a,
- (int8x16_t) __b);
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vqaddq_u16 (uint16x8_t __a, uint16x8_t __b)
-{
- return (uint16x8_t) __builtin_aarch64_uqaddv8hi ((int16x8_t) __a,
- (int16x8_t) __b);
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vqaddq_u32 (uint32x4_t __a, uint32x4_t __b)
-{
- return (uint32x4_t) __builtin_aarch64_uqaddv4si ((int32x4_t) __a,
- (int32x4_t) __b);
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vqaddq_u64 (uint64x2_t __a, uint64x2_t __b)
-{
- return (uint64x2_t) __builtin_aarch64_uqaddv2di ((int64x2_t) __a,
- (int64x2_t) __b);
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vqsub_s8 (int8x8_t __a, int8x8_t __b)
-{
- return (int8x8_t) __builtin_aarch64_sqsubv8qi (__a, __b);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vqsub_s16 (int16x4_t __a, int16x4_t __b)
-{
- return (int16x4_t) __builtin_aarch64_sqsubv4hi (__a, __b);
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vqsub_s32 (int32x2_t __a, int32x2_t __b)
-{
- return (int32x2_t) __builtin_aarch64_sqsubv2si (__a, __b);
-}
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vqsub_s64 (int64x1_t __a, int64x1_t __b)
-{
- return (int64x1_t) __builtin_aarch64_sqsubdi (__a, __b);
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vqsub_u8 (uint8x8_t __a, uint8x8_t __b)
-{
- return (uint8x8_t) __builtin_aarch64_uqsubv8qi ((int8x8_t) __a,
- (int8x8_t) __b);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vqsub_u16 (uint16x4_t __a, uint16x4_t __b)
-{
- return (uint16x4_t) __builtin_aarch64_uqsubv4hi ((int16x4_t) __a,
- (int16x4_t) __b);
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vqsub_u32 (uint32x2_t __a, uint32x2_t __b)
-{
- return (uint32x2_t) __builtin_aarch64_uqsubv2si ((int32x2_t) __a,
- (int32x2_t) __b);
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vqsub_u64 (uint64x1_t __a, uint64x1_t __b)
-{
- return (uint64x1_t) __builtin_aarch64_uqsubdi ((int64x1_t) __a,
- (int64x1_t) __b);
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vqsubq_s8 (int8x16_t __a, int8x16_t __b)
-{
- return (int8x16_t) __builtin_aarch64_sqsubv16qi (__a, __b);
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vqsubq_s16 (int16x8_t __a, int16x8_t __b)
-{
- return (int16x8_t) __builtin_aarch64_sqsubv8hi (__a, __b);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vqsubq_s32 (int32x4_t __a, int32x4_t __b)
-{
- return (int32x4_t) __builtin_aarch64_sqsubv4si (__a, __b);
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vqsubq_s64 (int64x2_t __a, int64x2_t __b)
-{
- return (int64x2_t) __builtin_aarch64_sqsubv2di (__a, __b);
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vqsubq_u8 (uint8x16_t __a, uint8x16_t __b)
-{
- return (uint8x16_t) __builtin_aarch64_uqsubv16qi ((int8x16_t) __a,
- (int8x16_t) __b);
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vqsubq_u16 (uint16x8_t __a, uint16x8_t __b)
-{
- return (uint16x8_t) __builtin_aarch64_uqsubv8hi ((int16x8_t) __a,
- (int16x8_t) __b);
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vqsubq_u32 (uint32x4_t __a, uint32x4_t __b)
-{
- return (uint32x4_t) __builtin_aarch64_uqsubv4si ((int32x4_t) __a,
- (int32x4_t) __b);
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vqsubq_u64 (uint64x2_t __a, uint64x2_t __b)
-{
- return (uint64x2_t) __builtin_aarch64_uqsubv2di ((int64x2_t) __a,
- (int64x2_t) __b);
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vqneg_s8 (int8x8_t __a)
-{
- return (int8x8_t) __builtin_aarch64_sqnegv8qi (__a);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vqneg_s16 (int16x4_t __a)
-{
- return (int16x4_t) __builtin_aarch64_sqnegv4hi (__a);
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vqneg_s32 (int32x2_t __a)
-{
- return (int32x2_t) __builtin_aarch64_sqnegv2si (__a);
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vqnegq_s8 (int8x16_t __a)
-{
- return (int8x16_t) __builtin_aarch64_sqnegv16qi (__a);
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vqnegq_s16 (int16x8_t __a)
-{
- return (int16x8_t) __builtin_aarch64_sqnegv8hi (__a);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vqnegq_s32 (int32x4_t __a)
-{
- return (int32x4_t) __builtin_aarch64_sqnegv4si (__a);
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vqabs_s8 (int8x8_t __a)
-{
- return (int8x8_t) __builtin_aarch64_sqabsv8qi (__a);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vqabs_s16 (int16x4_t __a)
-{
- return (int16x4_t) __builtin_aarch64_sqabsv4hi (__a);
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vqabs_s32 (int32x2_t __a)
-{
- return (int32x2_t) __builtin_aarch64_sqabsv2si (__a);
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vqabsq_s8 (int8x16_t __a)
-{
- return (int8x16_t) __builtin_aarch64_sqabsv16qi (__a);
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vqabsq_s16 (int16x8_t __a)
-{
- return (int16x8_t) __builtin_aarch64_sqabsv8hi (__a);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vqabsq_s32 (int32x4_t __a)
-{
- return (int32x4_t) __builtin_aarch64_sqabsv4si (__a);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vqdmulh_s16 (int16x4_t __a, int16x4_t __b)
-{
- return (int16x4_t) __builtin_aarch64_sqdmulhv4hi (__a, __b);
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vqdmulh_s32 (int32x2_t __a, int32x2_t __b)
-{
- return (int32x2_t) __builtin_aarch64_sqdmulhv2si (__a, __b);
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vqdmulhq_s16 (int16x8_t __a, int16x8_t __b)
-{
- return (int16x8_t) __builtin_aarch64_sqdmulhv8hi (__a, __b);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vqdmulhq_s32 (int32x4_t __a, int32x4_t __b)
-{
- return (int32x4_t) __builtin_aarch64_sqdmulhv4si (__a, __b);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vqrdmulh_s16 (int16x4_t __a, int16x4_t __b)
-{
- return (int16x4_t) __builtin_aarch64_sqrdmulhv4hi (__a, __b);
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vqrdmulh_s32 (int32x2_t __a, int32x2_t __b)
-{
- return (int32x2_t) __builtin_aarch64_sqrdmulhv2si (__a, __b);
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vqrdmulhq_s16 (int16x8_t __a, int16x8_t __b)
-{
- return (int16x8_t) __builtin_aarch64_sqrdmulhv8hi (__a, __b);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vqrdmulhq_s32 (int32x4_t __a, int32x4_t __b)
-{
- return (int32x4_t) __builtin_aarch64_sqrdmulhv4si (__a, __b);
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vcreate_s8 (uint64_t __a)
-{
- return (int8x8_t) __a;
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vcreate_s16 (uint64_t __a)
-{
- return (int16x4_t) __a;
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vcreate_s32 (uint64_t __a)
-{
- return (int32x2_t) __a;
-}
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vcreate_s64 (uint64_t __a)
-{
- return (int64x1_t) __a;
-}
-
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vcreate_f32 (uint64_t __a)
-{
- return (float32x2_t) __a;
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vcreate_u8 (uint64_t __a)
-{
- return (uint8x8_t) __a;
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vcreate_u16 (uint64_t __a)
-{
- return (uint16x4_t) __a;
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vcreate_u32 (uint64_t __a)
-{
- return (uint32x2_t) __a;
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vcreate_u64 (uint64_t __a)
-{
- return (uint64x1_t) __a;
-}
-
-__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
-vcreate_f64 (uint64_t __a)
-{
- return (float64x1_t) __builtin_aarch64_createdf (__a);
-}
-
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vcreate_p8 (uint64_t __a)
-{
- return (poly8x8_t) __a;
-}
-
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
-vcreate_p16 (uint64_t __a)
-{
- return (poly16x4_t) __a;
-}
-
-__extension__ static __inline int8_t __attribute__ ((__always_inline__))
-vget_lane_s8 (int8x8_t __a, const int __b)
-{
- return (int8_t) __builtin_aarch64_get_lane_signedv8qi (__a, __b);
-}
-
-__extension__ static __inline int16_t __attribute__ ((__always_inline__))
-vget_lane_s16 (int16x4_t __a, const int __b)
-{
- return (int16_t) __builtin_aarch64_get_lane_signedv4hi (__a, __b);
-}
-
-__extension__ static __inline int32_t __attribute__ ((__always_inline__))
-vget_lane_s32 (int32x2_t __a, const int __b)
-{
- return (int32_t) __builtin_aarch64_get_lane_signedv2si (__a, __b);
-}
-
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
-vget_lane_f32 (float32x2_t __a, const int __b)
-{
- return (float32_t) __builtin_aarch64_get_lanev2sf (__a, __b);
-}
-
-__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
-vget_lane_u8 (uint8x8_t __a, const int __b)
-{
- return (uint8_t) __builtin_aarch64_get_lane_unsignedv8qi ((int8x8_t) __a,
- __b);
-}
-
-__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
-vget_lane_u16 (uint16x4_t __a, const int __b)
-{
- return (uint16_t) __builtin_aarch64_get_lane_unsignedv4hi ((int16x4_t) __a,
- __b);
-}
-
-__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
-vget_lane_u32 (uint32x2_t __a, const int __b)
-{
- return (uint32_t) __builtin_aarch64_get_lane_unsignedv2si ((int32x2_t) __a,
- __b);
-}
-
-__extension__ static __inline poly8_t __attribute__ ((__always_inline__))
-vget_lane_p8 (poly8x8_t __a, const int __b)
-{
- return (poly8_t) __builtin_aarch64_get_lane_unsignedv8qi ((int8x8_t) __a,
- __b);
-}
-
-__extension__ static __inline poly16_t __attribute__ ((__always_inline__))
-vget_lane_p16 (poly16x4_t __a, const int __b)
-{
- return (poly16_t) __builtin_aarch64_get_lane_unsignedv4hi ((int16x4_t) __a,
- __b);
-}
-
-__extension__ static __inline int64_t __attribute__ ((__always_inline__))
-vget_lane_s64 (int64x1_t __a, const int __b)
-{
- return (int64_t) __builtin_aarch64_get_lanedi (__a, __b);
-}
-
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
-vget_lane_u64 (uint64x1_t __a, const int __b)
-{
- return (uint64_t) __builtin_aarch64_get_lanedi ((int64x1_t) __a, __b);
-}
-
-__extension__ static __inline int8_t __attribute__ ((__always_inline__))
-vgetq_lane_s8 (int8x16_t __a, const int __b)
-{
- return (int8_t) __builtin_aarch64_get_lane_signedv16qi (__a, __b);
-}
-
-__extension__ static __inline int16_t __attribute__ ((__always_inline__))
-vgetq_lane_s16 (int16x8_t __a, const int __b)
-{
- return (int16_t) __builtin_aarch64_get_lane_signedv8hi (__a, __b);
-}
-
-__extension__ static __inline int32_t __attribute__ ((__always_inline__))
-vgetq_lane_s32 (int32x4_t __a, const int __b)
-{
- return (int32_t) __builtin_aarch64_get_lane_signedv4si (__a, __b);
-}
-
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
-vgetq_lane_f32 (float32x4_t __a, const int __b)
-{
- return (float32_t) __builtin_aarch64_get_lanev4sf (__a, __b);
-}
-
-__extension__ static __inline float64_t __attribute__ ((__always_inline__))
-vgetq_lane_f64 (float64x2_t __a, const int __b)
-{
- return (float64_t) __builtin_aarch64_get_lanev2df (__a, __b);
-}
-
-__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
-vgetq_lane_u8 (uint8x16_t __a, const int __b)
-{
- return (uint8_t) __builtin_aarch64_get_lane_unsignedv16qi ((int8x16_t) __a,
- __b);
-}
-
-__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
-vgetq_lane_u16 (uint16x8_t __a, const int __b)
-{
- return (uint16_t) __builtin_aarch64_get_lane_unsignedv8hi ((int16x8_t) __a,
- __b);
-}
-
-__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
-vgetq_lane_u32 (uint32x4_t __a, const int __b)
-{
- return (uint32_t) __builtin_aarch64_get_lane_unsignedv4si ((int32x4_t) __a,
- __b);
-}
-
-__extension__ static __inline poly8_t __attribute__ ((__always_inline__))
-vgetq_lane_p8 (poly8x16_t __a, const int __b)
-{
- return (poly8_t) __builtin_aarch64_get_lane_unsignedv16qi ((int8x16_t) __a,
- __b);
-}
-
-__extension__ static __inline poly16_t __attribute__ ((__always_inline__))
-vgetq_lane_p16 (poly16x8_t __a, const int __b)
-{
- return (poly16_t) __builtin_aarch64_get_lane_unsignedv8hi ((int16x8_t) __a,
- __b);
-}
-
-__extension__ static __inline int64_t __attribute__ ((__always_inline__))
-vgetq_lane_s64 (int64x2_t __a, const int __b)
-{
- return __builtin_aarch64_get_lane_unsignedv2di (__a, __b);
-}
-
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
-vgetq_lane_u64 (uint64x2_t __a, const int __b)
-{
- return (uint64_t) __builtin_aarch64_get_lane_unsignedv2di ((int64x2_t) __a,
- __b);
-}
-
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vreinterpret_p8_s8 (int8x8_t __a)
-{
- return (poly8x8_t) __builtin_aarch64_reinterpretv8qiv8qi (__a);
-}
-
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vreinterpret_p8_s16 (int16x4_t __a)
-{
- return (poly8x8_t) __builtin_aarch64_reinterpretv8qiv4hi (__a);
-}
-
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vreinterpret_p8_s32 (int32x2_t __a)
-{
- return (poly8x8_t) __builtin_aarch64_reinterpretv8qiv2si (__a);
-}
-
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vreinterpret_p8_s64 (int64x1_t __a)
-{
- return (poly8x8_t) __builtin_aarch64_reinterpretv8qidi (__a);
-}
-
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vreinterpret_p8_f32 (float32x2_t __a)
-{
- return (poly8x8_t) __builtin_aarch64_reinterpretv8qiv2sf (__a);
-}
-
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vreinterpret_p8_u8 (uint8x8_t __a)
-{
- return (poly8x8_t) __builtin_aarch64_reinterpretv8qiv8qi ((int8x8_t) __a);
-}
-
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vreinterpret_p8_u16 (uint16x4_t __a)
-{
- return (poly8x8_t) __builtin_aarch64_reinterpretv8qiv4hi ((int16x4_t) __a);
-}
-
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vreinterpret_p8_u32 (uint32x2_t __a)
-{
- return (poly8x8_t) __builtin_aarch64_reinterpretv8qiv2si ((int32x2_t) __a);
-}
-
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vreinterpret_p8_u64 (uint64x1_t __a)
-{
- return (poly8x8_t) __builtin_aarch64_reinterpretv8qidi ((int64x1_t) __a);
-}
-
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vreinterpret_p8_p16 (poly16x4_t __a)
-{
- return (poly8x8_t) __builtin_aarch64_reinterpretv8qiv4hi ((int16x4_t) __a);
-}
-
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_p8_s8 (int8x16_t __a)
-{
- return (poly8x16_t) __builtin_aarch64_reinterpretv16qiv16qi (__a);
-}
-
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_p8_s16 (int16x8_t __a)
-{
- return (poly8x16_t) __builtin_aarch64_reinterpretv16qiv8hi (__a);
-}
-
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_p8_s32 (int32x4_t __a)
-{
- return (poly8x16_t) __builtin_aarch64_reinterpretv16qiv4si (__a);
-}
-
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_p8_s64 (int64x2_t __a)
-{
- return (poly8x16_t) __builtin_aarch64_reinterpretv16qiv2di (__a);
-}
-
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_p8_f32 (float32x4_t __a)
-{
- return (poly8x16_t) __builtin_aarch64_reinterpretv16qiv4sf (__a);
-}
-
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_p8_u8 (uint8x16_t __a)
-{
- return (poly8x16_t) __builtin_aarch64_reinterpretv16qiv16qi ((int8x16_t)
- __a);
-}
-
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_p8_u16 (uint16x8_t __a)
-{
- return (poly8x16_t) __builtin_aarch64_reinterpretv16qiv8hi ((int16x8_t)
- __a);
-}
-
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_p8_u32 (uint32x4_t __a)
-{
- return (poly8x16_t) __builtin_aarch64_reinterpretv16qiv4si ((int32x4_t)
- __a);
-}
-
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_p8_u64 (uint64x2_t __a)
-{
- return (poly8x16_t) __builtin_aarch64_reinterpretv16qiv2di ((int64x2_t)
- __a);
-}
-
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_p8_p16 (poly16x8_t __a)
-{
- return (poly8x16_t) __builtin_aarch64_reinterpretv16qiv8hi ((int16x8_t)
- __a);
-}
-
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
-vreinterpret_p16_s8 (int8x8_t __a)
-{
- return (poly16x4_t) __builtin_aarch64_reinterpretv4hiv8qi (__a);
-}
-
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
-vreinterpret_p16_s16 (int16x4_t __a)
-{
- return (poly16x4_t) __builtin_aarch64_reinterpretv4hiv4hi (__a);
-}
-
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
-vreinterpret_p16_s32 (int32x2_t __a)
-{
- return (poly16x4_t) __builtin_aarch64_reinterpretv4hiv2si (__a);
-}
-
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
-vreinterpret_p16_s64 (int64x1_t __a)
-{
- return (poly16x4_t) __builtin_aarch64_reinterpretv4hidi (__a);
-}
-
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
-vreinterpret_p16_f32 (float32x2_t __a)
-{
- return (poly16x4_t) __builtin_aarch64_reinterpretv4hiv2sf (__a);
-}
-
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
-vreinterpret_p16_u8 (uint8x8_t __a)
-{
- return (poly16x4_t) __builtin_aarch64_reinterpretv4hiv8qi ((int8x8_t) __a);
-}
-
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
-vreinterpret_p16_u16 (uint16x4_t __a)
-{
- return (poly16x4_t) __builtin_aarch64_reinterpretv4hiv4hi ((int16x4_t) __a);
-}
-
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
-vreinterpret_p16_u32 (uint32x2_t __a)
-{
- return (poly16x4_t) __builtin_aarch64_reinterpretv4hiv2si ((int32x2_t) __a);
-}
-
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
-vreinterpret_p16_u64 (uint64x1_t __a)
-{
- return (poly16x4_t) __builtin_aarch64_reinterpretv4hidi ((int64x1_t) __a);
-}
-
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
-vreinterpret_p16_p8 (poly8x8_t __a)
-{
- return (poly16x4_t) __builtin_aarch64_reinterpretv4hiv8qi ((int8x8_t) __a);
-}
-
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_p16_s8 (int8x16_t __a)
-{
- return (poly16x8_t) __builtin_aarch64_reinterpretv8hiv16qi (__a);
-}
-
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_p16_s16 (int16x8_t __a)
-{
- return (poly16x8_t) __builtin_aarch64_reinterpretv8hiv8hi (__a);
-}
-
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_p16_s32 (int32x4_t __a)
-{
- return (poly16x8_t) __builtin_aarch64_reinterpretv8hiv4si (__a);
-}
-
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_p16_s64 (int64x2_t __a)
-{
- return (poly16x8_t) __builtin_aarch64_reinterpretv8hiv2di (__a);
-}
-
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_p16_f32 (float32x4_t __a)
-{
- return (poly16x8_t) __builtin_aarch64_reinterpretv8hiv4sf (__a);
-}
-
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_p16_u8 (uint8x16_t __a)
-{
- return (poly16x8_t) __builtin_aarch64_reinterpretv8hiv16qi ((int8x16_t)
- __a);
-}
-
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_p16_u16 (uint16x8_t __a)
-{
- return (poly16x8_t) __builtin_aarch64_reinterpretv8hiv8hi ((int16x8_t) __a);
-}
-
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_p16_u32 (uint32x4_t __a)
-{
- return (poly16x8_t) __builtin_aarch64_reinterpretv8hiv4si ((int32x4_t) __a);
-}
-
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_p16_u64 (uint64x2_t __a)
-{
- return (poly16x8_t) __builtin_aarch64_reinterpretv8hiv2di ((int64x2_t) __a);
-}
-
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_p16_p8 (poly8x16_t __a)
-{
- return (poly16x8_t) __builtin_aarch64_reinterpretv8hiv16qi ((int8x16_t)
- __a);
-}
-
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vreinterpret_f32_s8 (int8x8_t __a)
-{
- return (float32x2_t) __builtin_aarch64_reinterpretv2sfv8qi (__a);
-}
-
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vreinterpret_f32_s16 (int16x4_t __a)
-{
- return (float32x2_t) __builtin_aarch64_reinterpretv2sfv4hi (__a);
-}
-
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vreinterpret_f32_s32 (int32x2_t __a)
-{
- return (float32x2_t) __builtin_aarch64_reinterpretv2sfv2si (__a);
-}
-
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vreinterpret_f32_s64 (int64x1_t __a)
-{
- return (float32x2_t) __builtin_aarch64_reinterpretv2sfdi (__a);
-}
-
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vreinterpret_f32_u8 (uint8x8_t __a)
-{
- return (float32x2_t) __builtin_aarch64_reinterpretv2sfv8qi ((int8x8_t) __a);
-}
-
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vreinterpret_f32_u16 (uint16x4_t __a)
-{
- return (float32x2_t) __builtin_aarch64_reinterpretv2sfv4hi ((int16x4_t)
- __a);
-}
-
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vreinterpret_f32_u32 (uint32x2_t __a)
-{
- return (float32x2_t) __builtin_aarch64_reinterpretv2sfv2si ((int32x2_t)
- __a);
-}
-
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vreinterpret_f32_u64 (uint64x1_t __a)
-{
- return (float32x2_t) __builtin_aarch64_reinterpretv2sfdi ((int64x1_t) __a);
-}
-
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vreinterpret_f32_p8 (poly8x8_t __a)
-{
- return (float32x2_t) __builtin_aarch64_reinterpretv2sfv8qi ((int8x8_t) __a);
-}
-
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vreinterpret_f32_p16 (poly16x4_t __a)
-{
- return (float32x2_t) __builtin_aarch64_reinterpretv2sfv4hi ((int16x4_t)
- __a);
-}
-
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_f32_s8 (int8x16_t __a)
-{
- return (float32x4_t) __builtin_aarch64_reinterpretv4sfv16qi (__a);
-}
-
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_f32_s16 (int16x8_t __a)
-{
- return (float32x4_t) __builtin_aarch64_reinterpretv4sfv8hi (__a);
-}
-
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_f32_s32 (int32x4_t __a)
-{
- return (float32x4_t) __builtin_aarch64_reinterpretv4sfv4si (__a);
-}
-
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_f32_s64 (int64x2_t __a)
-{
- return (float32x4_t) __builtin_aarch64_reinterpretv4sfv2di (__a);
-}
-
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_f32_u8 (uint8x16_t __a)
-{
- return (float32x4_t) __builtin_aarch64_reinterpretv4sfv16qi ((int8x16_t)
- __a);
-}
-
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_f32_u16 (uint16x8_t __a)
-{
- return (float32x4_t) __builtin_aarch64_reinterpretv4sfv8hi ((int16x8_t)
- __a);
-}
-
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_f32_u32 (uint32x4_t __a)
-{
- return (float32x4_t) __builtin_aarch64_reinterpretv4sfv4si ((int32x4_t)
- __a);
-}
-
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_f32_u64 (uint64x2_t __a)
-{
- return (float32x4_t) __builtin_aarch64_reinterpretv4sfv2di ((int64x2_t)
- __a);
-}
-
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_f32_p8 (poly8x16_t __a)
-{
- return (float32x4_t) __builtin_aarch64_reinterpretv4sfv16qi ((int8x16_t)
- __a);
-}
-
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_f32_p16 (poly16x8_t __a)
-{
- return (float32x4_t) __builtin_aarch64_reinterpretv4sfv8hi ((int16x8_t)
- __a);
-}
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vreinterpret_s64_s8 (int8x8_t __a)
-{
- return (int64x1_t) __builtin_aarch64_reinterpretdiv8qi (__a);
-}
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vreinterpret_s64_s16 (int16x4_t __a)
-{
- return (int64x1_t) __builtin_aarch64_reinterpretdiv4hi (__a);
-}
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vreinterpret_s64_s32 (int32x2_t __a)
-{
- return (int64x1_t) __builtin_aarch64_reinterpretdiv2si (__a);
-}
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vreinterpret_s64_f32 (float32x2_t __a)
-{
- return (int64x1_t) __builtin_aarch64_reinterpretdiv2sf (__a);
-}
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vreinterpret_s64_u8 (uint8x8_t __a)
-{
- return (int64x1_t) __builtin_aarch64_reinterpretdiv8qi ((int8x8_t) __a);
-}
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vreinterpret_s64_u16 (uint16x4_t __a)
-{
- return (int64x1_t) __builtin_aarch64_reinterpretdiv4hi ((int16x4_t) __a);
-}
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vreinterpret_s64_u32 (uint32x2_t __a)
-{
- return (int64x1_t) __builtin_aarch64_reinterpretdiv2si ((int32x2_t) __a);
-}
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vreinterpret_s64_u64 (uint64x1_t __a)
-{
- return (int64x1_t) __builtin_aarch64_reinterpretdidi ((int64x1_t) __a);
-}
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vreinterpret_s64_p8 (poly8x8_t __a)
-{
- return (int64x1_t) __builtin_aarch64_reinterpretdiv8qi ((int8x8_t) __a);
-}
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vreinterpret_s64_p16 (poly16x4_t __a)
-{
- return (int64x1_t) __builtin_aarch64_reinterpretdiv4hi ((int16x4_t) __a);
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vreinterpretq_s64_s8 (int8x16_t __a)
-{
- return (int64x2_t) __builtin_aarch64_reinterpretv2div16qi (__a);
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vreinterpretq_s64_s16 (int16x8_t __a)
-{
- return (int64x2_t) __builtin_aarch64_reinterpretv2div8hi (__a);
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vreinterpretq_s64_s32 (int32x4_t __a)
-{
- return (int64x2_t) __builtin_aarch64_reinterpretv2div4si (__a);
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vreinterpretq_s64_f32 (float32x4_t __a)
-{
- return (int64x2_t) __builtin_aarch64_reinterpretv2div4sf (__a);
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vreinterpretq_s64_u8 (uint8x16_t __a)
-{
- return (int64x2_t) __builtin_aarch64_reinterpretv2div16qi ((int8x16_t) __a);
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vreinterpretq_s64_u16 (uint16x8_t __a)
-{
- return (int64x2_t) __builtin_aarch64_reinterpretv2div8hi ((int16x8_t) __a);
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vreinterpretq_s64_u32 (uint32x4_t __a)
-{
- return (int64x2_t) __builtin_aarch64_reinterpretv2div4si ((int32x4_t) __a);
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vreinterpretq_s64_u64 (uint64x2_t __a)
-{
- return (int64x2_t) __builtin_aarch64_reinterpretv2div2di ((int64x2_t) __a);
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vreinterpretq_s64_p8 (poly8x16_t __a)
-{
- return (int64x2_t) __builtin_aarch64_reinterpretv2div16qi ((int8x16_t) __a);
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vreinterpretq_s64_p16 (poly16x8_t __a)
-{
- return (int64x2_t) __builtin_aarch64_reinterpretv2div8hi ((int16x8_t) __a);
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vreinterpret_u64_s8 (int8x8_t __a)
-{
- return (uint64x1_t) __builtin_aarch64_reinterpretdiv8qi (__a);
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vreinterpret_u64_s16 (int16x4_t __a)
-{
- return (uint64x1_t) __builtin_aarch64_reinterpretdiv4hi (__a);
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vreinterpret_u64_s32 (int32x2_t __a)
-{
- return (uint64x1_t) __builtin_aarch64_reinterpretdiv2si (__a);
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vreinterpret_u64_s64 (int64x1_t __a)
-{
- return (uint64x1_t) __builtin_aarch64_reinterpretdidi (__a);
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vreinterpret_u64_f32 (float32x2_t __a)
-{
- return (uint64x1_t) __builtin_aarch64_reinterpretdiv2sf (__a);
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vreinterpret_u64_u8 (uint8x8_t __a)
-{
- return (uint64x1_t) __builtin_aarch64_reinterpretdiv8qi ((int8x8_t) __a);
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vreinterpret_u64_u16 (uint16x4_t __a)
-{
- return (uint64x1_t) __builtin_aarch64_reinterpretdiv4hi ((int16x4_t) __a);
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vreinterpret_u64_u32 (uint32x2_t __a)
-{
- return (uint64x1_t) __builtin_aarch64_reinterpretdiv2si ((int32x2_t) __a);
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vreinterpret_u64_p8 (poly8x8_t __a)
-{
- return (uint64x1_t) __builtin_aarch64_reinterpretdiv8qi ((int8x8_t) __a);
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vreinterpret_u64_p16 (poly16x4_t __a)
-{
- return (uint64x1_t) __builtin_aarch64_reinterpretdiv4hi ((int16x4_t) __a);
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vreinterpretq_u64_s8 (int8x16_t __a)
-{
- return (uint64x2_t) __builtin_aarch64_reinterpretv2div16qi (__a);
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vreinterpretq_u64_s16 (int16x8_t __a)
-{
- return (uint64x2_t) __builtin_aarch64_reinterpretv2div8hi (__a);
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vreinterpretq_u64_s32 (int32x4_t __a)
-{
- return (uint64x2_t) __builtin_aarch64_reinterpretv2div4si (__a);
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vreinterpretq_u64_s64 (int64x2_t __a)
-{
- return (uint64x2_t) __builtin_aarch64_reinterpretv2div2di (__a);
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vreinterpretq_u64_f32 (float32x4_t __a)
-{
- return (uint64x2_t) __builtin_aarch64_reinterpretv2div4sf (__a);
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vreinterpretq_u64_u8 (uint8x16_t __a)
-{
- return (uint64x2_t) __builtin_aarch64_reinterpretv2div16qi ((int8x16_t)
- __a);
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vreinterpretq_u64_u16 (uint16x8_t __a)
-{
- return (uint64x2_t) __builtin_aarch64_reinterpretv2div8hi ((int16x8_t) __a);
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vreinterpretq_u64_u32 (uint32x4_t __a)
-{
- return (uint64x2_t) __builtin_aarch64_reinterpretv2div4si ((int32x4_t) __a);
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vreinterpretq_u64_p8 (poly8x16_t __a)
-{
- return (uint64x2_t) __builtin_aarch64_reinterpretv2div16qi ((int8x16_t)
- __a);
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vreinterpretq_u64_p16 (poly16x8_t __a)
-{
- return (uint64x2_t) __builtin_aarch64_reinterpretv2div8hi ((int16x8_t) __a);
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vreinterpret_s8_s16 (int16x4_t __a)
-{
- return (int8x8_t) __builtin_aarch64_reinterpretv8qiv4hi (__a);
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vreinterpret_s8_s32 (int32x2_t __a)
-{
- return (int8x8_t) __builtin_aarch64_reinterpretv8qiv2si (__a);
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vreinterpret_s8_s64 (int64x1_t __a)
-{
- return (int8x8_t) __builtin_aarch64_reinterpretv8qidi (__a);
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vreinterpret_s8_f32 (float32x2_t __a)
-{
- return (int8x8_t) __builtin_aarch64_reinterpretv8qiv2sf (__a);
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vreinterpret_s8_u8 (uint8x8_t __a)
-{
- return (int8x8_t) __builtin_aarch64_reinterpretv8qiv8qi ((int8x8_t) __a);
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vreinterpret_s8_u16 (uint16x4_t __a)
-{
- return (int8x8_t) __builtin_aarch64_reinterpretv8qiv4hi ((int16x4_t) __a);
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vreinterpret_s8_u32 (uint32x2_t __a)
-{
- return (int8x8_t) __builtin_aarch64_reinterpretv8qiv2si ((int32x2_t) __a);
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vreinterpret_s8_u64 (uint64x1_t __a)
-{
- return (int8x8_t) __builtin_aarch64_reinterpretv8qidi ((int64x1_t) __a);
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vreinterpret_s8_p8 (poly8x8_t __a)
-{
- return (int8x8_t) __builtin_aarch64_reinterpretv8qiv8qi ((int8x8_t) __a);
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vreinterpret_s8_p16 (poly16x4_t __a)
-{
- return (int8x8_t) __builtin_aarch64_reinterpretv8qiv4hi ((int16x4_t) __a);
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_s8_s16 (int16x8_t __a)
-{
- return (int8x16_t) __builtin_aarch64_reinterpretv16qiv8hi (__a);
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_s8_s32 (int32x4_t __a)
-{
- return (int8x16_t) __builtin_aarch64_reinterpretv16qiv4si (__a);
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_s8_s64 (int64x2_t __a)
-{
- return (int8x16_t) __builtin_aarch64_reinterpretv16qiv2di (__a);
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_s8_f32 (float32x4_t __a)
-{
- return (int8x16_t) __builtin_aarch64_reinterpretv16qiv4sf (__a);
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_s8_u8 (uint8x16_t __a)
-{
- return (int8x16_t) __builtin_aarch64_reinterpretv16qiv16qi ((int8x16_t)
- __a);
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_s8_u16 (uint16x8_t __a)
-{
- return (int8x16_t) __builtin_aarch64_reinterpretv16qiv8hi ((int16x8_t) __a);
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_s8_u32 (uint32x4_t __a)
-{
- return (int8x16_t) __builtin_aarch64_reinterpretv16qiv4si ((int32x4_t) __a);
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_s8_u64 (uint64x2_t __a)
-{
- return (int8x16_t) __builtin_aarch64_reinterpretv16qiv2di ((int64x2_t) __a);
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_s8_p8 (poly8x16_t __a)
-{
- return (int8x16_t) __builtin_aarch64_reinterpretv16qiv16qi ((int8x16_t)
- __a);
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_s8_p16 (poly16x8_t __a)
-{
- return (int8x16_t) __builtin_aarch64_reinterpretv16qiv8hi ((int16x8_t) __a);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vreinterpret_s16_s8 (int8x8_t __a)
-{
- return (int16x4_t) __builtin_aarch64_reinterpretv4hiv8qi (__a);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vreinterpret_s16_s32 (int32x2_t __a)
-{
- return (int16x4_t) __builtin_aarch64_reinterpretv4hiv2si (__a);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vreinterpret_s16_s64 (int64x1_t __a)
-{
- return (int16x4_t) __builtin_aarch64_reinterpretv4hidi (__a);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vreinterpret_s16_f32 (float32x2_t __a)
-{
- return (int16x4_t) __builtin_aarch64_reinterpretv4hiv2sf (__a);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vreinterpret_s16_u8 (uint8x8_t __a)
-{
- return (int16x4_t) __builtin_aarch64_reinterpretv4hiv8qi ((int8x8_t) __a);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vreinterpret_s16_u16 (uint16x4_t __a)
-{
- return (int16x4_t) __builtin_aarch64_reinterpretv4hiv4hi ((int16x4_t) __a);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vreinterpret_s16_u32 (uint32x2_t __a)
-{
- return (int16x4_t) __builtin_aarch64_reinterpretv4hiv2si ((int32x2_t) __a);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vreinterpret_s16_u64 (uint64x1_t __a)
-{
- return (int16x4_t) __builtin_aarch64_reinterpretv4hidi ((int64x1_t) __a);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vreinterpret_s16_p8 (poly8x8_t __a)
-{
- return (int16x4_t) __builtin_aarch64_reinterpretv4hiv8qi ((int8x8_t) __a);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vreinterpret_s16_p16 (poly16x4_t __a)
-{
- return (int16x4_t) __builtin_aarch64_reinterpretv4hiv4hi ((int16x4_t) __a);
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_s16_s8 (int8x16_t __a)
-{
- return (int16x8_t) __builtin_aarch64_reinterpretv8hiv16qi (__a);
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_s16_s32 (int32x4_t __a)
-{
- return (int16x8_t) __builtin_aarch64_reinterpretv8hiv4si (__a);
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_s16_s64 (int64x2_t __a)
-{
- return (int16x8_t) __builtin_aarch64_reinterpretv8hiv2di (__a);
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_s16_f32 (float32x4_t __a)
-{
- return (int16x8_t) __builtin_aarch64_reinterpretv8hiv4sf (__a);
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_s16_u8 (uint8x16_t __a)
-{
- return (int16x8_t) __builtin_aarch64_reinterpretv8hiv16qi ((int8x16_t) __a);
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_s16_u16 (uint16x8_t __a)
-{
- return (int16x8_t) __builtin_aarch64_reinterpretv8hiv8hi ((int16x8_t) __a);
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_s16_u32 (uint32x4_t __a)
-{
- return (int16x8_t) __builtin_aarch64_reinterpretv8hiv4si ((int32x4_t) __a);
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_s16_u64 (uint64x2_t __a)
-{
- return (int16x8_t) __builtin_aarch64_reinterpretv8hiv2di ((int64x2_t) __a);
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_s16_p8 (poly8x16_t __a)
-{
- return (int16x8_t) __builtin_aarch64_reinterpretv8hiv16qi ((int8x16_t) __a);
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_s16_p16 (poly16x8_t __a)
-{
- return (int16x8_t) __builtin_aarch64_reinterpretv8hiv8hi ((int16x8_t) __a);
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vreinterpret_s32_s8 (int8x8_t __a)
-{
- return (int32x2_t) __builtin_aarch64_reinterpretv2siv8qi (__a);
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vreinterpret_s32_s16 (int16x4_t __a)
-{
- return (int32x2_t) __builtin_aarch64_reinterpretv2siv4hi (__a);
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vreinterpret_s32_s64 (int64x1_t __a)
-{
- return (int32x2_t) __builtin_aarch64_reinterpretv2sidi (__a);
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vreinterpret_s32_f32 (float32x2_t __a)
-{
- return (int32x2_t) __builtin_aarch64_reinterpretv2siv2sf (__a);
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vreinterpret_s32_u8 (uint8x8_t __a)
-{
- return (int32x2_t) __builtin_aarch64_reinterpretv2siv8qi ((int8x8_t) __a);
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vreinterpret_s32_u16 (uint16x4_t __a)
-{
- return (int32x2_t) __builtin_aarch64_reinterpretv2siv4hi ((int16x4_t) __a);
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vreinterpret_s32_u32 (uint32x2_t __a)
-{
- return (int32x2_t) __builtin_aarch64_reinterpretv2siv2si ((int32x2_t) __a);
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vreinterpret_s32_u64 (uint64x1_t __a)
-{
- return (int32x2_t) __builtin_aarch64_reinterpretv2sidi ((int64x1_t) __a);
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vreinterpret_s32_p8 (poly8x8_t __a)
-{
- return (int32x2_t) __builtin_aarch64_reinterpretv2siv8qi ((int8x8_t) __a);
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vreinterpret_s32_p16 (poly16x4_t __a)
-{
- return (int32x2_t) __builtin_aarch64_reinterpretv2siv4hi ((int16x4_t) __a);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_s32_s8 (int8x16_t __a)
-{
- return (int32x4_t) __builtin_aarch64_reinterpretv4siv16qi (__a);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_s32_s16 (int16x8_t __a)
-{
- return (int32x4_t) __builtin_aarch64_reinterpretv4siv8hi (__a);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_s32_s64 (int64x2_t __a)
-{
- return (int32x4_t) __builtin_aarch64_reinterpretv4siv2di (__a);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_s32_f32 (float32x4_t __a)
-{
- return (int32x4_t) __builtin_aarch64_reinterpretv4siv4sf (__a);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_s32_u8 (uint8x16_t __a)
-{
- return (int32x4_t) __builtin_aarch64_reinterpretv4siv16qi ((int8x16_t) __a);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_s32_u16 (uint16x8_t __a)
-{
- return (int32x4_t) __builtin_aarch64_reinterpretv4siv8hi ((int16x8_t) __a);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_s32_u32 (uint32x4_t __a)
-{
- return (int32x4_t) __builtin_aarch64_reinterpretv4siv4si ((int32x4_t) __a);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_s32_u64 (uint64x2_t __a)
-{
- return (int32x4_t) __builtin_aarch64_reinterpretv4siv2di ((int64x2_t) __a);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_s32_p8 (poly8x16_t __a)
-{
- return (int32x4_t) __builtin_aarch64_reinterpretv4siv16qi ((int8x16_t) __a);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_s32_p16 (poly16x8_t __a)
-{
- return (int32x4_t) __builtin_aarch64_reinterpretv4siv8hi ((int16x8_t) __a);
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vreinterpret_u8_s8 (int8x8_t __a)
-{
- return (uint8x8_t) __builtin_aarch64_reinterpretv8qiv8qi (__a);
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vreinterpret_u8_s16 (int16x4_t __a)
-{
- return (uint8x8_t) __builtin_aarch64_reinterpretv8qiv4hi (__a);
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vreinterpret_u8_s32 (int32x2_t __a)
-{
- return (uint8x8_t) __builtin_aarch64_reinterpretv8qiv2si (__a);
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vreinterpret_u8_s64 (int64x1_t __a)
-{
- return (uint8x8_t) __builtin_aarch64_reinterpretv8qidi (__a);
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vreinterpret_u8_f32 (float32x2_t __a)
-{
- return (uint8x8_t) __builtin_aarch64_reinterpretv8qiv2sf (__a);
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vreinterpret_u8_u16 (uint16x4_t __a)
-{
- return (uint8x8_t) __builtin_aarch64_reinterpretv8qiv4hi ((int16x4_t) __a);
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vreinterpret_u8_u32 (uint32x2_t __a)
-{
- return (uint8x8_t) __builtin_aarch64_reinterpretv8qiv2si ((int32x2_t) __a);
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vreinterpret_u8_u64 (uint64x1_t __a)
-{
- return (uint8x8_t) __builtin_aarch64_reinterpretv8qidi ((int64x1_t) __a);
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vreinterpret_u8_p8 (poly8x8_t __a)
-{
- return (uint8x8_t) __builtin_aarch64_reinterpretv8qiv8qi ((int8x8_t) __a);
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vreinterpret_u8_p16 (poly16x4_t __a)
-{
- return (uint8x8_t) __builtin_aarch64_reinterpretv8qiv4hi ((int16x4_t) __a);
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_u8_s8 (int8x16_t __a)
-{
- return (uint8x16_t) __builtin_aarch64_reinterpretv16qiv16qi (__a);
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_u8_s16 (int16x8_t __a)
-{
- return (uint8x16_t) __builtin_aarch64_reinterpretv16qiv8hi (__a);
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_u8_s32 (int32x4_t __a)
-{
- return (uint8x16_t) __builtin_aarch64_reinterpretv16qiv4si (__a);
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_u8_s64 (int64x2_t __a)
-{
- return (uint8x16_t) __builtin_aarch64_reinterpretv16qiv2di (__a);
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_u8_f32 (float32x4_t __a)
-{
- return (uint8x16_t) __builtin_aarch64_reinterpretv16qiv4sf (__a);
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_u8_u16 (uint16x8_t __a)
-{
- return (uint8x16_t) __builtin_aarch64_reinterpretv16qiv8hi ((int16x8_t)
- __a);
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_u8_u32 (uint32x4_t __a)
-{
- return (uint8x16_t) __builtin_aarch64_reinterpretv16qiv4si ((int32x4_t)
- __a);
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_u8_u64 (uint64x2_t __a)
-{
- return (uint8x16_t) __builtin_aarch64_reinterpretv16qiv2di ((int64x2_t)
- __a);
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_u8_p8 (poly8x16_t __a)
-{
- return (uint8x16_t) __builtin_aarch64_reinterpretv16qiv16qi ((int8x16_t)
- __a);
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vreinterpretq_u8_p16 (poly16x8_t __a)
-{
- return (uint8x16_t) __builtin_aarch64_reinterpretv16qiv8hi ((int16x8_t)
- __a);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vreinterpret_u16_s8 (int8x8_t __a)
-{
- return (uint16x4_t) __builtin_aarch64_reinterpretv4hiv8qi (__a);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vreinterpret_u16_s16 (int16x4_t __a)
-{
- return (uint16x4_t) __builtin_aarch64_reinterpretv4hiv4hi (__a);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vreinterpret_u16_s32 (int32x2_t __a)
-{
- return (uint16x4_t) __builtin_aarch64_reinterpretv4hiv2si (__a);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vreinterpret_u16_s64 (int64x1_t __a)
-{
- return (uint16x4_t) __builtin_aarch64_reinterpretv4hidi (__a);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vreinterpret_u16_f32 (float32x2_t __a)
-{
- return (uint16x4_t) __builtin_aarch64_reinterpretv4hiv2sf (__a);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vreinterpret_u16_u8 (uint8x8_t __a)
-{
- return (uint16x4_t) __builtin_aarch64_reinterpretv4hiv8qi ((int8x8_t) __a);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vreinterpret_u16_u32 (uint32x2_t __a)
-{
- return (uint16x4_t) __builtin_aarch64_reinterpretv4hiv2si ((int32x2_t) __a);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vreinterpret_u16_u64 (uint64x1_t __a)
-{
- return (uint16x4_t) __builtin_aarch64_reinterpretv4hidi ((int64x1_t) __a);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vreinterpret_u16_p8 (poly8x8_t __a)
-{
- return (uint16x4_t) __builtin_aarch64_reinterpretv4hiv8qi ((int8x8_t) __a);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vreinterpret_u16_p16 (poly16x4_t __a)
-{
- return (uint16x4_t) __builtin_aarch64_reinterpretv4hiv4hi ((int16x4_t) __a);
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_u16_s8 (int8x16_t __a)
-{
- return (uint16x8_t) __builtin_aarch64_reinterpretv8hiv16qi (__a);
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_u16_s16 (int16x8_t __a)
-{
- return (uint16x8_t) __builtin_aarch64_reinterpretv8hiv8hi (__a);
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_u16_s32 (int32x4_t __a)
-{
- return (uint16x8_t) __builtin_aarch64_reinterpretv8hiv4si (__a);
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_u16_s64 (int64x2_t __a)
-{
- return (uint16x8_t) __builtin_aarch64_reinterpretv8hiv2di (__a);
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_u16_f32 (float32x4_t __a)
-{
- return (uint16x8_t) __builtin_aarch64_reinterpretv8hiv4sf (__a);
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_u16_u8 (uint8x16_t __a)
-{
- return (uint16x8_t) __builtin_aarch64_reinterpretv8hiv16qi ((int8x16_t)
- __a);
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_u16_u32 (uint32x4_t __a)
-{
- return (uint16x8_t) __builtin_aarch64_reinterpretv8hiv4si ((int32x4_t) __a);
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_u16_u64 (uint64x2_t __a)
-{
- return (uint16x8_t) __builtin_aarch64_reinterpretv8hiv2di ((int64x2_t) __a);
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_u16_p8 (poly8x16_t __a)
-{
- return (uint16x8_t) __builtin_aarch64_reinterpretv8hiv16qi ((int8x16_t)
- __a);
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vreinterpretq_u16_p16 (poly16x8_t __a)
-{
- return (uint16x8_t) __builtin_aarch64_reinterpretv8hiv8hi ((int16x8_t) __a);
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vreinterpret_u32_s8 (int8x8_t __a)
-{
- return (uint32x2_t) __builtin_aarch64_reinterpretv2siv8qi (__a);
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vreinterpret_u32_s16 (int16x4_t __a)
-{
- return (uint32x2_t) __builtin_aarch64_reinterpretv2siv4hi (__a);
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vreinterpret_u32_s32 (int32x2_t __a)
-{
- return (uint32x2_t) __builtin_aarch64_reinterpretv2siv2si (__a);
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vreinterpret_u32_s64 (int64x1_t __a)
-{
- return (uint32x2_t) __builtin_aarch64_reinterpretv2sidi (__a);
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vreinterpret_u32_f32 (float32x2_t __a)
-{
- return (uint32x2_t) __builtin_aarch64_reinterpretv2siv2sf (__a);
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vreinterpret_u32_u8 (uint8x8_t __a)
-{
- return (uint32x2_t) __builtin_aarch64_reinterpretv2siv8qi ((int8x8_t) __a);
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vreinterpret_u32_u16 (uint16x4_t __a)
-{
- return (uint32x2_t) __builtin_aarch64_reinterpretv2siv4hi ((int16x4_t) __a);
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vreinterpret_u32_u64 (uint64x1_t __a)
-{
- return (uint32x2_t) __builtin_aarch64_reinterpretv2sidi ((int64x1_t) __a);
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vreinterpret_u32_p8 (poly8x8_t __a)
-{
- return (uint32x2_t) __builtin_aarch64_reinterpretv2siv8qi ((int8x8_t) __a);
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vreinterpret_u32_p16 (poly16x4_t __a)
-{
- return (uint32x2_t) __builtin_aarch64_reinterpretv2siv4hi ((int16x4_t) __a);
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_u32_s8 (int8x16_t __a)
-{
- return (uint32x4_t) __builtin_aarch64_reinterpretv4siv16qi (__a);
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_u32_s16 (int16x8_t __a)
-{
- return (uint32x4_t) __builtin_aarch64_reinterpretv4siv8hi (__a);
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_u32_s32 (int32x4_t __a)
-{
- return (uint32x4_t) __builtin_aarch64_reinterpretv4siv4si (__a);
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_u32_s64 (int64x2_t __a)
-{
- return (uint32x4_t) __builtin_aarch64_reinterpretv4siv2di (__a);
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_u32_f32 (float32x4_t __a)
-{
- return (uint32x4_t) __builtin_aarch64_reinterpretv4siv4sf (__a);
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_u32_u8 (uint8x16_t __a)
-{
- return (uint32x4_t) __builtin_aarch64_reinterpretv4siv16qi ((int8x16_t)
- __a);
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_u32_u16 (uint16x8_t __a)
-{
- return (uint32x4_t) __builtin_aarch64_reinterpretv4siv8hi ((int16x8_t) __a);
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_u32_u64 (uint64x2_t __a)
-{
- return (uint32x4_t) __builtin_aarch64_reinterpretv4siv2di ((int64x2_t) __a);
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_u32_p8 (poly8x16_t __a)
-{
- return (uint32x4_t) __builtin_aarch64_reinterpretv4siv16qi ((int8x16_t)
- __a);
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vreinterpretq_u32_p16 (poly16x8_t __a)
-{
- return (uint32x4_t) __builtin_aarch64_reinterpretv4siv8hi ((int16x8_t) __a);
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vcombine_s8 (int8x8_t __a, int8x8_t __b)
-{
- return (int8x16_t) __builtin_aarch64_combinev8qi (__a, __b);
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vcombine_s16 (int16x4_t __a, int16x4_t __b)
-{
- return (int16x8_t) __builtin_aarch64_combinev4hi (__a, __b);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vcombine_s32 (int32x2_t __a, int32x2_t __b)
-{
- return (int32x4_t) __builtin_aarch64_combinev2si (__a, __b);
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vcombine_s64 (int64x1_t __a, int64x1_t __b)
-{
- return (int64x2_t) __builtin_aarch64_combinedi (__a, __b);
-}
-
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vcombine_f32 (float32x2_t __a, float32x2_t __b)
-{
- return (float32x4_t) __builtin_aarch64_combinev2sf (__a, __b);
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vcombine_u8 (uint8x8_t __a, uint8x8_t __b)
-{
- return (uint8x16_t) __builtin_aarch64_combinev8qi ((int8x8_t) __a,
- (int8x8_t) __b);
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vcombine_u16 (uint16x4_t __a, uint16x4_t __b)
-{
- return (uint16x8_t) __builtin_aarch64_combinev4hi ((int16x4_t) __a,
- (int16x4_t) __b);
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vcombine_u32 (uint32x2_t __a, uint32x2_t __b)
-{
- return (uint32x4_t) __builtin_aarch64_combinev2si ((int32x2_t) __a,
- (int32x2_t) __b);
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vcombine_u64 (uint64x1_t __a, uint64x1_t __b)
-{
- return (uint64x2_t) __builtin_aarch64_combinedi ((int64x1_t) __a,
- (int64x1_t) __b);
-}
-
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
-vcombine_f64 (float64x1_t __a, float64x1_t __b)
-{
- return (float64x2_t) __builtin_aarch64_combinedf (__a, __b);
-}
-
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
-vcombine_p8 (poly8x8_t __a, poly8x8_t __b)
-{
- return (poly8x16_t) __builtin_aarch64_combinev8qi ((int8x8_t) __a,
- (int8x8_t) __b);
-}
-
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
-vcombine_p16 (poly16x4_t __a, poly16x4_t __b)
-{
- return (poly16x8_t) __builtin_aarch64_combinev4hi ((int16x4_t) __a,
- (int16x4_t) __b);
-}
-
-/* Start of temporary inline asm implementations. */
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vaba_s8 (int8x8_t a, int8x8_t b, int8x8_t c)
-{
- int8x8_t result;
- __asm__ ("saba %0.8b,%2.8b,%3.8b"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vaba_s16 (int16x4_t a, int16x4_t b, int16x4_t c)
-{
- int16x4_t result;
- __asm__ ("saba %0.4h,%2.4h,%3.4h"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vaba_s32 (int32x2_t a, int32x2_t b, int32x2_t c)
-{
- int32x2_t result;
- __asm__ ("saba %0.2s,%2.2s,%3.2s"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vaba_u8 (uint8x8_t a, uint8x8_t b, uint8x8_t c)
-{
- uint8x8_t result;
- __asm__ ("uaba %0.8b,%2.8b,%3.8b"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vaba_u16 (uint16x4_t a, uint16x4_t b, uint16x4_t c)
-{
- uint16x4_t result;
- __asm__ ("uaba %0.4h,%2.4h,%3.4h"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vaba_u32 (uint32x2_t a, uint32x2_t b, uint32x2_t c)
-{
- uint32x2_t result;
- __asm__ ("uaba %0.2s,%2.2s,%3.2s"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vabal_high_s8 (int16x8_t a, int8x16_t b, int8x16_t c)
-{
- int16x8_t result;
- __asm__ ("sabal2 %0.8h,%2.16b,%3.16b"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vabal_high_s16 (int32x4_t a, int16x8_t b, int16x8_t c)
-{
- int32x4_t result;
- __asm__ ("sabal2 %0.4s,%2.8h,%3.8h"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vabal_high_s32 (int64x2_t a, int32x4_t b, int32x4_t c)
-{
- int64x2_t result;
- __asm__ ("sabal2 %0.2d,%2.4s,%3.4s"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vabal_high_u8 (uint16x8_t a, uint8x16_t b, uint8x16_t c)
-{
- uint16x8_t result;
- __asm__ ("uabal2 %0.8h,%2.16b,%3.16b"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vabal_high_u16 (uint32x4_t a, uint16x8_t b, uint16x8_t c)
-{
- uint32x4_t result;
- __asm__ ("uabal2 %0.4s,%2.8h,%3.8h"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vabal_high_u32 (uint64x2_t a, uint32x4_t b, uint32x4_t c)
-{
- uint64x2_t result;
- __asm__ ("uabal2 %0.2d,%2.4s,%3.4s"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vabal_s8 (int16x8_t a, int8x8_t b, int8x8_t c)
-{
- int16x8_t result;
- __asm__ ("sabal %0.8h,%2.8b,%3.8b"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vabal_s16 (int32x4_t a, int16x4_t b, int16x4_t c)
-{
- int32x4_t result;
- __asm__ ("sabal %0.4s,%2.4h,%3.4h"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vabal_s32 (int64x2_t a, int32x2_t b, int32x2_t c)
-{
- int64x2_t result;
- __asm__ ("sabal %0.2d,%2.2s,%3.2s"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vabal_u8 (uint16x8_t a, uint8x8_t b, uint8x8_t c)
-{
- uint16x8_t result;
- __asm__ ("uabal %0.8h,%2.8b,%3.8b"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vabal_u16 (uint32x4_t a, uint16x4_t b, uint16x4_t c)
-{
- uint32x4_t result;
- __asm__ ("uabal %0.4s,%2.4h,%3.4h"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vabal_u32 (uint64x2_t a, uint32x2_t b, uint32x2_t c)
-{
- uint64x2_t result;
- __asm__ ("uabal %0.2d,%2.2s,%3.2s"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vabaq_s8 (int8x16_t a, int8x16_t b, int8x16_t c)
-{
- int8x16_t result;
- __asm__ ("saba %0.16b,%2.16b,%3.16b"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vabaq_s16 (int16x8_t a, int16x8_t b, int16x8_t c)
-{
- int16x8_t result;
- __asm__ ("saba %0.8h,%2.8h,%3.8h"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vabaq_s32 (int32x4_t a, int32x4_t b, int32x4_t c)
-{
- int32x4_t result;
- __asm__ ("saba %0.4s,%2.4s,%3.4s"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vabaq_u8 (uint8x16_t a, uint8x16_t b, uint8x16_t c)
-{
- uint8x16_t result;
- __asm__ ("uaba %0.16b,%2.16b,%3.16b"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vabaq_u16 (uint16x8_t a, uint16x8_t b, uint16x8_t c)
-{
- uint16x8_t result;
- __asm__ ("uaba %0.8h,%2.8h,%3.8h"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vabaq_u32 (uint32x4_t a, uint32x4_t b, uint32x4_t c)
-{
- uint32x4_t result;
- __asm__ ("uaba %0.4s,%2.4s,%3.4s"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vabd_f32 (float32x2_t a, float32x2_t b)
-{
- float32x2_t result;
- __asm__ ("fabd %0.2s, %1.2s, %2.2s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vabd_s8 (int8x8_t a, int8x8_t b)
-{
- int8x8_t result;
- __asm__ ("sabd %0.8b, %1.8b, %2.8b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vabd_s16 (int16x4_t a, int16x4_t b)
-{
- int16x4_t result;
- __asm__ ("sabd %0.4h, %1.4h, %2.4h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vabd_s32 (int32x2_t a, int32x2_t b)
-{
- int32x2_t result;
- __asm__ ("sabd %0.2s, %1.2s, %2.2s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vabd_u8 (uint8x8_t a, uint8x8_t b)
-{
- uint8x8_t result;
- __asm__ ("uabd %0.8b, %1.8b, %2.8b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vabd_u16 (uint16x4_t a, uint16x4_t b)
-{
- uint16x4_t result;
- __asm__ ("uabd %0.4h, %1.4h, %2.4h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vabd_u32 (uint32x2_t a, uint32x2_t b)
-{
- uint32x2_t result;
- __asm__ ("uabd %0.2s, %1.2s, %2.2s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64_t __attribute__ ((__always_inline__))
-vabdd_f64 (float64_t a, float64_t b)
-{
- float64_t result;
- __asm__ ("fabd %d0, %d1, %d2"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vabdl_high_s8 (int8x16_t a, int8x16_t b)
-{
- int16x8_t result;
- __asm__ ("sabdl2 %0.8h,%1.16b,%2.16b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vabdl_high_s16 (int16x8_t a, int16x8_t b)
-{
- int32x4_t result;
- __asm__ ("sabdl2 %0.4s,%1.8h,%2.8h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vabdl_high_s32 (int32x4_t a, int32x4_t b)
-{
- int64x2_t result;
- __asm__ ("sabdl2 %0.2d,%1.4s,%2.4s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vabdl_high_u8 (uint8x16_t a, uint8x16_t b)
-{
- uint16x8_t result;
- __asm__ ("uabdl2 %0.8h,%1.16b,%2.16b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vabdl_high_u16 (uint16x8_t a, uint16x8_t b)
-{
- uint32x4_t result;
- __asm__ ("uabdl2 %0.4s,%1.8h,%2.8h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vabdl_high_u32 (uint32x4_t a, uint32x4_t b)
-{
- uint64x2_t result;
- __asm__ ("uabdl2 %0.2d,%1.4s,%2.4s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vabdl_s8 (int8x8_t a, int8x8_t b)
-{
- int16x8_t result;
- __asm__ ("sabdl %0.8h, %1.8b, %2.8b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vabdl_s16 (int16x4_t a, int16x4_t b)
-{
- int32x4_t result;
- __asm__ ("sabdl %0.4s, %1.4h, %2.4h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vabdl_s32 (int32x2_t a, int32x2_t b)
-{
- int64x2_t result;
- __asm__ ("sabdl %0.2d, %1.2s, %2.2s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vabdl_u8 (uint8x8_t a, uint8x8_t b)
-{
- uint16x8_t result;
- __asm__ ("uabdl %0.8h, %1.8b, %2.8b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vabdl_u16 (uint16x4_t a, uint16x4_t b)
-{
- uint32x4_t result;
- __asm__ ("uabdl %0.4s, %1.4h, %2.4h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vabdl_u32 (uint32x2_t a, uint32x2_t b)
-{
- uint64x2_t result;
- __asm__ ("uabdl %0.2d, %1.2s, %2.2s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vabdq_f32 (float32x4_t a, float32x4_t b)
-{
- float32x4_t result;
- __asm__ ("fabd %0.4s, %1.4s, %2.4s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
-vabdq_f64 (float64x2_t a, float64x2_t b)
-{
- float64x2_t result;
- __asm__ ("fabd %0.2d, %1.2d, %2.2d"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vabdq_s8 (int8x16_t a, int8x16_t b)
-{
- int8x16_t result;
- __asm__ ("sabd %0.16b, %1.16b, %2.16b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vabdq_s16 (int16x8_t a, int16x8_t b)
-{
- int16x8_t result;
- __asm__ ("sabd %0.8h, %1.8h, %2.8h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vabdq_s32 (int32x4_t a, int32x4_t b)
-{
- int32x4_t result;
- __asm__ ("sabd %0.4s, %1.4s, %2.4s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vabdq_u8 (uint8x16_t a, uint8x16_t b)
-{
- uint8x16_t result;
- __asm__ ("uabd %0.16b, %1.16b, %2.16b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vabdq_u16 (uint16x8_t a, uint16x8_t b)
-{
- uint16x8_t result;
- __asm__ ("uabd %0.8h, %1.8h, %2.8h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vabdq_u32 (uint32x4_t a, uint32x4_t b)
-{
- uint32x4_t result;
- __asm__ ("uabd %0.4s, %1.4s, %2.4s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
-vabds_f32 (float32_t a, float32_t b)
-{
- float32_t result;
- __asm__ ("fabd %s0, %s1, %s2"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vabs_f32 (float32x2_t a)
-{
- float32x2_t result;
- __asm__ ("fabs %0.2s,%1.2s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vabs_s8 (int8x8_t a)
-{
- int8x8_t result;
- __asm__ ("abs %0.8b,%1.8b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vabs_s16 (int16x4_t a)
-{
- int16x4_t result;
- __asm__ ("abs %0.4h,%1.4h"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vabs_s32 (int32x2_t a)
-{
- int32x2_t result;
- __asm__ ("abs %0.2s,%1.2s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vabsq_f32 (float32x4_t a)
-{
- float32x4_t result;
- __asm__ ("fabs %0.4s,%1.4s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
-vabsq_f64 (float64x2_t a)
-{
- float64x2_t result;
- __asm__ ("fabs %0.2d,%1.2d"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vabsq_s8 (int8x16_t a)
-{
- int8x16_t result;
- __asm__ ("abs %0.16b,%1.16b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vabsq_s16 (int16x8_t a)
-{
- int16x8_t result;
- __asm__ ("abs %0.8h,%1.8h"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vabsq_s32 (int32x4_t a)
-{
- int32x4_t result;
- __asm__ ("abs %0.4s,%1.4s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vabsq_s64 (int64x2_t a)
-{
- int64x2_t result;
- __asm__ ("abs %0.2d,%1.2d"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64_t __attribute__ ((__always_inline__))
-vacged_f64 (float64_t a, float64_t b)
-{
- float64_t result;
- __asm__ ("facge %d0,%d1,%d2"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
-vacges_f32 (float32_t a, float32_t b)
-{
- float32_t result;
- __asm__ ("facge %s0,%s1,%s2"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64_t __attribute__ ((__always_inline__))
-vacgtd_f64 (float64_t a, float64_t b)
-{
- float64_t result;
- __asm__ ("facgt %d0,%d1,%d2"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
-vacgts_f32 (float32_t a, float32_t b)
-{
- float32_t result;
- __asm__ ("facgt %s0,%s1,%s2"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16_t __attribute__ ((__always_inline__))
-vaddlv_s8 (int8x8_t a)
-{
- int16_t result;
- __asm__ ("saddlv %h0,%1.8b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32_t __attribute__ ((__always_inline__))
-vaddlv_s16 (int16x4_t a)
-{
- int32_t result;
- __asm__ ("saddlv %s0,%1.4h"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
-vaddlv_u8 (uint8x8_t a)
-{
- uint16_t result;
- __asm__ ("uaddlv %h0,%1.8b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
-vaddlv_u16 (uint16x4_t a)
-{
- uint32_t result;
- __asm__ ("uaddlv %s0,%1.4h"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16_t __attribute__ ((__always_inline__))
-vaddlvq_s8 (int8x16_t a)
-{
- int16_t result;
- __asm__ ("saddlv %h0,%1.16b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32_t __attribute__ ((__always_inline__))
-vaddlvq_s16 (int16x8_t a)
-{
- int32_t result;
- __asm__ ("saddlv %s0,%1.8h"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int64_t __attribute__ ((__always_inline__))
-vaddlvq_s32 (int32x4_t a)
-{
- int64_t result;
- __asm__ ("saddlv %d0,%1.4s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
-vaddlvq_u8 (uint8x16_t a)
-{
- uint16_t result;
- __asm__ ("uaddlv %h0,%1.16b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
-vaddlvq_u16 (uint16x8_t a)
-{
- uint32_t result;
- __asm__ ("uaddlv %s0,%1.8h"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
-vaddlvq_u32 (uint32x4_t a)
-{
- uint64_t result;
- __asm__ ("uaddlv %d0,%1.4s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8_t __attribute__ ((__always_inline__))
-vaddv_s8 (int8x8_t a)
-{
- int8_t result;
- __asm__ ("addv %b0,%1.8b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16_t __attribute__ ((__always_inline__))
-vaddv_s16 (int16x4_t a)
-{
- int16_t result;
- __asm__ ("addv %h0,%1.4h"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
-vaddv_u8 (uint8x8_t a)
-{
- uint8_t result;
- __asm__ ("addv %b0,%1.8b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
-vaddv_u16 (uint16x4_t a)
-{
- uint16_t result;
- __asm__ ("addv %h0,%1.4h"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8_t __attribute__ ((__always_inline__))
-vaddvq_s8 (int8x16_t a)
-{
- int8_t result;
- __asm__ ("addv %b0,%1.16b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16_t __attribute__ ((__always_inline__))
-vaddvq_s16 (int16x8_t a)
-{
- int16_t result;
- __asm__ ("addv %h0,%1.8h"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32_t __attribute__ ((__always_inline__))
-vaddvq_s32 (int32x4_t a)
-{
- int32_t result;
- __asm__ ("addv %s0,%1.4s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
-vaddvq_u8 (uint8x16_t a)
-{
- uint8_t result;
- __asm__ ("addv %b0,%1.16b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
-vaddvq_u16 (uint16x8_t a)
-{
- uint16_t result;
- __asm__ ("addv %h0,%1.8h"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
-vaddvq_u32 (uint32x4_t a)
-{
- uint32_t result;
- __asm__ ("addv %s0,%1.4s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vbsl_f32 (uint32x2_t a, float32x2_t b, float32x2_t c)
-{
- float32x2_t result;
- __asm__ ("bsl %0.8b, %2.8b, %3.8b"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vbsl_p8 (uint8x8_t a, poly8x8_t b, poly8x8_t c)
-{
- poly8x8_t result;
- __asm__ ("bsl %0.8b, %2.8b, %3.8b"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
-vbsl_p16 (uint16x4_t a, poly16x4_t b, poly16x4_t c)
-{
- poly16x4_t result;
- __asm__ ("bsl %0.8b, %2.8b, %3.8b"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vbsl_s8 (uint8x8_t a, int8x8_t b, int8x8_t c)
-{
- int8x8_t result;
- __asm__ ("bsl %0.8b, %2.8b, %3.8b"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vbsl_s16 (uint16x4_t a, int16x4_t b, int16x4_t c)
-{
- int16x4_t result;
- __asm__ ("bsl %0.8b, %2.8b, %3.8b"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vbsl_s32 (uint32x2_t a, int32x2_t b, int32x2_t c)
-{
- int32x2_t result;
- __asm__ ("bsl %0.8b, %2.8b, %3.8b"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vbsl_s64 (uint64x1_t a, int64x1_t b, int64x1_t c)
-{
- int64x1_t result;
- __asm__ ("bsl %0.8b, %2.8b, %3.8b"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vbsl_u8 (uint8x8_t a, uint8x8_t b, uint8x8_t c)
-{
- uint8x8_t result;
- __asm__ ("bsl %0.8b, %2.8b, %3.8b"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vbsl_u16 (uint16x4_t a, uint16x4_t b, uint16x4_t c)
-{
- uint16x4_t result;
- __asm__ ("bsl %0.8b, %2.8b, %3.8b"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vbsl_u32 (uint32x2_t a, uint32x2_t b, uint32x2_t c)
-{
- uint32x2_t result;
- __asm__ ("bsl %0.8b, %2.8b, %3.8b"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vbsl_u64 (uint64x1_t a, uint64x1_t b, uint64x1_t c)
-{
- uint64x1_t result;
- __asm__ ("bsl %0.8b, %2.8b, %3.8b"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vbslq_f32 (uint32x4_t a, float32x4_t b, float32x4_t c)
-{
- float32x4_t result;
- __asm__ ("bsl %0.16b, %2.16b, %3.16b"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
-vbslq_f64 (uint64x2_t a, float64x2_t b, float64x2_t c)
-{
- float64x2_t result;
- __asm__ ("bsl %0.16b, %2.16b, %3.16b"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
-vbslq_p8 (uint8x16_t a, poly8x16_t b, poly8x16_t c)
-{
- poly8x16_t result;
- __asm__ ("bsl %0.16b, %2.16b, %3.16b"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
-vbslq_p16 (uint16x8_t a, poly16x8_t b, poly16x8_t c)
-{
- poly16x8_t result;
- __asm__ ("bsl %0.16b, %2.16b, %3.16b"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vbslq_s8 (uint8x16_t a, int8x16_t b, int8x16_t c)
-{
- int8x16_t result;
- __asm__ ("bsl %0.16b, %2.16b, %3.16b"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vbslq_s16 (uint16x8_t a, int16x8_t b, int16x8_t c)
-{
- int16x8_t result;
- __asm__ ("bsl %0.16b, %2.16b, %3.16b"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vbslq_s32 (uint32x4_t a, int32x4_t b, int32x4_t c)
-{
- int32x4_t result;
- __asm__ ("bsl %0.16b, %2.16b, %3.16b"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vbslq_s64 (uint64x2_t a, int64x2_t b, int64x2_t c)
-{
- int64x2_t result;
- __asm__ ("bsl %0.16b, %2.16b, %3.16b"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vbslq_u8 (uint8x16_t a, uint8x16_t b, uint8x16_t c)
-{
- uint8x16_t result;
- __asm__ ("bsl %0.16b, %2.16b, %3.16b"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vbslq_u16 (uint16x8_t a, uint16x8_t b, uint16x8_t c)
-{
- uint16x8_t result;
- __asm__ ("bsl %0.16b, %2.16b, %3.16b"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vbslq_u32 (uint32x4_t a, uint32x4_t b, uint32x4_t c)
-{
- uint32x4_t result;
- __asm__ ("bsl %0.16b, %2.16b, %3.16b"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vbslq_u64 (uint64x2_t a, uint64x2_t b, uint64x2_t c)
-{
- uint64x2_t result;
- __asm__ ("bsl %0.16b, %2.16b, %3.16b"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vcage_f32 (float32x2_t a, float32x2_t b)
-{
- uint32x2_t result;
- __asm__ ("facge %0.2s, %1.2s, %2.2s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vcageq_f32 (float32x4_t a, float32x4_t b)
-{
- uint32x4_t result;
- __asm__ ("facge %0.4s, %1.4s, %2.4s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vcageq_f64 (float64x2_t a, float64x2_t b)
-{
- uint64x2_t result;
- __asm__ ("facge %0.2d, %1.2d, %2.2d"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vcagt_f32 (float32x2_t a, float32x2_t b)
-{
- uint32x2_t result;
- __asm__ ("facgt %0.2s, %1.2s, %2.2s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vcagtq_f32 (float32x4_t a, float32x4_t b)
-{
- uint32x4_t result;
- __asm__ ("facgt %0.4s, %1.4s, %2.4s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vcagtq_f64 (float64x2_t a, float64x2_t b)
-{
- uint64x2_t result;
- __asm__ ("facgt %0.2d, %1.2d, %2.2d"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vcale_f32 (float32x2_t a, float32x2_t b)
-{
- uint32x2_t result;
- __asm__ ("facge %0.2s, %2.2s, %1.2s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vcaleq_f32 (float32x4_t a, float32x4_t b)
-{
- uint32x4_t result;
- __asm__ ("facge %0.4s, %2.4s, %1.4s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vcaleq_f64 (float64x2_t a, float64x2_t b)
-{
- uint64x2_t result;
- __asm__ ("facge %0.2d, %2.2d, %1.2d"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vcalt_f32 (float32x2_t a, float32x2_t b)
-{
- uint32x2_t result;
- __asm__ ("facgt %0.2s, %2.2s, %1.2s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vcaltq_f32 (float32x4_t a, float32x4_t b)
-{
- uint32x4_t result;
- __asm__ ("facgt %0.4s, %2.4s, %1.4s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vcaltq_f64 (float64x2_t a, float64x2_t b)
-{
- uint64x2_t result;
- __asm__ ("facgt %0.2d, %2.2d, %1.2d"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vceq_f32 (float32x2_t a, float32x2_t b)
-{
- uint32x2_t result;
- __asm__ ("fcmeq %0.2s, %1.2s, %2.2s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vceq_f64 (float64x1_t a, float64x1_t b)
-{
- uint64x1_t result;
- __asm__ ("fcmeq %d0, %d1, %d2"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64_t __attribute__ ((__always_inline__))
-vceqd_f64 (float64_t a, float64_t b)
-{
- float64_t result;
- __asm__ ("fcmeq %d0,%d1,%d2"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vceqq_f32 (float32x4_t a, float32x4_t b)
-{
- uint32x4_t result;
- __asm__ ("fcmeq %0.4s, %1.4s, %2.4s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vceqq_f64 (float64x2_t a, float64x2_t b)
-{
- uint64x2_t result;
- __asm__ ("fcmeq %0.2d, %1.2d, %2.2d"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
-vceqs_f32 (float32_t a, float32_t b)
-{
- float32_t result;
- __asm__ ("fcmeq %s0,%s1,%s2"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64_t __attribute__ ((__always_inline__))
-vceqzd_f64 (float64_t a)
-{
- float64_t result;
- __asm__ ("fcmeq %d0,%d1,#0"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
-vceqzs_f32 (float32_t a)
-{
- float32_t result;
- __asm__ ("fcmeq %s0,%s1,#0"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vcge_f32 (float32x2_t a, float32x2_t b)
-{
- uint32x2_t result;
- __asm__ ("fcmge %0.2s, %1.2s, %2.2s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vcge_f64 (float64x1_t a, float64x1_t b)
-{
- uint64x1_t result;
- __asm__ ("fcmge %d0, %d1, %d2"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vcgeq_f32 (float32x4_t a, float32x4_t b)
-{
- uint32x4_t result;
- __asm__ ("fcmge %0.4s, %1.4s, %2.4s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vcgeq_f64 (float64x2_t a, float64x2_t b)
-{
- uint64x2_t result;
- __asm__ ("fcmge %0.2d, %1.2d, %2.2d"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vcgt_f32 (float32x2_t a, float32x2_t b)
-{
- uint32x2_t result;
- __asm__ ("fcmgt %0.2s, %1.2s, %2.2s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vcgt_f64 (float64x1_t a, float64x1_t b)
-{
- uint64x1_t result;
- __asm__ ("fcmgt %d0, %d1, %d2"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vcgtq_f32 (float32x4_t a, float32x4_t b)
-{
- uint32x4_t result;
- __asm__ ("fcmgt %0.4s, %1.4s, %2.4s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vcgtq_f64 (float64x2_t a, float64x2_t b)
-{
- uint64x2_t result;
- __asm__ ("fcmgt %0.2d, %1.2d, %2.2d"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vcle_f32 (float32x2_t a, float32x2_t b)
-{
- uint32x2_t result;
- __asm__ ("fcmge %0.2s, %2.2s, %1.2s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vcle_f64 (float64x1_t a, float64x1_t b)
-{
- uint64x1_t result;
- __asm__ ("fcmge %d0, %d2, %d1"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vcleq_f32 (float32x4_t a, float32x4_t b)
-{
- uint32x4_t result;
- __asm__ ("fcmge %0.4s, %2.4s, %1.4s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vcleq_f64 (float64x2_t a, float64x2_t b)
-{
- uint64x2_t result;
- __asm__ ("fcmge %0.2d, %2.2d, %1.2d"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
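A usage sketch (assuming an AArch64 toolchain whose <arm_neon.h> provides these intrinsics): the fcmeq/fcmge/fcmgt-based intrinsics above return per-lane masks that are all-ones where the comparison holds and all-zero otherwise, so the result can feed directly into a bit-select.

#include <arm_neon.h>

/* Each lane of the result is 0xFFFFFFFF where a >= b, else 0.  */
static inline uint32x4_t
mask_a_ge_b (float32x4_t a, float32x4_t b)
{
  return vcgeq_f32 (a, b);
}
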
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vcls_s8 (int8x8_t a)
-{
- int8x8_t result;
- __asm__ ("cls %0.8b,%1.8b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vcls_s16 (int16x4_t a)
-{
- int16x4_t result;
- __asm__ ("cls %0.4h,%1.4h"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vcls_s32 (int32x2_t a)
-{
- int32x2_t result;
- __asm__ ("cls %0.2s,%1.2s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vclsq_s8 (int8x16_t a)
-{
- int8x16_t result;
- __asm__ ("cls %0.16b,%1.16b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vclsq_s16 (int16x8_t a)
-{
- int16x8_t result;
- __asm__ ("cls %0.8h,%1.8h"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vclsq_s32 (int32x4_t a)
-{
- int32x4_t result;
- __asm__ ("cls %0.4s,%1.4s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vclt_f32 (float32x2_t a, float32x2_t b)
-{
- uint32x2_t result;
- __asm__ ("fcmgt %0.2s, %2.2s, %1.2s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vclt_f64 (float64x1_t a, float64x1_t b)
-{
- uint64x1_t result;
- __asm__ ("fcmgt %d0, %d2, %d1"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vcltq_f32 (float32x4_t a, float32x4_t b)
-{
- uint32x4_t result;
- __asm__ ("fcmgt %0.4s, %2.4s, %1.4s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vcltq_f64 (float64x2_t a, float64x2_t b)
-{
- uint64x2_t result;
- __asm__ ("fcmgt %0.2d, %2.2d, %1.2d"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vclz_s8 (int8x8_t a)
-{
- int8x8_t result;
- __asm__ ("clz %0.8b,%1.8b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vclz_s16 (int16x4_t a)
-{
- int16x4_t result;
- __asm__ ("clz %0.4h,%1.4h"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vclz_s32 (int32x2_t a)
-{
- int32x2_t result;
- __asm__ ("clz %0.2s,%1.2s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vclz_u8 (uint8x8_t a)
-{
- uint8x8_t result;
- __asm__ ("clz %0.8b,%1.8b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vclz_u16 (uint16x4_t a)
-{
- uint16x4_t result;
- __asm__ ("clz %0.4h,%1.4h"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vclz_u32 (uint32x2_t a)
-{
- uint32x2_t result;
- __asm__ ("clz %0.2s,%1.2s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vclzq_s8 (int8x16_t a)
-{
- int8x16_t result;
- __asm__ ("clz %0.16b,%1.16b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vclzq_s16 (int16x8_t a)
-{
- int16x8_t result;
- __asm__ ("clz %0.8h,%1.8h"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vclzq_s32 (int32x4_t a)
-{
- int32x4_t result;
- __asm__ ("clz %0.4s,%1.4s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vclzq_u8 (uint8x16_t a)
-{
- uint8x16_t result;
- __asm__ ("clz %0.16b,%1.16b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vclzq_u16 (uint16x8_t a)
-{
- uint16x8_t result;
- __asm__ ("clz %0.8h,%1.8h"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vclzq_u32 (uint32x4_t a)
-{
- uint32x4_t result;
- __asm__ ("clz %0.4s,%1.4s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vcnt_p8 (poly8x8_t a)
-{
- poly8x8_t result;
- __asm__ ("cnt %0.8b,%1.8b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vcnt_s8 (int8x8_t a)
-{
- int8x8_t result;
- __asm__ ("cnt %0.8b,%1.8b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vcnt_u8 (uint8x8_t a)
-{
- uint8x8_t result;
- __asm__ ("cnt %0.8b,%1.8b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
-vcntq_p8 (poly8x16_t a)
-{
- poly8x16_t result;
- __asm__ ("cnt %0.16b,%1.16b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vcntq_s8 (int8x16_t a)
-{
- int8x16_t result;
- __asm__ ("cnt %0.16b,%1.16b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vcntq_u8 (uint8x16_t a)
-{
- uint8x16_t result;
- __asm__ ("cnt %0.16b,%1.16b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
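A usage sketch for the CNT-based intrinsics above, assuming an AArch64 compiler: vcnt_u8 gives a per-byte population count.

#include <arm_neon.h>

/* Each output lane holds the number of set bits in the
   corresponding input byte.  */
static inline uint8x8_t
popcount_bytes (uint8x8_t v)
{
  return vcnt_u8 (v);
}
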
-#define vcopyq_lane_f32(a, b, c, d) \
- __extension__ \
- ({ \
- float32x4_t c_ = (c); \
- float32x4_t a_ = (a); \
- float32x4_t result; \
- __asm__ ("ins %0.s[%2], %3.s[%4]" \
- : "=w"(result) \
- : "0"(a_), "i"(b), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vcopyq_lane_f64(a, b, c, d) \
- __extension__ \
- ({ \
- float64x2_t c_ = (c); \
- float64x2_t a_ = (a); \
- float64x2_t result; \
- __asm__ ("ins %0.d[%2], %3.d[%4]" \
- : "=w"(result) \
- : "0"(a_), "i"(b), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vcopyq_lane_p8(a, b, c, d) \
- __extension__ \
- ({ \
- poly8x16_t c_ = (c); \
- poly8x16_t a_ = (a); \
- poly8x16_t result; \
- __asm__ ("ins %0.b[%2], %3.b[%4]" \
- : "=w"(result) \
- : "0"(a_), "i"(b), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vcopyq_lane_p16(a, b, c, d) \
- __extension__ \
- ({ \
- poly16x8_t c_ = (c); \
- poly16x8_t a_ = (a); \
- poly16x8_t result; \
- __asm__ ("ins %0.h[%2], %3.h[%4]" \
- : "=w"(result) \
- : "0"(a_), "i"(b), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vcopyq_lane_s8(a, b, c, d) \
- __extension__ \
- ({ \
- int8x16_t c_ = (c); \
- int8x16_t a_ = (a); \
- int8x16_t result; \
- __asm__ ("ins %0.b[%2], %3.b[%4]" \
- : "=w"(result) \
- : "0"(a_), "i"(b), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vcopyq_lane_s16(a, b, c, d) \
- __extension__ \
- ({ \
- int16x8_t c_ = (c); \
- int16x8_t a_ = (a); \
- int16x8_t result; \
- __asm__ ("ins %0.h[%2], %3.h[%4]" \
- : "=w"(result) \
- : "0"(a_), "i"(b), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vcopyq_lane_s32(a, b, c, d) \
- __extension__ \
- ({ \
- int32x4_t c_ = (c); \
- int32x4_t a_ = (a); \
- int32x4_t result; \
- __asm__ ("ins %0.s[%2], %3.s[%4]" \
- : "=w"(result) \
- : "0"(a_), "i"(b), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vcopyq_lane_s64(a, b, c, d) \
- __extension__ \
- ({ \
- int64x2_t c_ = (c); \
- int64x2_t a_ = (a); \
- int64x2_t result; \
- __asm__ ("ins %0.d[%2], %3.d[%4]" \
- : "=w"(result) \
- : "0"(a_), "i"(b), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vcopyq_lane_u8(a, b, c, d) \
- __extension__ \
- ({ \
- uint8x16_t c_ = (c); \
- uint8x16_t a_ = (a); \
- uint8x16_t result; \
- __asm__ ("ins %0.b[%2], %3.b[%4]" \
- : "=w"(result) \
- : "0"(a_), "i"(b), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vcopyq_lane_u16(a, b, c, d) \
- __extension__ \
- ({ \
- uint16x8_t c_ = (c); \
- uint16x8_t a_ = (a); \
- uint16x8_t result; \
- __asm__ ("ins %0.h[%2], %3.h[%4]" \
- : "=w"(result) \
- : "0"(a_), "i"(b), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vcopyq_lane_u32(a, b, c, d) \
- __extension__ \
- ({ \
- uint32x4_t c_ = (c); \
- uint32x4_t a_ = (a); \
- uint32x4_t result; \
- __asm__ ("ins %0.s[%2], %3.s[%4]" \
- : "=w"(result) \
- : "0"(a_), "i"(b), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vcopyq_lane_u64(a, b, c, d) \
- __extension__ \
- ({ \
- uint64x2_t c_ = (c); \
- uint64x2_t a_ = (a); \
- uint64x2_t result; \
- __asm__ ("ins %0.d[%2], %3.d[%4]" \
- : "=w"(result) \
- : "0"(a_), "i"(b), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
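A usage sketch for the vcopyq_lane_* macros as defined above (where both operands are full 128-bit vectors): vcopyq_lane_s32 (a, b, c, d) copies lane d of c into lane b of a via INS, leaving the remaining lanes of a unchanged; both lane indices must be constant expressions.

#include <arm_neon.h>

static inline int32x4_t
copy_lane_example (int32x4_t a, int32x4_t c)
{
  /* a[0] = c[3]; lanes 1..3 of a are preserved.  */
  return vcopyq_lane_s32 (a, 0, c, 3);
}
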
-/* vcvt_f16_f32 not supported */
-
-/* vcvt_f32_f16 not supported */
-
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vcvt_f32_f64 (float64x2_t a)
-{
- float32x2_t result;
- __asm__ ("fcvtn %0.2s,%1.2d"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vcvt_f32_s32 (int32x2_t a)
-{
- float32x2_t result;
- __asm__ ("scvtf %0.2s, %1.2s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vcvt_f32_u32 (uint32x2_t a)
-{
- float32x2_t result;
- __asm__ ("ucvtf %0.2s, %1.2s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
-vcvt_f64_f32 (float32x2_t a)
-{
- float64x2_t result;
- __asm__ ("fcvtl %0.2d,%1.2s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
-vcvt_f64_s64 (uint64x1_t a)
-{
- float64x1_t result;
- __asm__ ("scvtf %d0, %d1"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
-vcvt_f64_u64 (uint64x1_t a)
-{
- float64x1_t result;
- __asm__ ("ucvtf %d0, %d1"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-/* vcvt_high_f16_f32 not supported */
-
-/* vcvt_high_f32_f16 not supported */
-
-static float32x2_t vdup_n_f32 (float32_t);
-
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vcvt_high_f32_f64 (float32x2_t a, float64x2_t b)
-{
- float32x4_t result = vcombine_f32 (a, vdup_n_f32 (0.0f));
- __asm__ ("fcvtn2 %0.4s,%2.2d"
- : "+w"(result)
- : "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
-vcvt_high_f64_f32 (float32x4_t a)
-{
- float64x2_t result;
- __asm__ ("fcvtl2 %0.2d,%1.4s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-#define vcvt_n_f32_s32(a, b) \
- __extension__ \
- ({ \
- int32x2_t a_ = (a); \
- float32x2_t result; \
- __asm__ ("scvtf %0.2s, %1.2s, #%2" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vcvt_n_f32_u32(a, b) \
- __extension__ \
- ({ \
- uint32x2_t a_ = (a); \
- float32x2_t result; \
- __asm__ ("ucvtf %0.2s, %1.2s, #%2" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vcvt_n_s32_f32(a, b) \
- __extension__ \
- ({ \
- float32x2_t a_ = (a); \
- int32x2_t result; \
- __asm__ ("fcvtzs %0.2s, %1.2s, #%2" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vcvt_n_u32_f32(a, b) \
- __extension__ \
- ({ \
- float32x2_t a_ = (a); \
- uint32x2_t result; \
- __asm__ ("fcvtzu %0.2s, %1.2s, #%2" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
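A usage sketch for the _n_ conversion macros above, assuming an AArch64 compiler: the constant second argument is the number of fractional bits, so these convert between floating point and fixed-point integer vectors.

#include <arm_neon.h>

/* Q16.16 fixed point -> float and back; the fractional-bit count
   must be a constant expression (here 16).  */
static inline float32x2_t
fixed16_16_to_float (int32x2_t q)
{
  return vcvt_n_f32_s32 (q, 16);
}

static inline int32x2_t
float_to_fixed16_16 (float32x2_t f)
{
  return vcvt_n_s32_f32 (f, 16);
}
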
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vcvt_s32_f32 (float32x2_t a)
-{
- int32x2_t result;
- __asm__ ("fcvtzs %0.2s, %1.2s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vcvt_u32_f32 (float32x2_t a)
-{
- uint32x2_t result;
- __asm__ ("fcvtzu %0.2s, %1.2s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vcvta_s32_f32 (float32x2_t a)
-{
- int32x2_t result;
- __asm__ ("fcvtas %0.2s, %1.2s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vcvta_u32_f32 (float32x2_t a)
-{
- uint32x2_t result;
- __asm__ ("fcvtau %0.2s, %1.2s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64_t __attribute__ ((__always_inline__))
-vcvtad_s64_f64 (float64_t a)
-{
- float64_t result;
- __asm__ ("fcvtas %d0,%d1"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64_t __attribute__ ((__always_inline__))
-vcvtad_u64_f64 (float64_t a)
-{
- float64_t result;
- __asm__ ("fcvtau %d0,%d1"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vcvtaq_s32_f32 (float32x4_t a)
-{
- int32x4_t result;
- __asm__ ("fcvtas %0.4s, %1.4s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vcvtaq_s64_f64 (float64x2_t a)
-{
- int64x2_t result;
- __asm__ ("fcvtas %0.2d, %1.2d"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vcvtaq_u32_f32 (float32x4_t a)
-{
- uint32x4_t result;
- __asm__ ("fcvtau %0.4s, %1.4s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vcvtaq_u64_f64 (float64x2_t a)
-{
- uint64x2_t result;
- __asm__ ("fcvtau %0.2d, %1.2d"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
-vcvtas_s64_f64 (float32_t a)
-{
- float32_t result;
- __asm__ ("fcvtas %s0,%s1"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
-vcvtas_u64_f64 (float32_t a)
-{
- float32_t result;
- __asm__ ("fcvtau %s0,%s1"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int64_t __attribute__ ((__always_inline__))
-vcvtd_f64_s64 (int64_t a)
-{
- int64_t result;
- __asm__ ("scvtf %d0,%d1"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
-vcvtd_f64_u64 (uint64_t a)
-{
- uint64_t result;
- __asm__ ("ucvtf %d0,%d1"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-#define vcvtd_n_f64_s64(a, b) \
- __extension__ \
- ({ \
- int64_t a_ = (a); \
- int64_t result; \
- __asm__ ("scvtf %d0,%d1,%2" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vcvtd_n_f64_u64(a, b) \
- __extension__ \
- ({ \
- uint64_t a_ = (a); \
- uint64_t result; \
- __asm__ ("ucvtf %d0,%d1,%2" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vcvtd_n_s64_f64(a, b) \
- __extension__ \
- ({ \
- float64_t a_ = (a); \
- float64_t result; \
- __asm__ ("fcvtzs %d0,%d1,%2" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vcvtd_n_u64_f64(a, b) \
- __extension__ \
- ({ \
- float64_t a_ = (a); \
- float64_t result; \
- __asm__ ("fcvtzu %d0,%d1,%2" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-__extension__ static __inline float64_t __attribute__ ((__always_inline__))
-vcvtd_s64_f64 (float64_t a)
-{
- float64_t result;
- __asm__ ("fcvtzs %d0,%d1"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64_t __attribute__ ((__always_inline__))
-vcvtd_u64_f64 (float64_t a)
-{
- float64_t result;
- __asm__ ("fcvtzu %d0,%d1"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vcvtm_s32_f32 (float32x2_t a)
-{
- int32x2_t result;
- __asm__ ("fcvtms %0.2s, %1.2s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vcvtm_u32_f32 (float32x2_t a)
-{
- uint32x2_t result;
- __asm__ ("fcvtmu %0.2s, %1.2s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64_t __attribute__ ((__always_inline__))
-vcvtmd_s64_f64 (float64_t a)
-{
- float64_t result;
- __asm__ ("fcvtms %d0,%d1"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64_t __attribute__ ((__always_inline__))
-vcvtmd_u64_f64 (float64_t a)
-{
- float64_t result;
- __asm__ ("fcvtmu %d0,%d1"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vcvtmq_s32_f32 (float32x4_t a)
-{
- int32x4_t result;
- __asm__ ("fcvtms %0.4s, %1.4s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vcvtmq_s64_f64 (float64x2_t a)
-{
- int64x2_t result;
- __asm__ ("fcvtms %0.2d, %1.2d"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vcvtmq_u32_f32 (float32x4_t a)
-{
- uint32x4_t result;
- __asm__ ("fcvtmu %0.4s, %1.4s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vcvtmq_u64_f64 (float64x2_t a)
-{
- uint64x2_t result;
- __asm__ ("fcvtmu %0.2d, %1.2d"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
-vcvtms_s64_f64 (float32_t a)
-{
- float32_t result;
- __asm__ ("fcvtms %s0,%s1"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
-vcvtms_u64_f64 (float32_t a)
-{
- float32_t result;
- __asm__ ("fcvtmu %s0,%s1"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vcvtn_s32_f32 (float32x2_t a)
-{
- int32x2_t result;
- __asm__ ("fcvtns %0.2s, %1.2s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vcvtn_u32_f32 (float32x2_t a)
-{
- uint32x2_t result;
- __asm__ ("fcvtnu %0.2s, %1.2s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64_t __attribute__ ((__always_inline__))
-vcvtnd_s64_f64 (float64_t a)
-{
- float64_t result;
- __asm__ ("fcvtns %d0,%d1"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64_t __attribute__ ((__always_inline__))
-vcvtnd_u64_f64 (float64_t a)
-{
- float64_t result;
- __asm__ ("fcvtnu %d0,%d1"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vcvtnq_s32_f32 (float32x4_t a)
-{
- int32x4_t result;
- __asm__ ("fcvtns %0.4s, %1.4s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vcvtnq_s64_f64 (float64x2_t a)
-{
- int64x2_t result;
- __asm__ ("fcvtns %0.2d, %1.2d"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vcvtnq_u32_f32 (float32x4_t a)
-{
- uint32x4_t result;
- __asm__ ("fcvtnu %0.4s, %1.4s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vcvtnq_u64_f64 (float64x2_t a)
-{
- uint64x2_t result;
- __asm__ ("fcvtnu %0.2d, %1.2d"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
-vcvtns_s64_f64 (float32_t a)
-{
- float32_t result;
- __asm__ ("fcvtns %s0,%s1"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
-vcvtns_u64_f64 (float32_t a)
-{
- float32_t result;
- __asm__ ("fcvtnu %s0,%s1"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vcvtp_s32_f32 (float32x2_t a)
-{
- int32x2_t result;
- __asm__ ("fcvtps %0.2s, %1.2s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vcvtp_u32_f32 (float32x2_t a)
-{
- uint32x2_t result;
- __asm__ ("fcvtpu %0.2s, %1.2s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64_t __attribute__ ((__always_inline__))
-vcvtpd_s64_f64 (float64_t a)
-{
- float64_t result;
- __asm__ ("fcvtps %d0,%d1"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64_t __attribute__ ((__always_inline__))
-vcvtpd_u64_f64 (float64_t a)
-{
- float64_t result;
- __asm__ ("fcvtpu %d0,%d1"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vcvtpq_s32_f32 (float32x4_t a)
-{
- int32x4_t result;
- __asm__ ("fcvtps %0.4s, %1.4s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vcvtpq_s64_f64 (float64x2_t a)
-{
- int64x2_t result;
- __asm__ ("fcvtps %0.2d, %1.2d"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vcvtpq_u32_f32 (float32x4_t a)
-{
- uint32x4_t result;
- __asm__ ("fcvtpu %0.4s, %1.4s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vcvtpq_u64_f64 (float64x2_t a)
-{
- uint64x2_t result;
- __asm__ ("fcvtpu %0.2d, %1.2d"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
-vcvtps_s64_f64 (float32_t a)
-{
- float32_t result;
- __asm__ ("fcvtps %s0,%s1"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
-vcvtps_u64_f64 (float32_t a)
-{
- float32_t result;
- __asm__ ("fcvtpu %s0,%s1"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
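A usage sketch for the rounding conversions above: the A/M/N/P variants pick the rounding mode at conversion time (to nearest with ties away from zero, toward minus infinity, to nearest with ties to even, toward plus infinity), while the plain vcvt forms truncate toward zero.

#include <arm_neon.h>

/* FCVTNS: round to nearest, ties to even.  */
static inline int32x4_t
round_to_nearest (float32x4_t v)
{
  return vcvtnq_s32_f32 (v);
}

/* FCVTZS: truncate toward zero.  */
static inline int32x4_t
truncate_toward_zero (float32x4_t v)
{
  return vcvtq_s32_f32 (v);
}
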
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vcvtq_f32_s32 (int32x4_t a)
-{
- float32x4_t result;
- __asm__ ("scvtf %0.4s, %1.4s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vcvtq_f32_u32 (uint32x4_t a)
-{
- float32x4_t result;
- __asm__ ("ucvtf %0.4s, %1.4s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
-vcvtq_f64_s64 (int64x2_t a)
-{
- float64x2_t result;
- __asm__ ("scvtf %0.2d, %1.2d"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
-vcvtq_f64_u64 (uint64x2_t a)
-{
- float64x2_t result;
- __asm__ ("ucvtf %0.2d, %1.2d"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-#define vcvtq_n_f32_s32(a, b) \
- __extension__ \
- ({ \
- int32x4_t a_ = (a); \
- float32x4_t result; \
- __asm__ ("scvtf %0.4s, %1.4s, #%2" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vcvtq_n_f32_u32(a, b) \
- __extension__ \
- ({ \
- uint32x4_t a_ = (a); \
- float32x4_t result; \
- __asm__ ("ucvtf %0.4s, %1.4s, #%2" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vcvtq_n_f64_s64(a, b) \
- __extension__ \
- ({ \
- int64x2_t a_ = (a); \
- float64x2_t result; \
- __asm__ ("scvtf %0.2d, %1.2d, #%2" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vcvtq_n_f64_u64(a, b) \
- __extension__ \
- ({ \
- uint64x2_t a_ = (a); \
- float64x2_t result; \
- __asm__ ("ucvtf %0.2d, %1.2d, #%2" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vcvtq_n_s32_f32(a, b) \
- __extension__ \
- ({ \
- float32x4_t a_ = (a); \
- int32x4_t result; \
- __asm__ ("fcvtzs %0.4s, %1.4s, #%2" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vcvtq_n_s64_f64(a, b) \
- __extension__ \
- ({ \
- float64x2_t a_ = (a); \
- int64x2_t result; \
- __asm__ ("fcvtzs %0.2d, %1.2d, #%2" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vcvtq_n_u32_f32(a, b) \
- __extension__ \
- ({ \
- float32x4_t a_ = (a); \
- uint32x4_t result; \
- __asm__ ("fcvtzu %0.4s, %1.4s, #%2" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vcvtq_n_u64_f64(a, b) \
- __extension__ \
- ({ \
- float64x2_t a_ = (a); \
- uint64x2_t result; \
- __asm__ ("fcvtzu %0.2d, %1.2d, #%2" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vcvtq_s32_f32 (float32x4_t a)
-{
- int32x4_t result;
- __asm__ ("fcvtzs %0.4s, %1.4s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vcvtq_s64_f64 (float64x2_t a)
-{
- int64x2_t result;
- __asm__ ("fcvtzs %0.2d, %1.2d"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vcvtq_u32_f32 (float32x4_t a)
-{
- uint32x4_t result;
- __asm__ ("fcvtzu %0.4s, %1.4s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vcvtq_u64_f64 (float64x2_t a)
-{
- uint64x2_t result;
- __asm__ ("fcvtzu %0.2d, %1.2d"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32_t __attribute__ ((__always_inline__))
-vcvts_f64_s32 (int32_t a)
-{
- int32_t result;
- __asm__ ("scvtf %s0,%s1"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
-vcvts_f64_u32 (uint32_t a)
-{
- uint32_t result;
- __asm__ ("ucvtf %s0,%s1"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-#define vcvts_n_f32_s32(a, b) \
- __extension__ \
- ({ \
- int32_t a_ = (a); \
- int32_t result; \
- __asm__ ("scvtf %s0,%s1,%2" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vcvts_n_f32_u32(a, b) \
- __extension__ \
- ({ \
- uint32_t a_ = (a); \
- uint32_t result; \
- __asm__ ("ucvtf %s0,%s1,%2" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vcvts_n_s32_f32(a, b) \
- __extension__ \
- ({ \
- float32_t a_ = (a); \
- float32_t result; \
- __asm__ ("fcvtzs %s0,%s1,%2" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vcvts_n_u32_f32(a, b) \
- __extension__ \
- ({ \
- float32_t a_ = (a); \
- float32_t result; \
- __asm__ ("fcvtzu %s0,%s1,%2" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
-vcvts_s64_f64 (float32_t a)
-{
- float32_t result;
- __asm__ ("fcvtzs %s0,%s1"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
-vcvts_u64_f64 (float32_t a)
-{
- float32_t result;
- __asm__ ("fcvtzu %s0,%s1"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vcvtx_f32_f64 (float64x2_t a)
-{
- float32x2_t result;
- __asm__ ("fcvtxn %0.2s,%1.2d"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vcvtx_high_f32_f64 (float64x2_t a)
-{
- float32x4_t result;
- __asm__ ("fcvtxn2 %0.4s,%1.2d"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
-vcvtxd_f32_f64 (float64_t a)
-{
- float32_t result;
- __asm__ ("fcvtxn %s0,%d1"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-#define vdup_lane_f32(a, b) \
- __extension__ \
- ({ \
- float32x2_t a_ = (a); \
- float32x2_t result; \
- __asm__ ("dup %0.2s,%1.s[%2]" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vdup_lane_p8(a, b) \
- __extension__ \
- ({ \
- poly8x8_t a_ = (a); \
- poly8x8_t result; \
- __asm__ ("dup %0.8b,%1.b[%2]" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vdup_lane_p16(a, b) \
- __extension__ \
- ({ \
- poly16x4_t a_ = (a); \
- poly16x4_t result; \
- __asm__ ("dup %0.4h,%1.h[%2]" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vdup_lane_s8(a, b) \
- __extension__ \
- ({ \
- int8x8_t a_ = (a); \
- int8x8_t result; \
- __asm__ ("dup %0.8b,%1.b[%2]" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vdup_lane_s16(a, b) \
- __extension__ \
- ({ \
- int16x4_t a_ = (a); \
- int16x4_t result; \
- __asm__ ("dup %0.4h,%1.h[%2]" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vdup_lane_s32(a, b) \
- __extension__ \
- ({ \
- int32x2_t a_ = (a); \
- int32x2_t result; \
- __asm__ ("dup %0.2s,%1.s[%2]" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vdup_lane_s64(a, b) \
- __extension__ \
- ({ \
- int64x1_t a_ = (a); \
- int64x1_t result; \
- __asm__ ("ins %0.d[0],%1.d[%2]" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vdup_lane_u8(a, b) \
- __extension__ \
- ({ \
- uint8x8_t a_ = (a); \
- uint8x8_t result; \
- __asm__ ("dup %0.8b,%1.b[%2]" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vdup_lane_u16(a, b) \
- __extension__ \
- ({ \
- uint16x4_t a_ = (a); \
- uint16x4_t result; \
- __asm__ ("dup %0.4h,%1.h[%2]" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vdup_lane_u32(a, b) \
- __extension__ \
- ({ \
- uint32x2_t a_ = (a); \
- uint32x2_t result; \
- __asm__ ("dup %0.2s,%1.s[%2]" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vdup_lane_u64(a, b) \
- __extension__ \
- ({ \
- uint64x1_t a_ = (a); \
- uint64x1_t result; \
- __asm__ ("ins %0.d[0],%1.d[%2]" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vdup_n_f32 (float32_t a)
-{
- float32x2_t result;
- __asm__ ("dup %0.2s, %w1"
- : "=w"(result)
- : "r"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vdup_n_p8 (uint32_t a)
-{
- poly8x8_t result;
- __asm__ ("dup %0.8b,%w1"
- : "=w"(result)
- : "r"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
-vdup_n_p16 (uint32_t a)
-{
- poly16x4_t result;
- __asm__ ("dup %0.4h,%w1"
- : "=w"(result)
- : "r"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vdup_n_s8 (int32_t a)
-{
- int8x8_t result;
- __asm__ ("dup %0.8b,%w1"
- : "=w"(result)
- : "r"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vdup_n_s16 (int32_t a)
-{
- int16x4_t result;
- __asm__ ("dup %0.4h,%w1"
- : "=w"(result)
- : "r"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vdup_n_s32 (int32_t a)
-{
- int32x2_t result;
- __asm__ ("dup %0.2s,%w1"
- : "=w"(result)
- : "r"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vdup_n_s64 (int64_t a)
-{
- int64x1_t result;
- __asm__ ("ins %0.d[0],%x1"
- : "=w"(result)
- : "r"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vdup_n_u8 (uint32_t a)
-{
- uint8x8_t result;
- __asm__ ("dup %0.8b,%w1"
- : "=w"(result)
- : "r"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vdup_n_u16 (uint32_t a)
-{
- uint16x4_t result;
- __asm__ ("dup %0.4h,%w1"
- : "=w"(result)
- : "r"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vdup_n_u32 (uint32_t a)
-{
- uint32x2_t result;
- __asm__ ("dup %0.2s,%w1"
- : "=w"(result)
- : "r"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vdup_n_u64 (uint64_t a)
-{
- uint64x1_t result;
- __asm__ ("ins %0.d[0],%x1"
- : "=w"(result)
- : "r"(a)
- : /* No clobbers */);
- return result;
-}
-
-#define vdupd_lane_f64(a, b) \
- __extension__ \
- ({ \
- float64x2_t a_ = (a); \
- float64_t result; \
- __asm__ ("dup %d0, %1.d[%2]" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vdupq_lane_f32(a, b) \
- __extension__ \
- ({ \
- float32x2_t a_ = (a); \
- float32x4_t result; \
- __asm__ ("dup %0.4s,%1.s[%2]" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vdupq_lane_f64(a, b) \
- __extension__ \
- ({ \
- float64x1_t a_ = (a); \
- float64x2_t result; \
- __asm__ ("dup %0.2d,%1.d[%2]" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vdupq_lane_p8(a, b) \
- __extension__ \
- ({ \
- poly8x8_t a_ = (a); \
- poly8x16_t result; \
- __asm__ ("dup %0.16b,%1.b[%2]" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vdupq_lane_p16(a, b) \
- __extension__ \
- ({ \
- poly16x4_t a_ = (a); \
- poly16x8_t result; \
- __asm__ ("dup %0.8h,%1.h[%2]" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vdupq_lane_s8(a, b) \
- __extension__ \
- ({ \
- int8x8_t a_ = (a); \
- int8x16_t result; \
- __asm__ ("dup %0.16b,%1.b[%2]" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vdupq_lane_s16(a, b) \
- __extension__ \
- ({ \
- int16x4_t a_ = (a); \
- int16x8_t result; \
- __asm__ ("dup %0.8h,%1.h[%2]" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vdupq_lane_s32(a, b) \
- __extension__ \
- ({ \
- int32x2_t a_ = (a); \
- int32x4_t result; \
- __asm__ ("dup %0.4s,%1.s[%2]" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vdupq_lane_s64(a, b) \
- __extension__ \
- ({ \
- int64x1_t a_ = (a); \
- int64x2_t result; \
- __asm__ ("dup %0.2d,%1.d[%2]" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vdupq_lane_u8(a, b) \
- __extension__ \
- ({ \
- uint8x8_t a_ = (a); \
- uint8x16_t result; \
- __asm__ ("dup %0.16b,%1.b[%2]" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vdupq_lane_u16(a, b) \
- __extension__ \
- ({ \
- uint16x4_t a_ = (a); \
- uint16x8_t result; \
- __asm__ ("dup %0.8h,%1.h[%2]" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vdupq_lane_u32(a, b) \
- __extension__ \
- ({ \
- uint32x2_t a_ = (a); \
- uint32x4_t result; \
- __asm__ ("dup %0.4s,%1.s[%2]" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vdupq_lane_u64(a, b) \
- __extension__ \
- ({ \
- uint64x1_t a_ = (a); \
- uint64x2_t result; \
- __asm__ ("dup %0.2d,%1.d[%2]" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vdupq_n_f32 (float32_t a)
-{
- float32x4_t result;
- __asm__ ("dup %0.4s, %w1"
- : "=w"(result)
- : "r"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
-vdupq_n_f64 (float64_t a)
-{
- float64x2_t result;
- __asm__ ("dup %0.2d, %x1"
- : "=w"(result)
- : "r"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
-vdupq_n_p8 (uint32_t a)
-{
- poly8x16_t result;
- __asm__ ("dup %0.16b,%w1"
- : "=w"(result)
- : "r"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
-vdupq_n_p16 (uint32_t a)
-{
- poly16x8_t result;
- __asm__ ("dup %0.8h,%w1"
- : "=w"(result)
- : "r"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vdupq_n_s8 (int32_t a)
-{
- int8x16_t result;
- __asm__ ("dup %0.16b,%w1"
- : "=w"(result)
- : "r"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vdupq_n_s16 (int32_t a)
-{
- int16x8_t result;
- __asm__ ("dup %0.8h,%w1"
- : "=w"(result)
- : "r"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vdupq_n_s32 (int32_t a)
-{
- int32x4_t result;
- __asm__ ("dup %0.4s,%w1"
- : "=w"(result)
- : "r"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vdupq_n_s64 (int64_t a)
-{
- int64x2_t result;
- __asm__ ("dup %0.2d,%x1"
- : "=w"(result)
- : "r"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vdupq_n_u8 (uint32_t a)
-{
- uint8x16_t result;
- __asm__ ("dup %0.16b,%w1"
- : "=w"(result)
- : "r"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vdupq_n_u16 (uint32_t a)
-{
- uint16x8_t result;
- __asm__ ("dup %0.8h,%w1"
- : "=w"(result)
- : "r"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vdupq_n_u32 (uint32_t a)
-{
- uint32x4_t result;
- __asm__ ("dup %0.4s,%w1"
- : "=w"(result)
- : "r"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vdupq_n_u64 (uint64_t a)
-{
- uint64x2_t result;
- __asm__ ("dup %0.2d,%x1"
- : "=w"(result)
- : "r"(a)
- : /* No clobbers */);
- return result;
-}
-
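A usage sketch for the vdup_n/vdupq_n intrinsics above: they broadcast a scalar into every lane, the usual way to splat a constant before a vector operation. This assumes vmulq_f32 is provided elsewhere in the header (it is a standard NEON intrinsic).

#include <arm_neon.h>

/* Scale every lane of v by the scalar s.  */
static inline float32x4_t
scale_by (float32x4_t v, float32_t s)
{
  return vmulq_f32 (v, vdupq_n_f32 (s));
}
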
-#define vdups_lane_f32(a, b) \
- __extension__ \
- ({ \
- float32x4_t a_ = (a); \
- float32_t result; \
- __asm__ ("dup %s0, %1.s[%2]" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vext_f32(a, b, c) \
- __extension__ \
- ({ \
- float32x2_t b_ = (b); \
- float32x2_t a_ = (a); \
- float32x2_t result; \
- __asm__ ("ext %0.8b, %1.8b, %2.8b, #%3*4" \
- : "=w"(result) \
- : "w"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vext_f64(a, b, c) \
- __extension__ \
- ({ \
- float64x1_t b_ = (b); \
- float64x1_t a_ = (a); \
- float64x1_t result; \
- __asm__ ("ext %0.8b, %1.8b, %2.8b, #%3*8" \
- : "=w"(result) \
- : "w"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vext_p8(a, b, c) \
- __extension__ \
- ({ \
- poly8x8_t b_ = (b); \
- poly8x8_t a_ = (a); \
- poly8x8_t result; \
- __asm__ ("ext %0.8b,%1.8b,%2.8b,%3" \
- : "=w"(result) \
- : "w"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vext_p16(a, b, c) \
- __extension__ \
- ({ \
- poly16x4_t b_ = (b); \
- poly16x4_t a_ = (a); \
- poly16x4_t result; \
- __asm__ ("ext %0.8b, %1.8b, %2.8b, #%3*2" \
- : "=w"(result) \
- : "w"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vext_s8(a, b, c) \
- __extension__ \
- ({ \
- int8x8_t b_ = (b); \
- int8x8_t a_ = (a); \
- int8x8_t result; \
- __asm__ ("ext %0.8b,%1.8b,%2.8b,%3" \
- : "=w"(result) \
- : "w"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vext_s16(a, b, c) \
- __extension__ \
- ({ \
- int16x4_t b_ = (b); \
- int16x4_t a_ = (a); \
- int16x4_t result; \
- __asm__ ("ext %0.8b, %1.8b, %2.8b, #%3*2" \
- : "=w"(result) \
- : "w"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vext_s32(a, b, c) \
- __extension__ \
- ({ \
- int32x2_t b_ = (b); \
- int32x2_t a_ = (a); \
- int32x2_t result; \
- __asm__ ("ext %0.8b, %1.8b, %2.8b, #%3*4" \
- : "=w"(result) \
- : "w"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vext_s64(a, b, c) \
- __extension__ \
- ({ \
- int64x1_t b_ = (b); \
- int64x1_t a_ = (a); \
- int64x1_t result; \
- __asm__ ("ext %0.8b, %1.8b, %2.8b, #%3*8" \
- : "=w"(result) \
- : "w"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vext_u8(a, b, c) \
- __extension__ \
- ({ \
- uint8x8_t b_ = (b); \
- uint8x8_t a_ = (a); \
- uint8x8_t result; \
- __asm__ ("ext %0.8b,%1.8b,%2.8b,%3" \
- : "=w"(result) \
- : "w"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vext_u16(a, b, c) \
- __extension__ \
- ({ \
- uint16x4_t b_ = (b); \
- uint16x4_t a_ = (a); \
- uint16x4_t result; \
- __asm__ ("ext %0.8b, %1.8b, %2.8b, #%3*2" \
- : "=w"(result) \
- : "w"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vext_u32(a, b, c) \
- __extension__ \
- ({ \
- uint32x2_t b_ = (b); \
- uint32x2_t a_ = (a); \
- uint32x2_t result; \
- __asm__ ("ext %0.8b, %1.8b, %2.8b, #%3*4" \
- : "=w"(result) \
- : "w"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vext_u64(a, b, c) \
- __extension__ \
- ({ \
- uint64x1_t b_ = (b); \
- uint64x1_t a_ = (a); \
- uint64x1_t result; \
- __asm__ ("ext %0.8b, %1.8b, %2.8b, #%3*8" \
- : "=w"(result) \
- : "w"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vextq_f32(a, b, c) \
- __extension__ \
- ({ \
- float32x4_t b_ = (b); \
- float32x4_t a_ = (a); \
- float32x4_t result; \
- __asm__ ("ext %0.16b, %1.16b, %2.16b, #%3*4" \
- : "=w"(result) \
- : "w"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vextq_f64(a, b, c) \
- __extension__ \
- ({ \
- float64x2_t b_ = (b); \
- float64x2_t a_ = (a); \
- float64x2_t result; \
- __asm__ ("ext %0.16b, %1.16b, %2.16b, #%3*8" \
- : "=w"(result) \
- : "w"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vextq_p8(a, b, c) \
- __extension__ \
- ({ \
- poly8x16_t b_ = (b); \
- poly8x16_t a_ = (a); \
- poly8x16_t result; \
- __asm__ ("ext %0.16b, %1.16b, %2.16b, #%3" \
- : "=w"(result) \
- : "w"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vextq_p16(a, b, c) \
- __extension__ \
- ({ \
- poly16x8_t b_ = (b); \
- poly16x8_t a_ = (a); \
- poly16x8_t result; \
- __asm__ ("ext %0.16b, %1.16b, %2.16b, #%3*2" \
- : "=w"(result) \
- : "w"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vextq_s8(a, b, c) \
- __extension__ \
- ({ \
- int8x16_t b_ = (b); \
- int8x16_t a_ = (a); \
- int8x16_t result; \
- __asm__ ("ext %0.16b, %1.16b, %2.16b, #%3" \
- : "=w"(result) \
- : "w"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vextq_s16(a, b, c) \
- __extension__ \
- ({ \
- int16x8_t b_ = (b); \
- int16x8_t a_ = (a); \
- int16x8_t result; \
- __asm__ ("ext %0.16b, %1.16b, %2.16b, #%3*2" \
- : "=w"(result) \
- : "w"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vextq_s32(a, b, c) \
- __extension__ \
- ({ \
- int32x4_t b_ = (b); \
- int32x4_t a_ = (a); \
- int32x4_t result; \
- __asm__ ("ext %0.16b, %1.16b, %2.16b, #%3*4" \
- : "=w"(result) \
- : "w"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vextq_s64(a, b, c) \
- __extension__ \
- ({ \
- int64x2_t b_ = (b); \
- int64x2_t a_ = (a); \
- int64x2_t result; \
- __asm__ ("ext %0.16b, %1.16b, %2.16b, #%3*8" \
- : "=w"(result) \
- : "w"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vextq_u8(a, b, c) \
- __extension__ \
- ({ \
- uint8x16_t b_ = (b); \
- uint8x16_t a_ = (a); \
- uint8x16_t result; \
- __asm__ ("ext %0.16b, %1.16b, %2.16b, #%3" \
- : "=w"(result) \
- : "w"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vextq_u16(a, b, c) \
- __extension__ \
- ({ \
- uint16x8_t b_ = (b); \
- uint16x8_t a_ = (a); \
- uint16x8_t result; \
- __asm__ ("ext %0.16b, %1.16b, %2.16b, #%3*2" \
- : "=w"(result) \
- : "w"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vextq_u32(a, b, c) \
- __extension__ \
- ({ \
- uint32x4_t b_ = (b); \
- uint32x4_t a_ = (a); \
- uint32x4_t result; \
- __asm__ ("ext %0.16b, %1.16b, %2.16b, #%3*4" \
- : "=w"(result) \
- : "w"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vextq_u64(a, b, c) \
- __extension__ \
- ({ \
- uint64x2_t b_ = (b); \
- uint64x2_t a_ = (a); \
- uint64x2_t result; \
- __asm__ ("ext %0.16b, %1.16b, %2.16b, #%3*8" \
- : "=w"(result) \
- : "w"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
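A usage sketch for the vext/vextq macros above, which wrap the EXT instruction: it extracts a contiguous window from the concatenation of two vectors, and the shift amount must be a constant. For example, a 3-byte sliding window across two 16-byte registers:

#include <arm_neon.h>

/* Result = bytes 3..15 of a followed by bytes 0..2 of b.  */
static inline uint8x16_t
shift_in_three_bytes (uint8x16_t a, uint8x16_t b)
{
  return vextq_u8 (a, b, 3);
}
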
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vfma_f32 (float32x2_t a, float32x2_t b, float32x2_t c)
-{
- float32x2_t result;
- __asm__ ("fmla %0.2s,%2.2s,%3.2s"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-#define vfma_lane_f32(a, b, c, d) \
- __extension__ \
- ({ \
- float32x2_t c_ = (c); \
- float32x2_t b_ = (b); \
- float32x2_t a_ = (a); \
- float32x2_t result; \
- __asm__ ("fmla %0.2s,%2.2s,%3.s[%4]" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vfmad_lane_f64(a, b, c) \
- __extension__ \
- ({ \
- float64x2_t b_ = (b); \
- float64_t a_ = (a); \
- float64_t result; \
- __asm__ ("fmla %d0,%d1,%2.d[%3]" \
- : "=w"(result) \
- : "w"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vfmaq_f32 (float32x4_t a, float32x4_t b, float32x4_t c)
-{
- float32x4_t result;
- __asm__ ("fmla %0.4s,%2.4s,%3.4s"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
-vfmaq_f64 (float64x2_t a, float64x2_t b, float64x2_t c)
-{
- float64x2_t result;
- __asm__ ("fmla %0.2d,%2.2d,%3.2d"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-#define vfmaq_lane_f32(a, b, c, d) \
- __extension__ \
- ({ \
- float32x4_t c_ = (c); \
- float32x4_t b_ = (b); \
- float32x4_t a_ = (a); \
- float32x4_t result; \
- __asm__ ("fmla %0.4s,%2.4s,%3.s[%4]" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vfmaq_lane_f64(a, b, c, d) \
- __extension__ \
- ({ \
- float64x2_t c_ = (c); \
- float64x2_t b_ = (b); \
- float64x2_t a_ = (a); \
- float64x2_t result; \
- __asm__ ("fmla %0.2d,%2.2d,%3.d[%4]" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vfmas_lane_f32(a, b, c) \
- __extension__ \
- ({ \
- float32x4_t b_ = (b); \
- float32_t a_ = (a); \
- float32_t result; \
- __asm__ ("fmla %s0,%s1,%2.s[%3]" \
- : "=w"(result) \
- : "w"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vfma_n_f32 (float32x2_t a, float32x2_t b, float32_t c)
-{
- float32x2_t result;
- __asm__ ("fmla %0.2s, %2.2s, %3.s[0]"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vfmaq_n_f32 (float32x4_t a, float32x4_t b, float32_t c)
-{
- float32x4_t result;
- __asm__ ("fmla %0.4s, %2.4s, %3.s[0]"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
-vfmaq_n_f64 (float64x2_t a, float64x2_t b, float64_t c)
-{
- float64x2_t result;
- __asm__ ("fmla %0.2d, %2.2d, %3.d[0]"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vfms_f32 (float32x2_t a, float32x2_t b, float32x2_t c)
-{
- float32x2_t result;
- __asm__ ("fmls %0.2s,%2.2s,%3.2s"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-#define vfmsd_lane_f64(a, b, c) \
- __extension__ \
- ({ \
- float64x2_t b_ = (b); \
- float64_t a_ = (a); \
- float64_t result; \
- __asm__ ("fmls %d0,%d1,%2.d[%3]" \
- : "=w"(result) \
- : "w"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vfmsq_f32 (float32x4_t a, float32x4_t b, float32x4_t c)
-{
- float32x4_t result;
- __asm__ ("fmls %0.4s,%2.4s,%3.4s"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
-vfmsq_f64 (float64x2_t a, float64x2_t b, float64x2_t c)
-{
- float64x2_t result;
- __asm__ ("fmls %0.2d,%2.2d,%3.2d"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-#define vfmss_lane_f32(a, b, c) \
- __extension__ \
- ({ \
- float32x4_t b_ = (b); \
- float32_t a_ = (a); \
- float32_t result; \
- __asm__ ("fmls %s0,%s1,%2.s[%3]" \
- : "=w"(result) \
- : "w"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
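A usage sketch for the fused multiply-add intrinsics above: vfmaq_f32 computes a + b*c per lane with a single FMLA, making it the natural building block for dot-product style accumulation loops.

#include <arm_neon.h>

/* One accumulation step: acc += x * y with a single rounding.  */
static inline float32x4_t
axpy_step (float32x4_t acc, float32x4_t x, float32x4_t y)
{
  return vfmaq_f32 (acc, x, y);
}
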
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vget_high_f32 (float32x4_t a)
-{
- float32x2_t result;
- __asm__ ("ins %0.d[0], %1.d[1]"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
-vget_high_f64 (float64x2_t a)
-{
- float64x1_t result;
- __asm__ ("ins %0.d[0], %1.d[1]"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vget_high_p8 (poly8x16_t a)
-{
- poly8x8_t result;
- __asm__ ("ins %0.d[0], %1.d[1]"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
-vget_high_p16 (poly16x8_t a)
-{
- poly16x4_t result;
- __asm__ ("ins %0.d[0], %1.d[1]"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vget_high_s8 (int8x16_t a)
-{
- int8x8_t result;
- __asm__ ("ins %0.d[0], %1.d[1]"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vget_high_s16 (int16x8_t a)
-{
- int16x4_t result;
- __asm__ ("ins %0.d[0], %1.d[1]"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vget_high_s32 (int32x4_t a)
-{
- int32x2_t result;
- __asm__ ("ins %0.d[0], %1.d[1]"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vget_high_s64 (int64x2_t a)
-{
- int64x1_t result;
- __asm__ ("ins %0.d[0], %1.d[1]"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vget_high_u8 (uint8x16_t a)
-{
- uint8x8_t result;
- __asm__ ("ins %0.d[0], %1.d[1]"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vget_high_u16 (uint16x8_t a)
-{
- uint16x4_t result;
- __asm__ ("ins %0.d[0], %1.d[1]"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vget_high_u32 (uint32x4_t a)
-{
- uint32x2_t result;
- __asm__ ("ins %0.d[0], %1.d[1]"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vget_high_u64 (uint64x2_t a)
-{
- uint64x1_t result;
- __asm__ ("ins %0.d[0], %1.d[1]"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-#define vget_lane_f64(a, b) \
- __extension__ \
- ({ \
- float64x1_t a_ = (a); \
- float64_t result; \
- __asm__ ("umov %x0, %1.d[%2]" \
- : "=r"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vget_low_f32 (float32x4_t a)
-{
- float32x2_t result;
- __asm__ ("ins %0.d[0], %1.d[0]"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
-vget_low_f64 (float64x2_t a)
-{
- float64x1_t result;
- __asm__ ("ins %0.d[0], %1.d[0]"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vget_low_p8 (poly8x16_t a)
-{
- poly8x8_t result;
- __asm__ ("ins %0.d[0], %1.d[0]"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
-vget_low_p16 (poly16x8_t a)
-{
- poly16x4_t result;
- __asm__ ("ins %0.d[0], %1.d[0]"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vget_low_s8 (int8x16_t a)
-{
- int8x8_t result;
- __asm__ ("ins %0.d[0], %1.d[0]"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vget_low_s16 (int16x8_t a)
-{
- int16x4_t result;
- __asm__ ("ins %0.d[0], %1.d[0]"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vget_low_s32 (int32x4_t a)
-{
- int32x2_t result;
- __asm__ ("ins %0.d[0], %1.d[0]"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vget_low_s64 (int64x2_t a)
-{
- int64x1_t result;
- __asm__ ("ins %0.d[0], %1.d[0]"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vget_low_u8 (uint8x16_t a)
-{
- uint8x8_t result;
- __asm__ ("ins %0.d[0], %1.d[0]"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vget_low_u16 (uint16x8_t a)
-{
- uint16x4_t result;
- __asm__ ("ins %0.d[0], %1.d[0]"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vget_low_u32 (uint32x4_t a)
-{
- uint32x2_t result;
- __asm__ ("ins %0.d[0], %1.d[0]"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vget_low_u64 (uint64x2_t a)
-{
- uint64x1_t result;
- __asm__ ("ins %0.d[0], %1.d[0]"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vhsub_s8 (int8x8_t a, int8x8_t b)
-{
- int8x8_t result;
- __asm__ ("shsub %0.8b, %1.8b, %2.8b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vhsub_s16 (int16x4_t a, int16x4_t b)
-{
- int16x4_t result;
- __asm__ ("shsub %0.4h, %1.4h, %2.4h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vhsub_s32 (int32x2_t a, int32x2_t b)
-{
- int32x2_t result;
- __asm__ ("shsub %0.2s, %1.2s, %2.2s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vhsub_u8 (uint8x8_t a, uint8x8_t b)
-{
- uint8x8_t result;
- __asm__ ("uhsub %0.8b, %1.8b, %2.8b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vhsub_u16 (uint16x4_t a, uint16x4_t b)
-{
- uint16x4_t result;
- __asm__ ("uhsub %0.4h, %1.4h, %2.4h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vhsub_u32 (uint32x2_t a, uint32x2_t b)
-{
- uint32x2_t result;
- __asm__ ("uhsub %0.2s, %1.2s, %2.2s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vhsubq_s8 (int8x16_t a, int8x16_t b)
-{
- int8x16_t result;
- __asm__ ("shsub %0.16b, %1.16b, %2.16b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vhsubq_s16 (int16x8_t a, int16x8_t b)
-{
- int16x8_t result;
- __asm__ ("shsub %0.8h, %1.8h, %2.8h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vhsubq_s32 (int32x4_t a, int32x4_t b)
-{
- int32x4_t result;
- __asm__ ("shsub %0.4s, %1.4s, %2.4s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vhsubq_u8 (uint8x16_t a, uint8x16_t b)
-{
- uint8x16_t result;
- __asm__ ("uhsub %0.16b, %1.16b, %2.16b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vhsubq_u16 (uint16x8_t a, uint16x8_t b)
-{
- uint16x8_t result;
- __asm__ ("uhsub %0.8h, %1.8h, %2.8h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vhsubq_u32 (uint32x4_t a, uint32x4_t b)
-{
- uint32x4_t result;
- __asm__ ("uhsub %0.4s, %1.4s, %2.4s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vld1_dup_f32 (const float32_t * a)
-{
- float32x2_t result;
- __asm__ ("ld1r {%0.2s}, %1"
- : "=w"(result)
- : "Utv"(*a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
-vld1_dup_f64 (const float64_t * a)
-{
- float64x1_t result;
- __asm__ ("ld1r {%0.1d}, %1"
- : "=w"(result)
- : "Utv"(*a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vld1_dup_p8 (const poly8_t * a)
-{
- poly8x8_t result;
- __asm__ ("ld1r {%0.8b}, %1"
- : "=w"(result)
- : "Utv"(*a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
-vld1_dup_p16 (const poly16_t * a)
-{
- poly16x4_t result;
- __asm__ ("ld1r {%0.4h}, %1"
- : "=w"(result)
- : "Utv"(*a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vld1_dup_s8 (const int8_t * a)
-{
- int8x8_t result;
- __asm__ ("ld1r {%0.8b}, %1"
- : "=w"(result)
- : "Utv"(*a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vld1_dup_s16 (const int16_t * a)
-{
- int16x4_t result;
- __asm__ ("ld1r {%0.4h}, %1"
- : "=w"(result)
- : "Utv"(*a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vld1_dup_s32 (const int32_t * a)
-{
- int32x2_t result;
- __asm__ ("ld1r {%0.2s}, %1"
- : "=w"(result)
- : "Utv"(*a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vld1_dup_s64 (const int64_t * a)
-{
- int64x1_t result;
- __asm__ ("ld1r {%0.1d}, %1"
- : "=w"(result)
- : "Utv"(*a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vld1_dup_u8 (const uint8_t * a)
-{
- uint8x8_t result;
- __asm__ ("ld1r {%0.8b}, %1"
- : "=w"(result)
- : "Utv"(*a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vld1_dup_u16 (const uint16_t * a)
-{
- uint16x4_t result;
- __asm__ ("ld1r {%0.4h}, %1"
- : "=w"(result)
- : "Utv"(*a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vld1_dup_u32 (const uint32_t * a)
-{
- uint32x2_t result;
- __asm__ ("ld1r {%0.2s}, %1"
- : "=w"(result)
- : "Utv"(*a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vld1_dup_u64 (const uint64_t * a)
-{
- uint64x1_t result;
- __asm__ ("ld1r {%0.1d}, %1"
- : "=w"(result)
- : "Utv"(*a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vld1_f32 (const float32_t * a)
-{
- float32x2_t result;
- __asm__ ("ld1 {%0.2s}, %1"
- : "=w"(result)
- : "Utv"(({const float32x2_t *_a = (float32x2_t *) a; *_a;}))
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
-vld1_f64 (const float64_t * a)
-{
- float64x1_t result;
- __asm__ ("ld1 {%0.1d}, %1"
- : "=w"(result)
- : "Utv"(*a)
- : /* No clobbers */);
- return result;
-}
-
-#define vld1_lane_f32(a, b, c) \
- __extension__ \
- ({ \
- float32x2_t b_ = (b); \
- const float32_t * a_ = (a); \
- float32x2_t result; \
- __asm__ ("ld1 {%0.s}[%1], %2" \
- : "=w"(result) \
- : "i" (c), "Utv"(*a_), "0"(b_) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vld1_lane_f64(a, b, c) \
- __extension__ \
- ({ \
- float64x1_t b_ = (b); \
- const float64_t * a_ = (a); \
- float64x1_t result; \
- __asm__ ("ld1 {%0.d}[%1], %2" \
- : "=w"(result) \
- : "i" (c), "Utv"(*a_), "0"(b_) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vld1_lane_p8(a, b, c) \
- __extension__ \
- ({ \
- poly8x8_t b_ = (b); \
- const poly8_t * a_ = (a); \
- poly8x8_t result; \
- __asm__ ("ld1 {%0.b}[%1], %2" \
- : "=w"(result) \
- : "i" (c), "Utv"(*a_), "0"(b_) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vld1_lane_p16(a, b, c) \
- __extension__ \
- ({ \
- poly16x4_t b_ = (b); \
- const poly16_t * a_ = (a); \
- poly16x4_t result; \
- __asm__ ("ld1 {%0.h}[%1], %2" \
- : "=w"(result) \
- : "i" (c), "Utv"(*a_), "0"(b_) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vld1_lane_s8(a, b, c) \
- __extension__ \
- ({ \
- int8x8_t b_ = (b); \
- const int8_t * a_ = (a); \
- int8x8_t result; \
- __asm__ ("ld1 {%0.b}[%1], %2" \
- : "=w"(result) \
- : "i" (c), "Utv"(*a_), "0"(b_) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vld1_lane_s16(a, b, c) \
- __extension__ \
- ({ \
- int16x4_t b_ = (b); \
- const int16_t * a_ = (a); \
- int16x4_t result; \
- __asm__ ("ld1 {%0.h}[%1], %2" \
- : "=w"(result) \
- : "i" (c), "Utv"(*a_), "0"(b_) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vld1_lane_s32(a, b, c) \
- __extension__ \
- ({ \
- int32x2_t b_ = (b); \
- const int32_t * a_ = (a); \
- int32x2_t result; \
- __asm__ ("ld1 {%0.s}[%1], %2" \
- : "=w"(result) \
- : "i" (c), "Utv"(*a_), "0"(b_) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vld1_lane_s64(a, b, c) \
- __extension__ \
- ({ \
- int64x1_t b_ = (b); \
- const int64_t * a_ = (a); \
- int64x1_t result; \
- __asm__ ("ld1 {%0.d}[%1], %2" \
- : "=w"(result) \
- : "i" (c), "Utv"(*a_), "0"(b_) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vld1_lane_u8(a, b, c) \
- __extension__ \
- ({ \
- uint8x8_t b_ = (b); \
- const uint8_t * a_ = (a); \
- uint8x8_t result; \
- __asm__ ("ld1 {%0.b}[%1], %2" \
- : "=w"(result) \
- : "i" (c), "Utv"(*a_), "0"(b_) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vld1_lane_u16(a, b, c) \
- __extension__ \
- ({ \
- uint16x4_t b_ = (b); \
- const uint16_t * a_ = (a); \
- uint16x4_t result; \
- __asm__ ("ld1 {%0.h}[%1], %2" \
- : "=w"(result) \
- : "i" (c), "Utv"(*a_), "0"(b_) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vld1_lane_u32(a, b, c) \
- __extension__ \
- ({ \
- uint32x2_t b_ = (b); \
- const uint32_t * a_ = (a); \
- uint32x2_t result; \
- __asm__ ("ld1 {%0.s}[%1], %2" \
- : "=w"(result) \
- : "i" (c), "Utv"(*a_), "0"(b_) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vld1_lane_u64(a, b, c) \
- __extension__ \
- ({ \
- uint64x1_t b_ = (b); \
- const uint64_t * a_ = (a); \
- uint64x1_t result; \
- __asm__ ("ld1 {%0.d}[%1], %2" \
- : "=w"(result) \
- : "i" (c), "Utv"(*a_), "0"(b_) \
- : /* No clobbers */); \
- result; \
- })
-
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vld1_p8 (const poly8_t * a)
-{
- poly8x8_t result;
- __asm__ ("ld1 {%0.8b}, %1"
- : "=w"(result)
- : "Utv"(({const poly8x8_t *_a = (poly8x8_t *) a; *_a;}))
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
-vld1_p16 (const poly16_t * a)
-{
- poly16x4_t result;
- __asm__ ("ld1 {%0.4h}, %1"
- : "=w"(result)
- : "Utv"(({const poly16x4_t *_a = (poly16x4_t *) a; *_a;}))
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vld1_s8 (const int8_t * a)
-{
- int8x8_t result;
- __asm__ ("ld1 {%0.8b}, %1"
- : "=w"(result)
- : "Utv"(({const int8x8_t *_a = (int8x8_t *) a; *_a;}))
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vld1_s16 (const int16_t * a)
-{
- int16x4_t result;
- __asm__ ("ld1 {%0.4h}, %1"
- : "=w"(result)
- : "Utv"(({const int16x4_t *_a = (int16x4_t *) a; *_a;}))
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vld1_s32 (const int32_t * a)
-{
- int32x2_t result;
- __asm__ ("ld1 {%0.2s}, %1"
- : "=w"(result)
- : "Utv"(({const int32x2_t *_a = (int32x2_t *) a; *_a;}))
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vld1_s64 (const int64_t * a)
-{
- int64x1_t result;
- __asm__ ("ld1 {%0.1d}, %1"
- : "=w"(result)
- : "Utv"(*a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vld1_u8 (const uint8_t * a)
-{
- uint8x8_t result;
- __asm__ ("ld1 {%0.8b}, %1"
- : "=w"(result)
- : "Utv"(({const uint8x8_t *_a = (uint8x8_t *) a; *_a;}))
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vld1_u16 (const uint16_t * a)
-{
- uint16x4_t result;
- __asm__ ("ld1 {%0.4h}, %1"
- : "=w"(result)
- : "Utv"(({const uint16x4_t *_a = (uint16x4_t *) a; *_a;}))
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vld1_u32 (const uint32_t * a)
-{
- uint32x2_t result;
- __asm__ ("ld1 {%0.2s}, %1"
- : "=w"(result)
- : "Utv"(({const uint32x2_t *_a = (uint32x2_t *) a; *_a;}))
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vld1_u64 (const uint64_t * a)
-{
- uint64x1_t result;
- __asm__ ("ld1 {%0.1d}, %1"
- : "=w"(result)
- : "Utv"(*a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vld1q_dup_f32 (const float32_t * a)
-{
- float32x4_t result;
- __asm__ ("ld1r {%0.4s}, %1"
- : "=w"(result)
- : "Utv"(*a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
-vld1q_dup_f64 (const float64_t * a)
-{
- float64x2_t result;
- __asm__ ("ld1r {%0.2d}, %1"
- : "=w"(result)
- : "Utv"(*a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
-vld1q_dup_p8 (const poly8_t * a)
-{
- poly8x16_t result;
- __asm__ ("ld1r {%0.16b}, %1"
- : "=w"(result)
- : "Utv"(*a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
-vld1q_dup_p16 (const poly16_t * a)
-{
- poly16x8_t result;
- __asm__ ("ld1r {%0.8h}, %1"
- : "=w"(result)
- : "Utv"(*a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vld1q_dup_s8 (const int8_t * a)
-{
- int8x16_t result;
- __asm__ ("ld1r {%0.16b}, %1"
- : "=w"(result)
- : "Utv"(*a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vld1q_dup_s16 (const int16_t * a)
-{
- int16x8_t result;
- __asm__ ("ld1r {%0.8h}, %1"
- : "=w"(result)
- : "Utv"(*a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vld1q_dup_s32 (const int32_t * a)
-{
- int32x4_t result;
- __asm__ ("ld1r {%0.4s}, %1"
- : "=w"(result)
- : "Utv"(*a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vld1q_dup_s64 (const int64_t * a)
-{
- int64x2_t result;
- __asm__ ("ld1r {%0.2d}, %1"
- : "=w"(result)
- : "Utv"(*a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vld1q_dup_u8 (const uint8_t * a)
-{
- uint8x16_t result;
- __asm__ ("ld1r {%0.16b}, %1"
- : "=w"(result)
- : "Utv"(*a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vld1q_dup_u16 (const uint16_t * a)
-{
- uint16x8_t result;
- __asm__ ("ld1r {%0.8h}, %1"
- : "=w"(result)
- : "Utv"(*a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vld1q_dup_u32 (const uint32_t * a)
-{
- uint32x4_t result;
- __asm__ ("ld1r {%0.4s}, %1"
- : "=w"(result)
- : "Utv"(*a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vld1q_dup_u64 (const uint64_t * a)
-{
- uint64x2_t result;
- __asm__ ("ld1r {%0.2d}, %1"
- : "=w"(result)
- : "Utv"(*a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vld1q_f32 (const float32_t * a)
-{
- float32x4_t result;
- __asm__ ("ld1 {%0.4s}, %1"
- : "=w"(result)
- : "Utv"(({const float32x4_t *_a = (float32x4_t *) a; *_a;}))
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
-vld1q_f64 (const float64_t * a)
-{
- float64x2_t result;
- __asm__ ("ld1 {%0.2d}, %1"
- : "=w"(result)
- : "Utv"(({const float64x2_t *_a = (float64x2_t *) a; *_a;}))
- : /* No clobbers */);
- return result;
-}
-
-#define vld1q_lane_f32(a, b, c) \
- __extension__ \
- ({ \
- float32x4_t b_ = (b); \
- const float32_t * a_ = (a); \
- float32x4_t result; \
- __asm__ ("ld1 {%0.s}[%1], %2" \
- : "=w"(result) \
- : "i"(c), "Utv"(*a_), "0"(b_) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vld1q_lane_f64(a, b, c) \
- __extension__ \
- ({ \
- float64x2_t b_ = (b); \
- const float64_t * a_ = (a); \
- float64x2_t result; \
- __asm__ ("ld1 {%0.d}[%1], %2" \
- : "=w"(result) \
- : "i"(c), "Utv"(*a_), "0"(b_) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vld1q_lane_p8(a, b, c) \
- __extension__ \
- ({ \
- poly8x16_t b_ = (b); \
- const poly8_t * a_ = (a); \
- poly8x16_t result; \
- __asm__ ("ld1 {%0.b}[%1], %2" \
- : "=w"(result) \
- : "i"(c), "Utv"(*a_), "0"(b_) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vld1q_lane_p16(a, b, c) \
- __extension__ \
- ({ \
- poly16x8_t b_ = (b); \
- const poly16_t * a_ = (a); \
- poly16x8_t result; \
- __asm__ ("ld1 {%0.h}[%1], %2" \
- : "=w"(result) \
- : "i"(c), "Utv"(*a_), "0"(b_) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vld1q_lane_s8(a, b, c) \
- __extension__ \
- ({ \
- int8x16_t b_ = (b); \
- const int8_t * a_ = (a); \
- int8x16_t result; \
- __asm__ ("ld1 {%0.b}[%1], %2" \
- : "=w"(result) \
- : "i"(c), "Utv"(*a_), "0"(b_) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vld1q_lane_s16(a, b, c) \
- __extension__ \
- ({ \
- int16x8_t b_ = (b); \
- const int16_t * a_ = (a); \
- int16x8_t result; \
- __asm__ ("ld1 {%0.h}[%1], %2" \
- : "=w"(result) \
- : "i"(c), "Utv"(*a_), "0"(b_) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vld1q_lane_s32(a, b, c) \
- __extension__ \
- ({ \
- int32x4_t b_ = (b); \
- const int32_t * a_ = (a); \
- int32x4_t result; \
- __asm__ ("ld1 {%0.s}[%1], %2" \
- : "=w"(result) \
- : "i"(c), "Utv"(*a_), "0"(b_) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vld1q_lane_s64(a, b, c) \
- __extension__ \
- ({ \
- int64x2_t b_ = (b); \
- const int64_t * a_ = (a); \
- int64x2_t result; \
- __asm__ ("ld1 {%0.d}[%1], %2" \
- : "=w"(result) \
- : "i"(c), "Utv"(*a_), "0"(b_) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vld1q_lane_u8(a, b, c) \
- __extension__ \
- ({ \
- uint8x16_t b_ = (b); \
- const uint8_t * a_ = (a); \
- uint8x16_t result; \
- __asm__ ("ld1 {%0.b}[%1], %2" \
- : "=w"(result) \
- : "i"(c), "Utv"(*a_), "0"(b_) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vld1q_lane_u16(a, b, c) \
- __extension__ \
- ({ \
- uint16x8_t b_ = (b); \
- const uint16_t * a_ = (a); \
- uint16x8_t result; \
- __asm__ ("ld1 {%0.h}[%1], %2" \
- : "=w"(result) \
- : "i"(c), "Utv"(*a_), "0"(b_) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vld1q_lane_u32(a, b, c) \
- __extension__ \
- ({ \
- uint32x4_t b_ = (b); \
- const uint32_t * a_ = (a); \
- uint32x4_t result; \
- __asm__ ("ld1 {%0.s}[%1], %2" \
- : "=w"(result) \
- : "i"(c), "Utv"(*a_), "0"(b_) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vld1q_lane_u64(a, b, c) \
- __extension__ \
- ({ \
- uint64x2_t b_ = (b); \
- const uint64_t * a_ = (a); \
- uint64x2_t result; \
- __asm__ ("ld1 {%0.d}[%1], %2" \
- : "=w"(result) \
- : "i"(c), "Utv"(*a_), "0"(b_) \
- : /* No clobbers */); \
- result; \
- })
-
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
-vld1q_p8 (const poly8_t * a)
-{
- poly8x16_t result;
- __asm__ ("ld1 {%0.16b}, %1"
- : "=w"(result)
- : "Utv"(({const poly8x16_t *_a = (poly8x16_t *) a; *_a;}))
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
-vld1q_p16 (const poly16_t * a)
-{
- poly16x8_t result;
- __asm__ ("ld1 {%0.16b}, %1"
- : "=w"(result)
- : "Utv"(({const poly16x8_t *_a = (poly16x8_t *) a; *_a;}))
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vld1q_s8 (const int8_t * a)
-{
- int8x16_t result;
- __asm__ ("ld1 {%0.16b}, %1"
- : "=w"(result)
- : "Utv"(({const int8x16_t *_a = (int8x16_t *) a; *_a;}))
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vld1q_s16 (const int16_t * a)
-{
- int16x8_t result;
- __asm__ ("ld1 {%0.8h}, %1"
- : "=w"(result)
- : "Utv"(({const int16x8_t *_a = (int16x8_t *) a; *_a;}))
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vld1q_s32 (const int32_t * a)
-{
- int32x4_t result;
- __asm__ ("ld1 {%0.4s}, %1"
- : "=w"(result)
- : "Utv"(({const int32x4_t *_a = (int32x4_t *) a; *_a;}))
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vld1q_s64 (const int64_t * a)
-{
- int64x2_t result;
- __asm__ ("ld1 {%0.2d}, %1"
- : "=w"(result)
- : "Utv"(({const int64x2_t *_a = (int64x2_t *) a; *_a;}))
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vld1q_u8 (const uint8_t * a)
-{
- uint8x16_t result;
- __asm__ ("ld1 {%0.16b}, %1"
- : "=w"(result)
- : "Utv"(({const uint8x16_t *_a = (uint8x16_t *) a; *_a;}))
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vld1q_u16 (const uint16_t * a)
-{
- uint16x8_t result;
- __asm__ ("ld1 {%0.8h}, %1"
- : "=w"(result)
- : "Utv"(({const uint16x8_t *_a = (uint16x8_t *) a; *_a;}))
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vld1q_u32 (const uint32_t * a)
-{
- uint32x4_t result;
- __asm__ ("ld1 {%0.4s}, %1"
- : "=w"(result)
- : "Utv"(({const uint32x4_t *_a = (uint32x4_t *) a; *_a;}))
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vld1q_u64 (const uint64_t * a)
-{
- uint64x2_t result;
- __asm__ ("ld1 {%0.2d}, %1"
- : "=w"(result)
- : "Utv"(({const uint64x2_t *_a = (uint64x2_t *) a; *_a;}))
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vmaxnm_f32 (float32x2_t a, float32x2_t b)
-{
- float32x2_t result;
- __asm__ ("fmaxnm %0.2s,%1.2s,%2.2s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vmaxnmq_f32 (float32x4_t a, float32x4_t b)
-{
- float32x4_t result;
- __asm__ ("fmaxnm %0.4s,%1.4s,%2.4s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
-vmaxnmq_f64 (float64x2_t a, float64x2_t b)
-{
- float64x2_t result;
- __asm__ ("fmaxnm %0.2d,%1.2d,%2.2d"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
-vmaxnmvq_f32 (float32x4_t a)
-{
- float32_t result;
- __asm__ ("fmaxnmv %s0,%1.4s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8_t __attribute__ ((__always_inline__))
-vmaxv_s8 (int8x8_t a)
-{
- int8_t result;
- __asm__ ("smaxv %b0,%1.8b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16_t __attribute__ ((__always_inline__))
-vmaxv_s16 (int16x4_t a)
-{
- int16_t result;
- __asm__ ("smaxv %h0,%1.4h"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
-vmaxv_u8 (uint8x8_t a)
-{
- uint8_t result;
- __asm__ ("umaxv %b0,%1.8b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
-vmaxv_u16 (uint16x4_t a)
-{
- uint16_t result;
- __asm__ ("umaxv %h0,%1.4h"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
-vmaxvq_f32 (float32x4_t a)
-{
- float32_t result;
- __asm__ ("fmaxv %s0,%1.4s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8_t __attribute__ ((__always_inline__))
-vmaxvq_s8 (int8x16_t a)
-{
- int8_t result;
- __asm__ ("smaxv %b0,%1.16b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16_t __attribute__ ((__always_inline__))
-vmaxvq_s16 (int16x8_t a)
-{
- int16_t result;
- __asm__ ("smaxv %h0,%1.8h"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32_t __attribute__ ((__always_inline__))
-vmaxvq_s32 (int32x4_t a)
-{
- int32_t result;
- __asm__ ("smaxv %s0,%1.4s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
-vmaxvq_u8 (uint8x16_t a)
-{
- uint8_t result;
- __asm__ ("umaxv %b0,%1.16b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
-vmaxvq_u16 (uint16x8_t a)
-{
- uint16_t result;
- __asm__ ("umaxv %h0,%1.8h"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
-vmaxvq_u32 (uint32x4_t a)
-{
- uint32_t result;
- __asm__ ("umaxv %s0,%1.4s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
-vminnmvq_f32 (float32x4_t a)
-{
- float32_t result;
- __asm__ ("fminnmv %s0,%1.4s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8_t __attribute__ ((__always_inline__))
-vminv_s8 (int8x8_t a)
-{
- int8_t result;
- __asm__ ("sminv %b0,%1.8b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16_t __attribute__ ((__always_inline__))
-vminv_s16 (int16x4_t a)
-{
- int16_t result;
- __asm__ ("sminv %h0,%1.4h"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
-vminv_u8 (uint8x8_t a)
-{
- uint8_t result;
- __asm__ ("uminv %b0,%1.8b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
-vminv_u16 (uint16x4_t a)
-{
- uint16_t result;
- __asm__ ("uminv %h0,%1.4h"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
-vminvq_f32 (float32x4_t a)
-{
- float32_t result;
- __asm__ ("fminv %s0,%1.4s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8_t __attribute__ ((__always_inline__))
-vminvq_s8 (int8x16_t a)
-{
- int8_t result;
- __asm__ ("sminv %b0,%1.16b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16_t __attribute__ ((__always_inline__))
-vminvq_s16 (int16x8_t a)
-{
- int16_t result;
- __asm__ ("sminv %h0,%1.8h"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32_t __attribute__ ((__always_inline__))
-vminvq_s32 (int32x4_t a)
-{
- int32_t result;
- __asm__ ("sminv %s0,%1.4s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
-vminvq_u8 (uint8x16_t a)
-{
- uint8_t result;
- __asm__ ("uminv %b0,%1.16b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
-vminvq_u16 (uint16x8_t a)
-{
- uint16_t result;
- __asm__ ("uminv %h0,%1.8h"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
-vminvq_u32 (uint32x4_t a)
-{
- uint32_t result;
- __asm__ ("uminv %s0,%1.4s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-#define vmla_lane_f32(a, b, c, d) \
- __extension__ \
- ({ \
- float32x2_t c_ = (c); \
- float32x2_t b_ = (b); \
- float32x2_t a_ = (a); \
- float32x2_t result; \
- float32x2_t t1; \
- __asm__ ("fmul %1.2s, %3.2s, %4.s[%5]; fadd %0.2s, %0.2s, %1.2s" \
- : "=w"(result), "=w"(t1) \
- : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmla_lane_s16(a, b, c, d) \
- __extension__ \
- ({ \
- int16x4_t c_ = (c); \
- int16x4_t b_ = (b); \
- int16x4_t a_ = (a); \
- int16x4_t result; \
- __asm__ ("mla %0.4h, %2.4h, %3.h[%4]" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmla_lane_s32(a, b, c, d) \
- __extension__ \
- ({ \
- int32x2_t c_ = (c); \
- int32x2_t b_ = (b); \
- int32x2_t a_ = (a); \
- int32x2_t result; \
- __asm__ ("mla %0.2s, %2.2s, %3.s[%4]" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmla_lane_u16(a, b, c, d) \
- __extension__ \
- ({ \
- uint16x4_t c_ = (c); \
- uint16x4_t b_ = (b); \
- uint16x4_t a_ = (a); \
- uint16x4_t result; \
- __asm__ ("mla %0.4h, %2.4h, %3.h[%4]" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmla_lane_u32(a, b, c, d) \
- __extension__ \
- ({ \
- uint32x2_t c_ = (c); \
- uint32x2_t b_ = (b); \
- uint32x2_t a_ = (a); \
- uint32x2_t result; \
- __asm__ ("mla %0.2s, %2.2s, %3.s[%4]" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmla_laneq_s16(a, b, c, d) \
- __extension__ \
- ({ \
- int16x8_t c_ = (c); \
- int16x4_t b_ = (b); \
- int16x4_t a_ = (a); \
- int16x4_t result; \
- __asm__ ("mla %0.4h, %2.4h, %3.h[%4]" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmla_laneq_s32(a, b, c, d) \
- __extension__ \
- ({ \
- int32x4_t c_ = (c); \
- int32x2_t b_ = (b); \
- int32x2_t a_ = (a); \
- int32x2_t result; \
- __asm__ ("mla %0.2s, %2.2s, %3.s[%4]" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmla_laneq_u16(a, b, c, d) \
- __extension__ \
- ({ \
- uint16x8_t c_ = (c); \
- uint16x4_t b_ = (b); \
- uint16x4_t a_ = (a); \
- uint16x4_t result; \
- __asm__ ("mla %0.4h, %2.4h, %3.h[%4]" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmla_laneq_u32(a, b, c, d) \
- __extension__ \
- ({ \
- uint32x4_t c_ = (c); \
- uint32x2_t b_ = (b); \
- uint32x2_t a_ = (a); \
- uint32x2_t result; \
- __asm__ ("mla %0.2s, %2.2s, %3.s[%4]" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vmla_n_f32 (float32x2_t a, float32x2_t b, float32_t c)
-{
- float32x2_t result;
- float32x2_t t1;
- __asm__ ("fmul %1.2s, %3.2s, %4.s[0]; fadd %0.2s, %0.2s, %1.2s"
- : "=w"(result), "=w"(t1)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vmla_n_s16 (int16x4_t a, int16x4_t b, int16_t c)
-{
- int16x4_t result;
- __asm__ ("mla %0.4h,%2.4h,%3.h[0]"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vmla_n_s32 (int32x2_t a, int32x2_t b, int32_t c)
-{
- int32x2_t result;
- __asm__ ("mla %0.2s,%2.2s,%3.s[0]"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vmla_n_u16 (uint16x4_t a, uint16x4_t b, uint16_t c)
-{
- uint16x4_t result;
- __asm__ ("mla %0.4h,%2.4h,%3.h[0]"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vmla_n_u32 (uint32x2_t a, uint32x2_t b, uint32_t c)
-{
- uint32x2_t result;
- __asm__ ("mla %0.2s,%2.2s,%3.s[0]"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vmla_s8 (int8x8_t a, int8x8_t b, int8x8_t c)
-{
- int8x8_t result;
- __asm__ ("mla %0.8b, %2.8b, %3.8b"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vmla_s16 (int16x4_t a, int16x4_t b, int16x4_t c)
-{
- int16x4_t result;
- __asm__ ("mla %0.4h, %2.4h, %3.4h"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vmla_s32 (int32x2_t a, int32x2_t b, int32x2_t c)
-{
- int32x2_t result;
- __asm__ ("mla %0.2s, %2.2s, %3.2s"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vmla_u8 (uint8x8_t a, uint8x8_t b, uint8x8_t c)
-{
- uint8x8_t result;
- __asm__ ("mla %0.8b, %2.8b, %3.8b"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vmla_u16 (uint16x4_t a, uint16x4_t b, uint16x4_t c)
-{
- uint16x4_t result;
- __asm__ ("mla %0.4h, %2.4h, %3.4h"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vmla_u32 (uint32x2_t a, uint32x2_t b, uint32x2_t c)
-{
- uint32x2_t result;
- __asm__ ("mla %0.2s, %2.2s, %3.2s"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-#define vmlal_high_lane_s16(a, b, c, d) \
- __extension__ \
- ({ \
- int16x8_t c_ = (c); \
- int16x8_t b_ = (b); \
- int32x4_t a_ = (a); \
- int32x4_t result; \
- __asm__ ("smlal2 %0.4s, %2.8h, %3.h[%4]" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmlal_high_lane_s32(a, b, c, d) \
- __extension__ \
- ({ \
- int32x4_t c_ = (c); \
- int32x4_t b_ = (b); \
- int64x2_t a_ = (a); \
- int64x2_t result; \
- __asm__ ("smlal2 %0.2d, %2.4s, %3.s[%4]" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmlal_high_lane_u16(a, b, c, d) \
- __extension__ \
- ({ \
- uint16x8_t c_ = (c); \
- uint16x8_t b_ = (b); \
- uint32x4_t a_ = (a); \
- uint32x4_t result; \
- __asm__ ("umlal2 %0.4s, %2.8h, %3.h[%4]" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmlal_high_lane_u32(a, b, c, d) \
- __extension__ \
- ({ \
- uint32x4_t c_ = (c); \
- uint32x4_t b_ = (b); \
- uint64x2_t a_ = (a); \
- uint64x2_t result; \
- __asm__ ("umlal2 %0.2d, %2.4s, %3.s[%4]" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmlal_high_laneq_s16(a, b, c, d) \
- __extension__ \
- ({ \
- int16x8_t c_ = (c); \
- int16x8_t b_ = (b); \
- int32x4_t a_ = (a); \
- int32x4_t result; \
- __asm__ ("smlal2 %0.4s, %2.8h, %3.h[%4]" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmlal_high_laneq_s32(a, b, c, d) \
- __extension__ \
- ({ \
- int32x4_t c_ = (c); \
- int32x4_t b_ = (b); \
- int64x2_t a_ = (a); \
- int64x2_t result; \
- __asm__ ("smlal2 %0.2d, %2.4s, %3.s[%4]" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmlal_high_laneq_u16(a, b, c, d) \
- __extension__ \
- ({ \
- uint16x8_t c_ = (c); \
- uint16x8_t b_ = (b); \
- uint32x4_t a_ = (a); \
- uint32x4_t result; \
- __asm__ ("umlal2 %0.4s, %2.8h, %3.h[%4]" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmlal_high_laneq_u32(a, b, c, d) \
- __extension__ \
- ({ \
- uint32x4_t c_ = (c); \
- uint32x4_t b_ = (b); \
- uint64x2_t a_ = (a); \
- uint64x2_t result; \
- __asm__ ("umlal2 %0.2d, %2.4s, %3.s[%4]" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vmlal_high_n_s16 (int32x4_t a, int16x8_t b, int16_t c)
-{
- int32x4_t result;
- __asm__ ("smlal2 %0.4s,%2.8h,%3.h[0]"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vmlal_high_n_s32 (int64x2_t a, int32x4_t b, int32_t c)
-{
- int64x2_t result;
- __asm__ ("smlal2 %0.2d,%2.4s,%3.s[0]"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vmlal_high_n_u16 (uint32x4_t a, uint16x8_t b, uint16_t c)
-{
- uint32x4_t result;
- __asm__ ("umlal2 %0.4s,%2.8h,%3.h[0]"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vmlal_high_n_u32 (uint64x2_t a, uint32x4_t b, uint32_t c)
-{
- uint64x2_t result;
- __asm__ ("umlal2 %0.2d,%2.4s,%3.s[0]"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vmlal_high_s8 (int16x8_t a, int8x16_t b, int8x16_t c)
-{
- int16x8_t result;
- __asm__ ("smlal2 %0.8h,%2.16b,%3.16b"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vmlal_high_s16 (int32x4_t a, int16x8_t b, int16x8_t c)
-{
- int32x4_t result;
- __asm__ ("smlal2 %0.4s,%2.8h,%3.8h"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vmlal_high_s32 (int64x2_t a, int32x4_t b, int32x4_t c)
-{
- int64x2_t result;
- __asm__ ("smlal2 %0.2d,%2.4s,%3.4s"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vmlal_high_u8 (uint16x8_t a, uint8x16_t b, uint8x16_t c)
-{
- uint16x8_t result;
- __asm__ ("umlal2 %0.8h,%2.16b,%3.16b"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vmlal_high_u16 (uint32x4_t a, uint16x8_t b, uint16x8_t c)
-{
- uint32x4_t result;
- __asm__ ("umlal2 %0.4s,%2.8h,%3.8h"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vmlal_high_u32 (uint64x2_t a, uint32x4_t b, uint32x4_t c)
-{
- uint64x2_t result;
- __asm__ ("umlal2 %0.2d,%2.4s,%3.4s"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-#define vmlal_lane_s16(a, b, c, d) \
- __extension__ \
- ({ \
- int16x4_t c_ = (c); \
- int16x4_t b_ = (b); \
- int32x4_t a_ = (a); \
- int32x4_t result; \
- __asm__ ("smlal %0.4s,%2.4h,%3.h[%4]" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmlal_lane_s32(a, b, c, d) \
- __extension__ \
- ({ \
- int32x2_t c_ = (c); \
- int32x2_t b_ = (b); \
- int64x2_t a_ = (a); \
- int64x2_t result; \
- __asm__ ("smlal %0.2d,%2.2s,%3.s[%4]" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmlal_lane_u16(a, b, c, d) \
- __extension__ \
- ({ \
- uint16x4_t c_ = (c); \
- uint16x4_t b_ = (b); \
- uint32x4_t a_ = (a); \
- uint32x4_t result; \
- __asm__ ("umlal %0.4s,%2.4h,%3.h[%4]" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmlal_lane_u32(a, b, c, d) \
- __extension__ \
- ({ \
- uint32x2_t c_ = (c); \
- uint32x2_t b_ = (b); \
- uint64x2_t a_ = (a); \
- uint64x2_t result; \
- __asm__ ("umlal %0.2d, %2.2s, %3.s[%4]" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmlal_laneq_s16(a, b, c, d) \
- __extension__ \
- ({ \
- int16x8_t c_ = (c); \
- int16x4_t b_ = (b); \
- int32x4_t a_ = (a); \
- int32x4_t result; \
- __asm__ ("smlal %0.4s, %2.4h, %3.h[%4]" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmlal_laneq_s32(a, b, c, d) \
- __extension__ \
- ({ \
- int32x4_t c_ = (c); \
- int32x2_t b_ = (b); \
- int64x2_t a_ = (a); \
- int64x2_t result; \
- __asm__ ("smlal %0.2d, %2.2s, %3.s[%4]" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmlal_laneq_u16(a, b, c, d) \
- __extension__ \
- ({ \
- uint16x8_t c_ = (c); \
- uint16x4_t b_ = (b); \
- uint32x4_t a_ = (a); \
- uint32x4_t result; \
- __asm__ ("umlal %0.4s, %2.4h, %3.h[%4]" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmlal_laneq_u32(a, b, c, d) \
- __extension__ \
- ({ \
- uint32x4_t c_ = (c); \
- uint32x2_t b_ = (b); \
- uint64x2_t a_ = (a); \
- uint64x2_t result; \
- __asm__ ("umlal %0.2d, %2.2s, %3.s[%4]" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vmlal_n_s16 (int32x4_t a, int16x4_t b, int16_t c)
-{
- int32x4_t result;
- __asm__ ("smlal %0.4s,%2.4h,%3.h[0]"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vmlal_n_s32 (int64x2_t a, int32x2_t b, int32_t c)
-{
- int64x2_t result;
- __asm__ ("smlal %0.2d,%2.2s,%3.s[0]"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vmlal_n_u16 (uint32x4_t a, uint16x4_t b, uint16_t c)
-{
- uint32x4_t result;
- __asm__ ("umlal %0.4s,%2.4h,%3.h[0]"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vmlal_n_u32 (uint64x2_t a, uint32x2_t b, uint32_t c)
-{
- uint64x2_t result;
- __asm__ ("umlal %0.2d,%2.2s,%3.s[0]"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vmlal_s8 (int16x8_t a, int8x8_t b, int8x8_t c)
-{
- int16x8_t result;
- __asm__ ("smlal %0.8h,%2.8b,%3.8b"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vmlal_s16 (int32x4_t a, int16x4_t b, int16x4_t c)
-{
- int32x4_t result;
- __asm__ ("smlal %0.4s,%2.4h,%3.4h"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vmlal_s32 (int64x2_t a, int32x2_t b, int32x2_t c)
-{
- int64x2_t result;
- __asm__ ("smlal %0.2d,%2.2s,%3.2s"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vmlal_u8 (uint16x8_t a, uint8x8_t b, uint8x8_t c)
-{
- uint16x8_t result;
- __asm__ ("umlal %0.8h,%2.8b,%3.8b"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vmlal_u16 (uint32x4_t a, uint16x4_t b, uint16x4_t c)
-{
- uint32x4_t result;
- __asm__ ("umlal %0.4s,%2.4h,%3.4h"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vmlal_u32 (uint64x2_t a, uint32x2_t b, uint32x2_t c)
-{
- uint64x2_t result;
- __asm__ ("umlal %0.2d,%2.2s,%3.2s"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-#define vmlaq_lane_f32(a, b, c, d) \
- __extension__ \
- ({ \
- float32x4_t c_ = (c); \
- float32x4_t b_ = (b); \
- float32x4_t a_ = (a); \
- float32x4_t result; \
- float32x4_t t1; \
- __asm__ ("fmul %1.4s, %3.4s, %4.s[%5]; fadd %0.4s, %0.4s, %1.4s" \
- : "=w"(result), "=w"(t1) \
- : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmlaq_lane_s16(a, b, c, d) \
- __extension__ \
- ({ \
- int16x8_t c_ = (c); \
- int16x8_t b_ = (b); \
- int16x8_t a_ = (a); \
- int16x8_t result; \
- __asm__ ("mla %0.8h, %2.8h, %3.h[%4]" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmlaq_lane_s32(a, b, c, d) \
- __extension__ \
- ({ \
- int32x4_t c_ = (c); \
- int32x4_t b_ = (b); \
- int32x4_t a_ = (a); \
- int32x4_t result; \
- __asm__ ("mla %0.4s, %2.4s, %3.s[%4]" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmlaq_lane_u16(a, b, c, d) \
- __extension__ \
- ({ \
- uint16x8_t c_ = (c); \
- uint16x8_t b_ = (b); \
- uint16x8_t a_ = (a); \
- uint16x8_t result; \
- __asm__ ("mla %0.8h, %2.8h, %3.h[%4]" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmlaq_lane_u32(a, b, c, d) \
- __extension__ \
- ({ \
- uint32x4_t c_ = (c); \
- uint32x4_t b_ = (b); \
- uint32x4_t a_ = (a); \
- uint32x4_t result; \
- __asm__ ("mla %0.4s, %2.4s, %3.s[%4]" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmlaq_laneq_s16(a, b, c, d) \
- __extension__ \
- ({ \
- int16x8_t c_ = (c); \
- int16x8_t b_ = (b); \
- int16x8_t a_ = (a); \
- int16x8_t result; \
- __asm__ ("mla %0.8h, %2.8h, %3.h[%4]" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmlaq_laneq_s32(a, b, c, d) \
- __extension__ \
- ({ \
- int32x4_t c_ = (c); \
- int32x4_t b_ = (b); \
- int32x4_t a_ = (a); \
- int32x4_t result; \
- __asm__ ("mla %0.4s, %2.4s, %3.s[%4]" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmlaq_laneq_u16(a, b, c, d) \
- __extension__ \
- ({ \
- uint16x8_t c_ = (c); \
- uint16x8_t b_ = (b); \
- uint16x8_t a_ = (a); \
- uint16x8_t result; \
- __asm__ ("mla %0.8h, %2.8h, %3.h[%4]" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmlaq_laneq_u32(a, b, c, d) \
- __extension__ \
- ({ \
- uint32x4_t c_ = (c); \
- uint32x4_t b_ = (b); \
- uint32x4_t a_ = (a); \
- uint32x4_t result; \
- __asm__ ("mla %0.4s, %2.4s, %3.s[%4]" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vmlaq_n_f32 (float32x4_t a, float32x4_t b, float32_t c)
-{
- float32x4_t result;
- float32x4_t t1;
- __asm__ ("fmul %1.4s, %3.4s, %4.s[0]; fadd %0.4s, %0.4s, %1.4s"
- : "=w"(result), "=w"(t1)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
-vmlaq_n_f64 (float64x2_t a, float64x2_t b, float64_t c)
-{
- float64x2_t result;
- float64x2_t t1;
- __asm__ ("fmul %1.2d, %3.2d, %4.d[0]; fadd %0.2d, %0.2d, %1.2d"
- : "=w"(result), "=w"(t1)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vmlaq_n_s16 (int16x8_t a, int16x8_t b, int16_t c)
-{
- int16x8_t result;
- __asm__ ("mla %0.8h,%2.8h,%3.h[0]"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vmlaq_n_s32 (int32x4_t a, int32x4_t b, int32_t c)
-{
- int32x4_t result;
- __asm__ ("mla %0.4s,%2.4s,%3.s[0]"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vmlaq_n_u16 (uint16x8_t a, uint16x8_t b, uint16_t c)
-{
- uint16x8_t result;
- __asm__ ("mla %0.8h,%2.8h,%3.h[0]"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vmlaq_n_u32 (uint32x4_t a, uint32x4_t b, uint32_t c)
-{
- uint32x4_t result;
- __asm__ ("mla %0.4s,%2.4s,%3.s[0]"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vmlaq_s8 (int8x16_t a, int8x16_t b, int8x16_t c)
-{
- int8x16_t result;
- __asm__ ("mla %0.16b, %2.16b, %3.16b"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vmlaq_s16 (int16x8_t a, int16x8_t b, int16x8_t c)
-{
- int16x8_t result;
- __asm__ ("mla %0.8h, %2.8h, %3.8h"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vmlaq_s32 (int32x4_t a, int32x4_t b, int32x4_t c)
-{
- int32x4_t result;
- __asm__ ("mla %0.4s, %2.4s, %3.4s"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vmlaq_u8 (uint8x16_t a, uint8x16_t b, uint8x16_t c)
-{
- uint8x16_t result;
- __asm__ ("mla %0.16b, %2.16b, %3.16b"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vmlaq_u16 (uint16x8_t a, uint16x8_t b, uint16x8_t c)
-{
- uint16x8_t result;
- __asm__ ("mla %0.8h, %2.8h, %3.8h"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vmlaq_u32 (uint32x4_t a, uint32x4_t b, uint32x4_t c)
-{
- uint32x4_t result;
- __asm__ ("mla %0.4s, %2.4s, %3.4s"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-#define vmls_lane_f32(a, b, c, d) \
- __extension__ \
- ({ \
- float32x2_t c_ = (c); \
- float32x2_t b_ = (b); \
- float32x2_t a_ = (a); \
- float32x2_t result; \
- float32x2_t t1; \
- __asm__ ("fmul %1.2s, %3.2s, %4.s[%5]; fsub %0.2s, %0.2s, %1.2s" \
- : "=w"(result), "=w"(t1) \
- : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmls_lane_s16(a, b, c, d) \
- __extension__ \
- ({ \
- int16x4_t c_ = (c); \
- int16x4_t b_ = (b); \
- int16x4_t a_ = (a); \
- int16x4_t result; \
- __asm__ ("mls %0.4h,%2.4h,%3.h[%4]" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmls_lane_s32(a, b, c, d) \
- __extension__ \
- ({ \
- int32x2_t c_ = (c); \
- int32x2_t b_ = (b); \
- int32x2_t a_ = (a); \
- int32x2_t result; \
- __asm__ ("mls %0.2s,%2.2s,%3.s[%4]" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmls_lane_u16(a, b, c, d) \
- __extension__ \
- ({ \
- uint16x4_t c_ = (c); \
- uint16x4_t b_ = (b); \
- uint16x4_t a_ = (a); \
- uint16x4_t result; \
- __asm__ ("mls %0.4h,%2.4h,%3.h[%4]" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmls_lane_u32(a, b, c, d) \
- __extension__ \
- ({ \
- uint32x2_t c_ = (c); \
- uint32x2_t b_ = (b); \
- uint32x2_t a_ = (a); \
- uint32x2_t result; \
- __asm__ ("mls %0.2s,%2.2s,%3.s[%4]" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vmls_n_f32 (float32x2_t a, float32x2_t b, float32_t c)
-{
- float32x2_t result;
- float32x2_t t1;
- __asm__ ("fmul %1.2s, %3.2s, %4.s[0]; fsub %0.2s, %0.2s, %1.2s"
- : "=w"(result), "=w"(t1)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vmls_n_s16 (int16x4_t a, int16x4_t b, int16_t c)
-{
- int16x4_t result;
- __asm__ ("mls %0.4h, %2.4h, %3.h[0]"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vmls_n_s32 (int32x2_t a, int32x2_t b, int32_t c)
-{
- int32x2_t result;
- __asm__ ("mls %0.2s, %2.2s, %3.s[0]"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vmls_n_u16 (uint16x4_t a, uint16x4_t b, uint16_t c)
-{
- uint16x4_t result;
- __asm__ ("mls %0.4h, %2.4h, %3.h[0]"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vmls_n_u32 (uint32x2_t a, uint32x2_t b, uint32_t c)
-{
- uint32x2_t result;
- __asm__ ("mls %0.2s, %2.2s, %3.s[0]"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vmls_s8 (int8x8_t a, int8x8_t b, int8x8_t c)
-{
- int8x8_t result;
- __asm__ ("mls %0.8b,%2.8b,%3.8b"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vmls_s16 (int16x4_t a, int16x4_t b, int16x4_t c)
-{
- int16x4_t result;
- __asm__ ("mls %0.4h,%2.4h,%3.4h"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vmls_s32 (int32x2_t a, int32x2_t b, int32x2_t c)
-{
- int32x2_t result;
- __asm__ ("mls %0.2s,%2.2s,%3.2s"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vmls_u8 (uint8x8_t a, uint8x8_t b, uint8x8_t c)
-{
- uint8x8_t result;
- __asm__ ("mls %0.8b,%2.8b,%3.8b"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vmls_u16 (uint16x4_t a, uint16x4_t b, uint16x4_t c)
-{
- uint16x4_t result;
- __asm__ ("mls %0.4h,%2.4h,%3.4h"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vmls_u32 (uint32x2_t a, uint32x2_t b, uint32x2_t c)
-{
- uint32x2_t result;
- __asm__ ("mls %0.2s,%2.2s,%3.2s"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
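-/* vmlsl_high_* intrinsics: widening multiply-subtract using the upper
-   halves of the 128-bit sources (SMLSL2/UMLSL2).  */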
-#define vmlsl_high_lane_s16(a, b, c, d) \
- __extension__ \
- ({ \
- int16x8_t c_ = (c); \
- int16x8_t b_ = (b); \
- int32x4_t a_ = (a); \
- int32x4_t result; \
- __asm__ ("smlsl2 %0.4s, %2.8h, %3.h[%4]" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmlsl_high_lane_s32(a, b, c, d) \
- __extension__ \
- ({ \
- int32x4_t c_ = (c); \
- int32x4_t b_ = (b); \
- int64x2_t a_ = (a); \
- int64x2_t result; \
- __asm__ ("smlsl2 %0.2d, %2.4s, %3.s[%4]" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmlsl_high_lane_u16(a, b, c, d) \
- __extension__ \
- ({ \
- uint16x8_t c_ = (c); \
- uint16x8_t b_ = (b); \
- uint32x4_t a_ = (a); \
- uint32x4_t result; \
- __asm__ ("umlsl2 %0.4s, %2.8h, %3.h[%4]" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmlsl_high_lane_u32(a, b, c, d) \
- __extension__ \
- ({ \
- uint32x4_t c_ = (c); \
- uint32x4_t b_ = (b); \
- uint64x2_t a_ = (a); \
- uint64x2_t result; \
- __asm__ ("umlsl2 %0.2d, %2.4s, %3.s[%4]" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmlsl_high_laneq_s16(a, b, c, d) \
- __extension__ \
- ({ \
- int16x8_t c_ = (c); \
- int16x8_t b_ = (b); \
- int32x4_t a_ = (a); \
- int32x4_t result; \
- __asm__ ("smlsl2 %0.4s, %2.8h, %3.h[%4]" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmlsl_high_laneq_s32(a, b, c, d) \
- __extension__ \
- ({ \
- int32x4_t c_ = (c); \
- int32x4_t b_ = (b); \
- int64x2_t a_ = (a); \
- int64x2_t result; \
- __asm__ ("smlsl2 %0.2d, %2.4s, %3.s[%4]" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmlsl_high_laneq_u16(a, b, c, d) \
- __extension__ \
- ({ \
- uint16x8_t c_ = (c); \
- uint16x8_t b_ = (b); \
- uint32x4_t a_ = (a); \
- uint32x4_t result; \
- __asm__ ("umlsl2 %0.4s, %2.8h, %3.h[%4]" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmlsl_high_laneq_u32(a, b, c, d) \
- __extension__ \
- ({ \
- uint32x4_t c_ = (c); \
- uint32x4_t b_ = (b); \
- uint64x2_t a_ = (a); \
- uint64x2_t result; \
- __asm__ ("umlsl2 %0.2d, %2.4s, %3.s[%4]" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vmlsl_high_n_s16 (int32x4_t a, int16x8_t b, int16_t c)
-{
- int32x4_t result;
- __asm__ ("smlsl2 %0.4s, %2.8h, %3.h[0]"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vmlsl_high_n_s32 (int64x2_t a, int32x4_t b, int32_t c)
-{
- int64x2_t result;
- __asm__ ("smlsl2 %0.2d, %2.4s, %3.s[0]"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vmlsl_high_n_u16 (uint32x4_t a, uint16x8_t b, uint16_t c)
-{
- uint32x4_t result;
- __asm__ ("umlsl2 %0.4s, %2.8h, %3.h[0]"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vmlsl_high_n_u32 (uint64x2_t a, uint32x4_t b, uint32_t c)
-{
- uint64x2_t result;
- __asm__ ("umlsl2 %0.2d, %2.4s, %3.s[0]"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vmlsl_high_s8 (int16x8_t a, int8x16_t b, int8x16_t c)
-{
- int16x8_t result;
- __asm__ ("smlsl2 %0.8h,%2.16b,%3.16b"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vmlsl_high_s16 (int32x4_t a, int16x8_t b, int16x8_t c)
-{
- int32x4_t result;
- __asm__ ("smlsl2 %0.4s,%2.8h,%3.8h"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vmlsl_high_s32 (int64x2_t a, int32x4_t b, int32x4_t c)
-{
- int64x2_t result;
- __asm__ ("smlsl2 %0.2d,%2.4s,%3.4s"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vmlsl_high_u8 (uint16x8_t a, uint8x16_t b, uint8x16_t c)
-{
- uint16x8_t result;
- __asm__ ("umlsl2 %0.8h,%2.16b,%3.16b"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vmlsl_high_u16 (uint32x4_t a, uint16x8_t b, uint16x8_t c)
-{
- uint32x4_t result;
- __asm__ ("umlsl2 %0.4s,%2.8h,%3.8h"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vmlsl_high_u32 (uint64x2_t a, uint32x4_t b, uint32x4_t c)
-{
- uint64x2_t result;
- __asm__ ("umlsl2 %0.2d,%2.4s,%3.4s"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
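-/* vmlsl_* intrinsics: widening multiply-subtract on 64-bit sources
-   (SMLSL/UMLSL).  */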
-#define vmlsl_lane_s16(a, b, c, d) \
- __extension__ \
- ({ \
- int16x4_t c_ = (c); \
- int16x4_t b_ = (b); \
- int32x4_t a_ = (a); \
- int32x4_t result; \
- __asm__ ("smlsl %0.4s, %2.4h, %3.h[%4]" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmlsl_lane_s32(a, b, c, d) \
- __extension__ \
- ({ \
- int32x2_t c_ = (c); \
- int32x2_t b_ = (b); \
- int64x2_t a_ = (a); \
- int64x2_t result; \
- __asm__ ("smlsl %0.2d, %2.2s, %3.s[%4]" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmlsl_lane_u16(a, b, c, d) \
- __extension__ \
- ({ \
- uint16x4_t c_ = (c); \
- uint16x4_t b_ = (b); \
- uint32x4_t a_ = (a); \
- uint32x4_t result; \
- __asm__ ("umlsl %0.4s, %2.4h, %3.h[%4]" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmlsl_lane_u32(a, b, c, d) \
- __extension__ \
- ({ \
- uint32x2_t c_ = (c); \
- uint32x2_t b_ = (b); \
- uint64x2_t a_ = (a); \
- uint64x2_t result; \
- __asm__ ("umlsl %0.2d, %2.2s, %3.s[%4]" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmlsl_laneq_s16(a, b, c, d) \
- __extension__ \
- ({ \
- int16x8_t c_ = (c); \
- int16x4_t b_ = (b); \
- int32x4_t a_ = (a); \
- int32x4_t result; \
- __asm__ ("smlsl %0.4s, %2.4h, %3.h[%4]" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmlsl_laneq_s32(a, b, c, d) \
- __extension__ \
- ({ \
- int32x4_t c_ = (c); \
- int32x2_t b_ = (b); \
- int64x2_t a_ = (a); \
- int64x2_t result; \
- __asm__ ("smlsl %0.2d, %2.2s, %3.s[%4]" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmlsl_laneq_u16(a, b, c, d) \
- __extension__ \
- ({ \
- uint16x8_t c_ = (c); \
- uint16x4_t b_ = (b); \
- uint32x4_t a_ = (a); \
- uint32x4_t result; \
- __asm__ ("umlsl %0.4s, %2.4h, %3.h[%4]" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmlsl_laneq_u32(a, b, c, d) \
- __extension__ \
- ({ \
- uint32x4_t c_ = (c); \
- uint32x2_t b_ = (b); \
- uint64x2_t a_ = (a); \
- uint64x2_t result; \
- __asm__ ("umlsl %0.2d, %2.2s, %3.s[%4]" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vmlsl_n_s16 (int32x4_t a, int16x4_t b, int16_t c)
-{
- int32x4_t result;
- __asm__ ("smlsl %0.4s, %2.4h, %3.h[0]"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vmlsl_n_s32 (int64x2_t a, int32x2_t b, int32_t c)
-{
- int64x2_t result;
- __asm__ ("smlsl %0.2d, %2.2s, %3.s[0]"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vmlsl_n_u16 (uint32x4_t a, uint16x4_t b, uint16_t c)
-{
- uint32x4_t result;
- __asm__ ("umlsl %0.4s, %2.4h, %3.h[0]"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vmlsl_n_u32 (uint64x2_t a, uint32x2_t b, uint32_t c)
-{
- uint64x2_t result;
- __asm__ ("umlsl %0.2d, %2.2s, %3.s[0]"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vmlsl_s8 (int16x8_t a, int8x8_t b, int8x8_t c)
-{
- int16x8_t result;
- __asm__ ("smlsl %0.8h, %2.8b, %3.8b"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vmlsl_s16 (int32x4_t a, int16x4_t b, int16x4_t c)
-{
- int32x4_t result;
- __asm__ ("smlsl %0.4s, %2.4h, %3.4h"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vmlsl_s32 (int64x2_t a, int32x2_t b, int32x2_t c)
-{
- int64x2_t result;
- __asm__ ("smlsl %0.2d, %2.2s, %3.2s"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vmlsl_u8 (uint16x8_t a, uint8x8_t b, uint8x8_t c)
-{
- uint16x8_t result;
- __asm__ ("umlsl %0.8h, %2.8b, %3.8b"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vmlsl_u16 (uint32x4_t a, uint16x4_t b, uint16x4_t c)
-{
- uint32x4_t result;
- __asm__ ("umlsl %0.4s, %2.4h, %3.4h"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vmlsl_u32 (uint64x2_t a, uint32x2_t b, uint32x2_t c)
-{
- uint64x2_t result;
- __asm__ ("umlsl %0.2d, %2.2s, %3.2s"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
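-/* vmlsq_* intrinsics: 128-bit multiply-subtract (MLS, or an FMUL/FSUB
-   pair for the floating-point forms).  */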
-#define vmlsq_lane_f32(a, b, c, d) \
- __extension__ \
- ({ \
- float32x4_t c_ = (c); \
- float32x4_t b_ = (b); \
- float32x4_t a_ = (a); \
- float32x4_t result; \
- float32x4_t t1; \
- __asm__ ("fmul %1.4s, %3.4s, %4.s[%5]; fsub %0.4s, %0.4s, %1.4s" \
- : "=w"(result), "=w"(t1) \
- : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmlsq_lane_s16(a, b, c, d) \
- __extension__ \
- ({ \
- int16x8_t c_ = (c); \
- int16x8_t b_ = (b); \
- int16x8_t a_ = (a); \
- int16x8_t result; \
- __asm__ ("mls %0.8h,%2.8h,%3.h[%4]" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmlsq_lane_s32(a, b, c, d) \
- __extension__ \
- ({ \
- int32x4_t c_ = (c); \
- int32x4_t b_ = (b); \
- int32x4_t a_ = (a); \
- int32x4_t result; \
- __asm__ ("mls %0.4s,%2.4s,%3.s[%4]" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmlsq_lane_u16(a, b, c, d) \
- __extension__ \
- ({ \
- uint16x8_t c_ = (c); \
- uint16x8_t b_ = (b); \
- uint16x8_t a_ = (a); \
- uint16x8_t result; \
- __asm__ ("mls %0.8h,%2.8h,%3.h[%4]" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmlsq_lane_u32(a, b, c, d) \
- __extension__ \
- ({ \
- uint32x4_t c_ = (c); \
- uint32x4_t b_ = (b); \
- uint32x4_t a_ = (a); \
- uint32x4_t result; \
- __asm__ ("mls %0.4s,%2.4s,%3.s[%4]" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmlsq_laneq_f32(__a, __b, __c, __d) \
- __extension__ \
- ({ \
- float32x4_t __c_ = (__c); \
- float32x4_t __b_ = (__b); \
- float32x4_t __a_ = (__a); \
- float32x4_t __result; \
- float32x4_t __t1; \
- __asm__ ("fmul %1.4s, %3.4s, %4.s[%5]; fsub %0.4s, %0.4s, %1.4s" \
- : "=w"(__result), "=w"(__t1) \
- : "0"(__a_), "w"(__b_), "w"(__c_), "i"(__d) \
- : /* No clobbers */); \
- __result; \
- })
-
-#define vmlsq_laneq_s16(__a, __b, __c, __d) \
- __extension__ \
- ({ \
- int16x8_t __c_ = (__c); \
- int16x8_t __b_ = (__b); \
- int16x8_t __a_ = (__a); \
- int16x8_t __result; \
- __asm__ ("mls %0.8h, %2.8h, %3.h[%4]" \
- : "=w"(__result) \
- : "0"(__a_), "w"(__b_), "w"(__c_), "i"(__d) \
- : /* No clobbers */); \
- __result; \
- })
-
-#define vmlsq_laneq_s32(__a, __b, __c, __d) \
- __extension__ \
- ({ \
- int32x4_t __c_ = (__c); \
- int32x4_t __b_ = (__b); \
- int32x4_t __a_ = (__a); \
- int32x4_t __result; \
- __asm__ ("mls %0.4s, %2.4s, %3.s[%4]" \
- : "=w"(__result) \
- : "0"(__a_), "w"(__b_), "w"(__c_), "i"(__d) \
- : /* No clobbers */); \
- __result; \
- })
-
-#define vmlsq_laneq_u16(__a, __b, __c, __d) \
- __extension__ \
- ({ \
- uint16x8_t __c_ = (__c); \
- uint16x8_t __b_ = (__b); \
- uint16x8_t __a_ = (__a); \
- uint16x8_t __result; \
- __asm__ ("mls %0.8h, %2.8h, %3.h[%4]" \
- : "=w"(__result) \
- : "0"(__a_), "w"(__b_), "w"(__c_), "i"(__d) \
- : /* No clobbers */); \
- __result; \
- })
-
-#define vmlsq_laneq_u32(__a, __b, __c, __d) \
- __extension__ \
- ({ \
- uint32x4_t __c_ = (__c); \
- uint32x4_t __b_ = (__b); \
- uint32x4_t __a_ = (__a); \
- uint32x4_t __result; \
- __asm__ ("mls %0.4s, %2.4s, %3.s[%4]" \
- : "=w"(__result) \
- : "0"(__a_), "w"(__b_), "w"(__c_), "i"(__d) \
- : /* No clobbers */); \
- __result; \
- })
-
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vmlsq_n_f32 (float32x4_t a, float32x4_t b, float32_t c)
-{
- float32x4_t result;
- float32x4_t t1;
- __asm__ ("fmul %1.4s, %3.4s, %4.s[0]; fsub %0.4s, %0.4s, %1.4s"
- : "=w"(result), "=w"(t1)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
-vmlsq_n_f64 (float64x2_t a, float64x2_t b, float64_t c)
-{
- float64x2_t result;
- float64x2_t t1;
- __asm__ ("fmul %1.2d, %3.2d, %4.d[0]; fsub %0.2d, %0.2d, %1.2d"
- : "=w"(result), "=w"(t1)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vmlsq_n_s16 (int16x8_t a, int16x8_t b, int16_t c)
-{
- int16x8_t result;
- __asm__ ("mls %0.8h, %2.8h, %3.h[0]"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vmlsq_n_s32 (int32x4_t a, int32x4_t b, int32_t c)
-{
- int32x4_t result;
- __asm__ ("mls %0.4s, %2.4s, %3.s[0]"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vmlsq_n_u16 (uint16x8_t a, uint16x8_t b, uint16_t c)
-{
- uint16x8_t result;
- __asm__ ("mls %0.8h, %2.8h, %3.h[0]"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vmlsq_n_u32 (uint32x4_t a, uint32x4_t b, uint32_t c)
-{
- uint32x4_t result;
- __asm__ ("mls %0.4s, %2.4s, %3.s[0]"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vmlsq_s8 (int8x16_t a, int8x16_t b, int8x16_t c)
-{
- int8x16_t result;
- __asm__ ("mls %0.16b,%2.16b,%3.16b"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vmlsq_s16 (int16x8_t a, int16x8_t b, int16x8_t c)
-{
- int16x8_t result;
- __asm__ ("mls %0.8h,%2.8h,%3.8h"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vmlsq_s32 (int32x4_t a, int32x4_t b, int32x4_t c)
-{
- int32x4_t result;
- __asm__ ("mls %0.4s,%2.4s,%3.4s"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vmlsq_u8 (uint8x16_t a, uint8x16_t b, uint8x16_t c)
-{
- uint8x16_t result;
- __asm__ ("mls %0.16b,%2.16b,%3.16b"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vmlsq_u16 (uint16x8_t a, uint16x8_t b, uint16x8_t c)
-{
- uint16x8_t result;
- __asm__ ("mls %0.8h,%2.8h,%3.8h"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vmlsq_u32 (uint32x4_t a, uint32x4_t b, uint32x4_t c)
-{
- uint32x4_t result;
- __asm__ ("mls %0.4s,%2.4s,%3.4s"
- : "=w"(result)
- : "0"(a), "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
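-/* vmov_n_* intrinsics: broadcast a scalar into every lane (DUP); the
-   64-bit element forms use INS into element zero.  */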
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vmov_n_f32 (float32_t a)
-{
- float32x2_t result;
- __asm__ ("dup %0.2s, %w1"
- : "=w"(result)
- : "r"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vmov_n_p8 (uint32_t a)
-{
- poly8x8_t result;
- __asm__ ("dup %0.8b,%w1"
- : "=w"(result)
- : "r"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
-vmov_n_p16 (uint32_t a)
-{
- poly16x4_t result;
- __asm__ ("dup %0.4h,%w1"
- : "=w"(result)
- : "r"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vmov_n_s8 (int32_t a)
-{
- int8x8_t result;
- __asm__ ("dup %0.8b,%w1"
- : "=w"(result)
- : "r"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vmov_n_s16 (int32_t a)
-{
- int16x4_t result;
- __asm__ ("dup %0.4h,%w1"
- : "=w"(result)
- : "r"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vmov_n_s32 (int32_t a)
-{
- int32x2_t result;
- __asm__ ("dup %0.2s,%w1"
- : "=w"(result)
- : "r"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vmov_n_s64 (int64_t a)
-{
- int64x1_t result;
- __asm__ ("ins %0.d[0],%x1"
- : "=w"(result)
- : "r"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vmov_n_u8 (uint32_t a)
-{
- uint8x8_t result;
- __asm__ ("dup %0.8b,%w1"
- : "=w"(result)
- : "r"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vmov_n_u16 (uint32_t a)
-{
- uint16x4_t result;
- __asm__ ("dup %0.4h,%w1"
- : "=w"(result)
- : "r"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vmov_n_u32 (uint32_t a)
-{
- uint32x2_t result;
- __asm__ ("dup %0.2s,%w1"
- : "=w"(result)
- : "r"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vmov_n_u64 (uint64_t a)
-{
- uint64x1_t result;
- __asm__ ("ins %0.d[0],%x1"
- : "=w"(result)
- : "r"(a)
- : /* No clobbers */);
- return result;
-}
-
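-/* vmovl_* and vmovl_high_* intrinsics: widen each element to twice its
-   width using a shift-left-long by zero (SSHLL/USHLL, SSHLL2/USHLL2).  */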
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vmovl_high_s8 (int8x16_t a)
-{
- int16x8_t result;
- __asm__ ("sshll2 %0.8h,%1.16b,#0"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vmovl_high_s16 (int16x8_t a)
-{
- int32x4_t result;
- __asm__ ("sshll2 %0.4s,%1.8h,#0"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vmovl_high_s32 (int32x4_t a)
-{
- int64x2_t result;
- __asm__ ("sshll2 %0.2d,%1.4s,#0"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vmovl_high_u8 (uint8x16_t a)
-{
- uint16x8_t result;
- __asm__ ("ushll2 %0.8h,%1.16b,#0"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vmovl_high_u16 (uint16x8_t a)
-{
- uint32x4_t result;
- __asm__ ("ushll2 %0.4s,%1.8h,#0"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vmovl_high_u32 (uint32x4_t a)
-{
- uint64x2_t result;
- __asm__ ("ushll2 %0.2d,%1.4s,#0"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vmovl_s8 (int8x8_t a)
-{
- int16x8_t result;
- __asm__ ("sshll %0.8h,%1.8b,#0"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vmovl_s16 (int16x4_t a)
-{
- int32x4_t result;
- __asm__ ("sshll %0.4s,%1.4h,#0"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vmovl_s32 (int32x2_t a)
-{
- int64x2_t result;
- __asm__ ("sshll %0.2d,%1.2s,#0"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vmovl_u8 (uint8x8_t a)
-{
- uint16x8_t result;
- __asm__ ("ushll %0.8h,%1.8b,#0"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vmovl_u16 (uint16x4_t a)
-{
- uint32x4_t result;
- __asm__ ("ushll %0.4s,%1.4h,#0"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vmovl_u32 (uint32x2_t a)
-{
- uint64x2_t result;
- __asm__ ("ushll %0.2d,%1.2s,#0"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
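-/* vmovn_* and vmovn_high_* intrinsics: narrow each element to half its
-   width (XTN); the _high forms fill the upper half of the result
-   vector with XTN2.  */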
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vmovn_high_s16 (int8x8_t a, int16x8_t b)
-{
- int8x16_t result = vcombine_s8 (a, vcreate_s8 (UINT64_C (0x0)));
- __asm__ ("xtn2 %0.16b,%1.8h"
- : "+w"(result)
- : "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vmovn_high_s32 (int16x4_t a, int32x4_t b)
-{
- int16x8_t result = vcombine_s16 (a, vcreate_s16 (UINT64_C (0x0)));
- __asm__ ("xtn2 %0.8h,%1.4s"
- : "+w"(result)
- : "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vmovn_high_s64 (int32x2_t a, int64x2_t b)
-{
- int32x4_t result = vcombine_s32 (a, vcreate_s32 (UINT64_C (0x0)));
- __asm__ ("xtn2 %0.4s,%1.2d"
- : "+w"(result)
- : "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vmovn_high_u16 (uint8x8_t a, uint16x8_t b)
-{
- uint8x16_t result = vcombine_u8 (a, vcreate_u8 (UINT64_C (0x0)));
- __asm__ ("xtn2 %0.16b,%1.8h"
- : "+w"(result)
- : "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vmovn_high_u32 (uint16x4_t a, uint32x4_t b)
-{
- uint16x8_t result = vcombine_u16 (a, vcreate_u16 (UINT64_C (0x0)));
- __asm__ ("xtn2 %0.8h,%1.4s"
- : "+w"(result)
- : "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vmovn_high_u64 (uint32x2_t a, uint64x2_t b)
-{
- uint32x4_t result = vcombine_u32 (a, vcreate_u32 (UINT64_C (0x0)));
- __asm__ ("xtn2 %0.4s,%1.2d"
- : "+w"(result)
- : "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vmovn_s16 (int16x8_t a)
-{
- int8x8_t result;
- __asm__ ("xtn %0.8b,%1.8h"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vmovn_s32 (int32x4_t a)
-{
- int16x4_t result;
- __asm__ ("xtn %0.4h,%1.4s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vmovn_s64 (int64x2_t a)
-{
- int32x2_t result;
- __asm__ ("xtn %0.2s,%1.2d"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vmovn_u16 (uint16x8_t a)
-{
- uint8x8_t result;
- __asm__ ("xtn %0.8b,%1.8h"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vmovn_u32 (uint32x4_t a)
-{
- uint16x4_t result;
- __asm__ ("xtn %0.4h,%1.4s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vmovn_u64 (uint64x2_t a)
-{
- uint32x2_t result;
- __asm__ ("xtn %0.2s,%1.2d"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vmovq_n_f32 (float32_t a)
-{
- float32x4_t result;
- __asm__ ("dup %0.4s, %w1"
- : "=w"(result)
- : "r"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
-vmovq_n_f64 (float64_t a)
-{
- return (float64x2_t) {a, a};
-}
-
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
-vmovq_n_p8 (uint32_t a)
-{
- poly8x16_t result;
- __asm__ ("dup %0.16b,%w1"
- : "=w"(result)
- : "r"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
-vmovq_n_p16 (uint32_t a)
-{
- poly16x8_t result;
- __asm__ ("dup %0.8h,%w1"
- : "=w"(result)
- : "r"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vmovq_n_s8 (int32_t a)
-{
- int8x16_t result;
- __asm__ ("dup %0.16b,%w1"
- : "=w"(result)
- : "r"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vmovq_n_s16 (int32_t a)
-{
- int16x8_t result;
- __asm__ ("dup %0.8h,%w1"
- : "=w"(result)
- : "r"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vmovq_n_s32 (int32_t a)
-{
- int32x4_t result;
- __asm__ ("dup %0.4s,%w1"
- : "=w"(result)
- : "r"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vmovq_n_s64 (int64_t a)
-{
- int64x2_t result;
- __asm__ ("dup %0.2d,%x1"
- : "=w"(result)
- : "r"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vmovq_n_u8 (uint32_t a)
-{
- uint8x16_t result;
- __asm__ ("dup %0.16b,%w1"
- : "=w"(result)
- : "r"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vmovq_n_u16 (uint32_t a)
-{
- uint16x8_t result;
- __asm__ ("dup %0.8h,%w1"
- : "=w"(result)
- : "r"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vmovq_n_u32 (uint32_t a)
-{
- uint32x4_t result;
- __asm__ ("dup %0.4s,%w1"
- : "=w"(result)
- : "r"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vmovq_n_u64 (uint64_t a)
-{
- uint64x2_t result;
- __asm__ ("dup %0.2d,%x1"
- : "=w"(result)
- : "r"(a)
- : /* No clobbers */);
- return result;
-}
-
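-/* vmul_lane_*, vmul_laneq_* and vmul_n_* intrinsics: multiply by a
-   single vector element or scalar (MUL/FMUL by-element forms).  */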
-#define vmul_lane_f32(a, b, c) \
- __extension__ \
- ({ \
- float32x2_t b_ = (b); \
- float32x2_t a_ = (a); \
- float32x2_t result; \
- __asm__ ("fmul %0.2s,%1.2s,%2.s[%3]" \
- : "=w"(result) \
- : "w"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmul_lane_s16(a, b, c) \
- __extension__ \
- ({ \
- int16x4_t b_ = (b); \
- int16x4_t a_ = (a); \
- int16x4_t result; \
- __asm__ ("mul %0.4h,%1.4h,%2.h[%3]" \
- : "=w"(result) \
- : "w"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmul_lane_s32(a, b, c) \
- __extension__ \
- ({ \
- int32x2_t b_ = (b); \
- int32x2_t a_ = (a); \
- int32x2_t result; \
- __asm__ ("mul %0.2s,%1.2s,%2.s[%3]" \
- : "=w"(result) \
- : "w"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmul_lane_u16(a, b, c) \
- __extension__ \
- ({ \
- uint16x4_t b_ = (b); \
- uint16x4_t a_ = (a); \
- uint16x4_t result; \
- __asm__ ("mul %0.4h,%1.4h,%2.h[%3]" \
- : "=w"(result) \
- : "w"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmul_lane_u32(a, b, c) \
- __extension__ \
- ({ \
- uint32x2_t b_ = (b); \
- uint32x2_t a_ = (a); \
- uint32x2_t result; \
- __asm__ ("mul %0.2s, %1.2s, %2.s[%3]" \
- : "=w"(result) \
- : "w"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmul_laneq_f32(a, b, c) \
- __extension__ \
- ({ \
- float32x4_t b_ = (b); \
- float32x2_t a_ = (a); \
- float32x2_t result; \
- __asm__ ("fmul %0.2s, %1.2s, %2.s[%3]" \
- : "=w"(result) \
- : "w"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmul_laneq_s16(a, b, c) \
- __extension__ \
- ({ \
- int16x8_t b_ = (b); \
- int16x4_t a_ = (a); \
- int16x4_t result; \
- __asm__ ("mul %0.4h, %1.4h, %2.h[%3]" \
- : "=w"(result) \
- : "w"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmul_laneq_s32(a, b, c) \
- __extension__ \
- ({ \
- int32x4_t b_ = (b); \
- int32x2_t a_ = (a); \
- int32x2_t result; \
- __asm__ ("mul %0.2s, %1.2s, %2.s[%3]" \
- : "=w"(result) \
- : "w"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmul_laneq_u16(a, b, c) \
- __extension__ \
- ({ \
- uint16x8_t b_ = (b); \
- uint16x4_t a_ = (a); \
- uint16x4_t result; \
- __asm__ ("mul %0.4h, %1.4h, %2.h[%3]" \
- : "=w"(result) \
- : "w"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmul_laneq_u32(a, b, c) \
- __extension__ \
- ({ \
- uint32x4_t b_ = (b); \
- uint32x2_t a_ = (a); \
- uint32x2_t result; \
- __asm__ ("mul %0.2s, %1.2s, %2.s[%3]" \
- : "=w"(result) \
- : "w"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vmul_n_f32 (float32x2_t a, float32_t b)
-{
- float32x2_t result;
- __asm__ ("fmul %0.2s,%1.2s,%2.s[0]"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vmul_n_s16 (int16x4_t a, int16_t b)
-{
- int16x4_t result;
- __asm__ ("mul %0.4h,%1.4h,%2.h[0]"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vmul_n_s32 (int32x2_t a, int32_t b)
-{
- int32x2_t result;
- __asm__ ("mul %0.2s,%1.2s,%2.s[0]"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vmul_n_u16 (uint16x4_t a, uint16_t b)
-{
- uint16x4_t result;
- __asm__ ("mul %0.4h,%1.4h,%2.h[0]"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vmul_n_u32 (uint32x2_t a, uint32_t b)
-{
- uint32x2_t result;
- __asm__ ("mul %0.2s,%1.2s,%2.s[0]"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-#define vmuld_lane_f64(a, b, c) \
- __extension__ \
- ({ \
- float64x2_t b_ = (b); \
- float64_t a_ = (a); \
- float64_t result; \
- __asm__ ("fmul %d0,%d1,%2.d[%3]" \
- : "=w"(result) \
- : "w"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
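-/* vmull_* intrinsics: widening multiply (SMULL/UMULL, PMULL for
-   polynomial types, and the *2 forms for the upper halves).  */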
-#define vmull_high_lane_s16(a, b, c) \
- __extension__ \
- ({ \
- int16x8_t b_ = (b); \
- int16x8_t a_ = (a); \
- int32x4_t result; \
- __asm__ ("smull2 %0.4s, %1.8h, %2.h[%3]" \
- : "=w"(result) \
- : "w"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmull_high_lane_s32(a, b, c) \
- __extension__ \
- ({ \
- int32x4_t b_ = (b); \
- int32x4_t a_ = (a); \
- int64x2_t result; \
- __asm__ ("smull2 %0.2d, %1.4s, %2.s[%3]" \
- : "=w"(result) \
- : "w"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmull_high_lane_u16(a, b, c) \
- __extension__ \
- ({ \
- uint16x8_t b_ = (b); \
- uint16x8_t a_ = (a); \
- uint32x4_t result; \
- __asm__ ("umull2 %0.4s, %1.8h, %2.h[%3]" \
- : "=w"(result) \
- : "w"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmull_high_lane_u32(a, b, c) \
- __extension__ \
- ({ \
- uint32x4_t b_ = (b); \
- uint32x4_t a_ = (a); \
- uint64x2_t result; \
- __asm__ ("umull2 %0.2d, %1.4s, %2.s[%3]" \
- : "=w"(result) \
- : "w"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmull_high_laneq_s16(a, b, c) \
- __extension__ \
- ({ \
- int16x8_t b_ = (b); \
- int16x8_t a_ = (a); \
- int32x4_t result; \
- __asm__ ("smull2 %0.4s, %1.8h, %2.h[%3]" \
- : "=w"(result) \
- : "w"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmull_high_laneq_s32(a, b, c) \
- __extension__ \
- ({ \
- int32x4_t b_ = (b); \
- int32x4_t a_ = (a); \
- int64x2_t result; \
- __asm__ ("smull2 %0.2d, %1.4s, %2.s[%3]" \
- : "=w"(result) \
- : "w"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmull_high_laneq_u16(a, b, c) \
- __extension__ \
- ({ \
- uint16x8_t b_ = (b); \
- uint16x8_t a_ = (a); \
- uint32x4_t result; \
- __asm__ ("umull2 %0.4s, %1.8h, %2.h[%3]" \
- : "=w"(result) \
- : "w"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmull_high_laneq_u32(a, b, c) \
- __extension__ \
- ({ \
- uint32x4_t b_ = (b); \
- uint32x4_t a_ = (a); \
- uint64x2_t result; \
- __asm__ ("umull2 %0.2d, %1.4s, %2.s[%3]" \
- : "=w"(result) \
- : "w"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vmull_high_n_s16 (int16x8_t a, int16_t b)
-{
- int32x4_t result;
- __asm__ ("smull2 %0.4s,%1.8h,%2.h[0]"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vmull_high_n_s32 (int32x4_t a, int32_t b)
-{
- int64x2_t result;
- __asm__ ("smull2 %0.2d,%1.4s,%2.s[0]"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vmull_high_n_u16 (uint16x8_t a, uint16_t b)
-{
- uint32x4_t result;
- __asm__ ("umull2 %0.4s,%1.8h,%2.h[0]"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vmull_high_n_u32 (uint32x4_t a, uint32_t b)
-{
- uint64x2_t result;
- __asm__ ("umull2 %0.2d,%1.4s,%2.s[0]"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
-vmull_high_p8 (poly8x16_t a, poly8x16_t b)
-{
- poly16x8_t result;
- __asm__ ("pmull2 %0.8h,%1.16b,%2.16b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vmull_high_s8 (int8x16_t a, int8x16_t b)
-{
- int16x8_t result;
- __asm__ ("smull2 %0.8h,%1.16b,%2.16b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vmull_high_s16 (int16x8_t a, int16x8_t b)
-{
- int32x4_t result;
- __asm__ ("smull2 %0.4s,%1.8h,%2.8h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vmull_high_s32 (int32x4_t a, int32x4_t b)
-{
- int64x2_t result;
- __asm__ ("smull2 %0.2d,%1.4s,%2.4s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vmull_high_u8 (uint8x16_t a, uint8x16_t b)
-{
- uint16x8_t result;
- __asm__ ("umull2 %0.8h,%1.16b,%2.16b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vmull_high_u16 (uint16x8_t a, uint16x8_t b)
-{
- uint32x4_t result;
- __asm__ ("umull2 %0.4s,%1.8h,%2.8h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vmull_high_u32 (uint32x4_t a, uint32x4_t b)
-{
- uint64x2_t result;
- __asm__ ("umull2 %0.2d,%1.4s,%2.4s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-#define vmull_lane_s16(a, b, c) \
- __extension__ \
- ({ \
- int16x4_t b_ = (b); \
- int16x4_t a_ = (a); \
- int32x4_t result; \
- __asm__ ("smull %0.4s,%1.4h,%2.h[%3]" \
- : "=w"(result) \
- : "w"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmull_lane_s32(a, b, c) \
- __extension__ \
- ({ \
- int32x2_t b_ = (b); \
- int32x2_t a_ = (a); \
- int64x2_t result; \
- __asm__ ("smull %0.2d,%1.2s,%2.s[%3]" \
- : "=w"(result) \
- : "w"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmull_lane_u16(a, b, c) \
- __extension__ \
- ({ \
- uint16x4_t b_ = (b); \
- uint16x4_t a_ = (a); \
- uint32x4_t result; \
- __asm__ ("umull %0.4s,%1.4h,%2.h[%3]" \
- : "=w"(result) \
- : "w"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmull_lane_u32(a, b, c) \
- __extension__ \
- ({ \
- uint32x2_t b_ = (b); \
- uint32x2_t a_ = (a); \
- uint64x2_t result; \
- __asm__ ("umull %0.2d, %1.2s, %2.s[%3]" \
- : "=w"(result) \
- : "w"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmull_laneq_s16(a, b, c) \
- __extension__ \
- ({ \
- int16x8_t b_ = (b); \
- int16x4_t a_ = (a); \
- int32x4_t result; \
- __asm__ ("smull %0.4s, %1.4h, %2.h[%3]" \
- : "=w"(result) \
- : "w"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmull_laneq_s32(a, b, c) \
- __extension__ \
- ({ \
- int32x4_t b_ = (b); \
- int32x2_t a_ = (a); \
- int64x2_t result; \
- __asm__ ("smull %0.2d, %1.2s, %2.s[%3]" \
- : "=w"(result) \
- : "w"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmull_laneq_u16(a, b, c) \
- __extension__ \
- ({ \
- uint16x8_t b_ = (b); \
- uint16x4_t a_ = (a); \
- uint32x4_t result; \
- __asm__ ("umull %0.4s, %1.4h, %2.h[%3]" \
- : "=w"(result) \
- : "w"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmull_laneq_u32(a, b, c) \
- __extension__ \
- ({ \
- uint32x4_t b_ = (b); \
- uint32x2_t a_ = (a); \
- uint64x2_t result; \
- __asm__ ("umull %0.2d, %1.2s, %2.s[%3]" \
- : "=w"(result) \
- : "w"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vmull_n_s16 (int16x4_t a, int16_t b)
-{
- int32x4_t result;
- __asm__ ("smull %0.4s,%1.4h,%2.h[0]"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vmull_n_s32 (int32x2_t a, int32_t b)
-{
- int64x2_t result;
- __asm__ ("smull %0.2d,%1.2s,%2.s[0]"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vmull_n_u16 (uint16x4_t a, uint16_t b)
-{
- uint32x4_t result;
- __asm__ ("umull %0.4s,%1.4h,%2.h[0]"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vmull_n_u32 (uint32x2_t a, uint32_t b)
-{
- uint64x2_t result;
- __asm__ ("umull %0.2d,%1.2s,%2.s[0]"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
-vmull_p8 (poly8x8_t a, poly8x8_t b)
-{
- poly16x8_t result;
- __asm__ ("pmull %0.8h, %1.8b, %2.8b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vmull_s8 (int8x8_t a, int8x8_t b)
-{
- int16x8_t result;
- __asm__ ("smull %0.8h, %1.8b, %2.8b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vmull_s16 (int16x4_t a, int16x4_t b)
-{
- int32x4_t result;
- __asm__ ("smull %0.4s, %1.4h, %2.4h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vmull_s32 (int32x2_t a, int32x2_t b)
-{
- int64x2_t result;
- __asm__ ("smull %0.2d, %1.2s, %2.2s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vmull_u8 (uint8x8_t a, uint8x8_t b)
-{
- uint16x8_t result;
- __asm__ ("umull %0.8h, %1.8b, %2.8b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vmull_u16 (uint16x4_t a, uint16x4_t b)
-{
- uint32x4_t result;
- __asm__ ("umull %0.4s, %1.4h, %2.4h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vmull_u32 (uint32x2_t a, uint32x2_t b)
-{
- uint64x2_t result;
- __asm__ ("umull %0.2d, %1.2s, %2.2s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-#define vmulq_lane_f32(a, b, c) \
- __extension__ \
- ({ \
- float32x2_t b_ = (b); \
- float32x4_t a_ = (a); \
- float32x4_t result; \
- __asm__ ("fmul %0.4s, %1.4s, %2.s[%3]" \
- : "=w"(result) \
- : "w"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmulq_lane_f64(a, b, c) \
- __extension__ \
- ({ \
- float64x1_t b_ = (b); \
- float64x2_t a_ = (a); \
- float64x2_t result; \
- __asm__ ("fmul %0.2d,%1.2d,%2.d[%3]" \
- : "=w"(result) \
- : "w"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmulq_lane_s16(a, b, c) \
- __extension__ \
- ({ \
- int16x4_t b_ = (b); \
- int16x8_t a_ = (a); \
- int16x8_t result; \
- __asm__ ("mul %0.8h,%1.8h,%2.h[%3]" \
- : "=w"(result) \
- : "w"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmulq_lane_s32(a, b, c) \
- __extension__ \
- ({ \
- int32x2_t b_ = (b); \
- int32x4_t a_ = (a); \
- int32x4_t result; \
- __asm__ ("mul %0.4s,%1.4s,%2.s[%3]" \
- : "=w"(result) \
- : "w"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmulq_lane_u16(a, b, c) \
- __extension__ \
- ({ \
- uint16x4_t b_ = (b); \
- uint16x8_t a_ = (a); \
- uint16x8_t result; \
- __asm__ ("mul %0.8h,%1.8h,%2.h[%3]" \
- : "=w"(result) \
- : "w"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmulq_lane_u32(a, b, c) \
- __extension__ \
- ({ \
- uint32x2_t b_ = (b); \
- uint32x4_t a_ = (a); \
- uint32x4_t result; \
- __asm__ ("mul %0.4s, %1.4s, %2.s[%3]" \
- : "=w"(result) \
- : "w"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmulq_laneq_f32(a, b, c) \
- __extension__ \
- ({ \
- float32x4_t b_ = (b); \
- float32x4_t a_ = (a); \
- float32x4_t result; \
- __asm__ ("fmul %0.4s, %1.4s, %2.s[%3]" \
- : "=w"(result) \
- : "w"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmulq_laneq_f64(a, b, c) \
- __extension__ \
- ({ \
- float64x2_t b_ = (b); \
- float64x2_t a_ = (a); \
- float64x2_t result; \
- __asm__ ("fmul %0.2d,%1.2d,%2.d[%3]" \
- : "=w"(result) \
- : "w"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmulq_laneq_s16(a, b, c) \
- __extension__ \
- ({ \
- int16x8_t b_ = (b); \
- int16x8_t a_ = (a); \
- int16x8_t result; \
- __asm__ ("mul %0.8h, %1.8h, %2.h[%3]" \
- : "=w"(result) \
- : "w"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmulq_laneq_s32(a, b, c) \
- __extension__ \
- ({ \
- int32x4_t b_ = (b); \
- int32x4_t a_ = (a); \
- int32x4_t result; \
- __asm__ ("mul %0.4s, %1.4s, %2.s[%3]" \
- : "=w"(result) \
- : "w"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmulq_laneq_u16(a, b, c) \
- __extension__ \
- ({ \
- uint16x8_t b_ = (b); \
- uint16x8_t a_ = (a); \
- uint16x8_t result; \
- __asm__ ("mul %0.8h, %1.8h, %2.h[%3]" \
- : "=w"(result) \
- : "w"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmulq_laneq_u32(a, b, c) \
- __extension__ \
- ({ \
- uint32x4_t b_ = (b); \
- uint32x4_t a_ = (a); \
- uint32x4_t result; \
- __asm__ ("mul %0.4s, %1.4s, %2.s[%3]" \
- : "=w"(result) \
- : "w"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vmulq_n_f32 (float32x4_t a, float32_t b)
-{
- float32x4_t result;
- __asm__ ("fmul %0.4s,%1.4s,%2.s[0]"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
-vmulq_n_f64 (float64x2_t a, float64_t b)
-{
- float64x2_t result;
- __asm__ ("fmul %0.2d,%1.2d,%2.d[0]"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vmulq_n_s16 (int16x8_t a, int16_t b)
-{
- int16x8_t result;
- __asm__ ("mul %0.8h,%1.8h,%2.h[0]"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vmulq_n_s32 (int32x4_t a, int32_t b)
-{
- int32x4_t result;
- __asm__ ("mul %0.4s,%1.4s,%2.s[0]"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vmulq_n_u16 (uint16x8_t a, uint16_t b)
-{
- uint16x8_t result;
- __asm__ ("mul %0.8h,%1.8h,%2.h[0]"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vmulq_n_u32 (uint32x4_t a, uint32_t b)
-{
- uint32x4_t result;
- __asm__ ("mul %0.4s,%1.4s,%2.s[0]"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-#define vmuls_lane_f32(a, b, c) \
- __extension__ \
- ({ \
- float32x4_t b_ = (b); \
- float32_t a_ = (a); \
- float32_t result; \
- __asm__ ("fmul %s0,%s1,%2.s[%3]" \
- : "=w"(result) \
- : "w"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
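-/* vmulx_* intrinsics: extended multiply (FMULX), which returns +/-2.0
-   instead of NaN when one operand is zero and the other infinity.  */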
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vmulx_f32 (float32x2_t a, float32x2_t b)
-{
- float32x2_t result;
- __asm__ ("fmulx %0.2s,%1.2s,%2.2s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-#define vmulx_lane_f32(a, b, c) \
- __extension__ \
- ({ \
- float32x4_t b_ = (b); \
- float32x2_t a_ = (a); \
- float32x2_t result; \
- __asm__ ("fmulx %0.2s,%1.2s,%2.s[%3]" \
- : "=w"(result) \
- : "w"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-__extension__ static __inline float64_t __attribute__ ((__always_inline__))
-vmulxd_f64 (float64_t a, float64_t b)
-{
- float64_t result;
- __asm__ ("fmulx %d0, %d1, %d2"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vmulxq_f32 (float32x4_t a, float32x4_t b)
-{
- float32x4_t result;
- __asm__ ("fmulx %0.4s,%1.4s,%2.4s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
-vmulxq_f64 (float64x2_t a, float64x2_t b)
-{
- float64x2_t result;
- __asm__ ("fmulx %0.2d,%1.2d,%2.2d"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-#define vmulxq_lane_f32(a, b, c) \
- __extension__ \
- ({ \
- float32x4_t b_ = (b); \
- float32x4_t a_ = (a); \
- float32x4_t result; \
- __asm__ ("fmulx %0.4s,%1.4s,%2.s[%3]" \
- : "=w"(result) \
- : "w"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vmulxq_lane_f64(a, b, c) \
- __extension__ \
- ({ \
- float64x2_t b_ = (b); \
- float64x2_t a_ = (a); \
- float64x2_t result; \
- __asm__ ("fmulx %0.2d,%1.2d,%2.d[%3]" \
- : "=w"(result) \
- : "w"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
-vmulxs_f32 (float32_t a, float32_t b)
-{
- float32_t result;
- __asm__ ("fmulx %s0, %s1, %s2"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
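-/* vmvn_* intrinsics: bitwise NOT of every element (MVN).  */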
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vmvn_p8 (poly8x8_t a)
-{
- poly8x8_t result;
- __asm__ ("mvn %0.8b,%1.8b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vmvn_s8 (int8x8_t a)
-{
- int8x8_t result;
- __asm__ ("mvn %0.8b,%1.8b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vmvn_s16 (int16x4_t a)
-{
- int16x4_t result;
- __asm__ ("mvn %0.8b,%1.8b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vmvn_s32 (int32x2_t a)
-{
- int32x2_t result;
- __asm__ ("mvn %0.8b,%1.8b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vmvn_u8 (uint8x8_t a)
-{
- uint8x8_t result;
- __asm__ ("mvn %0.8b,%1.8b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vmvn_u16 (uint16x4_t a)
-{
- uint16x4_t result;
- __asm__ ("mvn %0.8b,%1.8b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vmvn_u32 (uint32x2_t a)
-{
- uint32x2_t result;
- __asm__ ("mvn %0.8b,%1.8b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
-vmvnq_p8 (poly8x16_t a)
-{
- poly8x16_t result;
- __asm__ ("mvn %0.16b,%1.16b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vmvnq_s8 (int8x16_t a)
-{
- int8x16_t result;
- __asm__ ("mvn %0.16b,%1.16b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vmvnq_s16 (int16x8_t a)
-{
- int16x8_t result;
- __asm__ ("mvn %0.16b,%1.16b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vmvnq_s32 (int32x4_t a)
-{
- int32x4_t result;
- __asm__ ("mvn %0.16b,%1.16b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vmvnq_u8 (uint8x16_t a)
-{
- uint8x16_t result;
- __asm__ ("mvn %0.16b,%1.16b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vmvnq_u16 (uint16x8_t a)
-{
- uint16x8_t result;
- __asm__ ("mvn %0.16b,%1.16b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vmvnq_u32 (uint32x4_t a)
-{
- uint32x4_t result;
- __asm__ ("mvn %0.16b,%1.16b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
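-/* vneg_*, vnegq_*: negate every element (NEG/FNEG).  */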
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vneg_f32 (float32x2_t a)
-{
- float32x2_t result;
- __asm__ ("fneg %0.2s,%1.2s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vneg_s8 (int8x8_t a)
-{
- int8x8_t result;
- __asm__ ("neg %0.8b,%1.8b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vneg_s16 (int16x4_t a)
-{
- int16x4_t result;
- __asm__ ("neg %0.4h,%1.4h"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vneg_s32 (int32x2_t a)
-{
- int32x2_t result;
- __asm__ ("neg %0.2s,%1.2s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vnegq_f32 (float32x4_t a)
-{
- float32x4_t result;
- __asm__ ("fneg %0.4s,%1.4s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
-vnegq_f64 (float64x2_t a)
-{
- float64x2_t result;
- __asm__ ("fneg %0.2d,%1.2d"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vnegq_s8 (int8x16_t a)
-{
- int8x16_t result;
- __asm__ ("neg %0.16b,%1.16b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vnegq_s16 (int16x8_t a)
-{
- int16x8_t result;
- __asm__ ("neg %0.8h,%1.8h"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vnegq_s32 (int32x4_t a)
-{
- int32x4_t result;
- __asm__ ("neg %0.4s,%1.4s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vnegq_s64 (int64x2_t a)
-{
- int64x2_t result;
- __asm__ ("neg %0.2d,%1.2d"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
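-/* vpadal_*, vpadalq_*: add adjacent element pairs of the second operand,
-   widening, and accumulate the sums into the first (SADALP/UADALP).  */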
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vpadal_s8 (int16x4_t a, int8x8_t b)
-{
- int16x4_t result;
- __asm__ ("sadalp %0.4h,%2.8b"
- : "=w"(result)
- : "0"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vpadal_s16 (int32x2_t a, int16x4_t b)
-{
- int32x2_t result;
- __asm__ ("sadalp %0.2s,%2.4h"
- : "=w"(result)
- : "0"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vpadal_s32 (int64x1_t a, int32x2_t b)
-{
- int64x1_t result;
- __asm__ ("sadalp %0.1d,%2.2s"
- : "=w"(result)
- : "0"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vpadal_u8 (uint16x4_t a, uint8x8_t b)
-{
- uint16x4_t result;
- __asm__ ("uadalp %0.4h,%2.8b"
- : "=w"(result)
- : "0"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vpadal_u16 (uint32x2_t a, uint16x4_t b)
-{
- uint32x2_t result;
- __asm__ ("uadalp %0.2s,%2.4h"
- : "=w"(result)
- : "0"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vpadal_u32 (uint64x1_t a, uint32x2_t b)
-{
- uint64x1_t result;
- __asm__ ("uadalp %0.1d,%2.2s"
- : "=w"(result)
- : "0"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vpadalq_s8 (int16x8_t a, int8x16_t b)
-{
- int16x8_t result;
- __asm__ ("sadalp %0.8h,%2.16b"
- : "=w"(result)
- : "0"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vpadalq_s16 (int32x4_t a, int16x8_t b)
-{
- int32x4_t result;
- __asm__ ("sadalp %0.4s,%2.8h"
- : "=w"(result)
- : "0"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vpadalq_s32 (int64x2_t a, int32x4_t b)
-{
- int64x2_t result;
- __asm__ ("sadalp %0.2d,%2.4s"
- : "=w"(result)
- : "0"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vpadalq_u8 (uint16x8_t a, uint8x16_t b)
-{
- uint16x8_t result;
- __asm__ ("uadalp %0.8h,%2.16b"
- : "=w"(result)
- : "0"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vpadalq_u16 (uint32x4_t a, uint16x8_t b)
-{
- uint32x4_t result;
- __asm__ ("uadalp %0.4s,%2.8h"
- : "=w"(result)
- : "0"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vpadalq_u32 (uint64x2_t a, uint32x4_t b)
-{
- uint64x2_t result;
- __asm__ ("uadalp %0.2d,%2.4s"
- : "=w"(result)
- : "0"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
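-/* vpadd*, vpaddl*, vpaddq*: pairwise addition (ADDP/FADDP) and pairwise
-   widening addition (SADDLP/UADDLP).  */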
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vpadd_f32 (float32x2_t a, float32x2_t b)
-{
- float32x2_t result;
- __asm__ ("faddp %0.2s,%1.2s,%2.2s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vpadd_s8 (int8x8_t __a, int8x8_t __b)
-{
- return __builtin_aarch64_addpv8qi (__a, __b);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vpadd_s16 (int16x4_t __a, int16x4_t __b)
-{
- return __builtin_aarch64_addpv4hi (__a, __b);
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vpadd_s32 (int32x2_t __a, int32x2_t __b)
-{
- return __builtin_aarch64_addpv2si (__a, __b);
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vpadd_u8 (uint8x8_t __a, uint8x8_t __b)
-{
- return (uint8x8_t) __builtin_aarch64_addpv8qi ((int8x8_t) __a,
- (int8x8_t) __b);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vpadd_u16 (uint16x4_t __a, uint16x4_t __b)
-{
- return (uint16x4_t) __builtin_aarch64_addpv4hi ((int16x4_t) __a,
- (int16x4_t) __b);
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vpadd_u32 (uint32x2_t __a, uint32x2_t __b)
-{
- return (uint32x2_t) __builtin_aarch64_addpv2si ((int32x2_t) __a,
- (int32x2_t) __b);
-}
-
-__extension__ static __inline float64_t __attribute__ ((__always_inline__))
-vpaddd_f64 (float64x2_t a)
-{
- float64_t result;
- __asm__ ("faddp %d0,%1.2d"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vpaddl_s8 (int8x8_t a)
-{
- int16x4_t result;
- __asm__ ("saddlp %0.4h,%1.8b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vpaddl_s16 (int16x4_t a)
-{
- int32x2_t result;
- __asm__ ("saddlp %0.2s,%1.4h"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vpaddl_s32 (int32x2_t a)
-{
- int64x1_t result;
- __asm__ ("saddlp %0.1d,%1.2s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vpaddl_u8 (uint8x8_t a)
-{
- uint16x4_t result;
- __asm__ ("uaddlp %0.4h,%1.8b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vpaddl_u16 (uint16x4_t a)
-{
- uint32x2_t result;
- __asm__ ("uaddlp %0.2s,%1.4h"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vpaddl_u32 (uint32x2_t a)
-{
- uint64x1_t result;
- __asm__ ("uaddlp %0.1d,%1.2s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vpaddlq_s8 (int8x16_t a)
-{
- int16x8_t result;
- __asm__ ("saddlp %0.8h,%1.16b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vpaddlq_s16 (int16x8_t a)
-{
- int32x4_t result;
- __asm__ ("saddlp %0.4s,%1.8h"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vpaddlq_s32 (int32x4_t a)
-{
- int64x2_t result;
- __asm__ ("saddlp %0.2d,%1.4s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vpaddlq_u8 (uint8x16_t a)
-{
- uint16x8_t result;
- __asm__ ("uaddlp %0.8h,%1.16b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vpaddlq_u16 (uint16x8_t a)
-{
- uint32x4_t result;
- __asm__ ("uaddlp %0.4s,%1.8h"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vpaddlq_u32 (uint32x4_t a)
-{
- uint64x2_t result;
- __asm__ ("uaddlp %0.2d,%1.4s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vpaddq_f32 (float32x4_t a, float32x4_t b)
-{
- float32x4_t result;
- __asm__ ("faddp %0.4s,%1.4s,%2.4s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
-vpaddq_f64 (float64x2_t a, float64x2_t b)
-{
- float64x2_t result;
- __asm__ ("faddp %0.2d,%1.2d,%2.2d"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vpaddq_s8 (int8x16_t a, int8x16_t b)
-{
- int8x16_t result;
- __asm__ ("addp %0.16b,%1.16b,%2.16b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vpaddq_s16 (int16x8_t a, int16x8_t b)
-{
- int16x8_t result;
- __asm__ ("addp %0.8h,%1.8h,%2.8h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vpaddq_s32 (int32x4_t a, int32x4_t b)
-{
- int32x4_t result;
- __asm__ ("addp %0.4s,%1.4s,%2.4s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vpaddq_s64 (int64x2_t a, int64x2_t b)
-{
- int64x2_t result;
- __asm__ ("addp %0.2d,%1.2d,%2.2d"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vpaddq_u8 (uint8x16_t a, uint8x16_t b)
-{
- uint8x16_t result;
- __asm__ ("addp %0.16b,%1.16b,%2.16b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vpaddq_u16 (uint16x8_t a, uint16x8_t b)
-{
- uint16x8_t result;
- __asm__ ("addp %0.8h,%1.8h,%2.8h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vpaddq_u32 (uint32x4_t a, uint32x4_t b)
-{
- uint32x4_t result;
- __asm__ ("addp %0.4s,%1.4s,%2.4s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vpaddq_u64 (uint64x2_t a, uint64x2_t b)
-{
- uint64x2_t result;
- __asm__ ("addp %0.2d,%1.2d,%2.2d"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
-vpadds_f32 (float32x2_t a)
-{
- float32_t result;
- __asm__ ("faddp %s0,%1.2s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
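-/* vpmax*, vpmaxnm*: pairwise maximum; the NM forms follow the IEEE maxNum
-   treatment of NaNs (SMAXP/UMAXP/FMAXP, FMAXNMP).  */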
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vpmax_f32 (float32x2_t a, float32x2_t b)
-{
- float32x2_t result;
- __asm__ ("fmaxp %0.2s, %1.2s, %2.2s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vpmax_s8 (int8x8_t a, int8x8_t b)
-{
- int8x8_t result;
- __asm__ ("smaxp %0.8b, %1.8b, %2.8b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vpmax_s16 (int16x4_t a, int16x4_t b)
-{
- int16x4_t result;
- __asm__ ("smaxp %0.4h, %1.4h, %2.4h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vpmax_s32 (int32x2_t a, int32x2_t b)
-{
- int32x2_t result;
- __asm__ ("smaxp %0.2s, %1.2s, %2.2s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vpmax_u8 (uint8x8_t a, uint8x8_t b)
-{
- uint8x8_t result;
- __asm__ ("umaxp %0.8b, %1.8b, %2.8b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vpmax_u16 (uint16x4_t a, uint16x4_t b)
-{
- uint16x4_t result;
- __asm__ ("umaxp %0.4h, %1.4h, %2.4h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vpmax_u32 (uint32x2_t a, uint32x2_t b)
-{
- uint32x2_t result;
- __asm__ ("umaxp %0.2s, %1.2s, %2.2s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vpmaxnm_f32 (float32x2_t a, float32x2_t b)
-{
- float32x2_t result;
- __asm__ ("fmaxnmp %0.2s,%1.2s,%2.2s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vpmaxnmq_f32 (float32x4_t a, float32x4_t b)
-{
- float32x4_t result;
- __asm__ ("fmaxnmp %0.4s,%1.4s,%2.4s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
-vpmaxnmq_f64 (float64x2_t a, float64x2_t b)
-{
- float64x2_t result;
- __asm__ ("fmaxnmp %0.2d,%1.2d,%2.2d"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64_t __attribute__ ((__always_inline__))
-vpmaxnmqd_f64 (float64x2_t a)
-{
- float64_t result;
- __asm__ ("fmaxnmp %d0,%1.2d"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
-vpmaxnms_f32 (float32x2_t a)
-{
- float32_t result;
- __asm__ ("fmaxnmp %s0,%1.2s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vpmaxq_f32 (float32x4_t a, float32x4_t b)
-{
- float32x4_t result;
- __asm__ ("fmaxp %0.4s, %1.4s, %2.4s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
-vpmaxq_f64 (float64x2_t a, float64x2_t b)
-{
- float64x2_t result;
- __asm__ ("fmaxp %0.2d, %1.2d, %2.2d"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vpmaxq_s8 (int8x16_t a, int8x16_t b)
-{
- int8x16_t result;
- __asm__ ("smaxp %0.16b, %1.16b, %2.16b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vpmaxq_s16 (int16x8_t a, int16x8_t b)
-{
- int16x8_t result;
- __asm__ ("smaxp %0.8h, %1.8h, %2.8h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vpmaxq_s32 (int32x4_t a, int32x4_t b)
-{
- int32x4_t result;
- __asm__ ("smaxp %0.4s, %1.4s, %2.4s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vpmaxq_u8 (uint8x16_t a, uint8x16_t b)
-{
- uint8x16_t result;
- __asm__ ("umaxp %0.16b, %1.16b, %2.16b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vpmaxq_u16 (uint16x8_t a, uint16x8_t b)
-{
- uint16x8_t result;
- __asm__ ("umaxp %0.8h, %1.8h, %2.8h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vpmaxq_u32 (uint32x4_t a, uint32x4_t b)
-{
- uint32x4_t result;
- __asm__ ("umaxp %0.4s, %1.4s, %2.4s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64_t __attribute__ ((__always_inline__))
-vpmaxqd_f64 (float64x2_t a)
-{
- float64_t result;
- __asm__ ("fmaxp %d0,%1.2d"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
-vpmaxs_f32 (float32x2_t a)
-{
- float32_t result;
- __asm__ ("fmaxp %s0,%1.2s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
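-/* vpmin*, vpminnm*: pairwise minimum; the NM forms follow the IEEE minNum
-   treatment of NaNs (SMINP/UMINP/FMINP, FMINNMP).  */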
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vpmin_f32 (float32x2_t a, float32x2_t b)
-{
- float32x2_t result;
- __asm__ ("fminp %0.2s, %1.2s, %2.2s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vpmin_s8 (int8x8_t a, int8x8_t b)
-{
- int8x8_t result;
- __asm__ ("sminp %0.8b, %1.8b, %2.8b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vpmin_s16 (int16x4_t a, int16x4_t b)
-{
- int16x4_t result;
- __asm__ ("sminp %0.4h, %1.4h, %2.4h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vpmin_s32 (int32x2_t a, int32x2_t b)
-{
- int32x2_t result;
- __asm__ ("sminp %0.2s, %1.2s, %2.2s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vpmin_u8 (uint8x8_t a, uint8x8_t b)
-{
- uint8x8_t result;
- __asm__ ("uminp %0.8b, %1.8b, %2.8b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vpmin_u16 (uint16x4_t a, uint16x4_t b)
-{
- uint16x4_t result;
- __asm__ ("uminp %0.4h, %1.4h, %2.4h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vpmin_u32 (uint32x2_t a, uint32x2_t b)
-{
- uint32x2_t result;
- __asm__ ("uminp %0.2s, %1.2s, %2.2s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vpminnm_f32 (float32x2_t a, float32x2_t b)
-{
- float32x2_t result;
- __asm__ ("fminnmp %0.2s,%1.2s,%2.2s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vpminnmq_f32 (float32x4_t a, float32x4_t b)
-{
- float32x4_t result;
- __asm__ ("fminnmp %0.4s,%1.4s,%2.4s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
-vpminnmq_f64 (float64x2_t a, float64x2_t b)
-{
- float64x2_t result;
- __asm__ ("fminnmp %0.2d,%1.2d,%2.2d"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64_t __attribute__ ((__always_inline__))
-vpminnmqd_f64 (float64x2_t a)
-{
- float64_t result;
- __asm__ ("fminnmp %d0,%1.2d"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
-vpminnms_f32 (float32x2_t a)
-{
- float32_t result;
- __asm__ ("fminnmp %s0,%1.2s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vpminq_f32 (float32x4_t a, float32x4_t b)
-{
- float32x4_t result;
- __asm__ ("fminp %0.4s, %1.4s, %2.4s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
-vpminq_f64 (float64x2_t a, float64x2_t b)
-{
- float64x2_t result;
- __asm__ ("fminp %0.2d, %1.2d, %2.2d"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vpminq_s8 (int8x16_t a, int8x16_t b)
-{
- int8x16_t result;
- __asm__ ("sminp %0.16b, %1.16b, %2.16b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vpminq_s16 (int16x8_t a, int16x8_t b)
-{
- int16x8_t result;
- __asm__ ("sminp %0.8h, %1.8h, %2.8h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vpminq_s32 (int32x4_t a, int32x4_t b)
-{
- int32x4_t result;
- __asm__ ("sminp %0.4s, %1.4s, %2.4s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vpminq_u8 (uint8x16_t a, uint8x16_t b)
-{
- uint8x16_t result;
- __asm__ ("uminp %0.16b, %1.16b, %2.16b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vpminq_u16 (uint16x8_t a, uint16x8_t b)
-{
- uint16x8_t result;
- __asm__ ("uminp %0.8h, %1.8h, %2.8h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vpminq_u32 (uint32x4_t a, uint32x4_t b)
-{
- uint32x4_t result;
- __asm__ ("uminp %0.4s, %1.4s, %2.4s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64_t __attribute__ ((__always_inline__))
-vpminqd_f64 (float64x2_t a)
-{
- float64_t result;
- __asm__ ("fminp %d0,%1.2d"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
-vpmins_f32 (float32x2_t a)
-{
- float32_t result;
- __asm__ ("fminp %s0,%1.2s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
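-/* vqdmulh_n_*, vqdmulhq_n_*: saturating doubling multiply returning the high
-   half, with the scalar operand broadcast from lane 0 (SQDMULH).  */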
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vqdmulh_n_s16 (int16x4_t a, int16_t b)
-{
- int16x4_t result;
- __asm__ ("sqdmulh %0.4h,%1.4h,%2.h[0]"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vqdmulh_n_s32 (int32x2_t a, int32_t b)
-{
- int32x2_t result;
- __asm__ ("sqdmulh %0.2s,%1.2s,%2.s[0]"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vqdmulhq_n_s16 (int16x8_t a, int16_t b)
-{
- int16x8_t result;
- __asm__ ("sqdmulh %0.8h,%1.8h,%2.h[0]"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vqdmulhq_n_s32 (int32x4_t a, int32_t b)
-{
- int32x4_t result;
- __asm__ ("sqdmulh %0.4s,%1.4s,%2.s[0]"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
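-/* vqmovn_high_*, vqmovun_high_*: saturating extract narrow, writing the
-   narrowed elements into the upper half of the result
-   (SQXTN2/UQXTN2/SQXTUN2).  */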
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vqmovn_high_s16 (int8x8_t a, int16x8_t b)
-{
- int8x16_t result = vcombine_s8 (a, vcreate_s8 (UINT64_C (0x0)));
- __asm__ ("sqxtn2 %0.16b, %1.8h"
- : "+w"(result)
- : "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vqmovn_high_s32 (int16x4_t a, int32x4_t b)
-{
- int16x8_t result = vcombine_s16 (a, vcreate_s16 (UINT64_C (0x0)));
- __asm__ ("sqxtn2 %0.8h, %1.4s"
- : "+w"(result)
- : "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vqmovn_high_s64 (int32x2_t a, int64x2_t b)
-{
- int32x4_t result = vcombine_s32 (a, vcreate_s32 (UINT64_C (0x0)));
- __asm__ ("sqxtn2 %0.4s, %1.2d"
- : "+w"(result)
- : "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vqmovn_high_u16 (uint8x8_t a, uint16x8_t b)
-{
- uint8x16_t result = vcombine_u8 (a, vcreate_u8 (UINT64_C (0x0)));
- __asm__ ("uqxtn2 %0.16b, %1.8h"
- : "+w"(result)
- : "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vqmovn_high_u32 (uint16x4_t a, uint32x4_t b)
-{
- uint16x8_t result = vcombine_u16 (a, vcreate_u16 (UINT64_C (0x0)));
- __asm__ ("uqxtn2 %0.8h, %1.4s"
- : "+w"(result)
- : "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vqmovn_high_u64 (uint32x2_t a, uint64x2_t b)
-{
- uint32x4_t result = vcombine_u32 (a, vcreate_u32 (UINT64_C (0x0)));
- __asm__ ("uqxtn2 %0.4s, %1.2d"
- : "+w"(result)
- : "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vqmovun_high_s16 (uint8x8_t a, int16x8_t b)
-{
- uint8x16_t result = vcombine_u8 (a, vcreate_u8 (UINT64_C (0x0)));
- __asm__ ("sqxtun2 %0.16b, %1.8h"
- : "+w"(result)
- : "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vqmovun_high_s32 (uint16x4_t a, int32x4_t b)
-{
- uint16x8_t result = vcombine_u16 (a, vcreate_u16 (UINT64_C (0x0)));
- __asm__ ("sqxtun2 %0.8h, %1.4s"
- : "+w"(result)
- : "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vqmovun_high_s64 (uint32x2_t a, int64x2_t b)
-{
- uint32x4_t result = vcombine_u32 (a, vcreate_u32 (UINT64_C (0x0)));
- __asm__ ("sqxtun2 %0.4s, %1.2d"
- : "+w"(result)
- : "w"(b)
- : /* No clobbers */);
- return result;
-}
-
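-/* vqrdmulh_n_*, vqrdmulhq_n_*: saturating rounding doubling multiply
-   returning the high half, scalar operand in lane 0 (SQRDMULH).  */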
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vqrdmulh_n_s16 (int16x4_t a, int16_t b)
-{
- int16x4_t result;
- __asm__ ("sqrdmulh %0.4h,%1.4h,%2.h[0]"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vqrdmulh_n_s32 (int32x2_t a, int32_t b)
-{
- int32x2_t result;
- __asm__ ("sqrdmulh %0.2s,%1.2s,%2.s[0]"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vqrdmulhq_n_s16 (int16x8_t a, int16_t b)
-{
- int16x8_t result;
- __asm__ ("sqrdmulh %0.8h,%1.8h,%2.h[0]"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vqrdmulhq_n_s32 (int32x4_t a, int32_t b)
-{
- int32x4_t result;
- __asm__ ("sqrdmulh %0.4s,%1.4s,%2.s[0]"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
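-/* vqrshrn_high_n_*, vqrshrun_high_n_*, vqshrn_high_n_*, vqshrun_high_n_*:
-   saturating (rounding) shift right narrow by an immediate, writing the
-   narrowed elements into the upper half of the result.  */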
-#define vqrshrn_high_n_s16(a, b, c) \
- __extension__ \
- ({ \
- int16x8_t b_ = (b); \
- int8x8_t a_ = (a); \
- int8x16_t result = vcombine_s8 \
- (a_, vcreate_s8 (UINT64_C (0x0))); \
- __asm__ ("sqrshrn2 %0.16b, %1.8h, #%2" \
- : "+w"(result) \
- : "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vqrshrn_high_n_s32(a, b, c) \
- __extension__ \
- ({ \
- int32x4_t b_ = (b); \
- int16x4_t a_ = (a); \
- int16x8_t result = vcombine_s16 \
- (a_, vcreate_s16 (UINT64_C (0x0))); \
- __asm__ ("sqrshrn2 %0.8h, %1.4s, #%2" \
- : "+w"(result) \
- : "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vqrshrn_high_n_s64(a, b, c) \
- __extension__ \
- ({ \
- int64x2_t b_ = (b); \
- int32x2_t a_ = (a); \
- int32x4_t result = vcombine_s32 \
- (a_, vcreate_s32 (UINT64_C (0x0))); \
- __asm__ ("sqrshrn2 %0.4s, %1.2d, #%2" \
- : "+w"(result) \
- : "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vqrshrn_high_n_u16(a, b, c) \
- __extension__ \
- ({ \
- uint16x8_t b_ = (b); \
- uint8x8_t a_ = (a); \
- uint8x16_t result = vcombine_u8 \
- (a_, vcreate_u8 (UINT64_C (0x0))); \
- __asm__ ("uqrshrn2 %0.16b, %1.8h, #%2" \
- : "+w"(result) \
- : "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vqrshrn_high_n_u32(a, b, c) \
- __extension__ \
- ({ \
- uint32x4_t b_ = (b); \
- uint16x4_t a_ = (a); \
- uint16x8_t result = vcombine_u16 \
- (a_, vcreate_u16 (UINT64_C (0x0))); \
- __asm__ ("uqrshrn2 %0.8h, %1.4s, #%2" \
- : "+w"(result) \
- : "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vqrshrn_high_n_u64(a, b, c) \
- __extension__ \
- ({ \
- uint64x2_t b_ = (b); \
- uint32x2_t a_ = (a); \
- uint32x4_t result = vcombine_u32 \
- (a_, vcreate_u32 (UINT64_C (0x0))); \
- __asm__ ("uqrshrn2 %0.4s, %1.2d, #%2" \
- : "+w"(result) \
- : "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vqrshrun_high_n_s16(a, b, c) \
- __extension__ \
- ({ \
- int16x8_t b_ = (b); \
- uint8x8_t a_ = (a); \
- uint8x16_t result = vcombine_u8 \
- (a_, vcreate_u8 (UINT64_C (0x0))); \
- __asm__ ("sqrshrun2 %0.16b, %1.8h, #%2" \
- : "+w"(result) \
- : "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vqrshrun_high_n_s32(a, b, c) \
- __extension__ \
- ({ \
- int32x4_t b_ = (b); \
- uint16x4_t a_ = (a); \
- uint16x8_t result = vcombine_u16 \
- (a_, vcreate_u16 (UINT64_C (0x0))); \
- __asm__ ("sqrshrun2 %0.8h, %1.4s, #%2" \
- : "+w"(result) \
- : "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vqrshrun_high_n_s64(a, b, c) \
- __extension__ \
- ({ \
- int64x2_t b_ = (b); \
- uint32x2_t a_ = (a); \
- uint32x4_t result = vcombine_u32 \
- (a_, vcreate_u32 (UINT64_C (0x0))); \
- __asm__ ("sqrshrun2 %0.4s, %1.2d, #%2" \
- : "+w"(result) \
- : "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vqshrn_high_n_s16(a, b, c) \
- __extension__ \
- ({ \
- int16x8_t b_ = (b); \
- int8x8_t a_ = (a); \
- int8x16_t result = vcombine_s8 \
- (a_, vcreate_s8 (UINT64_C (0x0))); \
- __asm__ ("sqshrn2 %0.16b, %1.8h, #%2" \
- : "+w"(result) \
- : "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vqshrn_high_n_s32(a, b, c) \
- __extension__ \
- ({ \
- int32x4_t b_ = (b); \
- int16x4_t a_ = (a); \
- int16x8_t result = vcombine_s16 \
- (a_, vcreate_s16 (UINT64_C (0x0))); \
- __asm__ ("sqshrn2 %0.8h, %1.4s, #%2" \
- : "+w"(result) \
- : "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vqshrn_high_n_s64(a, b, c) \
- __extension__ \
- ({ \
- int64x2_t b_ = (b); \
- int32x2_t a_ = (a); \
- int32x4_t result = vcombine_s32 \
- (a_, vcreate_s32 (UINT64_C (0x0))); \
- __asm__ ("sqshrn2 %0.4s, %1.2d, #%2" \
- : "+w"(result) \
- : "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vqshrn_high_n_u16(a, b, c) \
- __extension__ \
- ({ \
- uint16x8_t b_ = (b); \
- uint8x8_t a_ = (a); \
- uint8x16_t result = vcombine_u8 \
- (a_, vcreate_u8 (UINT64_C (0x0))); \
- __asm__ ("uqshrn2 %0.16b, %1.8h, #%2" \
- : "+w"(result) \
- : "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vqshrn_high_n_u32(a, b, c) \
- __extension__ \
- ({ \
- uint32x4_t b_ = (b); \
- uint16x4_t a_ = (a); \
- uint16x8_t result = vcombine_u16 \
- (a_, vcreate_u16 (UINT64_C (0x0))); \
- __asm__ ("uqshrn2 %0.8h, %1.4s, #%2" \
- : "+w"(result) \
- : "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vqshrn_high_n_u64(a, b, c) \
- __extension__ \
- ({ \
- uint64x2_t b_ = (b); \
- uint32x2_t a_ = (a); \
- uint32x4_t result = vcombine_u32 \
- (a_, vcreate_u32 (UINT64_C (0x0))); \
- __asm__ ("uqshrn2 %0.4s, %1.2d, #%2" \
- : "+w"(result) \
- : "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vqshrun_high_n_s16(a, b, c) \
- __extension__ \
- ({ \
- int16x8_t b_ = (b); \
- uint8x8_t a_ = (a); \
- uint8x16_t result = vcombine_u8 \
- (a_, vcreate_u8 (UINT64_C (0x0))); \
- __asm__ ("sqshrun2 %0.16b, %1.8h, #%2" \
- : "+w"(result) \
- : "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vqshrun_high_n_s32(a, b, c) \
- __extension__ \
- ({ \
- int32x4_t b_ = (b); \
- uint16x4_t a_ = (a); \
- uint16x8_t result = vcombine_u16 \
- (a_, vcreate_u16 (UINT64_C (0x0))); \
- __asm__ ("sqshrun2 %0.8h, %1.4s, #%2" \
- : "+w"(result) \
- : "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vqshrun_high_n_s64(a, b, c) \
- __extension__ \
- ({ \
- int64x2_t b_ = (b); \
- uint32x2_t a_ = (a); \
- uint32x4_t result = vcombine_u32 \
- (a_, vcreate_u32 (UINT64_C (0x0))); \
- __asm__ ("sqshrun2 %0.4s, %1.2d, #%2" \
- : "+w"(result) \
- : "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
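-/* vrbit_*, vrbitq_*: reverse the bits within each byte (RBIT).  */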
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vrbit_s8 (int8x8_t a)
-{
- int8x8_t result;
- __asm__ ("rbit %0.8b,%1.8b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vrbit_u8 (uint8x8_t a)
-{
- uint8x8_t result;
- __asm__ ("rbit %0.8b,%1.8b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vrbitq_s8 (int8x16_t a)
-{
- int8x16_t result;
- __asm__ ("rbit %0.16b,%1.16b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vrbitq_u8 (uint8x16_t a)
-{
- uint8x16_t result;
- __asm__ ("rbit %0.16b,%1.16b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
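-/* vrecpe*, vrecps*, vrecpx*: reciprocal estimate, reciprocal step and
-   reciprocal exponent (FRECPE/URECPE, FRECPS, FRECPX).  */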
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vrecpe_f32 (float32x2_t a)
-{
- float32x2_t result;
- __asm__ ("frecpe %0.2s,%1.2s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vrecpe_u32 (uint32x2_t a)
-{
- uint32x2_t result;
- __asm__ ("urecpe %0.2s,%1.2s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64_t __attribute__ ((__always_inline__))
-vrecped_f64 (float64_t a)
-{
- float64_t result;
- __asm__ ("frecpe %d0,%d1"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vrecpeq_f32 (float32x4_t a)
-{
- float32x4_t result;
- __asm__ ("frecpe %0.4s,%1.4s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
-vrecpeq_f64 (float64x2_t a)
-{
- float64x2_t result;
- __asm__ ("frecpe %0.2d,%1.2d"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vrecpeq_u32 (uint32x4_t a)
-{
- uint32x4_t result;
- __asm__ ("urecpe %0.4s,%1.4s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
-vrecpes_f32 (float32_t a)
-{
- float32_t result;
- __asm__ ("frecpe %s0,%s1"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vrecps_f32 (float32x2_t a, float32x2_t b)
-{
- float32x2_t result;
- __asm__ ("frecps %0.2s,%1.2s,%2.2s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64_t __attribute__ ((__always_inline__))
-vrecpsd_f64 (float64_t a, float64_t b)
-{
- float64_t result;
- __asm__ ("frecps %d0,%d1,%d2"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vrecpsq_f32 (float32x4_t a, float32x4_t b)
-{
- float32x4_t result;
- __asm__ ("frecps %0.4s,%1.4s,%2.4s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
-vrecpsq_f64 (float64x2_t a, float64x2_t b)
-{
- float64x2_t result;
- __asm__ ("frecps %0.2d,%1.2d,%2.2d"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
-vrecpss_f32 (float32_t a, float32_t b)
-{
- float32_t result;
- __asm__ ("frecps %s0,%s1,%s2"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64_t __attribute__ ((__always_inline__))
-vrecpxd_f64 (float64_t a)
-{
- float64_t result;
- __asm__ ("frecpe %d0,%d1"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
-vrecpxs_f32 (float32_t a)
-{
- float32_t result;
- __asm__ ("frecpe %s0,%s1"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
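-/* vrev16*, vrev32*, vrev64*: reverse the elements within each 16-, 32- or
-   64-bit container (REV16/REV32/REV64).  */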
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vrev16_p8 (poly8x8_t a)
-{
- poly8x8_t result;
- __asm__ ("rev16 %0.8b,%1.8b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vrev16_s8 (int8x8_t a)
-{
- int8x8_t result;
- __asm__ ("rev16 %0.8b,%1.8b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vrev16_u8 (uint8x8_t a)
-{
- uint8x8_t result;
- __asm__ ("rev16 %0.8b,%1.8b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
-vrev16q_p8 (poly8x16_t a)
-{
- poly8x16_t result;
- __asm__ ("rev16 %0.16b,%1.16b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vrev16q_s8 (int8x16_t a)
-{
- int8x16_t result;
- __asm__ ("rev16 %0.16b,%1.16b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vrev16q_u8 (uint8x16_t a)
-{
- uint8x16_t result;
- __asm__ ("rev16 %0.16b,%1.16b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vrev32_p8 (poly8x8_t a)
-{
- poly8x8_t result;
- __asm__ ("rev32 %0.8b,%1.8b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
-vrev32_p16 (poly16x4_t a)
-{
- poly16x4_t result;
- __asm__ ("rev32 %0.4h,%1.4h"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vrev32_s8 (int8x8_t a)
-{
- int8x8_t result;
- __asm__ ("rev32 %0.8b,%1.8b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vrev32_s16 (int16x4_t a)
-{
- int16x4_t result;
- __asm__ ("rev32 %0.4h,%1.4h"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vrev32_u8 (uint8x8_t a)
-{
- uint8x8_t result;
- __asm__ ("rev32 %0.8b,%1.8b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vrev32_u16 (uint16x4_t a)
-{
- uint16x4_t result;
- __asm__ ("rev32 %0.4h,%1.4h"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
-vrev32q_p8 (poly8x16_t a)
-{
- poly8x16_t result;
- __asm__ ("rev32 %0.16b,%1.16b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
-vrev32q_p16 (poly16x8_t a)
-{
- poly16x8_t result;
- __asm__ ("rev32 %0.8h,%1.8h"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vrev32q_s8 (int8x16_t a)
-{
- int8x16_t result;
- __asm__ ("rev32 %0.16b,%1.16b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vrev32q_s16 (int16x8_t a)
-{
- int16x8_t result;
- __asm__ ("rev32 %0.8h,%1.8h"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vrev32q_u8 (uint8x16_t a)
-{
- uint8x16_t result;
- __asm__ ("rev32 %0.16b,%1.16b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vrev32q_u16 (uint16x8_t a)
-{
- uint16x8_t result;
- __asm__ ("rev32 %0.8h,%1.8h"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vrev64_f32 (float32x2_t a)
-{
- float32x2_t result;
- __asm__ ("rev64 %0.2s,%1.2s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vrev64_p8 (poly8x8_t a)
-{
- poly8x8_t result;
- __asm__ ("rev64 %0.8b,%1.8b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
-vrev64_p16 (poly16x4_t a)
-{
- poly16x4_t result;
- __asm__ ("rev64 %0.4h,%1.4h"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vrev64_s8 (int8x8_t a)
-{
- int8x8_t result;
- __asm__ ("rev64 %0.8b,%1.8b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vrev64_s16 (int16x4_t a)
-{
- int16x4_t result;
- __asm__ ("rev64 %0.4h,%1.4h"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vrev64_s32 (int32x2_t a)
-{
- int32x2_t result;
- __asm__ ("rev64 %0.2s,%1.2s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vrev64_u8 (uint8x8_t a)
-{
- uint8x8_t result;
- __asm__ ("rev64 %0.8b,%1.8b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vrev64_u16 (uint16x4_t a)
-{
- uint16x4_t result;
- __asm__ ("rev64 %0.4h,%1.4h"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vrev64_u32 (uint32x2_t a)
-{
- uint32x2_t result;
- __asm__ ("rev64 %0.2s,%1.2s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vrev64q_f32 (float32x4_t a)
-{
- float32x4_t result;
- __asm__ ("rev64 %0.4s,%1.4s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
-vrev64q_p8 (poly8x16_t a)
-{
- poly8x16_t result;
- __asm__ ("rev64 %0.16b,%1.16b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
-vrev64q_p16 (poly16x8_t a)
-{
- poly16x8_t result;
- __asm__ ("rev64 %0.8h,%1.8h"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vrev64q_s8 (int8x16_t a)
-{
- int8x16_t result;
- __asm__ ("rev64 %0.16b,%1.16b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vrev64q_s16 (int16x8_t a)
-{
- int16x8_t result;
- __asm__ ("rev64 %0.8h,%1.8h"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vrev64q_s32 (int32x4_t a)
-{
- int32x4_t result;
- __asm__ ("rev64 %0.4s,%1.4s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vrev64q_u8 (uint8x16_t a)
-{
- uint8x16_t result;
- __asm__ ("rev64 %0.16b,%1.16b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vrev64q_u16 (uint16x8_t a)
-{
- uint16x8_t result;
- __asm__ ("rev64 %0.8h,%1.8h"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vrev64q_u32 (uint32x4_t a)
-{
- uint32x4_t result;
- __asm__ ("rev64 %0.4s,%1.4s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
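-/* vrnd*: round to integral; FRINTZ rounds toward zero, FRINTA to nearest
-   with ties away, FRINTM toward -Inf, FRINTN to nearest even, FRINTP toward
-   +Inf.  */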
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vrnd_f32 (float32x2_t a)
-{
- float32x2_t result;
- __asm__ ("frintz %0.2s,%1.2s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vrnda_f32 (float32x2_t a)
-{
- float32x2_t result;
- __asm__ ("frinta %0.2s,%1.2s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vrndm_f32 (float32x2_t a)
-{
- float32x2_t result;
- __asm__ ("frintm %0.2s,%1.2s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vrndn_f32 (float32x2_t a)
-{
- float32x2_t result;
- __asm__ ("frintn %0.2s,%1.2s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vrndp_f32 (float32x2_t a)
-{
- float32x2_t result;
- __asm__ ("frintp %0.2s,%1.2s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vrndq_f32 (float32x4_t a)
-{
- float32x4_t result;
- __asm__ ("frintz %0.4s,%1.4s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
-vrndq_f64 (float64x2_t a)
-{
- float64x2_t result;
- __asm__ ("frintz %0.2d,%1.2d"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vrndqa_f32 (float32x4_t a)
-{
- float32x4_t result;
- __asm__ ("frinta %0.4s,%1.4s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
-vrndqa_f64 (float64x2_t a)
-{
- float64x2_t result;
- __asm__ ("frinta %0.2d,%1.2d"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vrndqm_f32 (float32x4_t a)
-{
- float32x4_t result;
- __asm__ ("frintm %0.4s,%1.4s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
-vrndqm_f64 (float64x2_t a)
-{
- float64x2_t result;
- __asm__ ("frintm %0.2d,%1.2d"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vrndqn_f32 (float32x4_t a)
-{
- float32x4_t result;
- __asm__ ("frintn %0.4s,%1.4s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
-vrndqn_f64 (float64x2_t a)
-{
- float64x2_t result;
- __asm__ ("frintn %0.2d,%1.2d"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vrndqp_f32 (float32x4_t a)
-{
- float32x4_t result;
- __asm__ ("frintp %0.4s,%1.4s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
-vrndqp_f64 (float64x2_t a)
-{
- float64x2_t result;
- __asm__ ("frintp %0.2d,%1.2d"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
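-/* vrshrn_high_n_*, vrshrn_n_*: rounding shift right narrow by an immediate
-   (RSHRN/RSHRN2).  */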
-#define vrshrn_high_n_s16(a, b, c) \
- __extension__ \
- ({ \
- int16x8_t b_ = (b); \
- int8x8_t a_ = (a); \
- int8x16_t result = vcombine_s8 \
- (a_, vcreate_s8 (UINT64_C (0x0))); \
- __asm__ ("rshrn2 %0.16b,%1.8h,#%2" \
- : "+w"(result) \
- : "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vrshrn_high_n_s32(a, b, c) \
- __extension__ \
- ({ \
- int32x4_t b_ = (b); \
- int16x4_t a_ = (a); \
- int16x8_t result = vcombine_s16 \
- (a_, vcreate_s16 (UINT64_C (0x0))); \
- __asm__ ("rshrn2 %0.8h,%1.4s,#%2" \
- : "+w"(result) \
- : "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vrshrn_high_n_s64(a, b, c) \
- __extension__ \
- ({ \
- int64x2_t b_ = (b); \
- int32x2_t a_ = (a); \
- int32x4_t result = vcombine_s32 \
- (a_, vcreate_s32 (UINT64_C (0x0))); \
- __asm__ ("rshrn2 %0.4s,%1.2d,#%2" \
- : "+w"(result) \
- : "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vrshrn_high_n_u16(a, b, c) \
- __extension__ \
- ({ \
- uint16x8_t b_ = (b); \
- uint8x8_t a_ = (a); \
- uint8x16_t result = vcombine_u8 \
- (a_, vcreate_u8 (UINT64_C (0x0))); \
- __asm__ ("rshrn2 %0.16b,%1.8h,#%2" \
- : "+w"(result) \
- : "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vrshrn_high_n_u32(a, b, c) \
- __extension__ \
- ({ \
- uint32x4_t b_ = (b); \
- uint16x4_t a_ = (a); \
- uint16x8_t result = vcombine_u16 \
- (a_, vcreate_u16 (UINT64_C (0x0))); \
- __asm__ ("rshrn2 %0.8h,%1.4s,#%2" \
- : "+w"(result) \
- : "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vrshrn_high_n_u64(a, b, c) \
- __extension__ \
- ({ \
- uint64x2_t b_ = (b); \
- uint32x2_t a_ = (a); \
- uint32x4_t result = vcombine_u32 \
- (a_, vcreate_u32 (UINT64_C (0x0))); \
- __asm__ ("rshrn2 %0.4s,%1.2d,#%2" \
- : "+w"(result) \
- : "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vrshrn_n_s16(a, b) \
- __extension__ \
- ({ \
- int16x8_t a_ = (a); \
- int8x8_t result; \
- __asm__ ("rshrn %0.8b,%1.8h,%2" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vrshrn_n_s32(a, b) \
- __extension__ \
- ({ \
- int32x4_t a_ = (a); \
- int16x4_t result; \
- __asm__ ("rshrn %0.4h,%1.4s,%2" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vrshrn_n_s64(a, b) \
- __extension__ \
- ({ \
- int64x2_t a_ = (a); \
- int32x2_t result; \
- __asm__ ("rshrn %0.2s,%1.2d,%2" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vrshrn_n_u16(a, b) \
- __extension__ \
- ({ \
- uint16x8_t a_ = (a); \
- uint8x8_t result; \
- __asm__ ("rshrn %0.8b,%1.8h,%2" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vrshrn_n_u32(a, b) \
- __extension__ \
- ({ \
- uint32x4_t a_ = (a); \
- uint16x4_t result; \
- __asm__ ("rshrn %0.4h,%1.4s,%2" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vrshrn_n_u64(a, b) \
- __extension__ \
- ({ \
- uint64x2_t a_ = (a); \
- uint32x2_t result; \
- __asm__ ("rshrn %0.2s,%1.2d,%2" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
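-/* vrsqrte*, vrsqrts*: reciprocal square root estimate and step
-   (FRSQRTE/URSQRTE, FRSQRTS).  */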
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vrsqrte_f32 (float32x2_t a)
-{
- float32x2_t result;
- __asm__ ("frsqrte %0.2s,%1.2s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
-vrsqrte_f64 (float64x2_t a)
-{
- float64x2_t result;
- __asm__ ("frsqrte %0.2d,%1.2d"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vrsqrte_u32 (uint32x2_t a)
-{
- uint32x2_t result;
- __asm__ ("ursqrte %0.2s,%1.2s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64_t __attribute__ ((__always_inline__))
-vrsqrted_f64 (float64_t a)
-{
- float64_t result;
- __asm__ ("frsqrte %d0,%d1"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vrsqrteq_f32 (float32x4_t a)
-{
- float32x4_t result;
- __asm__ ("frsqrte %0.4s,%1.4s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
-vrsqrteq_f64 (float64x2_t a)
-{
- float64x2_t result;
- __asm__ ("frsqrte %0.2d,%1.2d"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vrsqrteq_u32 (uint32x4_t a)
-{
- uint32x4_t result;
- __asm__ ("ursqrte %0.4s,%1.4s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
-vrsqrtes_f32 (float32_t a)
-{
- float32_t result;
- __asm__ ("frsqrte %s0,%s1"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vrsqrts_f32 (float32x2_t a, float32x2_t b)
-{
- float32x2_t result;
- __asm__ ("frsqrts %0.2s,%1.2s,%2.2s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64_t __attribute__ ((__always_inline__))
-vrsqrtsd_f64 (float64_t a, float64_t b)
-{
- float64_t result;
- __asm__ ("frsqrts %d0,%d1,%d2"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vrsqrtsq_f32 (float32x4_t a, float32x4_t b)
-{
- float32x4_t result;
- __asm__ ("frsqrts %0.4s,%1.4s,%2.4s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
-vrsqrtsq_f64 (float64x2_t a, float64x2_t b)
-{
- float64x2_t result;
- __asm__ ("frsqrts %0.2d,%1.2d,%2.2d"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
-vrsqrtss_f32 (float32_t a, float32_t b)
-{
- float32_t result;
- __asm__ ("frsqrts %s0,%s1,%s2"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
-vrsrtsq_f64 (float64x2_t a, float64x2_t b)
-{
- float64x2_t result;
- __asm__ ("frsqrts %0.2d,%1.2d,%2.2d"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
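/* Illustrative usage sketch, not part of the original header: frsqrte
   only yields an initial estimate of 1/sqrt(x); frsqrts supplies the
   Newton-Raphson correction factor (3 - a*b)/2, so the two are normally
   combined as below.  The helper name rsqrt_2step_f32 is hypothetical.  */
#include <arm_neon.h>

static inline float32x2_t
rsqrt_2step_f32 (float32x2_t x)
{
  float32x2_t e = vrsqrte_f32 (x);
  /* Each refinement step roughly doubles the number of accurate bits.  */
  e = vmul_f32 (e, vrsqrts_f32 (vmul_f32 (x, e), e));
  e = vmul_f32 (e, vrsqrts_f32 (vmul_f32 (x, e), e));
  return e;
}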
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vrsubhn_high_s16 (int8x8_t a, int16x8_t b, int16x8_t c)
-{
- int8x16_t result = vcombine_s8 (a, vcreate_s8 (UINT64_C (0x0)));
- __asm__ ("rsubhn2 %0.16b, %1.8h, %2.8h"
- : "+w"(result)
- : "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vrsubhn_high_s32 (int16x4_t a, int32x4_t b, int32x4_t c)
-{
- int16x8_t result = vcombine_s16 (a, vcreate_s16 (UINT64_C (0x0)));
- __asm__ ("rsubhn2 %0.8h, %1.4s, %2.4s"
- : "+w"(result)
- : "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vrsubhn_high_s64 (int32x2_t a, int64x2_t b, int64x2_t c)
-{
- int32x4_t result = vcombine_s32 (a, vcreate_s32 (UINT64_C (0x0)));
- __asm__ ("rsubhn2 %0.4s, %1.2d, %2.2d"
- : "+w"(result)
- : "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vrsubhn_high_u16 (uint8x8_t a, uint16x8_t b, uint16x8_t c)
-{
- uint8x16_t result = vcombine_u8 (a, vcreate_u8 (UINT64_C (0x0)));
- __asm__ ("rsubhn2 %0.16b, %1.8h, %2.8h"
- : "+w"(result)
- : "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vrsubhn_high_u32 (uint16x4_t a, uint32x4_t b, uint32x4_t c)
-{
- uint16x8_t result = vcombine_u16 (a, vcreate_u16 (UINT64_C (0x0)));
- __asm__ ("rsubhn2 %0.8h, %1.4s, %2.4s"
- : "+w"(result)
- : "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vrsubhn_high_u64 (uint32x2_t a, uint64x2_t b, uint64x2_t c)
-{
- uint32x4_t result = vcombine_u32 (a, vcreate_u32 (UINT64_C (0x0)));
- __asm__ ("rsubhn2 %0.4s, %1.2d, %2.2d"
- : "+w"(result)
- : "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vrsubhn_s16 (int16x8_t a, int16x8_t b)
-{
- int8x8_t result;
- __asm__ ("rsubhn %0.8b, %1.8h, %2.8h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vrsubhn_s32 (int32x4_t a, int32x4_t b)
-{
- int16x4_t result;
- __asm__ ("rsubhn %0.4h, %1.4s, %2.4s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vrsubhn_s64 (int64x2_t a, int64x2_t b)
-{
- int32x2_t result;
- __asm__ ("rsubhn %0.2s, %1.2d, %2.2d"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vrsubhn_u16 (uint16x8_t a, uint16x8_t b)
-{
- uint8x8_t result;
- __asm__ ("rsubhn %0.8b, %1.8h, %2.8h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vrsubhn_u32 (uint32x4_t a, uint32x4_t b)
-{
- uint16x4_t result;
- __asm__ ("rsubhn %0.4h, %1.4s, %2.4s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vrsubhn_u64 (uint64x2_t a, uint64x2_t b)
-{
- uint32x2_t result;
- __asm__ ("rsubhn %0.2s, %1.2d, %2.2d"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-#define vset_lane_f32(a, b, c) \
- __extension__ \
- ({ \
- float32x2_t b_ = (b); \
- float32_t a_ = (a); \
- float32x2_t result; \
- __asm__ ("ins %0.s[%3], %w1" \
- : "=w"(result) \
- : "r"(a_), "0"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vset_lane_f64(a, b, c) \
- __extension__ \
- ({ \
- float64x1_t b_ = (b); \
- float64_t a_ = (a); \
- float64x1_t result; \
- __asm__ ("ins %0.d[%3], %x1" \
- : "=w"(result) \
- : "r"(a_), "0"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vset_lane_p8(a, b, c) \
- __extension__ \
- ({ \
- poly8x8_t b_ = (b); \
- poly8_t a_ = (a); \
- poly8x8_t result; \
- __asm__ ("ins %0.b[%3], %w1" \
- : "=w"(result) \
- : "r"(a_), "0"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vset_lane_p16(a, b, c) \
- __extension__ \
- ({ \
- poly16x4_t b_ = (b); \
- poly16_t a_ = (a); \
- poly16x4_t result; \
- __asm__ ("ins %0.h[%3], %w1" \
- : "=w"(result) \
- : "r"(a_), "0"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vset_lane_s8(a, b, c) \
- __extension__ \
- ({ \
- int8x8_t b_ = (b); \
- int8_t a_ = (a); \
- int8x8_t result; \
- __asm__ ("ins %0.b[%3], %w1" \
- : "=w"(result) \
- : "r"(a_), "0"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vset_lane_s16(a, b, c) \
- __extension__ \
- ({ \
- int16x4_t b_ = (b); \
- int16_t a_ = (a); \
- int16x4_t result; \
- __asm__ ("ins %0.h[%3], %w1" \
- : "=w"(result) \
- : "r"(a_), "0"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vset_lane_s32(a, b, c) \
- __extension__ \
- ({ \
- int32x2_t b_ = (b); \
- int32_t a_ = (a); \
- int32x2_t result; \
- __asm__ ("ins %0.s[%3], %w1" \
- : "=w"(result) \
- : "r"(a_), "0"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vset_lane_s64(a, b, c) \
- __extension__ \
- ({ \
- int64x1_t b_ = (b); \
- int64_t a_ = (a); \
- int64x1_t result; \
- __asm__ ("ins %0.d[%3], %x1" \
- : "=w"(result) \
- : "r"(a_), "0"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vset_lane_u8(a, b, c) \
- __extension__ \
- ({ \
- uint8x8_t b_ = (b); \
- uint8_t a_ = (a); \
- uint8x8_t result; \
- __asm__ ("ins %0.b[%3], %w1" \
- : "=w"(result) \
- : "r"(a_), "0"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vset_lane_u16(a, b, c) \
- __extension__ \
- ({ \
- uint16x4_t b_ = (b); \
- uint16_t a_ = (a); \
- uint16x4_t result; \
- __asm__ ("ins %0.h[%3], %w1" \
- : "=w"(result) \
- : "r"(a_), "0"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vset_lane_u32(a, b, c) \
- __extension__ \
- ({ \
- uint32x2_t b_ = (b); \
- uint32_t a_ = (a); \
- uint32x2_t result; \
- __asm__ ("ins %0.s[%3], %w1" \
- : "=w"(result) \
- : "r"(a_), "0"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vset_lane_u64(a, b, c) \
- __extension__ \
- ({ \
- uint64x1_t b_ = (b); \
- uint64_t a_ = (a); \
- uint64x1_t result; \
- __asm__ ("ins %0.d[%3], %x1" \
- : "=w"(result) \
- : "r"(a_), "0"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vsetq_lane_f32(a, b, c) \
- __extension__ \
- ({ \
- float32x4_t b_ = (b); \
- float32_t a_ = (a); \
- float32x4_t result; \
- __asm__ ("ins %0.s[%3], %w1" \
- : "=w"(result) \
- : "r"(a_), "0"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vsetq_lane_f64(a, b, c) \
- __extension__ \
- ({ \
- float64x2_t b_ = (b); \
- float64_t a_ = (a); \
- float64x2_t result; \
- __asm__ ("ins %0.d[%3], %x1" \
- : "=w"(result) \
- : "r"(a_), "0"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vsetq_lane_p8(a, b, c) \
- __extension__ \
- ({ \
- poly8x16_t b_ = (b); \
- poly8_t a_ = (a); \
- poly8x16_t result; \
- __asm__ ("ins %0.b[%3], %w1" \
- : "=w"(result) \
- : "r"(a_), "0"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vsetq_lane_p16(a, b, c) \
- __extension__ \
- ({ \
- poly16x8_t b_ = (b); \
- poly16_t a_ = (a); \
- poly16x8_t result; \
- __asm__ ("ins %0.h[%3], %w1" \
- : "=w"(result) \
- : "r"(a_), "0"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vsetq_lane_s8(a, b, c) \
- __extension__ \
- ({ \
- int8x16_t b_ = (b); \
- int8_t a_ = (a); \
- int8x16_t result; \
- __asm__ ("ins %0.b[%3], %w1" \
- : "=w"(result) \
- : "r"(a_), "0"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vsetq_lane_s16(a, b, c) \
- __extension__ \
- ({ \
- int16x8_t b_ = (b); \
- int16_t a_ = (a); \
- int16x8_t result; \
- __asm__ ("ins %0.h[%3], %w1" \
- : "=w"(result) \
- : "r"(a_), "0"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vsetq_lane_s32(a, b, c) \
- __extension__ \
- ({ \
- int32x4_t b_ = (b); \
- int32_t a_ = (a); \
- int32x4_t result; \
- __asm__ ("ins %0.s[%3], %w1" \
- : "=w"(result) \
- : "r"(a_), "0"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vsetq_lane_s64(a, b, c) \
- __extension__ \
- ({ \
- int64x2_t b_ = (b); \
- int64_t a_ = (a); \
- int64x2_t result; \
- __asm__ ("ins %0.d[%3], %x1" \
- : "=w"(result) \
- : "r"(a_), "0"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vsetq_lane_u8(a, b, c) \
- __extension__ \
- ({ \
- uint8x16_t b_ = (b); \
- uint8_t a_ = (a); \
- uint8x16_t result; \
- __asm__ ("ins %0.b[%3], %w1" \
- : "=w"(result) \
- : "r"(a_), "0"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vsetq_lane_u16(a, b, c) \
- __extension__ \
- ({ \
- uint16x8_t b_ = (b); \
- uint16_t a_ = (a); \
- uint16x8_t result; \
- __asm__ ("ins %0.h[%3], %w1" \
- : "=w"(result) \
- : "r"(a_), "0"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vsetq_lane_u32(a, b, c) \
- __extension__ \
- ({ \
- uint32x4_t b_ = (b); \
- uint32_t a_ = (a); \
- uint32x4_t result; \
- __asm__ ("ins %0.s[%3], %w1" \
- : "=w"(result) \
- : "r"(a_), "0"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vsetq_lane_u64(a, b, c) \
- __extension__ \
- ({ \
- uint64x2_t b_ = (b); \
- uint64_t a_ = (a); \
- uint64x2_t result; \
- __asm__ ("ins %0.d[%3], %x1" \
- : "=w"(result) \
- : "r"(a_), "0"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
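/* Illustrative usage sketch, not part of the original header: the
   vset_lane/vsetq_lane macros insert one scalar into the lane selected
   by the immediate, leaving the remaining lanes untouched.  The helper
   name replace_lane1 is hypothetical.  */
#include <arm_neon.h>

static inline float32x4_t
replace_lane1 (float32x4_t v, float32_t x)
{
  /* Overwrite lane 1 of v with x; lanes 0, 2 and 3 are preserved.  */
  return vsetq_lane_f32 (x, v, 1);
}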
-#define vshrn_high_n_s16(a, b, c) \
- __extension__ \
- ({ \
- int16x8_t b_ = (b); \
- int8x8_t a_ = (a); \
- int8x16_t result = vcombine_s8 \
- (a_, vcreate_s8 (UINT64_C (0x0))); \
- __asm__ ("shrn2 %0.16b,%1.8h,#%2" \
- : "+w"(result) \
- : "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vshrn_high_n_s32(a, b, c) \
- __extension__ \
- ({ \
- int32x4_t b_ = (b); \
- int16x4_t a_ = (a); \
- int16x8_t result = vcombine_s16 \
- (a_, vcreate_s16 (UINT64_C (0x0))); \
- __asm__ ("shrn2 %0.8h,%1.4s,#%2" \
- : "+w"(result) \
- : "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vshrn_high_n_s64(a, b, c) \
- __extension__ \
- ({ \
- int64x2_t b_ = (b); \
- int32x2_t a_ = (a); \
- int32x4_t result = vcombine_s32 \
- (a_, vcreate_s32 (UINT64_C (0x0))); \
- __asm__ ("shrn2 %0.4s,%1.2d,#%2" \
- : "+w"(result) \
- : "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vshrn_high_n_u16(a, b, c) \
- __extension__ \
- ({ \
- uint16x8_t b_ = (b); \
- uint8x8_t a_ = (a); \
- uint8x16_t result = vcombine_u8 \
- (a_, vcreate_u8 (UINT64_C (0x0))); \
- __asm__ ("shrn2 %0.16b,%1.8h,#%2" \
- : "+w"(result) \
- : "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vshrn_high_n_u32(a, b, c) \
- __extension__ \
- ({ \
- uint32x4_t b_ = (b); \
- uint16x4_t a_ = (a); \
- uint16x8_t result = vcombine_u16 \
- (a_, vcreate_u16 (UINT64_C (0x0))); \
- __asm__ ("shrn2 %0.8h,%1.4s,#%2" \
- : "+w"(result) \
- : "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vshrn_high_n_u64(a, b, c) \
- __extension__ \
- ({ \
- uint64x2_t b_ = (b); \
- uint32x2_t a_ = (a); \
- uint32x4_t result = vcombine_u32 \
- (a_, vcreate_u32 (UINT64_C (0x0))); \
- __asm__ ("shrn2 %0.4s,%1.2d,#%2" \
- : "+w"(result) \
- : "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vshrn_n_s16(a, b) \
- __extension__ \
- ({ \
- int16x8_t a_ = (a); \
- int8x8_t result; \
- __asm__ ("shrn %0.8b,%1.8h,%2" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vshrn_n_s32(a, b) \
- __extension__ \
- ({ \
- int32x4_t a_ = (a); \
- int16x4_t result; \
- __asm__ ("shrn %0.4h,%1.4s,%2" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vshrn_n_s64(a, b) \
- __extension__ \
- ({ \
- int64x2_t a_ = (a); \
- int32x2_t result; \
- __asm__ ("shrn %0.2s,%1.2d,%2" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vshrn_n_u16(a, b) \
- __extension__ \
- ({ \
- uint16x8_t a_ = (a); \
- uint8x8_t result; \
- __asm__ ("shrn %0.8b,%1.8h,%2" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vshrn_n_u32(a, b) \
- __extension__ \
- ({ \
- uint32x4_t a_ = (a); \
- uint16x4_t result; \
- __asm__ ("shrn %0.4h,%1.4s,%2" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vshrn_n_u64(a, b) \
- __extension__ \
- ({ \
- uint64x2_t a_ = (a); \
- uint32x2_t result; \
- __asm__ ("shrn %0.2s,%1.2d,%2" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
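/* Illustrative usage sketch, not part of the original header: shrn
   narrows one wide vector into the low half of the result, and shrn2
   (the vshrn_high_n_* macros) writes narrowed data into the upper half
   of an existing vector, so the pair packs two wide vectors into one
   narrow one.  The helper name pack_s32_to_s16 is hypothetical.  */
#include <arm_neon.h>

static inline int16x8_t
pack_s32_to_s16 (int32x4_t lo, int32x4_t hi)
{
  /* Truncating shift right by 8, then combine: lanes 0-3 come from lo,
     lanes 4-7 from hi.  */
  int16x4_t low = vshrn_n_s32 (lo, 8);
  return vshrn_high_n_s32 (low, hi, 8);
}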
-#define vsli_n_p8(a, b, c) \
- __extension__ \
- ({ \
- poly8x8_t b_ = (b); \
- poly8x8_t a_ = (a); \
- poly8x8_t result; \
- __asm__ ("sli %0.8b,%2.8b,%3" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vsli_n_p16(a, b, c) \
- __extension__ \
- ({ \
- poly16x4_t b_ = (b); \
- poly16x4_t a_ = (a); \
- poly16x4_t result; \
- __asm__ ("sli %0.4h,%2.4h,%3" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vsliq_n_p8(a, b, c) \
- __extension__ \
- ({ \
- poly8x16_t b_ = (b); \
- poly8x16_t a_ = (a); \
- poly8x16_t result; \
- __asm__ ("sli %0.16b,%2.16b,%3" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vsliq_n_p16(a, b, c) \
- __extension__ \
- ({ \
- poly16x8_t b_ = (b); \
- poly16x8_t a_ = (a); \
- poly16x8_t result; \
- __asm__ ("sli %0.8h,%2.8h,%3" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vsri_n_p8(a, b, c) \
- __extension__ \
- ({ \
- poly8x8_t b_ = (b); \
- poly8x8_t a_ = (a); \
- poly8x8_t result; \
- __asm__ ("sri %0.8b,%2.8b,%3" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vsri_n_p16(a, b, c) \
- __extension__ \
- ({ \
- poly16x4_t b_ = (b); \
- poly16x4_t a_ = (a); \
- poly16x4_t result; \
- __asm__ ("sri %0.4h,%2.4h,%3" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vsriq_n_p8(a, b, c) \
- __extension__ \
- ({ \
- poly8x16_t b_ = (b); \
- poly8x16_t a_ = (a); \
- poly8x16_t result; \
- __asm__ ("sri %0.16b,%2.16b,%3" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vsriq_n_p16(a, b, c) \
- __extension__ \
- ({ \
- poly16x8_t b_ = (b); \
- poly16x8_t a_ = (a); \
- poly16x8_t result; \
- __asm__ ("sri %0.8h,%2.8h,%3" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "i"(c) \
- : /* No clobbers */); \
- result; \
- })
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst1_f32 (float32_t * a, float32x2_t b)
-{
- __asm__ ("st1 {%1.2s},[%0]"
- :
- : "r"(a), "w"(b)
- : "memory");
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst1_f64 (float64_t * a, float64x1_t b)
-{
- __asm__ ("st1 {%1.1d},[%0]"
- :
- : "r"(a), "w"(b)
- : "memory");
-}
-
-#define vst1_lane_f32(a, b, c) \
- __extension__ \
- ({ \
- float32x2_t b_ = (b); \
- float32_t * a_ = (a); \
- __asm__ ("st1 {%1.s}[%2],[%0]" \
- : \
- : "r"(a_), "w"(b_), "i"(c) \
- : "memory"); \
- })
-
-#define vst1_lane_f64(a, b, c) \
- __extension__ \
- ({ \
- float64x1_t b_ = (b); \
- float64_t * a_ = (a); \
- __asm__ ("st1 {%1.d}[%2],[%0]" \
- : \
- : "r"(a_), "w"(b_), "i"(c) \
- : "memory"); \
- })
-
-#define vst1_lane_p8(a, b, c) \
- __extension__ \
- ({ \
- poly8x8_t b_ = (b); \
- poly8_t * a_ = (a); \
- __asm__ ("st1 {%1.b}[%2],[%0]" \
- : \
- : "r"(a_), "w"(b_), "i"(c) \
- : "memory"); \
- })
-
-#define vst1_lane_p16(a, b, c) \
- __extension__ \
- ({ \
- poly16x4_t b_ = (b); \
- poly16_t * a_ = (a); \
- __asm__ ("st1 {%1.h}[%2],[%0]" \
- : \
- : "r"(a_), "w"(b_), "i"(c) \
- : "memory"); \
- })
-
-#define vst1_lane_s8(a, b, c) \
- __extension__ \
- ({ \
- int8x8_t b_ = (b); \
- int8_t * a_ = (a); \
- __asm__ ("st1 {%1.b}[%2],[%0]" \
- : \
- : "r"(a_), "w"(b_), "i"(c) \
- : "memory"); \
- })
-
-#define vst1_lane_s16(a, b, c) \
- __extension__ \
- ({ \
- int16x4_t b_ = (b); \
- int16_t * a_ = (a); \
- __asm__ ("st1 {%1.h}[%2],[%0]" \
- : \
- : "r"(a_), "w"(b_), "i"(c) \
- : "memory"); \
- })
-
-#define vst1_lane_s32(a, b, c) \
- __extension__ \
- ({ \
- int32x2_t b_ = (b); \
- int32_t * a_ = (a); \
- __asm__ ("st1 {%1.s}[%2],[%0]" \
- : \
- : "r"(a_), "w"(b_), "i"(c) \
- : "memory"); \
- })
-
-#define vst1_lane_s64(a, b, c) \
- __extension__ \
- ({ \
- int64x1_t b_ = (b); \
- int64_t * a_ = (a); \
- __asm__ ("st1 {%1.d}[%2],[%0]" \
- : \
- : "r"(a_), "w"(b_), "i"(c) \
- : "memory"); \
- })
-
-#define vst1_lane_u8(a, b, c) \
- __extension__ \
- ({ \
- uint8x8_t b_ = (b); \
- uint8_t * a_ = (a); \
- __asm__ ("st1 {%1.b}[%2],[%0]" \
- : \
- : "r"(a_), "w"(b_), "i"(c) \
- : "memory"); \
- })
-
-#define vst1_lane_u16(a, b, c) \
- __extension__ \
- ({ \
- uint16x4_t b_ = (b); \
- uint16_t * a_ = (a); \
- __asm__ ("st1 {%1.h}[%2],[%0]" \
- : \
- : "r"(a_), "w"(b_), "i"(c) \
- : "memory"); \
- })
-
-#define vst1_lane_u32(a, b, c) \
- __extension__ \
- ({ \
- uint32x2_t b_ = (b); \
- uint32_t * a_ = (a); \
- __asm__ ("st1 {%1.s}[%2],[%0]" \
- : \
- : "r"(a_), "w"(b_), "i"(c) \
- : "memory"); \
- })
-
-#define vst1_lane_u64(a, b, c) \
- __extension__ \
- ({ \
- uint64x1_t b_ = (b); \
- uint64_t * a_ = (a); \
- __asm__ ("st1 {%1.d}[%2],[%0]" \
- : \
- : "r"(a_), "w"(b_), "i"(c) \
- : "memory"); \
- })
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst1_p8 (poly8_t * a, poly8x8_t b)
-{
- __asm__ ("st1 {%1.8b},[%0]"
- :
- : "r"(a), "w"(b)
- : "memory");
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst1_p16 (poly16_t * a, poly16x4_t b)
-{
- __asm__ ("st1 {%1.4h},[%0]"
- :
- : "r"(a), "w"(b)
- : "memory");
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst1_s8 (int8_t * a, int8x8_t b)
-{
- __asm__ ("st1 {%1.8b},[%0]"
- :
- : "r"(a), "w"(b)
- : "memory");
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst1_s16 (int16_t * a, int16x4_t b)
-{
- __asm__ ("st1 {%1.4h},[%0]"
- :
- : "r"(a), "w"(b)
- : "memory");
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst1_s32 (int32_t * a, int32x2_t b)
-{
- __asm__ ("st1 {%1.2s},[%0]"
- :
- : "r"(a), "w"(b)
- : "memory");
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst1_s64 (int64_t * a, int64x1_t b)
-{
- __asm__ ("st1 {%1.1d},[%0]"
- :
- : "r"(a), "w"(b)
- : "memory");
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst1_u8 (uint8_t * a, uint8x8_t b)
-{
- __asm__ ("st1 {%1.8b},[%0]"
- :
- : "r"(a), "w"(b)
- : "memory");
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst1_u16 (uint16_t * a, uint16x4_t b)
-{
- __asm__ ("st1 {%1.4h},[%0]"
- :
- : "r"(a), "w"(b)
- : "memory");
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst1_u32 (uint32_t * a, uint32x2_t b)
-{
- __asm__ ("st1 {%1.2s},[%0]"
- :
- : "r"(a), "w"(b)
- : "memory");
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst1_u64 (uint64_t * a, uint64x1_t b)
-{
- __asm__ ("st1 {%1.1d},[%0]"
- :
- : "r"(a), "w"(b)
- : "memory");
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst1q_f32 (float32_t * a, float32x4_t b)
-{
- __asm__ ("st1 {%1.4s},[%0]"
- :
- : "r"(a), "w"(b)
- : "memory");
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst1q_f64 (float64_t * a, float64x2_t b)
-{
- __asm__ ("st1 {%1.2d},[%0]"
- :
- : "r"(a), "w"(b)
- : "memory");
-}
-
-#define vst1q_lane_f32(a, b, c) \
- __extension__ \
- ({ \
- float32x4_t b_ = (b); \
- float32_t * a_ = (a); \
- __asm__ ("st1 {%1.s}[%2],[%0]" \
- : \
- : "r"(a_), "w"(b_), "i"(c) \
- : "memory"); \
- })
-
-#define vst1q_lane_f64(a, b, c) \
- __extension__ \
- ({ \
- float64x2_t b_ = (b); \
- float64_t * a_ = (a); \
- __asm__ ("st1 {%1.d}[%2],[%0]" \
- : \
- : "r"(a_), "w"(b_), "i"(c) \
- : "memory"); \
- })
-
-#define vst1q_lane_p8(a, b, c) \
- __extension__ \
- ({ \
- poly8x16_t b_ = (b); \
- poly8_t * a_ = (a); \
- __asm__ ("st1 {%1.b}[%2],[%0]" \
- : \
- : "r"(a_), "w"(b_), "i"(c) \
- : "memory"); \
- })
-
-#define vst1q_lane_p16(a, b, c) \
- __extension__ \
- ({ \
- poly16x8_t b_ = (b); \
- poly16_t * a_ = (a); \
- __asm__ ("st1 {%1.h}[%2],[%0]" \
- : \
- : "r"(a_), "w"(b_), "i"(c) \
- : "memory"); \
- })
-
-#define vst1q_lane_s8(a, b, c) \
- __extension__ \
- ({ \
- int8x16_t b_ = (b); \
- int8_t * a_ = (a); \
- __asm__ ("st1 {%1.b}[%2],[%0]" \
- : \
- : "r"(a_), "w"(b_), "i"(c) \
- : "memory"); \
- })
-
-#define vst1q_lane_s16(a, b, c) \
- __extension__ \
- ({ \
- int16x8_t b_ = (b); \
- int16_t * a_ = (a); \
- __asm__ ("st1 {%1.h}[%2],[%0]" \
- : \
- : "r"(a_), "w"(b_), "i"(c) \
- : "memory"); \
- })
-
-#define vst1q_lane_s32(a, b, c) \
- __extension__ \
- ({ \
- int32x4_t b_ = (b); \
- int32_t * a_ = (a); \
- __asm__ ("st1 {%1.s}[%2],[%0]" \
- : \
- : "r"(a_), "w"(b_), "i"(c) \
- : "memory"); \
- })
-
-#define vst1q_lane_s64(a, b, c) \
- __extension__ \
- ({ \
- int64x2_t b_ = (b); \
- int64_t * a_ = (a); \
- __asm__ ("st1 {%1.d}[%2],[%0]" \
- : \
- : "r"(a_), "w"(b_), "i"(c) \
- : "memory"); \
- })
-
-#define vst1q_lane_u8(a, b, c) \
- __extension__ \
- ({ \
- uint8x16_t b_ = (b); \
- uint8_t * a_ = (a); \
- __asm__ ("st1 {%1.b}[%2],[%0]" \
- : \
- : "r"(a_), "w"(b_), "i"(c) \
- : "memory"); \
- })
-
-#define vst1q_lane_u16(a, b, c) \
- __extension__ \
- ({ \
- uint16x8_t b_ = (b); \
- uint16_t * a_ = (a); \
- __asm__ ("st1 {%1.h}[%2],[%0]" \
- : \
- : "r"(a_), "w"(b_), "i"(c) \
- : "memory"); \
- })
-
-#define vst1q_lane_u32(a, b, c) \
- __extension__ \
- ({ \
- uint32x4_t b_ = (b); \
- uint32_t * a_ = (a); \
- __asm__ ("st1 {%1.s}[%2],[%0]" \
- : \
- : "r"(a_), "w"(b_), "i"(c) \
- : "memory"); \
- })
-
-#define vst1q_lane_u64(a, b, c) \
- __extension__ \
- ({ \
- uint64x2_t b_ = (b); \
- uint64_t * a_ = (a); \
- __asm__ ("st1 {%1.d}[%2],[%0]" \
- : \
- : "r"(a_), "w"(b_), "i"(c) \
- : "memory"); \
- })
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst1q_p8 (poly8_t * a, poly8x16_t b)
-{
- __asm__ ("st1 {%1.16b},[%0]"
- :
- : "r"(a), "w"(b)
- : "memory");
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst1q_p16 (poly16_t * a, poly16x8_t b)
-{
- __asm__ ("st1 {%1.8h},[%0]"
- :
- : "r"(a), "w"(b)
- : "memory");
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst1q_s8 (int8_t * a, int8x16_t b)
-{
- __asm__ ("st1 {%1.16b},[%0]"
- :
- : "r"(a), "w"(b)
- : "memory");
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst1q_s16 (int16_t * a, int16x8_t b)
-{
- __asm__ ("st1 {%1.8h},[%0]"
- :
- : "r"(a), "w"(b)
- : "memory");
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst1q_s32 (int32_t * a, int32x4_t b)
-{
- __asm__ ("st1 {%1.4s},[%0]"
- :
- : "r"(a), "w"(b)
- : "memory");
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst1q_s64 (int64_t * a, int64x2_t b)
-{
- __asm__ ("st1 {%1.2d},[%0]"
- :
- : "r"(a), "w"(b)
- : "memory");
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst1q_u8 (uint8_t * a, uint8x16_t b)
-{
- __asm__ ("st1 {%1.16b},[%0]"
- :
- : "r"(a), "w"(b)
- : "memory");
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst1q_u16 (uint16_t * a, uint16x8_t b)
-{
- __asm__ ("st1 {%1.8h},[%0]"
- :
- : "r"(a), "w"(b)
- : "memory");
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst1q_u32 (uint32_t * a, uint32x4_t b)
-{
- __asm__ ("st1 {%1.4s},[%0]"
- :
- : "r"(a), "w"(b)
- : "memory");
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst1q_u64 (uint64_t * a, uint64x2_t b)
-{
- __asm__ ("st1 {%1.2d},[%0]"
- :
- : "r"(a), "w"(b)
- : "memory");
-}
-
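/* Illustrative usage sketch, not part of the original header: the
   vst1/vst1q functions store a whole vector to memory, while the
   vst1_lane/vst1q_lane macros store one selected lane.  The helper
   name store_sum4 is hypothetical.  */
#include <arm_neon.h>

static inline void
store_sum4 (float32_t *dst, float32x4_t a, float32x4_t b)
{
  /* Write four packed sums to dst[0..3], then the lane-2 sum again
     to dst[4].  */
  float32x4_t s = vaddq_f32 (a, b);
  vst1q_f32 (dst, s);
  vst1q_lane_f32 (dst + 4, s, 2);
}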
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vsubhn_high_s16 (int8x8_t a, int16x8_t b, int16x8_t c)
-{
- int8x16_t result = vcombine_s8 (a, vcreate_s8 (UINT64_C (0x0)));
- __asm__ ("subhn2 %0.16b, %1.8h, %2.8h"
- : "+w"(result)
- : "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vsubhn_high_s32 (int16x4_t a, int32x4_t b, int32x4_t c)
-{
- int16x8_t result = vcombine_s16 (a, vcreate_s16 (UINT64_C (0x0)));
- __asm__ ("subhn2 %0.8h, %1.4s, %2.4s"
- : "+w"(result)
- : "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vsubhn_high_s64 (int32x2_t a, int64x2_t b, int64x2_t c)
-{
- int32x4_t result = vcombine_s32 (a, vcreate_s32 (UINT64_C (0x0)));
- __asm__ ("subhn2 %0.4s, %1.2d, %2.2d"
- : "+w"(result)
- : "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vsubhn_high_u16 (uint8x8_t a, uint16x8_t b, uint16x8_t c)
-{
- uint8x16_t result = vcombine_u8 (a, vcreate_u8 (UINT64_C (0x0)));
- __asm__ ("subhn2 %0.16b, %1.8h, %2.8h"
- : "+w"(result)
- : "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vsubhn_high_u32 (uint16x4_t a, uint32x4_t b, uint32x4_t c)
-{
- uint16x8_t result = vcombine_u16 (a, vcreate_u16 (UINT64_C (0x0)));
- __asm__ ("subhn2 %0.8h, %1.4s, %2.4s"
- : "+w"(result)
- : "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vsubhn_high_u64 (uint32x2_t a, uint64x2_t b, uint64x2_t c)
-{
- uint32x4_t result = vcombine_u32 (a, vcreate_u32 (UINT64_C (0x0)));
- __asm__ ("subhn2 %0.4s, %1.2d, %2.2d"
- : "+w"(result)
- : "w"(b), "w"(c)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vsubhn_s16 (int16x8_t a, int16x8_t b)
-{
- int8x8_t result;
- __asm__ ("subhn %0.8b, %1.8h, %2.8h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vsubhn_s32 (int32x4_t a, int32x4_t b)
-{
- int16x4_t result;
- __asm__ ("subhn %0.4h, %1.4s, %2.4s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vsubhn_s64 (int64x2_t a, int64x2_t b)
-{
- int32x2_t result;
- __asm__ ("subhn %0.2s, %1.2d, %2.2d"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vsubhn_u16 (uint16x8_t a, uint16x8_t b)
-{
- uint8x8_t result;
- __asm__ ("subhn %0.8b, %1.8h, %2.8h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vsubhn_u32 (uint32x4_t a, uint32x4_t b)
-{
- uint16x4_t result;
- __asm__ ("subhn %0.4h, %1.4s, %2.4s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vsubhn_u64 (uint64x2_t a, uint64x2_t b)
-{
- uint32x2_t result;
- __asm__ ("subhn %0.2s, %1.2d, %2.2d"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
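/* Illustrative usage sketch, not part of the original header: subhn
   subtracts two wide vectors and keeps only the high half of each
   element of the difference, i.e. for 32-bit lanes it yields the
   difference truncated down by 2^16.  The helper name diff_high_halves
   is hypothetical.  */
#include <arm_neon.h>

static inline int16x4_t
diff_high_halves (int32x4_t a, int32x4_t b)
{
  /* Each result lane is the top 16 bits of (a - b).  */
  return vsubhn_s32 (a, b);
}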
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vtrn1_f32 (float32x2_t a, float32x2_t b)
-{
- float32x2_t result;
- __asm__ ("trn1 %0.2s,%1.2s,%2.2s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vtrn1_p8 (poly8x8_t a, poly8x8_t b)
-{
- poly8x8_t result;
- __asm__ ("trn1 %0.8b,%1.8b,%2.8b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
-vtrn1_p16 (poly16x4_t a, poly16x4_t b)
-{
- poly16x4_t result;
- __asm__ ("trn1 %0.4h,%1.4h,%2.4h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vtrn1_s8 (int8x8_t a, int8x8_t b)
-{
- int8x8_t result;
- __asm__ ("trn1 %0.8b,%1.8b,%2.8b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vtrn1_s16 (int16x4_t a, int16x4_t b)
-{
- int16x4_t result;
- __asm__ ("trn1 %0.4h,%1.4h,%2.4h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vtrn1_s32 (int32x2_t a, int32x2_t b)
-{
- int32x2_t result;
- __asm__ ("trn1 %0.2s,%1.2s,%2.2s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vtrn1_u8 (uint8x8_t a, uint8x8_t b)
-{
- uint8x8_t result;
- __asm__ ("trn1 %0.8b,%1.8b,%2.8b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vtrn1_u16 (uint16x4_t a, uint16x4_t b)
-{
- uint16x4_t result;
- __asm__ ("trn1 %0.4h,%1.4h,%2.4h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vtrn1_u32 (uint32x2_t a, uint32x2_t b)
-{
- uint32x2_t result;
- __asm__ ("trn1 %0.2s,%1.2s,%2.2s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vtrn1q_f32 (float32x4_t a, float32x4_t b)
-{
- float32x4_t result;
- __asm__ ("trn1 %0.4s,%1.4s,%2.4s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
-vtrn1q_f64 (float64x2_t a, float64x2_t b)
-{
- float64x2_t result;
- __asm__ ("trn1 %0.2d,%1.2d,%2.2d"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
-vtrn1q_p8 (poly8x16_t a, poly8x16_t b)
-{
- poly8x16_t result;
- __asm__ ("trn1 %0.16b,%1.16b,%2.16b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
-vtrn1q_p16 (poly16x8_t a, poly16x8_t b)
-{
- poly16x8_t result;
- __asm__ ("trn1 %0.8h,%1.8h,%2.8h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vtrn1q_s8 (int8x16_t a, int8x16_t b)
-{
- int8x16_t result;
- __asm__ ("trn1 %0.16b,%1.16b,%2.16b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vtrn1q_s16 (int16x8_t a, int16x8_t b)
-{
- int16x8_t result;
- __asm__ ("trn1 %0.8h,%1.8h,%2.8h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vtrn1q_s32 (int32x4_t a, int32x4_t b)
-{
- int32x4_t result;
- __asm__ ("trn1 %0.4s,%1.4s,%2.4s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vtrn1q_s64 (int64x2_t a, int64x2_t b)
-{
- int64x2_t result;
- __asm__ ("trn1 %0.2d,%1.2d,%2.2d"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vtrn1q_u8 (uint8x16_t a, uint8x16_t b)
-{
- uint8x16_t result;
- __asm__ ("trn1 %0.16b,%1.16b,%2.16b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vtrn1q_u16 (uint16x8_t a, uint16x8_t b)
-{
- uint16x8_t result;
- __asm__ ("trn1 %0.8h,%1.8h,%2.8h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vtrn1q_u32 (uint32x4_t a, uint32x4_t b)
-{
- uint32x4_t result;
- __asm__ ("trn1 %0.4s,%1.4s,%2.4s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vtrn1q_u64 (uint64x2_t a, uint64x2_t b)
-{
- uint64x2_t result;
- __asm__ ("trn1 %0.2d,%1.2d,%2.2d"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vtrn2_f32 (float32x2_t a, float32x2_t b)
-{
- float32x2_t result;
- __asm__ ("trn2 %0.2s,%1.2s,%2.2s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vtrn2_p8 (poly8x8_t a, poly8x8_t b)
-{
- poly8x8_t result;
- __asm__ ("trn2 %0.8b,%1.8b,%2.8b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
-vtrn2_p16 (poly16x4_t a, poly16x4_t b)
-{
- poly16x4_t result;
- __asm__ ("trn2 %0.4h,%1.4h,%2.4h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vtrn2_s8 (int8x8_t a, int8x8_t b)
-{
- int8x8_t result;
- __asm__ ("trn2 %0.8b,%1.8b,%2.8b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vtrn2_s16 (int16x4_t a, int16x4_t b)
-{
- int16x4_t result;
- __asm__ ("trn2 %0.4h,%1.4h,%2.4h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vtrn2_s32 (int32x2_t a, int32x2_t b)
-{
- int32x2_t result;
- __asm__ ("trn2 %0.2s,%1.2s,%2.2s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vtrn2_u8 (uint8x8_t a, uint8x8_t b)
-{
- uint8x8_t result;
- __asm__ ("trn2 %0.8b,%1.8b,%2.8b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vtrn2_u16 (uint16x4_t a, uint16x4_t b)
-{
- uint16x4_t result;
- __asm__ ("trn2 %0.4h,%1.4h,%2.4h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vtrn2_u32 (uint32x2_t a, uint32x2_t b)
-{
- uint32x2_t result;
- __asm__ ("trn2 %0.2s,%1.2s,%2.2s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vtrn2q_f32 (float32x4_t a, float32x4_t b)
-{
- float32x4_t result;
- __asm__ ("trn2 %0.4s,%1.4s,%2.4s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
-vtrn2q_f64 (float64x2_t a, float64x2_t b)
-{
- float64x2_t result;
- __asm__ ("trn2 %0.2d,%1.2d,%2.2d"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
-vtrn2q_p8 (poly8x16_t a, poly8x16_t b)
-{
- poly8x16_t result;
- __asm__ ("trn2 %0.16b,%1.16b,%2.16b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
-vtrn2q_p16 (poly16x8_t a, poly16x8_t b)
-{
- poly16x8_t result;
- __asm__ ("trn2 %0.8h,%1.8h,%2.8h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vtrn2q_s8 (int8x16_t a, int8x16_t b)
-{
- int8x16_t result;
- __asm__ ("trn2 %0.16b,%1.16b,%2.16b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vtrn2q_s16 (int16x8_t a, int16x8_t b)
-{
- int16x8_t result;
- __asm__ ("trn2 %0.8h,%1.8h,%2.8h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vtrn2q_s32 (int32x4_t a, int32x4_t b)
-{
- int32x4_t result;
- __asm__ ("trn2 %0.4s,%1.4s,%2.4s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vtrn2q_s64 (int64x2_t a, int64x2_t b)
-{
- int64x2_t result;
- __asm__ ("trn2 %0.2d,%1.2d,%2.2d"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vtrn2q_u8 (uint8x16_t a, uint8x16_t b)
-{
- uint8x16_t result;
- __asm__ ("trn2 %0.16b,%1.16b,%2.16b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vtrn2q_u16 (uint16x8_t a, uint16x8_t b)
-{
- uint16x8_t result;
- __asm__ ("trn2 %0.8h,%1.8h,%2.8h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vtrn2q_u32 (uint32x4_t a, uint32x4_t b)
-{
- uint32x4_t result;
- __asm__ ("trn2 %0.4s,%1.4s,%2.4s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vtrn2q_u64 (uint64x2_t a, uint64x2_t b)
-{
- uint64x2_t result;
- __asm__ ("trn2 %0.2d,%1.2d,%2.2d"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
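/* Illustrative usage sketch, not part of the original header: trn1
   gathers the even-indexed lanes of both inputs and trn2 the
   odd-indexed lanes; on two-element vectors the pair performs a full
   2x2 transpose.  The helper name transpose2x2_rows is hypothetical.  */
#include <arm_neon.h>

static inline void
transpose2x2_rows (float32x2_t *row0, float32x2_t *row1)
{
  /* {a0,a1},{b0,b1} -> {a0,b0},{a1,b1}.  */
  float32x2_t t0 = vtrn1_f32 (*row0, *row1);
  float32x2_t t1 = vtrn2_f32 (*row0, *row1);
  *row0 = t0;
  *row1 = t1;
}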
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vtst_p8 (poly8x8_t a, poly8x8_t b)
-{
- uint8x8_t result;
- __asm__ ("cmtst %0.8b, %1.8b, %2.8b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vtst_p16 (poly16x4_t a, poly16x4_t b)
-{
- uint16x4_t result;
- __asm__ ("cmtst %0.4h, %1.4h, %2.4h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vtstq_p8 (poly8x16_t a, poly8x16_t b)
-{
- uint8x16_t result;
- __asm__ ("cmtst %0.16b, %1.16b, %2.16b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vtstq_p16 (poly16x8_t a, poly16x8_t b)
-{
- uint16x8_t result;
- __asm__ ("cmtst %0.8h, %1.8h, %2.8h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vuzp1_f32 (float32x2_t a, float32x2_t b)
-{
- float32x2_t result;
- __asm__ ("uzp1 %0.2s,%1.2s,%2.2s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vuzp1_p8 (poly8x8_t a, poly8x8_t b)
-{
- poly8x8_t result;
- __asm__ ("uzp1 %0.8b,%1.8b,%2.8b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
-vuzp1_p16 (poly16x4_t a, poly16x4_t b)
-{
- poly16x4_t result;
- __asm__ ("uzp1 %0.4h,%1.4h,%2.4h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vuzp1_s8 (int8x8_t a, int8x8_t b)
-{
- int8x8_t result;
- __asm__ ("uzp1 %0.8b,%1.8b,%2.8b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vuzp1_s16 (int16x4_t a, int16x4_t b)
-{
- int16x4_t result;
- __asm__ ("uzp1 %0.4h,%1.4h,%2.4h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vuzp1_s32 (int32x2_t a, int32x2_t b)
-{
- int32x2_t result;
- __asm__ ("uzp1 %0.2s,%1.2s,%2.2s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vuzp1_u8 (uint8x8_t a, uint8x8_t b)
-{
- uint8x8_t result;
- __asm__ ("uzp1 %0.8b,%1.8b,%2.8b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vuzp1_u16 (uint16x4_t a, uint16x4_t b)
-{
- uint16x4_t result;
- __asm__ ("uzp1 %0.4h,%1.4h,%2.4h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vuzp1_u32 (uint32x2_t a, uint32x2_t b)
-{
- uint32x2_t result;
- __asm__ ("uzp1 %0.2s,%1.2s,%2.2s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vuzp1q_f32 (float32x4_t a, float32x4_t b)
-{
- float32x4_t result;
- __asm__ ("uzp1 %0.4s,%1.4s,%2.4s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
-vuzp1q_f64 (float64x2_t a, float64x2_t b)
-{
- float64x2_t result;
- __asm__ ("uzp1 %0.2d,%1.2d,%2.2d"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
-vuzp1q_p8 (poly8x16_t a, poly8x16_t b)
-{
- poly8x16_t result;
- __asm__ ("uzp1 %0.16b,%1.16b,%2.16b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
-vuzp1q_p16 (poly16x8_t a, poly16x8_t b)
-{
- poly16x8_t result;
- __asm__ ("uzp1 %0.8h,%1.8h,%2.8h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vuzp1q_s8 (int8x16_t a, int8x16_t b)
-{
- int8x16_t result;
- __asm__ ("uzp1 %0.16b,%1.16b,%2.16b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vuzp1q_s16 (int16x8_t a, int16x8_t b)
-{
- int16x8_t result;
- __asm__ ("uzp1 %0.8h,%1.8h,%2.8h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vuzp1q_s32 (int32x4_t a, int32x4_t b)
-{
- int32x4_t result;
- __asm__ ("uzp1 %0.4s,%1.4s,%2.4s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vuzp1q_s64 (int64x2_t a, int64x2_t b)
-{
- int64x2_t result;
- __asm__ ("uzp1 %0.2d,%1.2d,%2.2d"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vuzp1q_u8 (uint8x16_t a, uint8x16_t b)
-{
- uint8x16_t result;
- __asm__ ("uzp1 %0.16b,%1.16b,%2.16b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vuzp1q_u16 (uint16x8_t a, uint16x8_t b)
-{
- uint16x8_t result;
- __asm__ ("uzp1 %0.8h,%1.8h,%2.8h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vuzp1q_u32 (uint32x4_t a, uint32x4_t b)
-{
- uint32x4_t result;
- __asm__ ("uzp1 %0.4s,%1.4s,%2.4s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vuzp1q_u64 (uint64x2_t a, uint64x2_t b)
-{
- uint64x2_t result;
- __asm__ ("uzp1 %0.2d,%1.2d,%2.2d"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vuzp2_f32 (float32x2_t a, float32x2_t b)
-{
- float32x2_t result;
- __asm__ ("uzp2 %0.2s,%1.2s,%2.2s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vuzp2_p8 (poly8x8_t a, poly8x8_t b)
-{
- poly8x8_t result;
- __asm__ ("uzp2 %0.8b,%1.8b,%2.8b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
-vuzp2_p16 (poly16x4_t a, poly16x4_t b)
-{
- poly16x4_t result;
- __asm__ ("uzp2 %0.4h,%1.4h,%2.4h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vuzp2_s8 (int8x8_t a, int8x8_t b)
-{
- int8x8_t result;
- __asm__ ("uzp2 %0.8b,%1.8b,%2.8b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vuzp2_s16 (int16x4_t a, int16x4_t b)
-{
- int16x4_t result;
- __asm__ ("uzp2 %0.4h,%1.4h,%2.4h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vuzp2_s32 (int32x2_t a, int32x2_t b)
-{
- int32x2_t result;
- __asm__ ("uzp2 %0.2s,%1.2s,%2.2s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vuzp2_u8 (uint8x8_t a, uint8x8_t b)
-{
- uint8x8_t result;
- __asm__ ("uzp2 %0.8b,%1.8b,%2.8b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vuzp2_u16 (uint16x4_t a, uint16x4_t b)
-{
- uint16x4_t result;
- __asm__ ("uzp2 %0.4h,%1.4h,%2.4h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vuzp2_u32 (uint32x2_t a, uint32x2_t b)
-{
- uint32x2_t result;
- __asm__ ("uzp2 %0.2s,%1.2s,%2.2s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vuzp2q_f32 (float32x4_t a, float32x4_t b)
-{
- float32x4_t result;
- __asm__ ("uzp2 %0.4s,%1.4s,%2.4s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
-vuzp2q_f64 (float64x2_t a, float64x2_t b)
-{
- float64x2_t result;
- __asm__ ("uzp2 %0.2d,%1.2d,%2.2d"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
-vuzp2q_p8 (poly8x16_t a, poly8x16_t b)
-{
- poly8x16_t result;
- __asm__ ("uzp2 %0.16b,%1.16b,%2.16b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
-vuzp2q_p16 (poly16x8_t a, poly16x8_t b)
-{
- poly16x8_t result;
- __asm__ ("uzp2 %0.8h,%1.8h,%2.8h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vuzp2q_s8 (int8x16_t a, int8x16_t b)
-{
- int8x16_t result;
- __asm__ ("uzp2 %0.16b,%1.16b,%2.16b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vuzp2q_s16 (int16x8_t a, int16x8_t b)
-{
- int16x8_t result;
- __asm__ ("uzp2 %0.8h,%1.8h,%2.8h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vuzp2q_s32 (int32x4_t a, int32x4_t b)
-{
- int32x4_t result;
- __asm__ ("uzp2 %0.4s,%1.4s,%2.4s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vuzp2q_s64 (int64x2_t a, int64x2_t b)
-{
- int64x2_t result;
- __asm__ ("uzp2 %0.2d,%1.2d,%2.2d"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vuzp2q_u8 (uint8x16_t a, uint8x16_t b)
-{
- uint8x16_t result;
- __asm__ ("uzp2 %0.16b,%1.16b,%2.16b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vuzp2q_u16 (uint16x8_t a, uint16x8_t b)
-{
- uint16x8_t result;
- __asm__ ("uzp2 %0.8h,%1.8h,%2.8h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vuzp2q_u32 (uint32x4_t a, uint32x4_t b)
-{
- uint32x4_t result;
- __asm__ ("uzp2 %0.4s,%1.4s,%2.4s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vuzp2q_u64 (uint64x2_t a, uint64x2_t b)
-{
- uint64x2_t result;
- __asm__ ("uzp2 %0.2d,%1.2d,%2.2d"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
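/* Illustrative usage sketch, not part of the original header: uzp1
   collects the even-indexed lanes of the two inputs (taken as one
   concatenated sequence) and uzp2 the odd-indexed lanes, so the pair
   de-interleaves packed data such as alternating real/imaginary
   values.  The helper name split_even_odd is hypothetical.  */
#include <arm_neon.h>

static inline void
split_even_odd (int16x8_t packed_lo, int16x8_t packed_hi,
                int16x8_t *even, int16x8_t *odd)
{
  *even = vuzp1q_s16 (packed_lo, packed_hi);
  *odd  = vuzp2q_s16 (packed_lo, packed_hi);
}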
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vzip1_f32 (float32x2_t a, float32x2_t b)
-{
- float32x2_t result;
- __asm__ ("zip1 %0.2s,%1.2s,%2.2s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vzip1_p8 (poly8x8_t a, poly8x8_t b)
-{
- poly8x8_t result;
- __asm__ ("zip1 %0.8b,%1.8b,%2.8b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
-vzip1_p16 (poly16x4_t a, poly16x4_t b)
-{
- poly16x4_t result;
- __asm__ ("zip1 %0.4h,%1.4h,%2.4h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vzip1_s8 (int8x8_t a, int8x8_t b)
-{
- int8x8_t result;
- __asm__ ("zip1 %0.8b,%1.8b,%2.8b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vzip1_s16 (int16x4_t a, int16x4_t b)
-{
- int16x4_t result;
- __asm__ ("zip1 %0.4h,%1.4h,%2.4h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vzip1_s32 (int32x2_t a, int32x2_t b)
-{
- int32x2_t result;
- __asm__ ("zip1 %0.2s,%1.2s,%2.2s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vzip1_u8 (uint8x8_t a, uint8x8_t b)
-{
- uint8x8_t result;
- __asm__ ("zip1 %0.8b,%1.8b,%2.8b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vzip1_u16 (uint16x4_t a, uint16x4_t b)
-{
- uint16x4_t result;
- __asm__ ("zip1 %0.4h,%1.4h,%2.4h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vzip1_u32 (uint32x2_t a, uint32x2_t b)
-{
- uint32x2_t result;
- __asm__ ("zip1 %0.2s,%1.2s,%2.2s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vzip1q_f32 (float32x4_t a, float32x4_t b)
-{
- float32x4_t result;
- __asm__ ("zip1 %0.4s,%1.4s,%2.4s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
-vzip1q_f64 (float64x2_t a, float64x2_t b)
-{
- float64x2_t result;
- __asm__ ("zip1 %0.2d,%1.2d,%2.2d"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
-vzip1q_p8 (poly8x16_t a, poly8x16_t b)
-{
- poly8x16_t result;
- __asm__ ("zip1 %0.16b,%1.16b,%2.16b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
-vzip1q_p16 (poly16x8_t a, poly16x8_t b)
-{
- poly16x8_t result;
- __asm__ ("zip1 %0.8h,%1.8h,%2.8h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vzip1q_s8 (int8x16_t a, int8x16_t b)
-{
- int8x16_t result;
- __asm__ ("zip1 %0.16b,%1.16b,%2.16b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vzip1q_s16 (int16x8_t a, int16x8_t b)
-{
- int16x8_t result;
- __asm__ ("zip1 %0.8h,%1.8h,%2.8h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vzip1q_s32 (int32x4_t a, int32x4_t b)
-{
- int32x4_t result;
- __asm__ ("zip1 %0.4s,%1.4s,%2.4s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vzip1q_s64 (int64x2_t a, int64x2_t b)
-{
- int64x2_t result;
- __asm__ ("zip1 %0.2d,%1.2d,%2.2d"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vzip1q_u8 (uint8x16_t a, uint8x16_t b)
-{
- uint8x16_t result;
- __asm__ ("zip1 %0.16b,%1.16b,%2.16b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vzip1q_u16 (uint16x8_t a, uint16x8_t b)
-{
- uint16x8_t result;
- __asm__ ("zip1 %0.8h,%1.8h,%2.8h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vzip1q_u32 (uint32x4_t a, uint32x4_t b)
-{
- uint32x4_t result;
- __asm__ ("zip1 %0.4s,%1.4s,%2.4s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vzip1q_u64 (uint64x2_t a, uint64x2_t b)
-{
- uint64x2_t result;
- __asm__ ("zip1 %0.2d,%1.2d,%2.2d"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vzip2_f32 (float32x2_t a, float32x2_t b)
-{
- float32x2_t result;
- __asm__ ("zip2 %0.2s,%1.2s,%2.2s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vzip2_p8 (poly8x8_t a, poly8x8_t b)
-{
- poly8x8_t result;
- __asm__ ("zip2 %0.8b,%1.8b,%2.8b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
-vzip2_p16 (poly16x4_t a, poly16x4_t b)
-{
- poly16x4_t result;
- __asm__ ("zip2 %0.4h,%1.4h,%2.4h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vzip2_s8 (int8x8_t a, int8x8_t b)
-{
- int8x8_t result;
- __asm__ ("zip2 %0.8b,%1.8b,%2.8b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vzip2_s16 (int16x4_t a, int16x4_t b)
-{
- int16x4_t result;
- __asm__ ("zip2 %0.4h,%1.4h,%2.4h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vzip2_s32 (int32x2_t a, int32x2_t b)
-{
- int32x2_t result;
- __asm__ ("zip2 %0.2s,%1.2s,%2.2s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vzip2_u8 (uint8x8_t a, uint8x8_t b)
-{
- uint8x8_t result;
- __asm__ ("zip2 %0.8b,%1.8b,%2.8b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vzip2_u16 (uint16x4_t a, uint16x4_t b)
-{
- uint16x4_t result;
- __asm__ ("zip2 %0.4h,%1.4h,%2.4h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vzip2_u32 (uint32x2_t a, uint32x2_t b)
-{
- uint32x2_t result;
- __asm__ ("zip2 %0.2s,%1.2s,%2.2s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vzip2q_f32 (float32x4_t a, float32x4_t b)
-{
- float32x4_t result;
- __asm__ ("zip2 %0.4s,%1.4s,%2.4s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
-vzip2q_f64 (float64x2_t a, float64x2_t b)
-{
- float64x2_t result;
- __asm__ ("zip2 %0.2d,%1.2d,%2.2d"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
-vzip2q_p8 (poly8x16_t a, poly8x16_t b)
-{
- poly8x16_t result;
- __asm__ ("zip2 %0.16b,%1.16b,%2.16b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
-vzip2q_p16 (poly16x8_t a, poly16x8_t b)
-{
- poly16x8_t result;
- __asm__ ("zip2 %0.8h,%1.8h,%2.8h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vzip2q_s8 (int8x16_t a, int8x16_t b)
-{
- int8x16_t result;
- __asm__ ("zip2 %0.16b,%1.16b,%2.16b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vzip2q_s16 (int16x8_t a, int16x8_t b)
-{
- int16x8_t result;
- __asm__ ("zip2 %0.8h,%1.8h,%2.8h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vzip2q_s32 (int32x4_t a, int32x4_t b)
-{
- int32x4_t result;
- __asm__ ("zip2 %0.4s,%1.4s,%2.4s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vzip2q_s64 (int64x2_t a, int64x2_t b)
-{
- int64x2_t result;
- __asm__ ("zip2 %0.2d,%1.2d,%2.2d"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vzip2q_u8 (uint8x16_t a, uint8x16_t b)
-{
- uint8x16_t result;
- __asm__ ("zip2 %0.16b,%1.16b,%2.16b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vzip2q_u16 (uint16x8_t a, uint16x8_t b)
-{
- uint16x8_t result;
- __asm__ ("zip2 %0.8h,%1.8h,%2.8h"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vzip2q_u32 (uint32x4_t a, uint32x4_t b)
-{
- uint32x4_t result;
- __asm__ ("zip2 %0.4s,%1.4s,%2.4s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vzip2q_u64 (uint64x2_t a, uint64x2_t b)
-{
- uint64x2_t result;
- __asm__ ("zip2 %0.2d,%1.2d,%2.2d"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
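/* Illustrative usage sketch; the function below is a hypothetical example,
   not one of the header's intrinsics.  vzip1* interleaves the low halves of
   its two operands, vzip2* the high halves.  */
static void __attribute__ ((__unused__))
__neon_zip_example (void)
{
  int32x4_t a = {0, 1, 2, 3};
  int32x4_t b = {10, 11, 12, 13};
  int32x4_t lo = vzip1q_s32 (a, b);   /* {0, 10, 1, 11}.  */
  int32x4_t hi = vzip2q_s32 (a, b);   /* {2, 12, 3, 13}.  */
  (void) lo;
  (void) hi;
}
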
-/* End of temporary inline asm implementations. */
-
-/* Start of temporary inline asm for vldn, vstn and friends. */
-
-/* Create struct element types for duplicating loads.
-
- Create 2 element structures of:
-
- +------+----+----+----+----+
- | | 8 | 16 | 32 | 64 |
- +------+----+----+----+----+
- |int | Y | Y | N | N |
- +------+----+----+----+----+
- |uint | Y | Y | N | N |
- +------+----+----+----+----+
- |float | - | - | N | N |
- +------+----+----+----+----+
- |poly | Y | Y | - | - |
- +------+----+----+----+----+
-
- Create 3 element structures of:
-
- +------+----+----+----+----+
- | | 8 | 16 | 32 | 64 |
- +------+----+----+----+----+
- |int | Y | Y | Y | Y |
- +------+----+----+----+----+
- |uint | Y | Y | Y | Y |
- +------+----+----+----+----+
- |float | - | - | Y | Y |
- +------+----+----+----+----+
- |poly | Y | Y | - | - |
- +------+----+----+----+----+
-
- Create 4 element structures of:
-
- +------+----+----+----+----+
- | | 8 | 16 | 32 | 64 |
- +------+----+----+----+----+
- |int | Y | N | N | Y |
- +------+----+----+----+----+
- |uint | Y | N | N | Y |
- +------+----+----+----+----+
- |float | - | - | N | Y |
- +------+----+----+----+----+
- |poly | Y | N | - | - |
- +------+----+----+----+----+
-
- This is required for casting memory reference. */
-#define __STRUCTN(t, sz, nelem) \
- typedef struct t ## sz ## x ## nelem ## _t { \
- t ## sz ## _t val[nelem]; \
- } t ## sz ## x ## nelem ## _t;
-
-/* 2-element structs. */
-__STRUCTN (int, 8, 2)
-__STRUCTN (int, 16, 2)
-__STRUCTN (uint, 8, 2)
-__STRUCTN (uint, 16, 2)
-__STRUCTN (poly, 8, 2)
-__STRUCTN (poly, 16, 2)
-/* 3-element structs. */
-__STRUCTN (int, 8, 3)
-__STRUCTN (int, 16, 3)
-__STRUCTN (int, 32, 3)
-__STRUCTN (int, 64, 3)
-__STRUCTN (uint, 8, 3)
-__STRUCTN (uint, 16, 3)
-__STRUCTN (uint, 32, 3)
-__STRUCTN (uint, 64, 3)
-__STRUCTN (float, 32, 3)
-__STRUCTN (float, 64, 3)
-__STRUCTN (poly, 8, 3)
-__STRUCTN (poly, 16, 3)
-/* 4-element structs. */
-__STRUCTN (int, 8, 4)
-__STRUCTN (int, 64, 4)
-__STRUCTN (uint, 8, 4)
-__STRUCTN (uint, 64, 4)
-__STRUCTN (poly, 8, 4)
-__STRUCTN (float, 64, 4)
-#undef __STRUCTN
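
/* For illustration only (not part of the header's interface):
   __STRUCTN (int, 8, 2) above expands to

     typedef struct int8x2_t
     {
       int8_t val[2];
     } int8x2_t;

   i.e. a plain two-byte aggregate whose size matches the memory touched by a
   two-register duplicating load of 8-bit elements, so that
   *(const int8x2_t *) ptr can be used as a correctly sized "Q" memory
   operand in the asm statements below.  */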
-
-#define __LD2R_FUNC(rettype, structtype, ptrtype, \
- regsuffix, funcsuffix, Q) \
- __extension__ static __inline rettype \
- __attribute__ ((__always_inline__)) \
- vld2 ## Q ## _dup_ ## funcsuffix (const ptrtype *ptr) \
- { \
- rettype result; \
- __asm__ ("ld2r {v16." #regsuffix ", v17." #regsuffix "}, %1\n\t" \
- "st1 {v16." #regsuffix ", v17." #regsuffix "}, %0\n\t" \
- : "=Q"(result) \
- : "Q"(*(const structtype *)ptr) \
- : "memory", "v16", "v17"); \
- return result; \
- }
-
-__LD2R_FUNC (float32x2x2_t, float32x2_t, float32_t, 2s, f32,)
-__LD2R_FUNC (float64x1x2_t, float64x2_t, float64_t, 1d, f64,)
-__LD2R_FUNC (poly8x8x2_t, poly8x2_t, poly8_t, 8b, p8,)
-__LD2R_FUNC (poly16x4x2_t, poly16x2_t, poly16_t, 4h, p16,)
-__LD2R_FUNC (int8x8x2_t, int8x2_t, int8_t, 8b, s8,)
-__LD2R_FUNC (int16x4x2_t, int16x2_t, int16_t, 4h, s16,)
-__LD2R_FUNC (int32x2x2_t, int32x2_t, int32_t, 2s, s32,)
-__LD2R_FUNC (int64x1x2_t, int64x2_t, int64_t, 1d, s64,)
-__LD2R_FUNC (uint8x8x2_t, uint8x2_t, uint8_t, 8b, u8,)
-__LD2R_FUNC (uint16x4x2_t, uint16x2_t, uint16_t, 4h, u16,)
-__LD2R_FUNC (uint32x2x2_t, uint32x2_t, uint32_t, 2s, u32,)
-__LD2R_FUNC (uint64x1x2_t, uint64x2_t, uint64_t, 1d, u64,)
-__LD2R_FUNC (float32x4x2_t, float32x2_t, float32_t, 4s, f32, q)
-__LD2R_FUNC (float64x2x2_t, float64x2_t, float64_t, 2d, f64, q)
-__LD2R_FUNC (poly8x16x2_t, poly8x2_t, poly8_t, 16b, p8, q)
-__LD2R_FUNC (poly16x8x2_t, poly16x2_t, poly16_t, 8h, p16, q)
-__LD2R_FUNC (int8x16x2_t, int8x2_t, int8_t, 16b, s8, q)
-__LD2R_FUNC (int16x8x2_t, int16x2_t, int16_t, 8h, s16, q)
-__LD2R_FUNC (int32x4x2_t, int32x2_t, int32_t, 4s, s32, q)
-__LD2R_FUNC (int64x2x2_t, int64x2_t, int64_t, 2d, s64, q)
-__LD2R_FUNC (uint8x16x2_t, uint8x2_t, uint8_t, 16b, u8, q)
-__LD2R_FUNC (uint16x8x2_t, uint16x2_t, uint16_t, 8h, u16, q)
-__LD2R_FUNC (uint32x4x2_t, uint32x2_t, uint32_t, 4s, u32, q)
-__LD2R_FUNC (uint64x2x2_t, uint64x2_t, uint64_t, 2d, u64, q)
-
-#define __LD2_LANE_FUNC(rettype, ptrtype, regsuffix, \
- lnsuffix, funcsuffix, Q) \
- __extension__ static __inline rettype \
- __attribute__ ((__always_inline__)) \
- vld2 ## Q ## _lane_ ## funcsuffix (const ptrtype *ptr, \
- rettype b, const int c) \
- { \
- rettype result; \
- __asm__ ("ld1 {v16." #regsuffix ", v17." #regsuffix "}, %1\n\t" \
- "ld2 {v16." #lnsuffix ", v17." #lnsuffix "}[%3], %2\n\t" \
- "st1 {v16." #regsuffix ", v17." #regsuffix "}, %0\n\t" \
- : "=Q"(result) \
- : "Q"(b), "Q"(*(const rettype *)ptr), "i"(c) \
- : "memory", "v16", "v17"); \
- return result; \
- }
-
-__LD2_LANE_FUNC (int8x8x2_t, int8_t, 8b, b, s8,)
-__LD2_LANE_FUNC (float32x2x2_t, float32_t, 2s, s, f32,)
-__LD2_LANE_FUNC (float64x1x2_t, float64_t, 1d, d, f64,)
-__LD2_LANE_FUNC (poly8x8x2_t, poly8_t, 8b, b, p8,)
-__LD2_LANE_FUNC (poly16x4x2_t, poly16_t, 4h, h, p16,)
-__LD2_LANE_FUNC (int16x4x2_t, int16_t, 4h, h, s16,)
-__LD2_LANE_FUNC (int32x2x2_t, int32_t, 2s, s, s32,)
-__LD2_LANE_FUNC (int64x1x2_t, int64_t, 1d, d, s64,)
-__LD2_LANE_FUNC (uint8x8x2_t, uint8_t, 8b, b, u8,)
-__LD2_LANE_FUNC (uint16x4x2_t, uint16_t, 4h, h, u16,)
-__LD2_LANE_FUNC (uint32x2x2_t, uint32_t, 2s, s, u32,)
-__LD2_LANE_FUNC (uint64x1x2_t, uint64_t, 1d, d, u64,)
-__LD2_LANE_FUNC (float32x4x2_t, float32_t, 4s, s, f32, q)
-__LD2_LANE_FUNC (float64x2x2_t, float64_t, 2d, d, f64, q)
-__LD2_LANE_FUNC (poly8x16x2_t, poly8_t, 16b, b, p8, q)
-__LD2_LANE_FUNC (poly16x8x2_t, poly16_t, 8h, h, p16, q)
-__LD2_LANE_FUNC (int8x16x2_t, int8_t, 16b, b, s8, q)
-__LD2_LANE_FUNC (int16x8x2_t, int16_t, 8h, h, s16, q)
-__LD2_LANE_FUNC (int32x4x2_t, int32_t, 4s, s, s32, q)
-__LD2_LANE_FUNC (int64x2x2_t, int64_t, 2d, d, s64, q)
-__LD2_LANE_FUNC (uint8x16x2_t, uint8_t, 16b, b, u8, q)
-__LD2_LANE_FUNC (uint16x8x2_t, uint16_t, 8h, h, u16, q)
-__LD2_LANE_FUNC (uint32x4x2_t, uint32_t, 4s, s, u32, q)
-__LD2_LANE_FUNC (uint64x2x2_t, uint64_t, 2d, d, u64, q)
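
/* Illustrative usage sketch; the function below is a hypothetical example,
   not one of the header's intrinsics.  vld2_dup_* replicates one
   de-interleaved element pair across both result vectors, while vld2_lane_*
   overwrites a single lane of each vector in an existing pair.  */
static void __attribute__ ((__unused__))
__neon_ld2_example (const uint8_t *buf, uint8x8x2_t acc)
{
  /* With buf[0] == 0x11 and buf[1] == 0x22, dup.val[0] is all 0x11 and
     dup.val[1] is all 0x22.  */
  uint8x8x2_t dup = vld2_dup_u8 (buf);
  /* Only lane 3 of acc.val[0] and acc.val[1] is replaced, with buf[0] and
     buf[1] respectively.  */
  uint8x8x2_t lane = vld2_lane_u8 (buf, acc, 3);
  (void) dup;
  (void) lane;
}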
-
-#define __LD3R_FUNC(rettype, structtype, ptrtype, \
- regsuffix, funcsuffix, Q) \
- __extension__ static __inline rettype \
- __attribute__ ((__always_inline__)) \
- vld3 ## Q ## _dup_ ## funcsuffix (const ptrtype *ptr) \
- { \
- rettype result; \
- __asm__ ("ld3r {v16." #regsuffix " - v18." #regsuffix "}, %1\n\t" \
- "st1 {v16." #regsuffix " - v18." #regsuffix "}, %0\n\t" \
- : "=Q"(result) \
- : "Q"(*(const structtype *)ptr) \
- : "memory", "v16", "v17", "v18"); \
- return result; \
- }
-
-__LD3R_FUNC (float32x2x3_t, float32x3_t, float32_t, 2s, f32,)
-__LD3R_FUNC (float64x1x3_t, float64x3_t, float64_t, 1d, f64,)
-__LD3R_FUNC (poly8x8x3_t, poly8x3_t, poly8_t, 8b, p8,)
-__LD3R_FUNC (poly16x4x3_t, poly16x3_t, poly16_t, 4h, p16,)
-__LD3R_FUNC (int8x8x3_t, int8x3_t, int8_t, 8b, s8,)
-__LD3R_FUNC (int16x4x3_t, int16x3_t, int16_t, 4h, s16,)
-__LD3R_FUNC (int32x2x3_t, int32x3_t, int32_t, 2s, s32,)
-__LD3R_FUNC (int64x1x3_t, int64x3_t, int64_t, 1d, s64,)
-__LD3R_FUNC (uint8x8x3_t, uint8x3_t, uint8_t, 8b, u8,)
-__LD3R_FUNC (uint16x4x3_t, uint16x3_t, uint16_t, 4h, u16,)
-__LD3R_FUNC (uint32x2x3_t, uint32x3_t, uint32_t, 2s, u32,)
-__LD3R_FUNC (uint64x1x3_t, uint64x3_t, uint64_t, 1d, u64,)
-__LD3R_FUNC (float32x4x3_t, float32x3_t, float32_t, 4s, f32, q)
-__LD3R_FUNC (float64x2x3_t, float64x3_t, float64_t, 2d, f64, q)
-__LD3R_FUNC (poly8x16x3_t, poly8x3_t, poly8_t, 16b, p8, q)
-__LD3R_FUNC (poly16x8x3_t, poly16x3_t, poly16_t, 8h, p16, q)
-__LD3R_FUNC (int8x16x3_t, int8x3_t, int8_t, 16b, s8, q)
-__LD3R_FUNC (int16x8x3_t, int16x3_t, int16_t, 8h, s16, q)
-__LD3R_FUNC (int32x4x3_t, int32x3_t, int32_t, 4s, s32, q)
-__LD3R_FUNC (int64x2x3_t, int64x3_t, int64_t, 2d, s64, q)
-__LD3R_FUNC (uint8x16x3_t, uint8x3_t, uint8_t, 16b, u8, q)
-__LD3R_FUNC (uint16x8x3_t, uint16x3_t, uint16_t, 8h, u16, q)
-__LD3R_FUNC (uint32x4x3_t, uint32x3_t, uint32_t, 4s, u32, q)
-__LD3R_FUNC (uint64x2x3_t, uint64x3_t, uint64_t, 2d, u64, q)
-
-#define __LD3_LANE_FUNC(rettype, ptrtype, regsuffix, \
- lnsuffix, funcsuffix, Q) \
- __extension__ static __inline rettype \
- __attribute__ ((__always_inline__)) \
- vld3 ## Q ## _lane_ ## funcsuffix (const ptrtype *ptr, \
- rettype b, const int c) \
- { \
- rettype result; \
- __asm__ ("ld1 {v16." #regsuffix " - v18." #regsuffix "}, %1\n\t" \
- "ld3 {v16." #lnsuffix " - v18." #lnsuffix "}[%3], %2\n\t" \
- "st1 {v16." #regsuffix " - v18." #regsuffix "}, %0\n\t" \
- : "=Q"(result) \
- : "Q"(b), "Q"(*(const rettype *)ptr), "i"(c) \
- : "memory", "v16", "v17", "v18"); \
- return result; \
- }
-
-__LD3_LANE_FUNC (int8x8x3_t, int8_t, 8b, b, s8,)
-__LD3_LANE_FUNC (float32x2x3_t, float32_t, 2s, s, f32,)
-__LD3_LANE_FUNC (float64x1x3_t, float64_t, 1d, d, f64,)
-__LD3_LANE_FUNC (poly8x8x3_t, poly8_t, 8b, b, p8,)
-__LD3_LANE_FUNC (poly16x4x3_t, poly16_t, 4h, h, p16,)
-__LD3_LANE_FUNC (int16x4x3_t, int16_t, 4h, h, s16,)
-__LD3_LANE_FUNC (int32x2x3_t, int32_t, 2s, s, s32,)
-__LD3_LANE_FUNC (int64x1x3_t, int64_t, 1d, d, s64,)
-__LD3_LANE_FUNC (uint8x8x3_t, uint8_t, 8b, b, u8,)
-__LD3_LANE_FUNC (uint16x4x3_t, uint16_t, 4h, h, u16,)
-__LD3_LANE_FUNC (uint32x2x3_t, uint32_t, 2s, s, u32,)
-__LD3_LANE_FUNC (uint64x1x3_t, uint64_t, 1d, d, u64,)
-__LD3_LANE_FUNC (float32x4x3_t, float32_t, 4s, s, f32, q)
-__LD3_LANE_FUNC (float64x2x3_t, float64_t, 2d, d, f64, q)
-__LD3_LANE_FUNC (poly8x16x3_t, poly8_t, 16b, b, p8, q)
-__LD3_LANE_FUNC (poly16x8x3_t, poly16_t, 8h, h, p16, q)
-__LD3_LANE_FUNC (int8x16x3_t, int8_t, 16b, b, s8, q)
-__LD3_LANE_FUNC (int16x8x3_t, int16_t, 8h, h, s16, q)
-__LD3_LANE_FUNC (int32x4x3_t, int32_t, 4s, s, s32, q)
-__LD3_LANE_FUNC (int64x2x3_t, int64_t, 2d, d, s64, q)
-__LD3_LANE_FUNC (uint8x16x3_t, uint8_t, 16b, b, u8, q)
-__LD3_LANE_FUNC (uint16x8x3_t, uint16_t, 8h, h, u16, q)
-__LD3_LANE_FUNC (uint32x4x3_t, uint32_t, 4s, s, u32, q)
-__LD3_LANE_FUNC (uint64x2x3_t, uint64_t, 2d, d, u64, q)
-
-#define __LD4R_FUNC(rettype, structtype, ptrtype, \
- regsuffix, funcsuffix, Q) \
- __extension__ static __inline rettype \
- __attribute__ ((__always_inline__)) \
- vld4 ## Q ## _dup_ ## funcsuffix (const ptrtype *ptr) \
- { \
- rettype result; \
- __asm__ ("ld4r {v16." #regsuffix " - v19." #regsuffix "}, %1\n\t" \
- "st1 {v16." #regsuffix " - v19." #regsuffix "}, %0\n\t" \
- : "=Q"(result) \
- : "Q"(*(const structtype *)ptr) \
- : "memory", "v16", "v17", "v18", "v19"); \
- return result; \
- }
-
-__LD4R_FUNC (float32x2x4_t, float32x4_t, float32_t, 2s, f32,)
-__LD4R_FUNC (float64x1x4_t, float64x4_t, float64_t, 1d, f64,)
-__LD4R_FUNC (poly8x8x4_t, poly8x4_t, poly8_t, 8b, p8,)
-__LD4R_FUNC (poly16x4x4_t, poly16x4_t, poly16_t, 4h, p16,)
-__LD4R_FUNC (int8x8x4_t, int8x4_t, int8_t, 8b, s8,)
-__LD4R_FUNC (int16x4x4_t, int16x4_t, int16_t, 4h, s16,)
-__LD4R_FUNC (int32x2x4_t, int32x4_t, int32_t, 2s, s32,)
-__LD4R_FUNC (int64x1x4_t, int64x4_t, int64_t, 1d, s64,)
-__LD4R_FUNC (uint8x8x4_t, uint8x4_t, uint8_t, 8b, u8,)
-__LD4R_FUNC (uint16x4x4_t, uint16x4_t, uint16_t, 4h, u16,)
-__LD4R_FUNC (uint32x2x4_t, uint32x4_t, uint32_t, 2s, u32,)
-__LD4R_FUNC (uint64x1x4_t, uint64x4_t, uint64_t, 1d, u64,)
-__LD4R_FUNC (float32x4x4_t, float32x4_t, float32_t, 4s, f32, q)
-__LD4R_FUNC (float64x2x4_t, float64x4_t, float64_t, 2d, f64, q)
-__LD4R_FUNC (poly8x16x4_t, poly8x4_t, poly8_t, 16b, p8, q)
-__LD4R_FUNC (poly16x8x4_t, poly16x4_t, poly16_t, 8h, p16, q)
-__LD4R_FUNC (int8x16x4_t, int8x4_t, int8_t, 16b, s8, q)
-__LD4R_FUNC (int16x8x4_t, int16x4_t, int16_t, 8h, s16, q)
-__LD4R_FUNC (int32x4x4_t, int32x4_t, int32_t, 4s, s32, q)
-__LD4R_FUNC (int64x2x4_t, int64x4_t, int64_t, 2d, s64, q)
-__LD4R_FUNC (uint8x16x4_t, uint8x4_t, uint8_t, 16b, u8, q)
-__LD4R_FUNC (uint16x8x4_t, uint16x4_t, uint16_t, 8h, u16, q)
-__LD4R_FUNC (uint32x4x4_t, uint32x4_t, uint32_t, 4s, u32, q)
-__LD4R_FUNC (uint64x2x4_t, uint64x4_t, uint64_t, 2d, u64, q)
-
-#define __LD4_LANE_FUNC(rettype, ptrtype, regsuffix, \
- lnsuffix, funcsuffix, Q) \
- __extension__ static __inline rettype \
- __attribute__ ((__always_inline__)) \
- vld4 ## Q ## _lane_ ## funcsuffix (const ptrtype *ptr, \
- rettype b, const int c) \
- { \
- rettype result; \
- __asm__ ("ld1 {v16." #regsuffix " - v19." #regsuffix "}, %1\n\t" \
- "ld4 {v16." #lnsuffix " - v19." #lnsuffix "}[%3], %2\n\t" \
- "st1 {v16." #regsuffix " - v19." #regsuffix "}, %0\n\t" \
- : "=Q"(result) \
- : "Q"(b), "Q"(*(const rettype *)ptr), "i"(c) \
- : "memory", "v16", "v17", "v18", "v19"); \
- return result; \
- }
-
-__LD4_LANE_FUNC (int8x8x4_t, int8_t, 8b, b, s8,)
-__LD4_LANE_FUNC (float32x2x4_t, float32_t, 2s, s, f32,)
-__LD4_LANE_FUNC (float64x1x4_t, float64_t, 1d, d, f64,)
-__LD4_LANE_FUNC (poly8x8x4_t, poly8_t, 8b, b, p8,)
-__LD4_LANE_FUNC (poly16x4x4_t, poly16_t, 4h, h, p16,)
-__LD4_LANE_FUNC (int16x4x4_t, int16_t, 4h, h, s16,)
-__LD4_LANE_FUNC (int32x2x4_t, int32_t, 2s, s, s32,)
-__LD4_LANE_FUNC (int64x1x4_t, int64_t, 1d, d, s64,)
-__LD4_LANE_FUNC (uint8x8x4_t, uint8_t, 8b, b, u8,)
-__LD4_LANE_FUNC (uint16x4x4_t, uint16_t, 4h, h, u16,)
-__LD4_LANE_FUNC (uint32x2x4_t, uint32_t, 2s, s, u32,)
-__LD4_LANE_FUNC (uint64x1x4_t, uint64_t, 1d, d, u64,)
-__LD4_LANE_FUNC (float32x4x4_t, float32_t, 4s, s, f32, q)
-__LD4_LANE_FUNC (float64x2x4_t, float64_t, 2d, d, f64, q)
-__LD4_LANE_FUNC (poly8x16x4_t, poly8_t, 16b, b, p8, q)
-__LD4_LANE_FUNC (poly16x8x4_t, poly16_t, 8h, h, p16, q)
-__LD4_LANE_FUNC (int8x16x4_t, int8_t, 16b, b, s8, q)
-__LD4_LANE_FUNC (int16x8x4_t, int16_t, 8h, h, s16, q)
-__LD4_LANE_FUNC (int32x4x4_t, int32_t, 4s, s, s32, q)
-__LD4_LANE_FUNC (int64x2x4_t, int64_t, 2d, d, s64, q)
-__LD4_LANE_FUNC (uint8x16x4_t, uint8_t, 16b, b, u8, q)
-__LD4_LANE_FUNC (uint16x8x4_t, uint16_t, 8h, h, u16, q)
-__LD4_LANE_FUNC (uint32x4x4_t, uint32_t, 4s, s, u32, q)
-__LD4_LANE_FUNC (uint64x2x4_t, uint64_t, 2d, d, u64, q)
-
-#define __ST2_LANE_FUNC(intype, ptrtype, regsuffix, \
- lnsuffix, funcsuffix, Q) \
- __extension__ static __inline void \
- __attribute__ ((__always_inline__)) \
- vst2 ## Q ## _lane_ ## funcsuffix (const ptrtype *ptr, \
- intype b, const int c) \
- { \
- __asm__ ("ld1 {v16." #regsuffix ", v17." #regsuffix "}, %1\n\t" \
- "st2 {v16." #lnsuffix ", v17." #lnsuffix "}[%2], %0\n\t" \
- : "=Q"(*(intype *) ptr) \
- : "Q"(b), "i"(c) \
- : "memory", "v16", "v17"); \
- }
-
-__ST2_LANE_FUNC (int8x8x2_t, int8_t, 8b, b, s8,)
-__ST2_LANE_FUNC (float32x2x2_t, float32_t, 2s, s, f32,)
-__ST2_LANE_FUNC (float64x1x2_t, float64_t, 1d, d, f64,)
-__ST2_LANE_FUNC (poly8x8x2_t, poly8_t, 8b, b, p8,)
-__ST2_LANE_FUNC (poly16x4x2_t, poly16_t, 4h, h, p16,)
-__ST2_LANE_FUNC (int16x4x2_t, int16_t, 4h, h, s16,)
-__ST2_LANE_FUNC (int32x2x2_t, int32_t, 2s, s, s32,)
-__ST2_LANE_FUNC (int64x1x2_t, int64_t, 1d, d, s64,)
-__ST2_LANE_FUNC (uint8x8x2_t, uint8_t, 8b, b, u8,)
-__ST2_LANE_FUNC (uint16x4x2_t, uint16_t, 4h, h, u16,)
-__ST2_LANE_FUNC (uint32x2x2_t, uint32_t, 2s, s, u32,)
-__ST2_LANE_FUNC (uint64x1x2_t, uint64_t, 1d, d, u64,)
-__ST2_LANE_FUNC (float32x4x2_t, float32_t, 4s, s, f32, q)
-__ST2_LANE_FUNC (float64x2x2_t, float64_t, 2d, d, f64, q)
-__ST2_LANE_FUNC (poly8x16x2_t, poly8_t, 16b, b, p8, q)
-__ST2_LANE_FUNC (poly16x8x2_t, poly16_t, 8h, h, p16, q)
-__ST2_LANE_FUNC (int8x16x2_t, int8_t, 16b, b, s8, q)
-__ST2_LANE_FUNC (int16x8x2_t, int16_t, 8h, h, s16, q)
-__ST2_LANE_FUNC (int32x4x2_t, int32_t, 4s, s, s32, q)
-__ST2_LANE_FUNC (int64x2x2_t, int64_t, 2d, d, s64, q)
-__ST2_LANE_FUNC (uint8x16x2_t, uint8_t, 16b, b, u8, q)
-__ST2_LANE_FUNC (uint16x8x2_t, uint16_t, 8h, h, u16, q)
-__ST2_LANE_FUNC (uint32x4x2_t, uint32_t, 4s, s, u32, q)
-__ST2_LANE_FUNC (uint64x2x2_t, uint64_t, 2d, d, u64, q)
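
/* Illustrative usage sketch; the function below is a hypothetical example,
   not one of the header's intrinsics.  vst2_lane_* stores one interleaved
   element pair taken from the selected lane of each source vector.  */
static void __attribute__ ((__unused__))
__neon_st2_lane_example (int16_t *out, int16x4x2_t pair)
{
  /* Writes lane 2 of pair.val[0] to out[0] and lane 2 of pair.val[1]
     to out[1].  */
  vst2_lane_s16 (out, pair, 2);
}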
-
-#define __ST3_LANE_FUNC(intype, ptrtype, regsuffix, \
- lnsuffix, funcsuffix, Q) \
- __extension__ static __inline void \
- __attribute__ ((__always_inline__)) \
- vst3 ## Q ## _lane_ ## funcsuffix (const ptrtype *ptr, \
- intype b, const int c) \
- { \
- __asm__ ("ld1 {v16." #regsuffix " - v18." #regsuffix "}, %1\n\t" \
- "st3 {v16." #lnsuffix " - v18." #lnsuffix "}[%2], %0\n\t" \
- : "=Q"(*(intype *) ptr) \
- : "Q"(b), "i"(c) \
- : "memory", "v16", "v17", "v18"); \
- }
-
-__ST3_LANE_FUNC (int8x8x3_t, int8_t, 8b, b, s8,)
-__ST3_LANE_FUNC (float32x2x3_t, float32_t, 2s, s, f32,)
-__ST3_LANE_FUNC (float64x1x3_t, float64_t, 1d, d, f64,)
-__ST3_LANE_FUNC (poly8x8x3_t, poly8_t, 8b, b, p8,)
-__ST3_LANE_FUNC (poly16x4x3_t, poly16_t, 4h, h, p16,)
-__ST3_LANE_FUNC (int16x4x3_t, int16_t, 4h, h, s16,)
-__ST3_LANE_FUNC (int32x2x3_t, int32_t, 2s, s, s32,)
-__ST3_LANE_FUNC (int64x1x3_t, int64_t, 1d, d, s64,)
-__ST3_LANE_FUNC (uint8x8x3_t, uint8_t, 8b, b, u8,)
-__ST3_LANE_FUNC (uint16x4x3_t, uint16_t, 4h, h, u16,)
-__ST3_LANE_FUNC (uint32x2x3_t, uint32_t, 2s, s, u32,)
-__ST3_LANE_FUNC (uint64x1x3_t, uint64_t, 1d, d, u64,)
-__ST3_LANE_FUNC (float32x4x3_t, float32_t, 4s, s, f32, q)
-__ST3_LANE_FUNC (float64x2x3_t, float64_t, 2d, d, f64, q)
-__ST3_LANE_FUNC (poly8x16x3_t, poly8_t, 16b, b, p8, q)
-__ST3_LANE_FUNC (poly16x8x3_t, poly16_t, 8h, h, p16, q)
-__ST3_LANE_FUNC (int8x16x3_t, int8_t, 16b, b, s8, q)
-__ST3_LANE_FUNC (int16x8x3_t, int16_t, 8h, h, s16, q)
-__ST3_LANE_FUNC (int32x4x3_t, int32_t, 4s, s, s32, q)
-__ST3_LANE_FUNC (int64x2x3_t, int64_t, 2d, d, s64, q)
-__ST3_LANE_FUNC (uint8x16x3_t, uint8_t, 16b, b, u8, q)
-__ST3_LANE_FUNC (uint16x8x3_t, uint16_t, 8h, h, u16, q)
-__ST3_LANE_FUNC (uint32x4x3_t, uint32_t, 4s, s, u32, q)
-__ST3_LANE_FUNC (uint64x2x3_t, uint64_t, 2d, d, u64, q)
-
-#define __ST4_LANE_FUNC(intype, ptrtype, regsuffix, \
- lnsuffix, funcsuffix, Q) \
- __extension__ static __inline void \
- __attribute__ ((__always_inline__)) \
- vst4 ## Q ## _lane_ ## funcsuffix (const ptrtype *ptr, \
- intype b, const int c) \
- { \
- __asm__ ("ld1 {v16." #regsuffix " - v19." #regsuffix "}, %1\n\t" \
- "st4 {v16." #lnsuffix " - v19." #lnsuffix "}[%2], %0\n\t" \
- : "=Q"(*(intype *) ptr) \
- : "Q"(b), "i"(c) \
- : "memory", "v16", "v17", "v18", "v19"); \
- }
-
-__ST4_LANE_FUNC (int8x8x4_t, int8_t, 8b, b, s8,)
-__ST4_LANE_FUNC (float32x2x4_t, float32_t, 2s, s, f32,)
-__ST4_LANE_FUNC (float64x1x4_t, float64_t, 1d, d, f64,)
-__ST4_LANE_FUNC (poly8x8x4_t, poly8_t, 8b, b, p8,)
-__ST4_LANE_FUNC (poly16x4x4_t, poly16_t, 4h, h, p16,)
-__ST4_LANE_FUNC (int16x4x4_t, int16_t, 4h, h, s16,)
-__ST4_LANE_FUNC (int32x2x4_t, int32_t, 2s, s, s32,)
-__ST4_LANE_FUNC (int64x1x4_t, int64_t, 1d, d, s64,)
-__ST4_LANE_FUNC (uint8x8x4_t, uint8_t, 8b, b, u8,)
-__ST4_LANE_FUNC (uint16x4x4_t, uint16_t, 4h, h, u16,)
-__ST4_LANE_FUNC (uint32x2x4_t, uint32_t, 2s, s, u32,)
-__ST4_LANE_FUNC (uint64x1x4_t, uint64_t, 1d, d, u64,)
-__ST4_LANE_FUNC (float32x4x4_t, float32_t, 4s, s, f32, q)
-__ST4_LANE_FUNC (float64x2x4_t, float64_t, 2d, d, f64, q)
-__ST4_LANE_FUNC (poly8x16x4_t, poly8_t, 16b, b, p8, q)
-__ST4_LANE_FUNC (poly16x8x4_t, poly16_t, 8h, h, p16, q)
-__ST4_LANE_FUNC (int8x16x4_t, int8_t, 16b, b, s8, q)
-__ST4_LANE_FUNC (int16x8x4_t, int16_t, 8h, h, s16, q)
-__ST4_LANE_FUNC (int32x4x4_t, int32_t, 4s, s, s32, q)
-__ST4_LANE_FUNC (int64x2x4_t, int64_t, 2d, d, s64, q)
-__ST4_LANE_FUNC (uint8x16x4_t, uint8_t, 16b, b, u8, q)
-__ST4_LANE_FUNC (uint16x8x4_t, uint16_t, 8h, h, u16, q)
-__ST4_LANE_FUNC (uint32x4x4_t, uint32_t, 4s, s, u32, q)
-__ST4_LANE_FUNC (uint64x2x4_t, uint64_t, 2d, d, u64, q)
-
-__extension__ static __inline int64_t __attribute__ ((__always_inline__))
-vaddlv_s32 (int32x2_t a)
-{
- int64_t result;
- __asm__ ("saddlp %0.1d, %1.2s" : "=w"(result) : "w"(a) : );
- return result;
-}
-
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
-vaddlv_u32 (uint32x2_t a)
-{
- uint64_t result;
- __asm__ ("uaddlp %0.1d, %1.2s" : "=w"(result) : "w"(a) : );
- return result;
-}
-
-__extension__ static __inline int32_t __attribute__ ((__always_inline__))
-vaddv_s32 (int32x2_t a)
-{
- int32_t result;
- __asm__ ("addp %0.2s, %1.2s, %1.2s" : "=w"(result) : "w"(a) : );
- return result;
-}
-
-__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
-vaddv_u32 (uint32x2_t a)
-{
- uint32_t result;
- __asm__ ("addp %0.2s, %1.2s, %1.2s" : "=w"(result) : "w"(a) : );
- return result;
-}
-
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
-vmaxnmv_f32 (float32x2_t a)
-{
- float32_t result;
- __asm__ ("fmaxnmp %0.2s, %1.2s, %1.2s" : "=w"(result) : "w"(a) : );
- return result;
-}
-
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
-vminnmv_f32 (float32x2_t a)
-{
- float32_t result;
- __asm__ ("fminnmp %0.2s, %1.2s, %1.2s" : "=w"(result) : "w"(a) : );
- return result;
-}
-
-__extension__ static __inline float64_t __attribute__ ((__always_inline__))
-vmaxnmvq_f64 (float64x2_t a)
-{
- float64_t result;
- __asm__ ("fmaxnmp %0.2d, %1.2d, %1.2d" : "=w"(result) : "w"(a) : );
- return result;
-}
-
-__extension__ static __inline int32_t __attribute__ ((__always_inline__))
-vmaxv_s32 (int32x2_t a)
-{
- int32_t result;
- __asm__ ("smaxp %0.2s, %1.2s, %1.2s" : "=w"(result) : "w"(a) : );
- return result;
-}
-
-__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
-vmaxv_u32 (uint32x2_t a)
-{
- uint32_t result;
- __asm__ ("umaxp %0.2s, %1.2s, %1.2s" : "=w"(result) : "w"(a) : );
- return result;
-}
-
-__extension__ static __inline float64_t __attribute__ ((__always_inline__))
-vminnmvq_f64 (float64x2_t a)
-{
- float64_t result;
- __asm__ ("fminnmp %0.2d, %1.2d, %1.2d" : "=w"(result) : "w"(a) : );
- return result;
-}
-
-__extension__ static __inline int32_t __attribute__ ((__always_inline__))
-vminv_s32 (int32x2_t a)
-{
- int32_t result;
- __asm__ ("sminp %0.2s, %1.2s, %1.2s" : "=w"(result) : "w"(a) : );
- return result;
-}
-
-__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
-vminv_u32 (uint32x2_t a)
-{
- uint32_t result;
- __asm__ ("uminp %0.2s, %1.2s, %1.2s" : "=w"(result) : "w"(a) : );
- return result;
-}
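
/* Illustrative usage sketch; the function below is a hypothetical example,
   not one of the header's intrinsics.  The helpers above reduce a
   two-element vector to a scalar.  */
static void __attribute__ ((__unused__))
__neon_reduce_example (void)
{
  int32x2_t v = {3, 4};
  int32_t sum = vaddv_s32 (v);        /* 7.  */
  int64_t widened = vaddlv_s32 (v);   /* 7, accumulated in 64 bits.  */
  int32_t largest = vmaxv_s32 (v);    /* 4.  */
  (void) sum;
  (void) widened;
  (void) largest;
}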
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vpaddd_s64 (int64x2_t __a)
-{
- return __builtin_aarch64_addpdi (__a);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vqdmulh_laneq_s16 (int16x4_t __a, int16x8_t __b, const int __c)
-{
- return __builtin_aarch64_sqdmulh_laneqv4hi (__a, __b, __c);
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vqdmulh_laneq_s32 (int32x2_t __a, int32x4_t __b, const int __c)
-{
- return __builtin_aarch64_sqdmulh_laneqv2si (__a, __b, __c);
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vqdmulhq_laneq_s16 (int16x8_t __a, int16x8_t __b, const int __c)
-{
- return __builtin_aarch64_sqdmulh_laneqv8hi (__a, __b, __c);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vqdmulhq_laneq_s32 (int32x4_t __a, int32x4_t __b, const int __c)
-{
- return __builtin_aarch64_sqdmulh_laneqv4si (__a, __b, __c);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vqrdmulh_laneq_s16 (int16x4_t __a, int16x8_t __b, const int __c)
-{
- return __builtin_aarch64_sqrdmulh_laneqv4hi (__a, __b, __c);
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vqrdmulh_laneq_s32 (int32x2_t __a, int32x4_t __b, const int __c)
-{
- return __builtin_aarch64_sqrdmulh_laneqv2si (__a, __b, __c);
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vqrdmulhq_laneq_s16 (int16x8_t __a, int16x8_t __b, const int __c)
-{
- return __builtin_aarch64_sqrdmulh_laneqv8hi (__a, __b, __c);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vqrdmulhq_laneq_s32 (int32x4_t __a, int32x4_t __b, const int __c)
-{
- return __builtin_aarch64_sqrdmulh_laneqv4si (__a, __b, __c);
-}
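
/* Illustrative note (not part of the header): the _laneq forms multiply every
   lane of the first operand by one selected lane of a 128-bit second operand.
   Each result lane of vqdmulh_laneq_s16 is the saturated high half of twice
   the product; for example, with __a[i] == 16384 and __b[__c] == 16384 the
   result lane is 8192.  */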
-
-/* Table intrinsics. */
-
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vqtbl1_p8 (poly8x16_t a, uint8x8_t b)
-{
- poly8x8_t result;
- __asm__ ("tbl %0.8b, {%1.16b}, %2.8b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vqtbl1_s8 (int8x16_t a, int8x8_t b)
-{
- int8x8_t result;
- __asm__ ("tbl %0.8b, {%1.16b}, %2.8b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vqtbl1_u8 (uint8x16_t a, uint8x8_t b)
-{
- uint8x8_t result;
- __asm__ ("tbl %0.8b, {%1.16b}, %2.8b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
-vqtbl1q_p8 (poly8x16_t a, uint8x16_t b)
-{
- poly8x16_t result;
- __asm__ ("tbl %0.16b, {%1.16b}, %2.16b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vqtbl1q_s8 (int8x16_t a, int8x16_t b)
-{
- int8x16_t result;
- __asm__ ("tbl %0.16b, {%1.16b}, %2.16b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vqtbl1q_u8 (uint8x16_t a, uint8x16_t b)
-{
- uint8x16_t result;
- __asm__ ("tbl %0.16b, {%1.16b}, %2.16b"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vqtbl2_s8 (int8x16x2_t tab, int8x8_t idx)
-{
- int8x8_t result;
- __asm__ ("ld1 {v16.16b, v17.16b}, %1\n\t"
- "tbl %0.8b, {v16.16b, v17.16b}, %2.8b\n\t"
- :"=w"(result)
- :"Q"(tab),"w"(idx)
- :"memory", "v16", "v17");
- return result;
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vqtbl2_u8 (uint8x16x2_t tab, uint8x8_t idx)
-{
- uint8x8_t result;
- __asm__ ("ld1 {v16.16b, v17.16b}, %1\n\t"
- "tbl %0.8b, {v16.16b, v17.16b}, %2.8b\n\t"
- :"=w"(result)
- :"Q"(tab),"w"(idx)
- :"memory", "v16", "v17");
- return result;
-}
-
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vqtbl2_p8 (poly8x16x2_t tab, uint8x8_t idx)
-{
- poly8x8_t result;
- __asm__ ("ld1 {v16.16b, v17.16b}, %1\n\t"
- "tbl %0.8b, {v16.16b, v17.16b}, %2.8b\n\t"
- :"=w"(result)
- :"Q"(tab),"w"(idx)
- :"memory", "v16", "v17");
- return result;
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vqtbl2q_s8 (int8x16x2_t tab, int8x16_t idx)
-{
- int8x16_t result;
- __asm__ ("ld1 {v16.16b, v17.16b}, %1\n\t"
- "tbl %0.16b, {v16.16b, v17.16b}, %2.16b\n\t"
- :"=w"(result)
- :"Q"(tab),"w"(idx)
- :"memory", "v16", "v17");
- return result;
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vqtbl2q_u8 (uint8x16x2_t tab, uint8x16_t idx)
-{
- uint8x16_t result;
- __asm__ ("ld1 {v16.16b, v17.16b}, %1\n\t"
- "tbl %0.16b, {v16.16b, v17.16b}, %2.16b\n\t"
- :"=w"(result)
- :"Q"(tab),"w"(idx)
- :"memory", "v16", "v17");
- return result;
-}
-
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
-vqtbl2q_p8 (poly8x16x2_t tab, uint8x16_t idx)
-{
- poly8x16_t result;
- __asm__ ("ld1 {v16.16b, v17.16b}, %1\n\t"
- "tbl %0.16b, {v16.16b, v17.16b}, %2.16b\n\t"
- :"=w"(result)
- :"Q"(tab),"w"(idx)
- :"memory", "v16", "v17");
- return result;
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vqtbl3_s8 (int8x16x3_t tab, int8x8_t idx)
-{
- int8x8_t result;
- __asm__ ("ld1 {v16.16b - v18.16b}, %1\n\t"
- "tbl %0.8b, {v16.16b - v18.16b}, %2.8b\n\t"
- :"=w"(result)
- :"Q"(tab),"w"(idx)
- :"memory", "v16", "v17", "v18");
- return result;
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vqtbl3_u8 (uint8x16x3_t tab, uint8x8_t idx)
-{
- uint8x8_t result;
- __asm__ ("ld1 {v16.16b - v18.16b}, %1\n\t"
- "tbl %0.8b, {v16.16b - v18.16b}, %2.8b\n\t"
- :"=w"(result)
- :"Q"(tab),"w"(idx)
- :"memory", "v16", "v17", "v18");
- return result;
-}
-
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vqtbl3_p8 (poly8x16x3_t tab, uint8x8_t idx)
-{
- poly8x8_t result;
- __asm__ ("ld1 {v16.16b - v18.16b}, %1\n\t"
- "tbl %0.8b, {v16.16b - v18.16b}, %2.8b\n\t"
- :"=w"(result)
- :"Q"(tab),"w"(idx)
- :"memory", "v16", "v17", "v18");
- return result;
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vqtbl3q_s8 (int8x16x3_t tab, int8x16_t idx)
-{
- int8x16_t result;
- __asm__ ("ld1 {v16.16b - v18.16b}, %1\n\t"
- "tbl %0.16b, {v16.16b - v18.16b}, %2.16b\n\t"
- :"=w"(result)
- :"Q"(tab),"w"(idx)
- :"memory", "v16", "v17", "v18");
- return result;
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vqtbl3q_u8 (uint8x16x3_t tab, uint8x16_t idx)
-{
- uint8x16_t result;
- __asm__ ("ld1 {v16.16b - v18.16b}, %1\n\t"
- "tbl %0.16b, {v16.16b - v18.16b}, %2.16b\n\t"
- :"=w"(result)
- :"Q"(tab),"w"(idx)
- :"memory", "v16", "v17", "v18");
- return result;
-}
-
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
-vqtbl3q_p8 (poly8x16x3_t tab, uint8x16_t idx)
-{
- poly8x16_t result;
- __asm__ ("ld1 {v16.16b - v18.16b}, %1\n\t"
- "tbl %0.16b, {v16.16b - v18.16b}, %2.16b\n\t"
- :"=w"(result)
- :"Q"(tab),"w"(idx)
- :"memory", "v16", "v17", "v18");
- return result;
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vqtbl4_s8 (int8x16x4_t tab, int8x8_t idx)
-{
- int8x8_t result;
- __asm__ ("ld1 {v16.16b - v19.16b}, %1\n\t"
- "tbl %0.8b, {v16.16b - v19.16b}, %2.8b\n\t"
- :"=w"(result)
- :"Q"(tab),"w"(idx)
- :"memory", "v16", "v17", "v18", "v19");
- return result;
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vqtbl4_u8 (uint8x16x4_t tab, uint8x8_t idx)
-{
- uint8x8_t result;
- __asm__ ("ld1 {v16.16b - v19.16b}, %1\n\t"
- "tbl %0.8b, {v16.16b - v19.16b}, %2.8b\n\t"
- :"=w"(result)
- :"Q"(tab),"w"(idx)
- :"memory", "v16", "v17", "v18", "v19");
- return result;
-}
-
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vqtbl4_p8 (poly8x16x4_t tab, uint8x8_t idx)
-{
- poly8x8_t result;
- __asm__ ("ld1 {v16.16b - v19.16b}, %1\n\t"
- "tbl %0.8b, {v16.16b - v19.16b}, %2.8b\n\t"
- :"=w"(result)
- :"Q"(tab),"w"(idx)
- :"memory", "v16", "v17", "v18", "v19");
- return result;
-}
-
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vqtbl4q_s8 (int8x16x4_t tab, int8x16_t idx)
-{
- int8x16_t result;
- __asm__ ("ld1 {v16.16b - v19.16b}, %1\n\t"
- "tbl %0.16b, {v16.16b - v19.16b}, %2.16b\n\t"
- :"=w"(result)
- :"Q"(tab),"w"(idx)
- :"memory", "v16", "v17", "v18", "v19");
- return result;
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vqtbl4q_u8 (uint8x16x4_t tab, uint8x16_t idx)
-{
- uint8x16_t result;
- __asm__ ("ld1 {v16.16b - v19.16b}, %1\n\t"
- "tbl %0.16b, {v16.16b - v19.16b}, %2.16b\n\t"
- :"=w"(result)
- :"Q"(tab),"w"(idx)
- :"memory", "v16", "v17", "v18", "v19");
- return result;
-}
-
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
-vqtbl4q_p8 (poly8x16x4_t tab, uint8x16_t idx)
-{
- poly8x16_t result;
- __asm__ ("ld1 {v16.16b - v19.16b}, %1\n\t"
- "tbl %0.16b, {v16.16b - v19.16b}, %2.16b\n\t"
- :"=w"(result)
- :"Q"(tab),"w"(idx)
- :"memory", "v16", "v17", "v18", "v19");
- return result;
-}
-
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vqtbx1_s8 (int8x8_t r, int8x16_t tab, int8x8_t idx)
-{
- int8x8_t result = r;
- __asm__ ("tbx %0.8b,{%1.16b},%2.8b"
- : "+w"(result)
- : "w"(tab), "w"(idx)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vqtbx1_u8 (uint8x8_t r, uint8x16_t tab, uint8x8_t idx)
-{
- uint8x8_t result = r;
- __asm__ ("tbx %0.8b,{%1.16b},%2.8b"
- : "+w"(result)
- : "w"(tab), "w"(idx)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vqtbx1_p8 (poly8x8_t r, poly8x16_t tab, uint8x8_t idx)
-{
- poly8x8_t result = r;
- __asm__ ("tbx %0.8b,{%1.16b},%2.8b"
- : "+w"(result)
- : "w"(tab), "w"(idx)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vqtbx1q_s8 (int8x16_t r, int8x16_t tab, int8x16_t idx)
-{
- int8x16_t result = r;
- __asm__ ("tbx %0.16b,{%1.16b},%2.16b"
- : "+w"(result)
- : "w"(tab), "w"(idx)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vqtbx1q_u8 (uint8x16_t r, uint8x16_t tab, uint8x16_t idx)
-{
- uint8x16_t result = r;
- __asm__ ("tbx %0.16b,{%1.16b},%2.16b"
- : "+w"(result)
- : "w"(tab), "w"(idx)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
-vqtbx1q_p8 (poly8x16_t r, poly8x16_t tab, uint8x16_t idx)
-{
- poly8x16_t result = r;
- __asm__ ("tbx %0.16b,{%1.16b},%2.16b"
- : "+w"(result)
- : "w"(tab), "w"(idx)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vqtbx2_s8 (int8x8_t r, int8x16x2_t tab, int8x8_t idx)
-{
- int8x8_t result = r;
- __asm__ ("ld1 {v16.16b, v17.16b}, %1\n\t"
- "tbx %0.8b, {v16.16b, v17.16b}, %2.8b\n\t"
- :"+w"(result)
- :"Q"(tab),"w"(idx)
- :"memory", "v16", "v17");
- return result;
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vqtbx2_u8 (uint8x8_t r, uint8x16x2_t tab, uint8x8_t idx)
-{
- uint8x8_t result = r;
- __asm__ ("ld1 {v16.16b, v17.16b}, %1\n\t"
- "tbx %0.8b, {v16.16b, v17.16b}, %2.8b\n\t"
- :"+w"(result)
- :"Q"(tab),"w"(idx)
- :"memory", "v16", "v17");
- return result;
-}
-
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vqtbx2_p8 (poly8x8_t r, poly8x16x2_t tab, uint8x8_t idx)
-{
- poly8x8_t result = r;
- __asm__ ("ld1 {v16.16b, v17.16b}, %1\n\t"
- "tbx %0.8b, {v16.16b, v17.16b}, %2.8b\n\t"
- :"+w"(result)
- :"Q"(tab),"w"(idx)
- :"memory", "v16", "v17");
- return result;
-}
-
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vqtbx2q_s8 (int8x16_t r, int8x16x2_t tab, int8x16_t idx)
-{
- int8x16_t result = r;
- __asm__ ("ld1 {v16.16b, v17.16b}, %1\n\t"
- "tbx %0.16b, {v16.16b, v17.16b}, %2.16b\n\t"
- :"+w"(result)
- :"Q"(tab),"w"(idx)
- :"memory", "v16", "v17");
- return result;
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vqtbx2q_u8 (uint8x16_t r, uint8x16x2_t tab, uint8x16_t idx)
-{
- uint8x16_t result = r;
- __asm__ ("ld1 {v16.16b, v17.16b}, %1\n\t"
- "tbx %0.16b, {v16.16b, v17.16b}, %2.16b\n\t"
- :"+w"(result)
- :"Q"(tab),"w"(idx)
- :"memory", "v16", "v17");
- return result;
-}
-
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
-vqtbx2q_p8 (poly8x16_t r, poly8x16x2_t tab, uint8x16_t idx)
-{
- poly8x16_t result = r;
- __asm__ ("ld1 {v16.16b, v17.16b}, %1\n\t"
- "tbx %0.16b, {v16.16b, v17.16b}, %2.16b\n\t"
- :"+w"(result)
- :"Q"(tab),"w"(idx)
- :"memory", "v16", "v17");
- return result;
-}
-
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vqtbx3_s8 (int8x8_t r, int8x16x3_t tab, int8x8_t idx)
-{
- int8x8_t result = r;
- __asm__ ("ld1 {v16.16b - v18.16b}, %1\n\t"
- "tbx %0.8b, {v16.16b - v18.16b}, %2.8b\n\t"
- :"+w"(result)
- :"Q"(tab),"w"(idx)
- :"memory", "v16", "v17", "v18");
- return result;
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vqtbx3_u8 (uint8x8_t r, uint8x16x3_t tab, uint8x8_t idx)
-{
- uint8x8_t result = r;
- __asm__ ("ld1 {v16.16b - v18.16b}, %1\n\t"
- "tbx %0.8b, {v16.16b - v18.16b}, %2.8b\n\t"
- :"+w"(result)
- :"Q"(tab),"w"(idx)
- :"memory", "v16", "v17", "v18");
- return result;
-}
-
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vqtbx3_p8 (poly8x8_t r, poly8x16x3_t tab, uint8x8_t idx)
-{
- poly8x8_t result = r;
- __asm__ ("ld1 {v16.16b - v18.16b}, %1\n\t"
- "tbx %0.8b, {v16.16b - v18.16b}, %2.8b\n\t"
- :"+w"(result)
- :"Q"(tab),"w"(idx)
- :"memory", "v16", "v17", "v18");
- return result;
-}
-
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vqtbx3q_s8 (int8x16_t r, int8x16x3_t tab, int8x16_t idx)
-{
- int8x16_t result = r;
- __asm__ ("ld1 {v16.16b - v18.16b}, %1\n\t"
- "tbx %0.16b, {v16.16b - v18.16b}, %2.16b\n\t"
- :"+w"(result)
- :"Q"(tab),"w"(idx)
- :"memory", "v16", "v17", "v18");
- return result;
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vqtbx3q_u8 (uint8x16_t r, uint8x16x3_t tab, uint8x16_t idx)
-{
- uint8x16_t result = r;
- __asm__ ("ld1 {v16.16b - v18.16b}, %1\n\t"
- "tbx %0.16b, {v16.16b - v18.16b}, %2.16b\n\t"
- :"+w"(result)
- :"Q"(tab),"w"(idx)
- :"memory", "v16", "v17", "v18");
- return result;
-}
-
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
-vqtbx3q_p8 (poly8x16_t r, poly8x16x3_t tab, uint8x16_t idx)
-{
- poly8x16_t result = r;
- __asm__ ("ld1 {v16.16b - v18.16b}, %1\n\t"
- "tbx %0.16b, {v16.16b - v18.16b}, %2.16b\n\t"
- :"+w"(result)
- :"Q"(tab),"w"(idx)
- :"memory", "v16", "v17", "v18");
- return result;
-}
-
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vqtbx4_s8 (int8x8_t r, int8x16x4_t tab, int8x8_t idx)
-{
- int8x8_t result = r;
- __asm__ ("ld1 {v16.16b - v19.16b}, %1\n\t"
- "tbx %0.8b, {v16.16b - v19.16b}, %2.8b\n\t"
- :"+w"(result)
- :"Q"(tab),"w"(idx)
- :"memory", "v16", "v17", "v18", "v19");
- return result;
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vqtbx4_u8 (uint8x8_t r, uint8x16x4_t tab, uint8x8_t idx)
-{
- uint8x8_t result = r;
- __asm__ ("ld1 {v16.16b - v19.16b}, %1\n\t"
- "tbx %0.8b, {v16.16b - v19.16b}, %2.8b\n\t"
- :"+w"(result)
- :"Q"(tab),"w"(idx)
- :"memory", "v16", "v17", "v18", "v19");
- return result;
-}
-
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vqtbx4_p8 (poly8x8_t r, poly8x16x4_t tab, uint8x8_t idx)
-{
- poly8x8_t result = r;
- __asm__ ("ld1 {v16.16b - v19.16b}, %1\n\t"
- "tbx %0.8b, {v16.16b - v19.16b}, %2.8b\n\t"
- :"+w"(result)
- :"Q"(tab),"w"(idx)
- :"memory", "v16", "v17", "v18", "v19");
- return result;
-}
-
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vqtbx4q_s8 (int8x16_t r, int8x16x4_t tab, int8x16_t idx)
-{
- int8x16_t result = r;
- __asm__ ("ld1 {v16.16b - v19.16b}, %1\n\t"
- "tbx %0.16b, {v16.16b - v19.16b}, %2.16b\n\t"
- :"+w"(result)
- :"Q"(tab),"w"(idx)
- :"memory", "v16", "v17", "v18", "v19");
- return result;
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vqtbx4q_u8 (uint8x16_t r, uint8x16x4_t tab, uint8x16_t idx)
-{
- uint8x16_t result = r;
- __asm__ ("ld1 {v16.16b - v19.16b}, %1\n\t"
- "tbx %0.16b, {v16.16b - v19.16b}, %2.16b\n\t"
- :"+w"(result)
- :"Q"(tab),"w"(idx)
- :"memory", "v16", "v17", "v18", "v19");
- return result;
-}
-
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
-vqtbx4q_p8 (poly8x16_t r, poly8x16x4_t tab, uint8x16_t idx)
-{
- poly8x16_t result = r;
- __asm__ ("ld1 {v16.16b - v19.16b}, %1\n\t"
- "tbx %0.16b, {v16.16b - v19.16b}, %2.16b\n\t"
- :"+w"(result)
- :"Q"(tab),"w"(idx)
- :"memory", "v16", "v17", "v18", "v19");
- return result;
-}
-
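/* Illustrative usage sketch; the function below is a hypothetical example,
   not one of the header's intrinsics.  The tbl-based lookups above treat the
   index vector as byte offsets into the table; out-of-range indices yield 0
   in the vqtbl* forms, while the vqtbx* forms keep the corresponding byte of
   the destination.  */
static void __attribute__ ((__unused__))
__neon_tbl_example (uint8x16_t table, uint8x8_t idx, uint8x8_t fallback)
{
  uint8x8_t picked = vqtbl1_u8 (table, idx);            /* 0 where idx > 15.  */
  uint8x8_t merged = vqtbx1_u8 (fallback, table, idx);  /* fallback kept where idx > 15.  */
  (void) picked;
  (void) merged;
}
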
-/* V7 legacy table intrinsics. */
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vtbl1_s8 (int8x8_t tab, int8x8_t idx)
-{
- int8x8_t result;
- int8x16_t temp = vcombine_s8 (tab, vcreate_s8 (UINT64_C (0x0)));
- __asm__ ("tbl %0.8b, {%1.16b}, %2.8b"
- : "=w"(result)
- : "w"(temp), "w"(idx)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vtbl1_u8 (uint8x8_t tab, uint8x8_t idx)
-{
- uint8x8_t result;
- uint8x16_t temp = vcombine_u8 (tab, vcreate_u8 (UINT64_C (0x0)));
- __asm__ ("tbl %0.8b, {%1.16b}, %2.8b"
- : "=w"(result)
- : "w"(temp), "w"(idx)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vtbl1_p8 (poly8x8_t tab, uint8x8_t idx)
-{
- poly8x8_t result;
- poly8x16_t temp = vcombine_p8 (tab, vcreate_p8 (UINT64_C (0x0)));
- __asm__ ("tbl %0.8b, {%1.16b}, %2.8b"
- : "=w"(result)
- : "w"(temp), "w"(idx)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vtbl2_s8 (int8x8x2_t tab, int8x8_t idx)
-{
- int8x8_t result;
- int8x16_t temp = vcombine_s8 (tab.val[0], tab.val[1]);
- __asm__ ("tbl %0.8b, {%1.16b}, %2.8b"
- : "=w"(result)
- : "w"(temp), "w"(idx)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vtbl2_u8 (uint8x8x2_t tab, uint8x8_t idx)
-{
- uint8x8_t result;
- uint8x16_t temp = vcombine_u8 (tab.val[0], tab.val[1]);
- __asm__ ("tbl %0.8b, {%1.16b}, %2.8b"
- : "=w"(result)
- : "w"(temp), "w"(idx)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vtbl2_p8 (poly8x8x2_t tab, uint8x8_t idx)
-{
- poly8x8_t result;
- poly8x16_t temp = vcombine_p8 (tab.val[0], tab.val[1]);
- __asm__ ("tbl %0.8b, {%1.16b}, %2.8b"
- : "=w"(result)
- : "w"(temp), "w"(idx)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vtbl3_s8 (int8x8x3_t tab, int8x8_t idx)
-{
- int8x8_t result;
- int8x16x2_t temp;
- temp.val[0] = vcombine_s8 (tab.val[0], tab.val[1]);
- temp.val[1] = vcombine_s8 (tab.val[2], vcreate_s8 (UINT64_C (0x0)));
- __asm__ ("ld1 {v16.16b - v17.16b }, %1\n\t"
- "tbl %0.8b, {v16.16b - v17.16b}, %2.8b\n\t"
- : "=w"(result)
- : "Q"(temp), "w"(idx)
- : "v16", "v17", "memory");
- return result;
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vtbl3_u8 (uint8x8x3_t tab, uint8x8_t idx)
-{
- uint8x8_t result;
- uint8x16x2_t temp;
- temp.val[0] = vcombine_u8 (tab.val[0], tab.val[1]);
- temp.val[1] = vcombine_u8 (tab.val[2], vcreate_u8 (UINT64_C (0x0)));
- __asm__ ("ld1 {v16.16b - v17.16b }, %1\n\t"
- "tbl %0.8b, {v16.16b - v17.16b}, %2.8b\n\t"
- : "=w"(result)
- : "Q"(temp), "w"(idx)
- : "v16", "v17", "memory");
- return result;
-}
-
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vtbl3_p8 (poly8x8x3_t tab, uint8x8_t idx)
-{
- poly8x8_t result;
- poly8x16x2_t temp;
- temp.val[0] = vcombine_p8 (tab.val[0], tab.val[1]);
- temp.val[1] = vcombine_p8 (tab.val[2], vcreate_p8 (UINT64_C (0x0)));
- __asm__ ("ld1 {v16.16b - v17.16b }, %1\n\t"
- "tbl %0.8b, {v16.16b - v17.16b}, %2.8b\n\t"
- : "=w"(result)
- : "Q"(temp), "w"(idx)
- : "v16", "v17", "memory");
- return result;
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vtbl4_s8 (int8x8x4_t tab, int8x8_t idx)
-{
- int8x8_t result;
- int8x16x2_t temp;
- temp.val[0] = vcombine_s8 (tab.val[0], tab.val[1]);
- temp.val[1] = vcombine_s8 (tab.val[2], tab.val[3]);
- __asm__ ("ld1 {v16.16b - v17.16b }, %1\n\t"
- "tbl %0.8b, {v16.16b - v17.16b}, %2.8b\n\t"
- : "=w"(result)
- : "Q"(temp), "w"(idx)
- : "v16", "v17", "memory");
- return result;
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vtbl4_u8 (uint8x8x4_t tab, uint8x8_t idx)
-{
- uint8x8_t result;
- uint8x16x2_t temp;
- temp.val[0] = vcombine_u8 (tab.val[0], tab.val[1]);
- temp.val[1] = vcombine_u8 (tab.val[2], tab.val[3]);
- __asm__ ("ld1 {v16.16b - v17.16b }, %1\n\t"
- "tbl %0.8b, {v16.16b - v17.16b}, %2.8b\n\t"
- : "=w"(result)
- : "Q"(temp), "w"(idx)
- : "v16", "v17", "memory");
- return result;
-}
-
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vtbl4_p8 (poly8x8x4_t tab, uint8x8_t idx)
-{
- poly8x8_t result;
- poly8x16x2_t temp;
- temp.val[0] = vcombine_p8 (tab.val[0], tab.val[1]);
- temp.val[1] = vcombine_p8 (tab.val[2], tab.val[3]);
- __asm__ ("ld1 {v16.16b - v17.16b }, %1\n\t"
- "tbl %0.8b, {v16.16b - v17.16b}, %2.8b\n\t"
- : "=w"(result)
- : "Q"(temp), "w"(idx)
- : "v16", "v17", "memory");
- return result;
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vtbx1_s8 (int8x8_t r, int8x8_t tab, int8x8_t idx)
-{
- int8x8_t result;
- int8x8_t tmp1;
- int8x16_t temp = vcombine_s8 (tab, vcreate_s8 (UINT64_C (0x0)));
- __asm__ ("movi %0.8b, 8\n\t"
- "cmhs %0.8b, %3.8b, %0.8b\n\t"
- "tbl %1.8b, {%2.16b}, %3.8b\n\t"
- "bsl %0.8b, %4.8b, %1.8b\n\t"
- : "+w"(result), "=w"(tmp1)
- : "w"(temp), "w"(idx), "w"(r)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vtbx1_u8 (uint8x8_t r, uint8x8_t tab, uint8x8_t idx)
-{
- uint8x8_t result;
- uint8x8_t tmp1;
- uint8x16_t temp = vcombine_u8 (tab, vcreate_u8 (UINT64_C (0x0)));
- __asm__ ("movi %0.8b, 8\n\t"
- "cmhs %0.8b, %3.8b, %0.8b\n\t"
- "tbl %1.8b, {%2.16b}, %3.8b\n\t"
- "bsl %0.8b, %4.8b, %1.8b\n\t"
- : "+w"(result), "=w"(tmp1)
- : "w"(temp), "w"(idx), "w"(r)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vtbx1_p8 (poly8x8_t r, poly8x8_t tab, uint8x8_t idx)
-{
- poly8x8_t result;
- poly8x8_t tmp1;
- poly8x16_t temp = vcombine_p8 (tab, vcreate_p8 (UINT64_C (0x0)));
- __asm__ ("movi %0.8b, 8\n\t"
- "cmhs %0.8b, %3.8b, %0.8b\n\t"
- "tbl %1.8b, {%2.16b}, %3.8b\n\t"
- "bsl %0.8b, %4.8b, %1.8b\n\t"
- : "+w"(result), "=w"(tmp1)
- : "w"(temp), "w"(idx), "w"(r)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vtbx2_s8 (int8x8_t r, int8x8x2_t tab, int8x8_t idx)
-{
- int8x8_t result = r;
- int8x16_t temp = vcombine_s8 (tab.val[0], tab.val[1]);
- __asm__ ("tbx %0.8b, {%1.16b}, %2.8b"
- : "+w"(result)
- : "w"(temp), "w"(idx)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vtbx2_u8 (uint8x8_t r, uint8x8x2_t tab, uint8x8_t idx)
-{
- uint8x8_t result = r;
- uint8x16_t temp = vcombine_u8 (tab.val[0], tab.val[1]);
- __asm__ ("tbx %0.8b, {%1.16b}, %2.8b"
- : "+w"(result)
- : "w"(temp), "w"(idx)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vtbx2_p8 (poly8x8_t r, poly8x8x2_t tab, uint8x8_t idx)
-{
- poly8x8_t result = r;
- poly8x16_t temp = vcombine_p8 (tab.val[0], tab.val[1]);
- __asm__ ("tbx %0.8b, {%1.16b}, %2.8b"
- : "+w"(result)
- : "w"(temp), "w"(idx)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vtbx3_s8 (int8x8_t r, int8x8x3_t tab, int8x8_t idx)
-{
- int8x8_t result;
- int8x8_t tmp1;
- int8x16x2_t temp;
- temp.val[0] = vcombine_s8 (tab.val[0], tab.val[1]);
- temp.val[1] = vcombine_s8 (tab.val[2], vcreate_s8 (UINT64_C (0x0)));
- __asm__ ("ld1 {v16.16b - v17.16b}, %2\n\t"
- "movi %0.8b, 24\n\t"
- "cmhs %0.8b, %3.8b, %0.8b\n\t"
- "tbl %1.8b, {v16.16b - v17.16b}, %3.8b\n\t"
- "bsl %0.8b, %4.8b, %1.8b\n\t"
- : "+w"(result), "=w"(tmp1)
- : "Q"(temp), "w"(idx), "w"(r)
- : "v16", "v17", "memory");
- return result;
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vtbx3_u8 (uint8x8_t r, uint8x8x3_t tab, uint8x8_t idx)
-{
- uint8x8_t result;
- uint8x8_t tmp1;
- uint8x16x2_t temp;
- temp.val[0] = vcombine_u8 (tab.val[0], tab.val[1]);
- temp.val[1] = vcombine_u8 (tab.val[2], vcreate_u8 (UINT64_C (0x0)));
- __asm__ ("ld1 {v16.16b - v17.16b}, %2\n\t"
- "movi %0.8b, 24\n\t"
- "cmhs %0.8b, %3.8b, %0.8b\n\t"
- "tbl %1.8b, {v16.16b - v17.16b}, %3.8b\n\t"
- "bsl %0.8b, %4.8b, %1.8b\n\t"
- : "+w"(result), "=w"(tmp1)
- : "Q"(temp), "w"(idx), "w"(r)
- : "v16", "v17", "memory");
- return result;
-}
-
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vtbx3_p8 (poly8x8_t r, poly8x8x3_t tab, uint8x8_t idx)
-{
- poly8x8_t result;
- poly8x8_t tmp1;
- poly8x16x2_t temp;
- temp.val[0] = vcombine_p8 (tab.val[0], tab.val[1]);
- temp.val[1] = vcombine_p8 (tab.val[2], vcreate_p8 (UINT64_C (0x0)));
- __asm__ ("ld1 {v16.16b - v17.16b}, %2\n\t"
- "movi %0.8b, 24\n\t"
- "cmhs %0.8b, %3.8b, %0.8b\n\t"
- "tbl %1.8b, {v16.16b - v17.16b}, %3.8b\n\t"
- "bsl %0.8b, %4.8b, %1.8b\n\t"
- : "+w"(result), "=w"(tmp1)
- : "Q"(temp), "w"(idx), "w"(r)
- : "v16", "v17", "memory");
- return result;
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vtbx4_s8 (int8x8_t r, int8x8x4_t tab, int8x8_t idx)
-{
- int8x8_t result = r;
- int8x16x2_t temp;
- temp.val[0] = vcombine_s8 (tab.val[0], tab.val[1]);
- temp.val[1] = vcombine_s8 (tab.val[2], tab.val[3]);
- __asm__ ("ld1 {v16.16b - v17.16b }, %1\n\t"
- "tbx %0.8b, {v16.16b - v17.16b}, %2.8b\n\t"
- : "+w"(result)
- : "Q"(temp), "w"(idx)
- : "v16", "v17", "memory");
- return result;
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vtbx4_u8 (uint8x8_t r, uint8x8x4_t tab, uint8x8_t idx)
-{
- uint8x8_t result = r;
- uint8x16x2_t temp;
- temp.val[0] = vcombine_u8 (tab.val[0], tab.val[1]);
- temp.val[1] = vcombine_u8 (tab.val[2], tab.val[3]);
- __asm__ ("ld1 {v16.16b - v17.16b }, %1\n\t"
- "tbx %0.8b, {v16.16b - v17.16b}, %2.8b\n\t"
- : "+w"(result)
- : "Q"(temp), "w"(idx)
- : "v16", "v17", "memory");
- return result;
-}
-
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vtbx4_p8 (poly8x8_t r, poly8x8x4_t tab, uint8x8_t idx)
-{
- poly8x8_t result = r;
- poly8x16x2_t temp;
- temp.val[0] = vcombine_p8 (tab.val[0], tab.val[1]);
- temp.val[1] = vcombine_p8 (tab.val[2], tab.val[3]);
- __asm__ ("ld1 {v16.16b - v17.16b }, %1\n\t"
- "tbx %0.8b, {v16.16b - v17.16b}, %2.8b\n\t"
- : "+w"(result)
- : "Q"(temp), "w"(idx)
- : "v16", "v17", "memory");
- return result;
-}
-
-/* End of temporary inline asm. */
-
-/* Start of optimal implementations in approved order. */
-
-/* vadd */
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vaddd_s64 (int64x1_t __a, int64x1_t __b)
-{
- return __a + __b;
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vaddd_u64 (uint64x1_t __a, uint64x1_t __b)
-{
- return __a + __b;
-}
-
-/* vceq */
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vceq_p8 (poly8x8_t __a, poly8x8_t __b)
-{
- return (uint8x8_t) __builtin_aarch64_cmeqv8qi ((int8x8_t) __a,
- (int8x8_t) __b);
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vceq_s8 (int8x8_t __a, int8x8_t __b)
-{
- return (uint8x8_t) __builtin_aarch64_cmeqv8qi (__a, __b);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vceq_s16 (int16x4_t __a, int16x4_t __b)
-{
- return (uint16x4_t) __builtin_aarch64_cmeqv4hi (__a, __b);
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vceq_s32 (int32x2_t __a, int32x2_t __b)
-{
- return (uint32x2_t) __builtin_aarch64_cmeqv2si (__a, __b);
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vceq_s64 (int64x1_t __a, int64x1_t __b)
-{
- return (uint64x1_t) __builtin_aarch64_cmeqdi (__a, __b);
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vceq_u8 (uint8x8_t __a, uint8x8_t __b)
-{
- return (uint8x8_t) __builtin_aarch64_cmeqv8qi ((int8x8_t) __a,
- (int8x8_t) __b);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vceq_u16 (uint16x4_t __a, uint16x4_t __b)
-{
- return (uint16x4_t) __builtin_aarch64_cmeqv4hi ((int16x4_t) __a,
- (int16x4_t) __b);
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vceq_u32 (uint32x2_t __a, uint32x2_t __b)
-{
- return (uint32x2_t) __builtin_aarch64_cmeqv2si ((int32x2_t) __a,
- (int32x2_t) __b);
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vceq_u64 (uint64x1_t __a, uint64x1_t __b)
-{
- return (uint64x1_t) __builtin_aarch64_cmeqdi ((int64x1_t) __a,
- (int64x1_t) __b);
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vceqq_p8 (poly8x16_t __a, poly8x16_t __b)
-{
- return (uint8x16_t) __builtin_aarch64_cmeqv16qi ((int8x16_t) __a,
- (int8x16_t) __b);
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vceqq_s8 (int8x16_t __a, int8x16_t __b)
-{
- return (uint8x16_t) __builtin_aarch64_cmeqv16qi (__a, __b);
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vceqq_s16 (int16x8_t __a, int16x8_t __b)
-{
- return (uint16x8_t) __builtin_aarch64_cmeqv8hi (__a, __b);
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vceqq_s32 (int32x4_t __a, int32x4_t __b)
-{
- return (uint32x4_t) __builtin_aarch64_cmeqv4si (__a, __b);
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vceqq_s64 (int64x2_t __a, int64x2_t __b)
-{
- return (uint64x2_t) __builtin_aarch64_cmeqv2di (__a, __b);
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vceqq_u8 (uint8x16_t __a, uint8x16_t __b)
-{
- return (uint8x16_t) __builtin_aarch64_cmeqv16qi ((int8x16_t) __a,
- (int8x16_t) __b);
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vceqq_u16 (uint16x8_t __a, uint16x8_t __b)
-{
- return (uint16x8_t) __builtin_aarch64_cmeqv8hi ((int16x8_t) __a,
- (int16x8_t) __b);
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vceqq_u32 (uint32x4_t __a, uint32x4_t __b)
-{
- return (uint32x4_t) __builtin_aarch64_cmeqv4si ((int32x4_t) __a,
- (int32x4_t) __b);
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vceqq_u64 (uint64x2_t __a, uint64x2_t __b)
-{
- return (uint64x2_t) __builtin_aarch64_cmeqv2di ((int64x2_t) __a,
- (int64x2_t) __b);
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vceqd_s64 (int64x1_t __a, int64x1_t __b)
-{
- return (uint64x1_t) __builtin_aarch64_cmeqdi (__a, __b);
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vceqd_u64 (uint64x1_t __a, uint64x1_t __b)
-{
- return (uint64x1_t) __builtin_aarch64_cmeqdi (__a, __b);
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vceqzd_s64 (int64x1_t __a)
-{
- return (uint64x1_t) __builtin_aarch64_cmeqdi (__a, 0);
-}
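
/* A minimal usage sketch for the vceq* family above (hypothetical
   helper, assuming an AArch64 NEON target): each result lane is
   all-ones where the corresponding input lanes compare equal and
   all-zeros otherwise, so the result is directly usable as a
   per-lane select mask.  */
static uint32x4_t
example_vceq_usage (uint32x4_t a, uint32x4_t b)
{
  return vceqq_u32 (a, b);
}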
-
-/* vcge */
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vcge_s8 (int8x8_t __a, int8x8_t __b)
-{
- return (uint8x8_t) __builtin_aarch64_cmgev8qi (__a, __b);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vcge_s16 (int16x4_t __a, int16x4_t __b)
-{
- return (uint16x4_t) __builtin_aarch64_cmgev4hi (__a, __b);
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vcge_s32 (int32x2_t __a, int32x2_t __b)
-{
- return (uint32x2_t) __builtin_aarch64_cmgev2si (__a, __b);
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vcge_s64 (int64x1_t __a, int64x1_t __b)
-{
- return (uint64x1_t) __builtin_aarch64_cmgedi (__a, __b);
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vcge_u8 (uint8x8_t __a, uint8x8_t __b)
-{
- return (uint8x8_t) __builtin_aarch64_cmhsv8qi ((int8x8_t) __a,
- (int8x8_t) __b);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vcge_u16 (uint16x4_t __a, uint16x4_t __b)
-{
- return (uint16x4_t) __builtin_aarch64_cmhsv4hi ((int16x4_t) __a,
- (int16x4_t) __b);
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vcge_u32 (uint32x2_t __a, uint32x2_t __b)
-{
- return (uint32x2_t) __builtin_aarch64_cmhsv2si ((int32x2_t) __a,
- (int32x2_t) __b);
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vcge_u64 (uint64x1_t __a, uint64x1_t __b)
-{
- return (uint64x1_t) __builtin_aarch64_cmhsdi ((int64x1_t) __a,
- (int64x1_t) __b);
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vcgeq_s8 (int8x16_t __a, int8x16_t __b)
-{
- return (uint8x16_t) __builtin_aarch64_cmgev16qi (__a, __b);
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vcgeq_s16 (int16x8_t __a, int16x8_t __b)
-{
- return (uint16x8_t) __builtin_aarch64_cmgev8hi (__a, __b);
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vcgeq_s32 (int32x4_t __a, int32x4_t __b)
-{
- return (uint32x4_t) __builtin_aarch64_cmgev4si (__a, __b);
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vcgeq_s64 (int64x2_t __a, int64x2_t __b)
-{
- return (uint64x2_t) __builtin_aarch64_cmgev2di (__a, __b);
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vcgeq_u8 (uint8x16_t __a, uint8x16_t __b)
-{
- return (uint8x16_t) __builtin_aarch64_cmhsv16qi ((int8x16_t) __a,
- (int8x16_t) __b);
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vcgeq_u16 (uint16x8_t __a, uint16x8_t __b)
-{
- return (uint16x8_t) __builtin_aarch64_cmhsv8hi ((int16x8_t) __a,
- (int16x8_t) __b);
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vcgeq_u32 (uint32x4_t __a, uint32x4_t __b)
-{
- return (uint32x4_t) __builtin_aarch64_cmhsv4si ((int32x4_t) __a,
- (int32x4_t) __b);
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vcgeq_u64 (uint64x2_t __a, uint64x2_t __b)
-{
- return (uint64x2_t) __builtin_aarch64_cmhsv2di ((int64x2_t) __a,
- (int64x2_t) __b);
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vcged_s64 (int64x1_t __a, int64x1_t __b)
-{
- return (uint64x1_t) __builtin_aarch64_cmgedi (__a, __b);
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vcged_u64 (uint64x1_t __a, uint64x1_t __b)
-{
- return (uint64x1_t) __builtin_aarch64_cmhsdi ((int64x1_t) __a,
- (int64x1_t) __b);
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vcgezd_s64 (int64x1_t __a)
-{
- return (uint64x1_t) __builtin_aarch64_cmgedi (__a, 0);
-}
-
-/* vcgt */
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vcgt_s8 (int8x8_t __a, int8x8_t __b)
-{
- return (uint8x8_t) __builtin_aarch64_cmgtv8qi (__a, __b);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vcgt_s16 (int16x4_t __a, int16x4_t __b)
-{
- return (uint16x4_t) __builtin_aarch64_cmgtv4hi (__a, __b);
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vcgt_s32 (int32x2_t __a, int32x2_t __b)
-{
- return (uint32x2_t) __builtin_aarch64_cmgtv2si (__a, __b);
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vcgt_s64 (int64x1_t __a, int64x1_t __b)
-{
- return (uint64x1_t) __builtin_aarch64_cmgtdi (__a, __b);
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vcgt_u8 (uint8x8_t __a, uint8x8_t __b)
-{
- return (uint8x8_t) __builtin_aarch64_cmhiv8qi ((int8x8_t) __a,
- (int8x8_t) __b);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vcgt_u16 (uint16x4_t __a, uint16x4_t __b)
-{
- return (uint16x4_t) __builtin_aarch64_cmhiv4hi ((int16x4_t) __a,
- (int16x4_t) __b);
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vcgt_u32 (uint32x2_t __a, uint32x2_t __b)
-{
- return (uint32x2_t) __builtin_aarch64_cmhiv2si ((int32x2_t) __a,
- (int32x2_t) __b);
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vcgt_u64 (uint64x1_t __a, uint64x1_t __b)
-{
- return (uint64x1_t) __builtin_aarch64_cmhidi ((int64x1_t) __a,
- (int64x1_t) __b);
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vcgtq_s8 (int8x16_t __a, int8x16_t __b)
-{
- return (uint8x16_t) __builtin_aarch64_cmgtv16qi (__a, __b);
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vcgtq_s16 (int16x8_t __a, int16x8_t __b)
-{
- return (uint16x8_t) __builtin_aarch64_cmgtv8hi (__a, __b);
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vcgtq_s32 (int32x4_t __a, int32x4_t __b)
-{
- return (uint32x4_t) __builtin_aarch64_cmgtv4si (__a, __b);
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vcgtq_s64 (int64x2_t __a, int64x2_t __b)
-{
- return (uint64x2_t) __builtin_aarch64_cmgtv2di (__a, __b);
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vcgtq_u8 (uint8x16_t __a, uint8x16_t __b)
-{
- return (uint8x16_t) __builtin_aarch64_cmhiv16qi ((int8x16_t) __a,
- (int8x16_t) __b);
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vcgtq_u16 (uint16x8_t __a, uint16x8_t __b)
-{
- return (uint16x8_t) __builtin_aarch64_cmhiv8hi ((int16x8_t) __a,
- (int16x8_t) __b);
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vcgtq_u32 (uint32x4_t __a, uint32x4_t __b)
-{
- return (uint32x4_t) __builtin_aarch64_cmhiv4si ((int32x4_t) __a,
- (int32x4_t) __b);
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vcgtq_u64 (uint64x2_t __a, uint64x2_t __b)
-{
- return (uint64x2_t) __builtin_aarch64_cmhiv2di ((int64x2_t) __a,
- (int64x2_t) __b);
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vcgtd_s64 (int64x1_t __a, int64x1_t __b)
-{
- return (uint64x1_t) __builtin_aarch64_cmgtdi (__a, __b);
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vcgtd_u64 (uint64x1_t __a, uint64x1_t __b)
-{
- return (uint64x1_t) __builtin_aarch64_cmhidi ((int64x1_t) __a,
- (int64x1_t) __b);
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vcgtzd_s64 (int64x1_t __a)
-{
- return (uint64x1_t) __builtin_aarch64_cmgtdi (__a, 0);
-}
-
-/* vcle */
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vcle_s8 (int8x8_t __a, int8x8_t __b)
-{
- return (uint8x8_t) __builtin_aarch64_cmgev8qi (__b, __a);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vcle_s16 (int16x4_t __a, int16x4_t __b)
-{
- return (uint16x4_t) __builtin_aarch64_cmgev4hi (__b, __a);
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vcle_s32 (int32x2_t __a, int32x2_t __b)
-{
- return (uint32x2_t) __builtin_aarch64_cmgev2si (__b, __a);
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vcle_s64 (int64x1_t __a, int64x1_t __b)
-{
- return (uint64x1_t) __builtin_aarch64_cmgedi (__b, __a);
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vcle_u8 (uint8x8_t __a, uint8x8_t __b)
-{
- return (uint8x8_t) __builtin_aarch64_cmhsv8qi ((int8x8_t) __b,
- (int8x8_t) __a);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vcle_u16 (uint16x4_t __a, uint16x4_t __b)
-{
- return (uint16x4_t) __builtin_aarch64_cmhsv4hi ((int16x4_t) __b,
- (int16x4_t) __a);
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vcle_u32 (uint32x2_t __a, uint32x2_t __b)
-{
- return (uint32x2_t) __builtin_aarch64_cmhsv2si ((int32x2_t) __b,
- (int32x2_t) __a);
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vcle_u64 (uint64x1_t __a, uint64x1_t __b)
-{
- return (uint64x1_t) __builtin_aarch64_cmhsdi ((int64x1_t) __b,
- (int64x1_t) __a);
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vcleq_s8 (int8x16_t __a, int8x16_t __b)
-{
- return (uint8x16_t) __builtin_aarch64_cmgev16qi (__b, __a);
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vcleq_s16 (int16x8_t __a, int16x8_t __b)
-{
- return (uint16x8_t) __builtin_aarch64_cmgev8hi (__b, __a);
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vcleq_s32 (int32x4_t __a, int32x4_t __b)
-{
- return (uint32x4_t) __builtin_aarch64_cmgev4si (__b, __a);
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vcleq_s64 (int64x2_t __a, int64x2_t __b)
-{
- return (uint64x2_t) __builtin_aarch64_cmgev2di (__b, __a);
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vcleq_u8 (uint8x16_t __a, uint8x16_t __b)
-{
- return (uint8x16_t) __builtin_aarch64_cmhsv16qi ((int8x16_t) __b,
- (int8x16_t) __a);
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vcleq_u16 (uint16x8_t __a, uint16x8_t __b)
-{
- return (uint16x8_t) __builtin_aarch64_cmhsv8hi ((int16x8_t) __b,
- (int16x8_t) __a);
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vcleq_u32 (uint32x4_t __a, uint32x4_t __b)
-{
- return (uint32x4_t) __builtin_aarch64_cmhsv4si ((int32x4_t) __b,
- (int32x4_t) __a);
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vcleq_u64 (uint64x2_t __a, uint64x2_t __b)
-{
- return (uint64x2_t) __builtin_aarch64_cmhsv2di ((int64x2_t) __b,
- (int64x2_t) __a);
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vcled_s64 (int64x1_t __a, int64x1_t __b)
-{
- return (uint64x1_t) __builtin_aarch64_cmgedi (__b, __a);
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vclezd_s64 (int64x1_t __a)
-{
- return (uint64x1_t) __builtin_aarch64_cmledi (__a, 0);
-}
-
-/* vclt */
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vclt_s8 (int8x8_t __a, int8x8_t __b)
-{
- return (uint8x8_t) __builtin_aarch64_cmgtv8qi (__b, __a);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vclt_s16 (int16x4_t __a, int16x4_t __b)
-{
- return (uint16x4_t) __builtin_aarch64_cmgtv4hi (__b, __a);
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vclt_s32 (int32x2_t __a, int32x2_t __b)
-{
- return (uint32x2_t) __builtin_aarch64_cmgtv2si (__b, __a);
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vclt_s64 (int64x1_t __a, int64x1_t __b)
-{
- return (uint64x1_t) __builtin_aarch64_cmgtdi (__b, __a);
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vclt_u8 (uint8x8_t __a, uint8x8_t __b)
-{
- return (uint8x8_t) __builtin_aarch64_cmhiv8qi ((int8x8_t) __b,
- (int8x8_t) __a);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vclt_u16 (uint16x4_t __a, uint16x4_t __b)
-{
- return (uint16x4_t) __builtin_aarch64_cmhiv4hi ((int16x4_t) __b,
- (int16x4_t) __a);
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vclt_u32 (uint32x2_t __a, uint32x2_t __b)
-{
- return (uint32x2_t) __builtin_aarch64_cmhiv2si ((int32x2_t) __b,
- (int32x2_t) __a);
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vclt_u64 (uint64x1_t __a, uint64x1_t __b)
-{
- return (uint64x1_t) __builtin_aarch64_cmhidi ((int64x1_t) __b,
- (int64x1_t) __a);
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vcltq_s8 (int8x16_t __a, int8x16_t __b)
-{
- return (uint8x16_t) __builtin_aarch64_cmgtv16qi (__b, __a);
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vcltq_s16 (int16x8_t __a, int16x8_t __b)
-{
- return (uint16x8_t) __builtin_aarch64_cmgtv8hi (__b, __a);
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vcltq_s32 (int32x4_t __a, int32x4_t __b)
-{
- return (uint32x4_t) __builtin_aarch64_cmgtv4si (__b, __a);
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vcltq_s64 (int64x2_t __a, int64x2_t __b)
-{
- return (uint64x2_t) __builtin_aarch64_cmgtv2di (__b, __a);
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vcltq_u8 (uint8x16_t __a, uint8x16_t __b)
-{
- return (uint8x16_t) __builtin_aarch64_cmhiv16qi ((int8x16_t) __b,
- (int8x16_t) __a);
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vcltq_u16 (uint16x8_t __a, uint16x8_t __b)
-{
- return (uint16x8_t) __builtin_aarch64_cmhiv8hi ((int16x8_t) __b,
- (int16x8_t) __a);
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vcltq_u32 (uint32x4_t __a, uint32x4_t __b)
-{
- return (uint32x4_t) __builtin_aarch64_cmhiv4si ((int32x4_t) __b,
- (int32x4_t) __a);
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vcltq_u64 (uint64x2_t __a, uint64x2_t __b)
-{
- return (uint64x2_t) __builtin_aarch64_cmhiv2di ((int64x2_t) __b,
- (int64x2_t) __a);
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vcltd_s64 (int64x1_t __a, int64x1_t __b)
-{
- return (uint64x1_t) __builtin_aarch64_cmgtdi (__b, __a);
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vcltzd_s64 (int64x1_t __a)
-{
- return (uint64x1_t) __builtin_aarch64_cmltdi (__a, 0);
-}
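
/* A minimal sketch of the ordered comparisons above (hypothetical
   helper): as the definitions show, vcle/vclt are expressed by
   swapping the operands of the cmge/cmgt builtins, so a < b is
   evaluated as b > a and again yields an all-ones/all-zeros mask
   per lane.  */
static uint8x8_t
example_vclt_usage (int8x8_t a, int8x8_t b)
{
  return vclt_s8 (a, b);
}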
-
-/* vdup */
-
-__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
-vdupb_lane_s8 (int8x16_t a, int const b)
-{
- return __builtin_aarch64_dup_laneqi (a, b);
-}
-
-__extension__ static __inline uint8x1_t __attribute__ ((__always_inline__))
-vdupb_lane_u8 (uint8x16_t a, int const b)
-{
- return (uint8x1_t) __builtin_aarch64_dup_laneqi ((int8x16_t) a, b);
-}
-
-__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
-vduph_lane_s16 (int16x8_t a, int const b)
-{
- return __builtin_aarch64_dup_lanehi (a, b);
-}
-
-__extension__ static __inline uint16x1_t __attribute__ ((__always_inline__))
-vduph_lane_u16 (uint16x8_t a, int const b)
-{
- return (uint16x1_t) __builtin_aarch64_dup_lanehi ((int16x8_t) a, b);
-}
-
-__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
-vdups_lane_s32 (int32x4_t a, int const b)
-{
- return __builtin_aarch64_dup_lanesi (a, b);
-}
-
-__extension__ static __inline uint32x1_t __attribute__ ((__always_inline__))
-vdups_lane_u32 (uint32x4_t a, int const b)
-{
- return (uint32x1_t) __builtin_aarch64_dup_lanesi ((int32x4_t) a, b);
-}
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vdupd_lane_s64 (int64x2_t a, int const b)
-{
- return __builtin_aarch64_dup_lanedi (a, b);
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vdupd_lane_u64 (uint64x2_t a, int const b)
-{
- return (uint64x1_t) __builtin_aarch64_dup_lanedi ((int64x2_t) a, b);
-}
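
/* A minimal sketch of the vdup*_lane_* scalar extracts above
   (hypothetical helper): the lane index is expected to be a
   compile-time constant, and the selected lane is returned as a
   scalar value.  */
static int32x1_t
example_vdups_lane_usage (int32x4_t v)
{
  return vdups_lane_s32 (v, 1);
}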
-
-/* vldn */
-
-__extension__ static __inline int64x1x2_t __attribute__ ((__always_inline__))
-vld2_s64 (const int64_t * __a)
-{
- int64x1x2_t ret;
- __builtin_aarch64_simd_oi __o;
- __o = __builtin_aarch64_ld2di ((const __builtin_aarch64_simd_di *) __a);
- ret.val[0] = (int64x1_t) __builtin_aarch64_get_dregoidi (__o, 0);
- ret.val[1] = (int64x1_t) __builtin_aarch64_get_dregoidi (__o, 1);
- return ret;
-}
-
-__extension__ static __inline uint64x1x2_t __attribute__ ((__always_inline__))
-vld2_u64 (const uint64_t * __a)
-{
- uint64x1x2_t ret;
- __builtin_aarch64_simd_oi __o;
- __o = __builtin_aarch64_ld2di ((const __builtin_aarch64_simd_di *) __a);
- ret.val[0] = (uint64x1_t) __builtin_aarch64_get_dregoidi (__o, 0);
- ret.val[1] = (uint64x1_t) __builtin_aarch64_get_dregoidi (__o, 1);
- return ret;
-}
-
-__extension__ static __inline float64x1x2_t __attribute__ ((__always_inline__))
-vld2_f64 (const float64_t * __a)
-{
- float64x1x2_t ret;
- __builtin_aarch64_simd_oi __o;
- __o = __builtin_aarch64_ld2df ((const __builtin_aarch64_simd_df *) __a);
- ret.val[0] = (float64x1_t) __builtin_aarch64_get_dregoidf (__o, 0);
- ret.val[1] = (float64x1_t) __builtin_aarch64_get_dregoidf (__o, 1);
- return ret;
-}
-
-__extension__ static __inline int8x8x2_t __attribute__ ((__always_inline__))
-vld2_s8 (const int8_t * __a)
-{
- int8x8x2_t ret;
- __builtin_aarch64_simd_oi __o;
- __o = __builtin_aarch64_ld2v8qi ((const __builtin_aarch64_simd_qi *) __a);
- ret.val[0] = (int8x8_t) __builtin_aarch64_get_dregoiv8qi (__o, 0);
- ret.val[1] = (int8x8_t) __builtin_aarch64_get_dregoiv8qi (__o, 1);
- return ret;
-}
-
-__extension__ static __inline poly8x8x2_t __attribute__ ((__always_inline__))
-vld2_p8 (const poly8_t * __a)
-{
- poly8x8x2_t ret;
- __builtin_aarch64_simd_oi __o;
- __o = __builtin_aarch64_ld2v8qi ((const __builtin_aarch64_simd_qi *) __a);
- ret.val[0] = (poly8x8_t) __builtin_aarch64_get_dregoiv8qi (__o, 0);
- ret.val[1] = (poly8x8_t) __builtin_aarch64_get_dregoiv8qi (__o, 1);
- return ret;
-}
-
-__extension__ static __inline int16x4x2_t __attribute__ ((__always_inline__))
-vld2_s16 (const int16_t * __a)
-{
- int16x4x2_t ret;
- __builtin_aarch64_simd_oi __o;
- __o = __builtin_aarch64_ld2v4hi ((const __builtin_aarch64_simd_hi *) __a);
- ret.val[0] = (int16x4_t) __builtin_aarch64_get_dregoiv4hi (__o, 0);
- ret.val[1] = (int16x4_t) __builtin_aarch64_get_dregoiv4hi (__o, 1);
- return ret;
-}
-
-__extension__ static __inline poly16x4x2_t __attribute__ ((__always_inline__))
-vld2_p16 (const poly16_t * __a)
-{
- poly16x4x2_t ret;
- __builtin_aarch64_simd_oi __o;
- __o = __builtin_aarch64_ld2v4hi ((const __builtin_aarch64_simd_hi *) __a);
- ret.val[0] = (poly16x4_t) __builtin_aarch64_get_dregoiv4hi (__o, 0);
- ret.val[1] = (poly16x4_t) __builtin_aarch64_get_dregoiv4hi (__o, 1);
- return ret;
-}
-
-__extension__ static __inline int32x2x2_t __attribute__ ((__always_inline__))
-vld2_s32 (const int32_t * __a)
-{
- int32x2x2_t ret;
- __builtin_aarch64_simd_oi __o;
- __o = __builtin_aarch64_ld2v2si ((const __builtin_aarch64_simd_si *) __a);
- ret.val[0] = (int32x2_t) __builtin_aarch64_get_dregoiv2si (__o, 0);
- ret.val[1] = (int32x2_t) __builtin_aarch64_get_dregoiv2si (__o, 1);
- return ret;
-}
-
-__extension__ static __inline uint8x8x2_t __attribute__ ((__always_inline__))
-vld2_u8 (const uint8_t * __a)
-{
- uint8x8x2_t ret;
- __builtin_aarch64_simd_oi __o;
- __o = __builtin_aarch64_ld2v8qi ((const __builtin_aarch64_simd_qi *) __a);
- ret.val[0] = (uint8x8_t) __builtin_aarch64_get_dregoiv8qi (__o, 0);
- ret.val[1] = (uint8x8_t) __builtin_aarch64_get_dregoiv8qi (__o, 1);
- return ret;
-}
-
-__extension__ static __inline uint16x4x2_t __attribute__ ((__always_inline__))
-vld2_u16 (const uint16_t * __a)
-{
- uint16x4x2_t ret;
- __builtin_aarch64_simd_oi __o;
- __o = __builtin_aarch64_ld2v4hi ((const __builtin_aarch64_simd_hi *) __a);
- ret.val[0] = (uint16x4_t) __builtin_aarch64_get_dregoiv4hi (__o, 0);
- ret.val[1] = (uint16x4_t) __builtin_aarch64_get_dregoiv4hi (__o, 1);
- return ret;
-}
-
-__extension__ static __inline uint32x2x2_t __attribute__ ((__always_inline__))
-vld2_u32 (const uint32_t * __a)
-{
- uint32x2x2_t ret;
- __builtin_aarch64_simd_oi __o;
- __o = __builtin_aarch64_ld2v2si ((const __builtin_aarch64_simd_si *) __a);
- ret.val[0] = (uint32x2_t) __builtin_aarch64_get_dregoiv2si (__o, 0);
- ret.val[1] = (uint32x2_t) __builtin_aarch64_get_dregoiv2si (__o, 1);
- return ret;
-}
-
-__extension__ static __inline float32x2x2_t __attribute__ ((__always_inline__))
-vld2_f32 (const float32_t * __a)
-{
- float32x2x2_t ret;
- __builtin_aarch64_simd_oi __o;
- __o = __builtin_aarch64_ld2v2sf ((const __builtin_aarch64_simd_sf *) __a);
- ret.val[0] = (float32x2_t) __builtin_aarch64_get_dregoiv2sf (__o, 0);
- ret.val[1] = (float32x2_t) __builtin_aarch64_get_dregoiv2sf (__o, 1);
- return ret;
-}
-
-__extension__ static __inline int8x16x2_t __attribute__ ((__always_inline__))
-vld2q_s8 (const int8_t * __a)
-{
- int8x16x2_t ret;
- __builtin_aarch64_simd_oi __o;
- __o = __builtin_aarch64_ld2v16qi ((const __builtin_aarch64_simd_qi *) __a);
- ret.val[0] = (int8x16_t) __builtin_aarch64_get_qregoiv16qi (__o, 0);
- ret.val[1] = (int8x16_t) __builtin_aarch64_get_qregoiv16qi (__o, 1);
- return ret;
-}
-
-__extension__ static __inline poly8x16x2_t __attribute__ ((__always_inline__))
-vld2q_p8 (const poly8_t * __a)
-{
- poly8x16x2_t ret;
- __builtin_aarch64_simd_oi __o;
- __o = __builtin_aarch64_ld2v16qi ((const __builtin_aarch64_simd_qi *) __a);
- ret.val[0] = (poly8x16_t) __builtin_aarch64_get_qregoiv16qi (__o, 0);
- ret.val[1] = (poly8x16_t) __builtin_aarch64_get_qregoiv16qi (__o, 1);
- return ret;
-}
-
-__extension__ static __inline int16x8x2_t __attribute__ ((__always_inline__))
-vld2q_s16 (const int16_t * __a)
-{
- int16x8x2_t ret;
- __builtin_aarch64_simd_oi __o;
- __o = __builtin_aarch64_ld2v8hi ((const __builtin_aarch64_simd_hi *) __a);
- ret.val[0] = (int16x8_t) __builtin_aarch64_get_qregoiv8hi (__o, 0);
- ret.val[1] = (int16x8_t) __builtin_aarch64_get_qregoiv8hi (__o, 1);
- return ret;
-}
-
-__extension__ static __inline poly16x8x2_t __attribute__ ((__always_inline__))
-vld2q_p16 (const poly16_t * __a)
-{
- poly16x8x2_t ret;
- __builtin_aarch64_simd_oi __o;
- __o = __builtin_aarch64_ld2v8hi ((const __builtin_aarch64_simd_hi *) __a);
- ret.val[0] = (poly16x8_t) __builtin_aarch64_get_qregoiv8hi (__o, 0);
- ret.val[1] = (poly16x8_t) __builtin_aarch64_get_qregoiv8hi (__o, 1);
- return ret;
-}
-
-__extension__ static __inline int32x4x2_t __attribute__ ((__always_inline__))
-vld2q_s32 (const int32_t * __a)
-{
- int32x4x2_t ret;
- __builtin_aarch64_simd_oi __o;
- __o = __builtin_aarch64_ld2v4si ((const __builtin_aarch64_simd_si *) __a);
- ret.val[0] = (int32x4_t) __builtin_aarch64_get_qregoiv4si (__o, 0);
- ret.val[1] = (int32x4_t) __builtin_aarch64_get_qregoiv4si (__o, 1);
- return ret;
-}
-
-__extension__ static __inline int64x2x2_t __attribute__ ((__always_inline__))
-vld2q_s64 (const int64_t * __a)
-{
- int64x2x2_t ret;
- __builtin_aarch64_simd_oi __o;
- __o = __builtin_aarch64_ld2v2di ((const __builtin_aarch64_simd_di *) __a);
- ret.val[0] = (int64x2_t) __builtin_aarch64_get_qregoiv2di (__o, 0);
- ret.val[1] = (int64x2_t) __builtin_aarch64_get_qregoiv2di (__o, 1);
- return ret;
-}
-
-__extension__ static __inline uint8x16x2_t __attribute__ ((__always_inline__))
-vld2q_u8 (const uint8_t * __a)
-{
- uint8x16x2_t ret;
- __builtin_aarch64_simd_oi __o;
- __o = __builtin_aarch64_ld2v16qi ((const __builtin_aarch64_simd_qi *) __a);
- ret.val[0] = (uint8x16_t) __builtin_aarch64_get_qregoiv16qi (__o, 0);
- ret.val[1] = (uint8x16_t) __builtin_aarch64_get_qregoiv16qi (__o, 1);
- return ret;
-}
-
-__extension__ static __inline uint16x8x2_t __attribute__ ((__always_inline__))
-vld2q_u16 (const uint16_t * __a)
-{
- uint16x8x2_t ret;
- __builtin_aarch64_simd_oi __o;
- __o = __builtin_aarch64_ld2v8hi ((const __builtin_aarch64_simd_hi *) __a);
- ret.val[0] = (uint16x8_t) __builtin_aarch64_get_qregoiv8hi (__o, 0);
- ret.val[1] = (uint16x8_t) __builtin_aarch64_get_qregoiv8hi (__o, 1);
- return ret;
-}
-
-__extension__ static __inline uint32x4x2_t __attribute__ ((__always_inline__))
-vld2q_u32 (const uint32_t * __a)
-{
- uint32x4x2_t ret;
- __builtin_aarch64_simd_oi __o;
- __o = __builtin_aarch64_ld2v4si ((const __builtin_aarch64_simd_si *) __a);
- ret.val[0] = (uint32x4_t) __builtin_aarch64_get_qregoiv4si (__o, 0);
- ret.val[1] = (uint32x4_t) __builtin_aarch64_get_qregoiv4si (__o, 1);
- return ret;
-}
-
-__extension__ static __inline uint64x2x2_t __attribute__ ((__always_inline__))
-vld2q_u64 (const uint64_t * __a)
-{
- uint64x2x2_t ret;
- __builtin_aarch64_simd_oi __o;
- __o = __builtin_aarch64_ld2v2di ((const __builtin_aarch64_simd_di *) __a);
- ret.val[0] = (uint64x2_t) __builtin_aarch64_get_qregoiv2di (__o, 0);
- ret.val[1] = (uint64x2_t) __builtin_aarch64_get_qregoiv2di (__o, 1);
- return ret;
-}
-
-__extension__ static __inline float32x4x2_t __attribute__ ((__always_inline__))
-vld2q_f32 (const float32_t * __a)
-{
- float32x4x2_t ret;
- __builtin_aarch64_simd_oi __o;
- __o = __builtin_aarch64_ld2v4sf ((const __builtin_aarch64_simd_sf *) __a);
- ret.val[0] = (float32x4_t) __builtin_aarch64_get_qregoiv4sf (__o, 0);
- ret.val[1] = (float32x4_t) __builtin_aarch64_get_qregoiv4sf (__o, 1);
- return ret;
-}
-
-__extension__ static __inline float64x2x2_t __attribute__ ((__always_inline__))
-vld2q_f64 (const float64_t * __a)
-{
- float64x2x2_t ret;
- __builtin_aarch64_simd_oi __o;
- __o = __builtin_aarch64_ld2v2df ((const __builtin_aarch64_simd_df *) __a);
- ret.val[0] = (float64x2_t) __builtin_aarch64_get_qregoiv2df (__o, 0);
- ret.val[1] = (float64x2_t) __builtin_aarch64_get_qregoiv2df (__o, 1);
- return ret;
-}
-
-__extension__ static __inline int64x1x3_t __attribute__ ((__always_inline__))
-vld3_s64 (const int64_t * __a)
-{
- int64x1x3_t ret;
- __builtin_aarch64_simd_ci __o;
- __o = __builtin_aarch64_ld3di ((const __builtin_aarch64_simd_di *) __a);
- ret.val[0] = (int64x1_t) __builtin_aarch64_get_dregcidi (__o, 0);
- ret.val[1] = (int64x1_t) __builtin_aarch64_get_dregcidi (__o, 1);
- ret.val[2] = (int64x1_t) __builtin_aarch64_get_dregcidi (__o, 2);
- return ret;
-}
-
-__extension__ static __inline uint64x1x3_t __attribute__ ((__always_inline__))
-vld3_u64 (const uint64_t * __a)
-{
- uint64x1x3_t ret;
- __builtin_aarch64_simd_ci __o;
- __o = __builtin_aarch64_ld3di ((const __builtin_aarch64_simd_di *) __a);
- ret.val[0] = (uint64x1_t) __builtin_aarch64_get_dregcidi (__o, 0);
- ret.val[1] = (uint64x1_t) __builtin_aarch64_get_dregcidi (__o, 1);
- ret.val[2] = (uint64x1_t) __builtin_aarch64_get_dregcidi (__o, 2);
- return ret;
-}
-
-__extension__ static __inline float64x1x3_t __attribute__ ((__always_inline__))
-vld3_f64 (const float64_t * __a)
-{
- float64x1x3_t ret;
- __builtin_aarch64_simd_ci __o;
- __o = __builtin_aarch64_ld3df ((const __builtin_aarch64_simd_df *) __a);
- ret.val[0] = (float64x1_t) __builtin_aarch64_get_dregcidf (__o, 0);
- ret.val[1] = (float64x1_t) __builtin_aarch64_get_dregcidf (__o, 1);
- ret.val[2] = (float64x1_t) __builtin_aarch64_get_dregcidf (__o, 2);
- return ret;
-}
-
-__extension__ static __inline int8x8x3_t __attribute__ ((__always_inline__))
-vld3_s8 (const int8_t * __a)
-{
- int8x8x3_t ret;
- __builtin_aarch64_simd_ci __o;
- __o = __builtin_aarch64_ld3v8qi ((const __builtin_aarch64_simd_qi *) __a);
- ret.val[0] = (int8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 0);
- ret.val[1] = (int8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 1);
- ret.val[2] = (int8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 2);
- return ret;
-}
-
-__extension__ static __inline poly8x8x3_t __attribute__ ((__always_inline__))
-vld3_p8 (const poly8_t * __a)
-{
- poly8x8x3_t ret;
- __builtin_aarch64_simd_ci __o;
- __o = __builtin_aarch64_ld3v8qi ((const __builtin_aarch64_simd_qi *) __a);
- ret.val[0] = (poly8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 0);
- ret.val[1] = (poly8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 1);
- ret.val[2] = (poly8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 2);
- return ret;
-}
-
-__extension__ static __inline int16x4x3_t __attribute__ ((__always_inline__))
-vld3_s16 (const int16_t * __a)
-{
- int16x4x3_t ret;
- __builtin_aarch64_simd_ci __o;
- __o = __builtin_aarch64_ld3v4hi ((const __builtin_aarch64_simd_hi *) __a);
- ret.val[0] = (int16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 0);
- ret.val[1] = (int16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 1);
- ret.val[2] = (int16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 2);
- return ret;
-}
-
-__extension__ static __inline poly16x4x3_t __attribute__ ((__always_inline__))
-vld3_p16 (const poly16_t * __a)
-{
- poly16x4x3_t ret;
- __builtin_aarch64_simd_ci __o;
- __o = __builtin_aarch64_ld3v4hi ((const __builtin_aarch64_simd_hi *) __a);
- ret.val[0] = (poly16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 0);
- ret.val[1] = (poly16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 1);
- ret.val[2] = (poly16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 2);
- return ret;
-}
-
-__extension__ static __inline int32x2x3_t __attribute__ ((__always_inline__))
-vld3_s32 (const int32_t * __a)
-{
- int32x2x3_t ret;
- __builtin_aarch64_simd_ci __o;
- __o = __builtin_aarch64_ld3v2si ((const __builtin_aarch64_simd_si *) __a);
- ret.val[0] = (int32x2_t) __builtin_aarch64_get_dregciv2si (__o, 0);
- ret.val[1] = (int32x2_t) __builtin_aarch64_get_dregciv2si (__o, 1);
- ret.val[2] = (int32x2_t) __builtin_aarch64_get_dregciv2si (__o, 2);
- return ret;
-}
-
-__extension__ static __inline uint8x8x3_t __attribute__ ((__always_inline__))
-vld3_u8 (const uint8_t * __a)
-{
- uint8x8x3_t ret;
- __builtin_aarch64_simd_ci __o;
- __o = __builtin_aarch64_ld3v8qi ((const __builtin_aarch64_simd_qi *) __a);
- ret.val[0] = (uint8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 0);
- ret.val[1] = (uint8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 1);
- ret.val[2] = (uint8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 2);
- return ret;
-}
-
-__extension__ static __inline uint16x4x3_t __attribute__ ((__always_inline__))
-vld3_u16 (const uint16_t * __a)
-{
- uint16x4x3_t ret;
- __builtin_aarch64_simd_ci __o;
- __o = __builtin_aarch64_ld3v4hi ((const __builtin_aarch64_simd_hi *) __a);
- ret.val[0] = (uint16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 0);
- ret.val[1] = (uint16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 1);
- ret.val[2] = (uint16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 2);
- return ret;
-}
-
-__extension__ static __inline uint32x2x3_t __attribute__ ((__always_inline__))
-vld3_u32 (const uint32_t * __a)
-{
- uint32x2x3_t ret;
- __builtin_aarch64_simd_ci __o;
- __o = __builtin_aarch64_ld3v2si ((const __builtin_aarch64_simd_si *) __a);
- ret.val[0] = (uint32x2_t) __builtin_aarch64_get_dregciv2si (__o, 0);
- ret.val[1] = (uint32x2_t) __builtin_aarch64_get_dregciv2si (__o, 1);
- ret.val[2] = (uint32x2_t) __builtin_aarch64_get_dregciv2si (__o, 2);
- return ret;
-}
-
-__extension__ static __inline float32x2x3_t __attribute__ ((__always_inline__))
-vld3_f32 (const float32_t * __a)
-{
- float32x2x3_t ret;
- __builtin_aarch64_simd_ci __o;
- __o = __builtin_aarch64_ld3v2sf ((const __builtin_aarch64_simd_sf *) __a);
- ret.val[0] = (float32x2_t) __builtin_aarch64_get_dregciv2sf (__o, 0);
- ret.val[1] = (float32x2_t) __builtin_aarch64_get_dregciv2sf (__o, 1);
- ret.val[2] = (float32x2_t) __builtin_aarch64_get_dregciv2sf (__o, 2);
- return ret;
-}
-
-__extension__ static __inline int8x16x3_t __attribute__ ((__always_inline__))
-vld3q_s8 (const int8_t * __a)
-{
- int8x16x3_t ret;
- __builtin_aarch64_simd_ci __o;
- __o = __builtin_aarch64_ld3v16qi ((const __builtin_aarch64_simd_qi *) __a);
- ret.val[0] = (int8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 0);
- ret.val[1] = (int8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 1);
- ret.val[2] = (int8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 2);
- return ret;
-}
-
-__extension__ static __inline poly8x16x3_t __attribute__ ((__always_inline__))
-vld3q_p8 (const poly8_t * __a)
-{
- poly8x16x3_t ret;
- __builtin_aarch64_simd_ci __o;
- __o = __builtin_aarch64_ld3v16qi ((const __builtin_aarch64_simd_qi *) __a);
- ret.val[0] = (poly8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 0);
- ret.val[1] = (poly8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 1);
- ret.val[2] = (poly8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 2);
- return ret;
-}
-
-__extension__ static __inline int16x8x3_t __attribute__ ((__always_inline__))
-vld3q_s16 (const int16_t * __a)
-{
- int16x8x3_t ret;
- __builtin_aarch64_simd_ci __o;
- __o = __builtin_aarch64_ld3v8hi ((const __builtin_aarch64_simd_hi *) __a);
- ret.val[0] = (int16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 0);
- ret.val[1] = (int16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 1);
- ret.val[2] = (int16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 2);
- return ret;
-}
-
-__extension__ static __inline poly16x8x3_t __attribute__ ((__always_inline__))
-vld3q_p16 (const poly16_t * __a)
-{
- poly16x8x3_t ret;
- __builtin_aarch64_simd_ci __o;
- __o = __builtin_aarch64_ld3v8hi ((const __builtin_aarch64_simd_hi *) __a);
- ret.val[0] = (poly16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 0);
- ret.val[1] = (poly16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 1);
- ret.val[2] = (poly16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 2);
- return ret;
-}
-
-__extension__ static __inline int32x4x3_t __attribute__ ((__always_inline__))
-vld3q_s32 (const int32_t * __a)
-{
- int32x4x3_t ret;
- __builtin_aarch64_simd_ci __o;
- __o = __builtin_aarch64_ld3v4si ((const __builtin_aarch64_simd_si *) __a);
- ret.val[0] = (int32x4_t) __builtin_aarch64_get_qregciv4si (__o, 0);
- ret.val[1] = (int32x4_t) __builtin_aarch64_get_qregciv4si (__o, 1);
- ret.val[2] = (int32x4_t) __builtin_aarch64_get_qregciv4si (__o, 2);
- return ret;
-}
-
-__extension__ static __inline int64x2x3_t __attribute__ ((__always_inline__))
-vld3q_s64 (const int64_t * __a)
-{
- int64x2x3_t ret;
- __builtin_aarch64_simd_ci __o;
- __o = __builtin_aarch64_ld3v2di ((const __builtin_aarch64_simd_di *) __a);
- ret.val[0] = (int64x2_t) __builtin_aarch64_get_qregciv2di (__o, 0);
- ret.val[1] = (int64x2_t) __builtin_aarch64_get_qregciv2di (__o, 1);
- ret.val[2] = (int64x2_t) __builtin_aarch64_get_qregciv2di (__o, 2);
- return ret;
-}
-
-__extension__ static __inline uint8x16x3_t __attribute__ ((__always_inline__))
-vld3q_u8 (const uint8_t * __a)
-{
- uint8x16x3_t ret;
- __builtin_aarch64_simd_ci __o;
- __o = __builtin_aarch64_ld3v16qi ((const __builtin_aarch64_simd_qi *) __a);
- ret.val[0] = (uint8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 0);
- ret.val[1] = (uint8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 1);
- ret.val[2] = (uint8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 2);
- return ret;
-}
-
-__extension__ static __inline uint16x8x3_t __attribute__ ((__always_inline__))
-vld3q_u16 (const uint16_t * __a)
-{
- uint16x8x3_t ret;
- __builtin_aarch64_simd_ci __o;
- __o = __builtin_aarch64_ld3v8hi ((const __builtin_aarch64_simd_hi *) __a);
- ret.val[0] = (uint16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 0);
- ret.val[1] = (uint16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 1);
- ret.val[2] = (uint16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 2);
- return ret;
-}
-
-__extension__ static __inline uint32x4x3_t __attribute__ ((__always_inline__))
-vld3q_u32 (const uint32_t * __a)
-{
- uint32x4x3_t ret;
- __builtin_aarch64_simd_ci __o;
- __o = __builtin_aarch64_ld3v4si ((const __builtin_aarch64_simd_si *) __a);
- ret.val[0] = (uint32x4_t) __builtin_aarch64_get_qregciv4si (__o, 0);
- ret.val[1] = (uint32x4_t) __builtin_aarch64_get_qregciv4si (__o, 1);
- ret.val[2] = (uint32x4_t) __builtin_aarch64_get_qregciv4si (__o, 2);
- return ret;
-}
-
-__extension__ static __inline uint64x2x3_t __attribute__ ((__always_inline__))
-vld3q_u64 (const uint64_t * __a)
-{
- uint64x2x3_t ret;
- __builtin_aarch64_simd_ci __o;
- __o = __builtin_aarch64_ld3v2di ((const __builtin_aarch64_simd_di *) __a);
- ret.val[0] = (uint64x2_t) __builtin_aarch64_get_qregciv2di (__o, 0);
- ret.val[1] = (uint64x2_t) __builtin_aarch64_get_qregciv2di (__o, 1);
- ret.val[2] = (uint64x2_t) __builtin_aarch64_get_qregciv2di (__o, 2);
- return ret;
-}
-
-__extension__ static __inline float32x4x3_t __attribute__ ((__always_inline__))
-vld3q_f32 (const float32_t * __a)
-{
- float32x4x3_t ret;
- __builtin_aarch64_simd_ci __o;
- __o = __builtin_aarch64_ld3v4sf ((const __builtin_aarch64_simd_sf *) __a);
- ret.val[0] = (float32x4_t) __builtin_aarch64_get_qregciv4sf (__o, 0);
- ret.val[1] = (float32x4_t) __builtin_aarch64_get_qregciv4sf (__o, 1);
- ret.val[2] = (float32x4_t) __builtin_aarch64_get_qregciv4sf (__o, 2);
- return ret;
-}
-
-__extension__ static __inline float64x2x3_t __attribute__ ((__always_inline__))
-vld3q_f64 (const float64_t * __a)
-{
- float64x2x3_t ret;
- __builtin_aarch64_simd_ci __o;
- __o = __builtin_aarch64_ld3v2df ((const __builtin_aarch64_simd_df *) __a);
- ret.val[0] = (float64x2_t) __builtin_aarch64_get_qregciv2df (__o, 0);
- ret.val[1] = (float64x2_t) __builtin_aarch64_get_qregciv2df (__o, 1);
- ret.val[2] = (float64x2_t) __builtin_aarch64_get_qregciv2df (__o, 2);
- return ret;
-}
-
-__extension__ static __inline int64x1x4_t __attribute__ ((__always_inline__))
-vld4_s64 (const int64_t * __a)
-{
- int64x1x4_t ret;
- __builtin_aarch64_simd_xi __o;
- __o = __builtin_aarch64_ld4di ((const __builtin_aarch64_simd_di *) __a);
- ret.val[0] = (int64x1_t) __builtin_aarch64_get_dregxidi (__o, 0);
- ret.val[1] = (int64x1_t) __builtin_aarch64_get_dregxidi (__o, 1);
- ret.val[2] = (int64x1_t) __builtin_aarch64_get_dregxidi (__o, 2);
- ret.val[3] = (int64x1_t) __builtin_aarch64_get_dregxidi (__o, 3);
- return ret;
-}
-
-__extension__ static __inline uint64x1x4_t __attribute__ ((__always_inline__))
-vld4_u64 (const uint64_t * __a)
-{
- uint64x1x4_t ret;
- __builtin_aarch64_simd_xi __o;
- __o = __builtin_aarch64_ld4di ((const __builtin_aarch64_simd_di *) __a);
- ret.val[0] = (uint64x1_t) __builtin_aarch64_get_dregxidi (__o, 0);
- ret.val[1] = (uint64x1_t) __builtin_aarch64_get_dregxidi (__o, 1);
- ret.val[2] = (uint64x1_t) __builtin_aarch64_get_dregxidi (__o, 2);
- ret.val[3] = (uint64x1_t) __builtin_aarch64_get_dregxidi (__o, 3);
- return ret;
-}
-
-__extension__ static __inline float64x1x4_t __attribute__ ((__always_inline__))
-vld4_f64 (const float64_t * __a)
-{
- float64x1x4_t ret;
- __builtin_aarch64_simd_xi __o;
- __o = __builtin_aarch64_ld4df ((const __builtin_aarch64_simd_df *) __a);
- ret.val[0] = (float64x1_t) __builtin_aarch64_get_dregxidf (__o, 0);
- ret.val[1] = (float64x1_t) __builtin_aarch64_get_dregxidf (__o, 1);
- ret.val[2] = (float64x1_t) __builtin_aarch64_get_dregxidf (__o, 2);
- ret.val[3] = (float64x1_t) __builtin_aarch64_get_dregxidf (__o, 3);
- return ret;
-}
-
-__extension__ static __inline int8x8x4_t __attribute__ ((__always_inline__))
-vld4_s8 (const int8_t * __a)
-{
- int8x8x4_t ret;
- __builtin_aarch64_simd_xi __o;
- __o = __builtin_aarch64_ld4v8qi ((const __builtin_aarch64_simd_qi *) __a);
- ret.val[0] = (int8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 0);
- ret.val[1] = (int8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 1);
- ret.val[2] = (int8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 2);
- ret.val[3] = (int8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 3);
- return ret;
-}
-
-__extension__ static __inline poly8x8x4_t __attribute__ ((__always_inline__))
-vld4_p8 (const poly8_t * __a)
-{
- poly8x8x4_t ret;
- __builtin_aarch64_simd_xi __o;
- __o = __builtin_aarch64_ld4v8qi ((const __builtin_aarch64_simd_qi *) __a);
- ret.val[0] = (poly8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 0);
- ret.val[1] = (poly8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 1);
- ret.val[2] = (poly8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 2);
- ret.val[3] = (poly8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 3);
- return ret;
-}
-
-__extension__ static __inline int16x4x4_t __attribute__ ((__always_inline__))
-vld4_s16 (const int16_t * __a)
-{
- int16x4x4_t ret;
- __builtin_aarch64_simd_xi __o;
- __o = __builtin_aarch64_ld4v4hi ((const __builtin_aarch64_simd_hi *) __a);
- ret.val[0] = (int16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 0);
- ret.val[1] = (int16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 1);
- ret.val[2] = (int16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 2);
- ret.val[3] = (int16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 3);
- return ret;
-}
-
-__extension__ static __inline poly16x4x4_t __attribute__ ((__always_inline__))
-vld4_p16 (const poly16_t * __a)
-{
- poly16x4x4_t ret;
- __builtin_aarch64_simd_xi __o;
- __o = __builtin_aarch64_ld4v4hi ((const __builtin_aarch64_simd_hi *) __a);
- ret.val[0] = (poly16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 0);
- ret.val[1] = (poly16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 1);
- ret.val[2] = (poly16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 2);
- ret.val[3] = (poly16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 3);
- return ret;
-}
-
-__extension__ static __inline int32x2x4_t __attribute__ ((__always_inline__))
-vld4_s32 (const int32_t * __a)
-{
- int32x2x4_t ret;
- __builtin_aarch64_simd_xi __o;
- __o = __builtin_aarch64_ld4v2si ((const __builtin_aarch64_simd_si *) __a);
- ret.val[0] = (int32x2_t) __builtin_aarch64_get_dregxiv2si (__o, 0);
- ret.val[1] = (int32x2_t) __builtin_aarch64_get_dregxiv2si (__o, 1);
- ret.val[2] = (int32x2_t) __builtin_aarch64_get_dregxiv2si (__o, 2);
- ret.val[3] = (int32x2_t) __builtin_aarch64_get_dregxiv2si (__o, 3);
- return ret;
-}
-
-__extension__ static __inline uint8x8x4_t __attribute__ ((__always_inline__))
-vld4_u8 (const uint8_t * __a)
-{
- uint8x8x4_t ret;
- __builtin_aarch64_simd_xi __o;
- __o = __builtin_aarch64_ld4v8qi ((const __builtin_aarch64_simd_qi *) __a);
- ret.val[0] = (uint8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 0);
- ret.val[1] = (uint8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 1);
- ret.val[2] = (uint8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 2);
- ret.val[3] = (uint8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 3);
- return ret;
-}
-
-__extension__ static __inline uint16x4x4_t __attribute__ ((__always_inline__))
-vld4_u16 (const uint16_t * __a)
-{
- uint16x4x4_t ret;
- __builtin_aarch64_simd_xi __o;
- __o = __builtin_aarch64_ld4v4hi ((const __builtin_aarch64_simd_hi *) __a);
- ret.val[0] = (uint16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 0);
- ret.val[1] = (uint16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 1);
- ret.val[2] = (uint16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 2);
- ret.val[3] = (uint16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 3);
- return ret;
-}
-
-__extension__ static __inline uint32x2x4_t __attribute__ ((__always_inline__))
-vld4_u32 (const uint32_t * __a)
-{
- uint32x2x4_t ret;
- __builtin_aarch64_simd_xi __o;
- __o = __builtin_aarch64_ld4v2si ((const __builtin_aarch64_simd_si *) __a);
- ret.val[0] = (uint32x2_t) __builtin_aarch64_get_dregxiv2si (__o, 0);
- ret.val[1] = (uint32x2_t) __builtin_aarch64_get_dregxiv2si (__o, 1);
- ret.val[2] = (uint32x2_t) __builtin_aarch64_get_dregxiv2si (__o, 2);
- ret.val[3] = (uint32x2_t) __builtin_aarch64_get_dregxiv2si (__o, 3);
- return ret;
-}
-
-__extension__ static __inline float32x2x4_t __attribute__ ((__always_inline__))
-vld4_f32 (const float32_t * __a)
-{
- float32x2x4_t ret;
- __builtin_aarch64_simd_xi __o;
- __o = __builtin_aarch64_ld4v2sf ((const __builtin_aarch64_simd_sf *) __a);
- ret.val[0] = (float32x2_t) __builtin_aarch64_get_dregxiv2sf (__o, 0);
- ret.val[1] = (float32x2_t) __builtin_aarch64_get_dregxiv2sf (__o, 1);
- ret.val[2] = (float32x2_t) __builtin_aarch64_get_dregxiv2sf (__o, 2);
- ret.val[3] = (float32x2_t) __builtin_aarch64_get_dregxiv2sf (__o, 3);
- return ret;
-}
-
-__extension__ static __inline int8x16x4_t __attribute__ ((__always_inline__))
-vld4q_s8 (const int8_t * __a)
-{
- int8x16x4_t ret;
- __builtin_aarch64_simd_xi __o;
- __o = __builtin_aarch64_ld4v16qi ((const __builtin_aarch64_simd_qi *) __a);
- ret.val[0] = (int8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 0);
- ret.val[1] = (int8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 1);
- ret.val[2] = (int8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 2);
- ret.val[3] = (int8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 3);
- return ret;
-}
-
-__extension__ static __inline poly8x16x4_t __attribute__ ((__always_inline__))
-vld4q_p8 (const poly8_t * __a)
-{
- poly8x16x4_t ret;
- __builtin_aarch64_simd_xi __o;
- __o = __builtin_aarch64_ld4v16qi ((const __builtin_aarch64_simd_qi *) __a);
- ret.val[0] = (poly8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 0);
- ret.val[1] = (poly8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 1);
- ret.val[2] = (poly8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 2);
- ret.val[3] = (poly8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 3);
- return ret;
-}
-
-__extension__ static __inline int16x8x4_t __attribute__ ((__always_inline__))
-vld4q_s16 (const int16_t * __a)
-{
- int16x8x4_t ret;
- __builtin_aarch64_simd_xi __o;
- __o = __builtin_aarch64_ld4v8hi ((const __builtin_aarch64_simd_hi *) __a);
- ret.val[0] = (int16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 0);
- ret.val[1] = (int16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 1);
- ret.val[2] = (int16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 2);
- ret.val[3] = (int16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 3);
- return ret;
-}
-
-__extension__ static __inline poly16x8x4_t __attribute__ ((__always_inline__))
-vld4q_p16 (const poly16_t * __a)
-{
- poly16x8x4_t ret;
- __builtin_aarch64_simd_xi __o;
- __o = __builtin_aarch64_ld4v8hi ((const __builtin_aarch64_simd_hi *) __a);
- ret.val[0] = (poly16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 0);
- ret.val[1] = (poly16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 1);
- ret.val[2] = (poly16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 2);
- ret.val[3] = (poly16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 3);
- return ret;
-}
-
-__extension__ static __inline int32x4x4_t __attribute__ ((__always_inline__))
-vld4q_s32 (const int32_t * __a)
-{
- int32x4x4_t ret;
- __builtin_aarch64_simd_xi __o;
- __o = __builtin_aarch64_ld4v4si ((const __builtin_aarch64_simd_si *) __a);
- ret.val[0] = (int32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 0);
- ret.val[1] = (int32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 1);
- ret.val[2] = (int32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 2);
- ret.val[3] = (int32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 3);
- return ret;
-}
-
-__extension__ static __inline int64x2x4_t __attribute__ ((__always_inline__))
-vld4q_s64 (const int64_t * __a)
-{
- int64x2x4_t ret;
- __builtin_aarch64_simd_xi __o;
- __o = __builtin_aarch64_ld4v2di ((const __builtin_aarch64_simd_di *) __a);
- ret.val[0] = (int64x2_t) __builtin_aarch64_get_qregxiv2di (__o, 0);
- ret.val[1] = (int64x2_t) __builtin_aarch64_get_qregxiv2di (__o, 1);
- ret.val[2] = (int64x2_t) __builtin_aarch64_get_qregxiv2di (__o, 2);
- ret.val[3] = (int64x2_t) __builtin_aarch64_get_qregxiv2di (__o, 3);
- return ret;
-}
-
-__extension__ static __inline uint8x16x4_t __attribute__ ((__always_inline__))
-vld4q_u8 (const uint8_t * __a)
-{
- uint8x16x4_t ret;
- __builtin_aarch64_simd_xi __o;
- __o = __builtin_aarch64_ld4v16qi ((const __builtin_aarch64_simd_qi *) __a);
- ret.val[0] = (uint8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 0);
- ret.val[1] = (uint8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 1);
- ret.val[2] = (uint8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 2);
- ret.val[3] = (uint8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 3);
- return ret;
-}
-
-__extension__ static __inline uint16x8x4_t __attribute__ ((__always_inline__))
-vld4q_u16 (const uint16_t * __a)
-{
- uint16x8x4_t ret;
- __builtin_aarch64_simd_xi __o;
- __o = __builtin_aarch64_ld4v8hi ((const __builtin_aarch64_simd_hi *) __a);
- ret.val[0] = (uint16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 0);
- ret.val[1] = (uint16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 1);
- ret.val[2] = (uint16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 2);
- ret.val[3] = (uint16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 3);
- return ret;
-}
-
-__extension__ static __inline uint32x4x4_t __attribute__ ((__always_inline__))
-vld4q_u32 (const uint32_t * __a)
-{
- uint32x4x4_t ret;
- __builtin_aarch64_simd_xi __o;
- __o = __builtin_aarch64_ld4v4si ((const __builtin_aarch64_simd_si *) __a);
- ret.val[0] = (uint32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 0);
- ret.val[1] = (uint32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 1);
- ret.val[2] = (uint32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 2);
- ret.val[3] = (uint32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 3);
- return ret;
-}
-
-__extension__ static __inline uint64x2x4_t __attribute__ ((__always_inline__))
-vld4q_u64 (const uint64_t * __a)
-{
- uint64x2x4_t ret;
- __builtin_aarch64_simd_xi __o;
- __o = __builtin_aarch64_ld4v2di ((const __builtin_aarch64_simd_di *) __a);
- ret.val[0] = (uint64x2_t) __builtin_aarch64_get_qregxiv2di (__o, 0);
- ret.val[1] = (uint64x2_t) __builtin_aarch64_get_qregxiv2di (__o, 1);
- ret.val[2] = (uint64x2_t) __builtin_aarch64_get_qregxiv2di (__o, 2);
- ret.val[3] = (uint64x2_t) __builtin_aarch64_get_qregxiv2di (__o, 3);
- return ret;
-}
-
-__extension__ static __inline float32x4x4_t __attribute__ ((__always_inline__))
-vld4q_f32 (const float32_t * __a)
-{
- float32x4x4_t ret;
- __builtin_aarch64_simd_xi __o;
- __o = __builtin_aarch64_ld4v4sf ((const __builtin_aarch64_simd_sf *) __a);
- ret.val[0] = (float32x4_t) __builtin_aarch64_get_qregxiv4sf (__o, 0);
- ret.val[1] = (float32x4_t) __builtin_aarch64_get_qregxiv4sf (__o, 1);
- ret.val[2] = (float32x4_t) __builtin_aarch64_get_qregxiv4sf (__o, 2);
- ret.val[3] = (float32x4_t) __builtin_aarch64_get_qregxiv4sf (__o, 3);
- return ret;
-}
-
-__extension__ static __inline float64x2x4_t __attribute__ ((__always_inline__))
-vld4q_f64 (const float64_t * __a)
-{
- float64x2x4_t ret;
- __builtin_aarch64_simd_xi __o;
- __o = __builtin_aarch64_ld4v2df ((const __builtin_aarch64_simd_df *) __a);
- ret.val[0] = (float64x2_t) __builtin_aarch64_get_qregxiv2df (__o, 0);
- ret.val[1] = (float64x2_t) __builtin_aarch64_get_qregxiv2df (__o, 1);
- ret.val[2] = (float64x2_t) __builtin_aarch64_get_qregxiv2df (__o, 2);
- ret.val[3] = (float64x2_t) __builtin_aarch64_get_qregxiv2df (__o, 3);
- return ret;
-}
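
/* A minimal sketch of the structured loads above (hypothetical
   helper): vld2q_f32 reads eight consecutive floats and
   de-interleaves them, so ret.val[0] holds elements 0, 2, 4, 6 and
   ret.val[1] holds elements 1, 3, 5, 7 of the source array.  */
static float32x4x2_t
example_vld2q_usage (const float32_t * p)
{
  return vld2q_f32 (p);
}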
-
-/* vmax */
-
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vmax_f32 (float32x2_t __a, float32x2_t __b)
-{
- return __builtin_aarch64_fmaxv2sf (__a, __b);
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vmax_s8 (int8x8_t __a, int8x8_t __b)
-{
- return __builtin_aarch64_smaxv8qi (__a, __b);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vmax_s16 (int16x4_t __a, int16x4_t __b)
-{
- return __builtin_aarch64_smaxv4hi (__a, __b);
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vmax_s32 (int32x2_t __a, int32x2_t __b)
-{
- return __builtin_aarch64_smaxv2si (__a, __b);
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vmax_u8 (uint8x8_t __a, uint8x8_t __b)
-{
- return (uint8x8_t) __builtin_aarch64_umaxv8qi ((int8x8_t) __a,
- (int8x8_t) __b);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vmax_u16 (uint16x4_t __a, uint16x4_t __b)
-{
- return (uint16x4_t) __builtin_aarch64_umaxv4hi ((int16x4_t) __a,
- (int16x4_t) __b);
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vmax_u32 (uint32x2_t __a, uint32x2_t __b)
-{
- return (uint32x2_t) __builtin_aarch64_umaxv2si ((int32x2_t) __a,
- (int32x2_t) __b);
-}
-
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vmaxq_f32 (float32x4_t __a, float32x4_t __b)
-{
- return __builtin_aarch64_fmaxv4sf (__a, __b);
-}
-
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
-vmaxq_f64 (float64x2_t __a, float64x2_t __b)
-{
- return __builtin_aarch64_fmaxv2df (__a, __b);
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vmaxq_s8 (int8x16_t __a, int8x16_t __b)
-{
- return __builtin_aarch64_smaxv16qi (__a, __b);
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vmaxq_s16 (int16x8_t __a, int16x8_t __b)
-{
- return __builtin_aarch64_smaxv8hi (__a, __b);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vmaxq_s32 (int32x4_t __a, int32x4_t __b)
-{
- return __builtin_aarch64_smaxv4si (__a, __b);
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vmaxq_u8 (uint8x16_t __a, uint8x16_t __b)
-{
- return (uint8x16_t) __builtin_aarch64_umaxv16qi ((int8x16_t) __a,
- (int8x16_t) __b);
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vmaxq_u16 (uint16x8_t __a, uint16x8_t __b)
-{
- return (uint16x8_t) __builtin_aarch64_umaxv8hi ((int16x8_t) __a,
- (int16x8_t) __b);
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vmaxq_u32 (uint32x4_t __a, uint32x4_t __b)
-{
- return (uint32x4_t) __builtin_aarch64_umaxv4si ((int32x4_t) __a,
- (int32x4_t) __b);
-}
-
-/* vmin */
-
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vmin_f32 (float32x2_t __a, float32x2_t __b)
-{
- return __builtin_aarch64_fminv2sf (__a, __b);
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vmin_s8 (int8x8_t __a, int8x8_t __b)
-{
- return __builtin_aarch64_sminv8qi (__a, __b);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vmin_s16 (int16x4_t __a, int16x4_t __b)
-{
- return __builtin_aarch64_sminv4hi (__a, __b);
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vmin_s32 (int32x2_t __a, int32x2_t __b)
-{
- return __builtin_aarch64_sminv2si (__a, __b);
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vmin_u8 (uint8x8_t __a, uint8x8_t __b)
-{
- return (uint8x8_t) __builtin_aarch64_uminv8qi ((int8x8_t) __a,
- (int8x8_t) __b);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vmin_u16 (uint16x4_t __a, uint16x4_t __b)
-{
- return (uint16x4_t) __builtin_aarch64_uminv4hi ((int16x4_t) __a,
- (int16x4_t) __b);
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vmin_u32 (uint32x2_t __a, uint32x2_t __b)
-{
- return (uint32x2_t) __builtin_aarch64_uminv2si ((int32x2_t) __a,
- (int32x2_t) __b);
-}
-
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vminq_f32 (float32x4_t __a, float32x4_t __b)
-{
- return __builtin_aarch64_fminv4sf (__a, __b);
-}
-
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
-vminq_f64 (float64x2_t __a, float64x2_t __b)
-{
- return __builtin_aarch64_fminv2df (__a, __b);
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vminq_s8 (int8x16_t __a, int8x16_t __b)
-{
- return __builtin_aarch64_sminv16qi (__a, __b);
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vminq_s16 (int16x8_t __a, int16x8_t __b)
-{
- return __builtin_aarch64_sminv8hi (__a, __b);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vminq_s32 (int32x4_t __a, int32x4_t __b)
-{
- return __builtin_aarch64_sminv4si (__a, __b);
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vminq_u8 (uint8x16_t __a, uint8x16_t __b)
-{
- return (uint8x16_t) __builtin_aarch64_uminv16qi ((int8x16_t) __a,
- (int8x16_t) __b);
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vminq_u16 (uint16x8_t __a, uint16x8_t __b)
-{
- return (uint16x8_t) __builtin_aarch64_uminv8hi ((int16x8_t) __a,
- (int16x8_t) __b);
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vminq_u32 (uint32x4_t __a, uint32x4_t __b)
-{
- return (uint32x4_t) __builtin_aarch64_uminv4si ((int32x4_t) __a,
- (int32x4_t) __b);
-}
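-
-/* Usage sketch (illustrative only, using the vdup_n helpers declared
-   elsewhere in this header): the vmax_* and vmin_* intrinsics above pick
-   the per-lane maximum or minimum of their two operands, e.g.
-
-     float32x2_t __x = vdup_n_f32 (1.0f);
-     float32x2_t __y = vdup_n_f32 (2.0f);
-     float32x2_t __m = vmax_f32 (__x, __y);   every lane of __m is 2.0f
-     float32x2_t __n = vmin_f32 (__x, __y);   every lane of __n is 1.0f  */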
-
-/* vmla */
-
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vmla_f32 (float32x2_t a, float32x2_t b, float32x2_t c)
-{
- return a + b * c;
-}
-
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vmlaq_f32 (float32x4_t a, float32x4_t b, float32x4_t c)
-{
- return a + b * c;
-}
-
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
-vmlaq_f64 (float64x2_t a, float64x2_t b, float64x2_t c)
-{
- return a + b * c;
-}
-
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vmls_f32 (float32x2_t a, float32x2_t b, float32x2_t c)
-{
- return a - b * c;
-}
-
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vmlsq_f32 (float32x4_t a, float32x4_t b, float32x4_t c)
-{
- return a - b * c;
-}
-
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
-vmlsq_f64 (float64x2_t a, float64x2_t b, float64x2_t c)
-{
- return a - b * c;
-}
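-
-/* Usage sketch (illustrative only): vmla_* and vmls_* combine a multiply
-   with an accumulate or a subtract, computing a + b * c and a - b * c per
-   lane, e.g.
-
-     float32x2_t __acc = vdup_n_f32 (10.0f);
-     float32x2_t __b   = vdup_n_f32 (2.0f);
-     float32x2_t __c   = vdup_n_f32 (3.0f);
-     float32x2_t __r   = vmla_f32 (__acc, __b, __c);   every lane is 16.0f  */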
-
-/* vqabs */
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vqabsq_s64 (int64x2_t __a)
-{
- return (int64x2_t) __builtin_aarch64_sqabsv2di (__a);
-}
-
-__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
-vqabsb_s8 (int8x1_t __a)
-{
- return (int8x1_t) __builtin_aarch64_sqabsqi (__a);
-}
-
-__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
-vqabsh_s16 (int16x1_t __a)
-{
- return (int16x1_t) __builtin_aarch64_sqabshi (__a);
-}
-
-__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
-vqabss_s32 (int32x1_t __a)
-{
- return (int32x1_t) __builtin_aarch64_sqabssi (__a);
-}
-
-/* vqadd */
-
-__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
-vqaddb_s8 (int8x1_t __a, int8x1_t __b)
-{
- return (int8x1_t) __builtin_aarch64_sqaddqi (__a, __b);
-}
-
-__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
-vqaddh_s16 (int16x1_t __a, int16x1_t __b)
-{
- return (int16x1_t) __builtin_aarch64_sqaddhi (__a, __b);
-}
-
-__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
-vqadds_s32 (int32x1_t __a, int32x1_t __b)
-{
- return (int32x1_t) __builtin_aarch64_sqaddsi (__a, __b);
-}
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vqaddd_s64 (int64x1_t __a, int64x1_t __b)
-{
- return (int64x1_t) __builtin_aarch64_sqadddi (__a, __b);
-}
-
-__extension__ static __inline uint8x1_t __attribute__ ((__always_inline__))
-vqaddb_u8 (uint8x1_t __a, uint8x1_t __b)
-{
- return (uint8x1_t) __builtin_aarch64_uqaddqi (__a, __b);
-}
-
-__extension__ static __inline uint16x1_t __attribute__ ((__always_inline__))
-vqaddh_u16 (uint16x1_t __a, uint16x1_t __b)
-{
- return (uint16x1_t) __builtin_aarch64_uqaddhi (__a, __b);
-}
-
-__extension__ static __inline uint32x1_t __attribute__ ((__always_inline__))
-vqadds_u32 (uint32x1_t __a, uint32x1_t __b)
-{
- return (uint32x1_t) __builtin_aarch64_uqaddsi (__a, __b);
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vqaddd_u64 (uint64x1_t __a, uint64x1_t __b)
-{
- return (uint64x1_t) __builtin_aarch64_uqadddi (__a, __b);
-}
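-
-/* Usage sketch (illustrative only): the scalar vqadd*_* forms above saturate
-   instead of wrapping on overflow, e.g. vqaddb_s8 (120, 100) is 127 rather
-   than the wrapped value -36.  */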
-
-/* vqdmlal */
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vqdmlal_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c)
-{
- return __builtin_aarch64_sqdmlalv4hi (__a, __b, __c);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vqdmlal_high_s16 (int32x4_t __a, int16x8_t __b, int16x8_t __c)
-{
- return __builtin_aarch64_sqdmlal2v8hi (__a, __b, __c);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vqdmlal_high_lane_s16 (int32x4_t __a, int16x8_t __b, int16x8_t __c,
- int const __d)
-{
- return __builtin_aarch64_sqdmlal2_lanev8hi (__a, __b, __c, __d);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vqdmlal_high_laneq_s16 (int32x4_t __a, int16x8_t __b, int16x8_t __c,
- int const __d)
-{
- return __builtin_aarch64_sqdmlal2_laneqv8hi (__a, __b, __c, __d);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vqdmlal_high_n_s16 (int32x4_t __a, int16x8_t __b, int16_t __c)
-{
- return __builtin_aarch64_sqdmlal2_nv8hi (__a, __b, __c);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vqdmlal_lane_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c, int const __d)
-{
- int16x8_t __tmp = vcombine_s16 (__c, vcreate_s16 (INT64_C (0)));
- return __builtin_aarch64_sqdmlal_lanev4hi (__a, __b, __tmp, __d);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vqdmlal_laneq_s16 (int32x4_t __a, int16x4_t __b, int16x8_t __c, int const __d)
-{
- return __builtin_aarch64_sqdmlal_laneqv4hi (__a, __b, __c, __d);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vqdmlal_n_s16 (int32x4_t __a, int16x4_t __b, int16_t __c)
-{
- return __builtin_aarch64_sqdmlal_nv4hi (__a, __b, __c);
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vqdmlal_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c)
-{
- return __builtin_aarch64_sqdmlalv2si (__a, __b, __c);
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vqdmlal_high_s32 (int64x2_t __a, int32x4_t __b, int32x4_t __c)
-{
- return __builtin_aarch64_sqdmlal2v4si (__a, __b, __c);
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vqdmlal_high_lane_s32 (int64x2_t __a, int32x4_t __b, int32x4_t __c,
- int const __d)
-{
- return __builtin_aarch64_sqdmlal2_lanev4si (__a, __b, __c, __d);
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vqdmlal_high_laneq_s32 (int64x2_t __a, int32x4_t __b, int32x4_t __c,
- int const __d)
-{
- return __builtin_aarch64_sqdmlal2_laneqv4si (__a, __b, __c, __d);
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vqdmlal_high_n_s32 (int64x2_t __a, int32x4_t __b, int32_t __c)
-{
- return __builtin_aarch64_sqdmlal2_nv4si (__a, __b, __c);
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vqdmlal_lane_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c, int const __d)
-{
- int32x4_t __tmp = vcombine_s32 (__c, vcreate_s32 (INT64_C (0)));
- return __builtin_aarch64_sqdmlal_lanev2si (__a, __b, __tmp, __d);
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vqdmlal_laneq_s32 (int64x2_t __a, int32x2_t __b, int32x4_t __c, int const __d)
-{
- return __builtin_aarch64_sqdmlal_laneqv2si (__a, __b, __c, __d);
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vqdmlal_n_s32 (int64x2_t __a, int32x2_t __b, int32_t __c)
-{
- return __builtin_aarch64_sqdmlal_nv2si (__a, __b, __c);
-}
-
-__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
-vqdmlalh_s16 (int32x1_t __a, int16x1_t __b, int16x1_t __c)
-{
- return __builtin_aarch64_sqdmlalhi (__a, __b, __c);
-}
-
-__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
-vqdmlalh_lane_s16 (int32x1_t __a, int16x1_t __b, int16x8_t __c, const int __d)
-{
- return __builtin_aarch64_sqdmlal_lanehi (__a, __b, __c, __d);
-}
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vqdmlals_s32 (int64x1_t __a, int32x1_t __b, int32x1_t __c)
-{
- return __builtin_aarch64_sqdmlalsi (__a, __b, __c);
-}
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vqdmlals_lane_s32 (int64x1_t __a, int32x1_t __b, int32x4_t __c, const int __d)
-{
- return __builtin_aarch64_sqdmlal_lanesi (__a, __b, __c, __d);
-}
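-
-/* Usage sketch (illustrative only, using the vdup helpers declared elsewhere
-   in this header): vqdmlal_* widens, doubles and saturates the product
-   before accumulating, so each result lane is __a + saturate (2 * __b * __c);
-   the _lane and _laneq forms multiply by a single selected lane and the
-   _high forms use the upper halves of their vector operands, e.g.
-
-     int32x4_t __acc = vdupq_n_s32 (0);
-     int16x4_t __b   = vdup_n_s16 (3);
-     int16x4_t __c   = vdup_n_s16 (4);
-     int32x4_t __r   = vqdmlal_s16 (__acc, __b, __c);   every lane is 24  */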
-
-/* vqdmlsl */
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vqdmlsl_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c)
-{
- return __builtin_aarch64_sqdmlslv4hi (__a, __b, __c);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vqdmlsl_high_s16 (int32x4_t __a, int16x8_t __b, int16x8_t __c)
-{
- return __builtin_aarch64_sqdmlsl2v8hi (__a, __b, __c);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vqdmlsl_high_lane_s16 (int32x4_t __a, int16x8_t __b, int16x8_t __c,
- int const __d)
-{
- return __builtin_aarch64_sqdmlsl2_lanev8hi (__a, __b, __c, __d);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vqdmlsl_high_laneq_s16 (int32x4_t __a, int16x8_t __b, int16x8_t __c,
- int const __d)
-{
- return __builtin_aarch64_sqdmlsl2_laneqv8hi (__a, __b, __c, __d);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vqdmlsl_high_n_s16 (int32x4_t __a, int16x8_t __b, int16_t __c)
-{
- return __builtin_aarch64_sqdmlsl2_nv8hi (__a, __b, __c);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vqdmlsl_lane_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c, int const __d)
-{
- int16x8_t __tmp = vcombine_s16 (__c, vcreate_s16 (INT64_C (0)));
- return __builtin_aarch64_sqdmlsl_lanev4hi (__a, __b, __tmp, __d);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vqdmlsl_laneq_s16 (int32x4_t __a, int16x4_t __b, int16x8_t __c, int const __d)
-{
- return __builtin_aarch64_sqdmlsl_laneqv4hi (__a, __b, __c, __d);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vqdmlsl_n_s16 (int32x4_t __a, int16x4_t __b, int16_t __c)
-{
- return __builtin_aarch64_sqdmlsl_nv4hi (__a, __b, __c);
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vqdmlsl_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c)
-{
- return __builtin_aarch64_sqdmlslv2si (__a, __b, __c);
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vqdmlsl_high_s32 (int64x2_t __a, int32x4_t __b, int32x4_t __c)
-{
- return __builtin_aarch64_sqdmlsl2v4si (__a, __b, __c);
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vqdmlsl_high_lane_s32 (int64x2_t __a, int32x4_t __b, int32x4_t __c,
- int const __d)
-{
- return __builtin_aarch64_sqdmlsl2_lanev4si (__a, __b, __c, __d);
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vqdmlsl_high_laneq_s32 (int64x2_t __a, int32x4_t __b, int32x4_t __c,
- int const __d)
-{
- return __builtin_aarch64_sqdmlsl2_laneqv4si (__a, __b, __c, __d);
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vqdmlsl_high_n_s32 (int64x2_t __a, int32x4_t __b, int32_t __c)
-{
- return __builtin_aarch64_sqdmlsl2_nv4si (__a, __b, __c);
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vqdmlsl_lane_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c, int const __d)
-{
- int32x4_t __tmp = vcombine_s32 (__c, vcreate_s32 (INT64_C (0)));
- return __builtin_aarch64_sqdmlsl_lanev2si (__a, __b, __tmp, __d);
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vqdmlsl_laneq_s32 (int64x2_t __a, int32x2_t __b, int32x4_t __c, int const __d)
-{
- return __builtin_aarch64_sqdmlsl_laneqv2si (__a, __b, __c, __d);
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vqdmlsl_n_s32 (int64x2_t __a, int32x2_t __b, int32_t __c)
-{
- return __builtin_aarch64_sqdmlsl_nv2si (__a, __b, __c);
-}
-
-__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
-vqdmlslh_s16 (int32x1_t __a, int16x1_t __b, int16x1_t __c)
-{
- return __builtin_aarch64_sqdmlslhi (__a, __b, __c);
-}
-
-__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
-vqdmlslh_lane_s16 (int32x1_t __a, int16x1_t __b, int16x8_t __c, const int __d)
-{
- return __builtin_aarch64_sqdmlsl_lanehi (__a, __b, __c, __d);
-}
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vqdmlsls_s32 (int64x1_t __a, int32x1_t __b, int32x1_t __c)
-{
- return __builtin_aarch64_sqdmlslsi (__a, __b, __c);
-}
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vqdmlsls_lane_s32 (int64x1_t __a, int32x1_t __b, int32x4_t __c, const int __d)
-{
- return __builtin_aarch64_sqdmlsl_lanesi (__a, __b, __c, __d);
-}
-
-/* vqdmulh */
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vqdmulh_lane_s16 (int16x4_t __a, int16x4_t __b, const int __c)
-{
- return __builtin_aarch64_sqdmulh_lanev4hi (__a, __b, __c);
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vqdmulh_lane_s32 (int32x2_t __a, int32x2_t __b, const int __c)
-{
- return __builtin_aarch64_sqdmulh_lanev2si (__a, __b, __c);
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vqdmulhq_lane_s16 (int16x8_t __a, int16x4_t __b, const int __c)
-{
- return __builtin_aarch64_sqdmulh_lanev8hi (__a, __b, __c);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vqdmulhq_lane_s32 (int32x4_t __a, int32x2_t __b, const int __c)
-{
- return __builtin_aarch64_sqdmulh_lanev4si (__a, __b, __c);
-}
-
-__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
-vqdmulhh_s16 (int16x1_t __a, int16x1_t __b)
-{
- return (int16x1_t) __builtin_aarch64_sqdmulhhi (__a, __b);
-}
-
-__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
-vqdmulhh_lane_s16 (int16x1_t __a, int16x8_t __b, const int __c)
-{
- return __builtin_aarch64_sqdmulh_lanehi (__a, __b, __c);
-}
-
-__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
-vqdmulhs_s32 (int32x1_t __a, int32x1_t __b)
-{
- return (int32x1_t) __builtin_aarch64_sqdmulhsi (__a, __b);
-}
-
-__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
-vqdmulhs_lane_s32 (int32x1_t __a, int32x4_t __b, const int __c)
-{
- return __builtin_aarch64_sqdmulh_lanesi (__a, __b, __c);
-}
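-
-/* Usage sketch (illustrative only): vqdmulh* returns the high half of the
-   saturated doubling product, i.e. saturate (2 * __a * __b) >> 16 for
-   16-bit lanes, which keeps Q15 fixed-point multiplies in range;
-   e.g. vqdmulhh_s16 (16384, 16384) is 8192.  */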
-
-/* vqdmull */
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vqdmull_s16 (int16x4_t __a, int16x4_t __b)
-{
- return __builtin_aarch64_sqdmullv4hi (__a, __b);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vqdmull_high_s16 (int16x8_t __a, int16x8_t __b)
-{
- return __builtin_aarch64_sqdmull2v8hi (__a, __b);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vqdmull_high_lane_s16 (int16x8_t __a, int16x8_t __b, int const __c)
-{
- return __builtin_aarch64_sqdmull2_lanev8hi (__a, __b, __c);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vqdmull_high_laneq_s16 (int16x8_t __a, int16x8_t __b, int const __c)
-{
- return __builtin_aarch64_sqdmull2_laneqv8hi (__a, __b, __c);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vqdmull_high_n_s16 (int16x8_t __a, int16_t __b)
-{
- return __builtin_aarch64_sqdmull2_nv8hi (__a, __b);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vqdmull_lane_s16 (int16x4_t __a, int16x4_t __b, int const __c)
-{
- int16x8_t __tmp = vcombine_s16 (__b, vcreate_s16 (INT64_C (0)));
- return __builtin_aarch64_sqdmull_lanev4hi (__a, __tmp, __c);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vqdmull_laneq_s16 (int16x4_t __a, int16x8_t __b, int const __c)
-{
- return __builtin_aarch64_sqdmull_laneqv4hi (__a, __b, __c);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vqdmull_n_s16 (int16x4_t __a, int16_t __b)
-{
- return __builtin_aarch64_sqdmull_nv4hi (__a, __b);
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vqdmull_s32 (int32x2_t __a, int32x2_t __b)
-{
- return __builtin_aarch64_sqdmullv2si (__a, __b);
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vqdmull_high_s32 (int32x4_t __a, int32x4_t __b)
-{
- return __builtin_aarch64_sqdmull2v4si (__a, __b);
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vqdmull_high_lane_s32 (int32x4_t __a, int32x4_t __b, int const __c)
-{
- return __builtin_aarch64_sqdmull2_lanev4si (__a, __b, __c);
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vqdmull_high_laneq_s32 (int32x4_t __a, int32x4_t __b, int const __c)
-{
- return __builtin_aarch64_sqdmull2_laneqv4si (__a, __b, __c);
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vqdmull_high_n_s32 (int32x4_t __a, int32_t __b)
-{
- return __builtin_aarch64_sqdmull2_nv4si (__a, __b);
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vqdmull_lane_s32 (int32x2_t __a, int32x2_t __b, int const __c)
-{
- int32x4_t __tmp = vcombine_s32 (__b, vcreate_s32 (INT64_C (0)));
- return __builtin_aarch64_sqdmull_lanev2si (__a, __tmp, __c);
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vqdmull_laneq_s32 (int32x2_t __a, int32x4_t __b, int const __c)
-{
- return __builtin_aarch64_sqdmull_laneqv2si (__a, __b, __c);
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vqdmull_n_s32 (int32x2_t __a, int32_t __b)
-{
- return __builtin_aarch64_sqdmull_nv2si (__a, __b);
-}
-
-__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
-vqdmullh_s16 (int16x1_t __a, int16x1_t __b)
-{
- return (int32x1_t) __builtin_aarch64_sqdmullhi (__a, __b);
-}
-
-__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
-vqdmullh_lane_s16 (int16x1_t __a, int16x8_t __b, const int __c)
-{
- return __builtin_aarch64_sqdmull_lanehi (__a, __b, __c);
-}
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vqdmulls_s32 (int32x1_t __a, int32x1_t __b)
-{
- return (int64x1_t) __builtin_aarch64_sqdmullsi (__a, __b);
-}
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vqdmulls_lane_s32 (int32x1_t __a, int32x4_t __b, const int __c)
-{
- return __builtin_aarch64_sqdmull_lanesi (__a, __b, __c);
-}
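-
-/* Usage sketch (illustrative only): vqdmull* is the widening form, producing
-   saturate (2 * __a * __b) in lanes of twice the input width,
-   e.g. vqdmullh_s16 (16384, 16384) yields the int32 value 536870912.  */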
-
-/* vqmovn */
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vqmovn_s16 (int16x8_t __a)
-{
- return (int8x8_t) __builtin_aarch64_sqmovnv8hi (__a);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vqmovn_s32 (int32x4_t __a)
-{
- return (int16x4_t) __builtin_aarch64_sqmovnv4si (__a);
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vqmovn_s64 (int64x2_t __a)
-{
- return (int32x2_t) __builtin_aarch64_sqmovnv2di (__a);
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vqmovn_u16 (uint16x8_t __a)
-{
- return (uint8x8_t) __builtin_aarch64_uqmovnv8hi ((int16x8_t) __a);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vqmovn_u32 (uint32x4_t __a)
-{
- return (uint16x4_t) __builtin_aarch64_uqmovnv4si ((int32x4_t) __a);
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vqmovn_u64 (uint64x2_t __a)
-{
- return (uint32x2_t) __builtin_aarch64_uqmovnv2di ((int64x2_t) __a);
-}
-
-__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
-vqmovnh_s16 (int16x1_t __a)
-{
- return (int8x1_t) __builtin_aarch64_sqmovnhi (__a);
-}
-
-__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
-vqmovns_s32 (int32x1_t __a)
-{
- return (int16x1_t) __builtin_aarch64_sqmovnsi (__a);
-}
-
-__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
-vqmovnd_s64 (int64x1_t __a)
-{
- return (int32x1_t) __builtin_aarch64_sqmovndi (__a);
-}
-
-__extension__ static __inline uint8x1_t __attribute__ ((__always_inline__))
-vqmovnh_u16 (uint16x1_t __a)
-{
- return (uint8x1_t) __builtin_aarch64_uqmovnhi (__a);
-}
-
-__extension__ static __inline uint16x1_t __attribute__ ((__always_inline__))
-vqmovns_u32 (uint32x1_t __a)
-{
- return (uint16x1_t) __builtin_aarch64_uqmovnsi (__a);
-}
-
-__extension__ static __inline uint32x1_t __attribute__ ((__always_inline__))
-vqmovnd_u64 (uint64x1_t __a)
-{
- return (uint32x1_t) __builtin_aarch64_uqmovndi (__a);
-}
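-
-/* Usage sketch (illustrative only): vqmovn* narrows each lane to half its
-   width and clamps values that do not fit, e.g. the scalar form
-   vqmovns_s32 (100000) returns 32767 and vqmovns_s32 (-100000) returns
-   -32768.  */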
-
-/* vqmovun */
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vqmovun_s16 (int16x8_t __a)
-{
- return (uint8x8_t) __builtin_aarch64_sqmovunv8hi (__a);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vqmovun_s32 (int32x4_t __a)
-{
- return (uint16x4_t) __builtin_aarch64_sqmovunv4si (__a);
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vqmovun_s64 (int64x2_t __a)
-{
- return (uint32x2_t) __builtin_aarch64_sqmovunv2di (__a);
-}
-
-__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
-vqmovunh_s16 (int16x1_t __a)
-{
- return (int8x1_t) __builtin_aarch64_sqmovunhi (__a);
-}
-
-__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
-vqmovuns_s32 (int32x1_t __a)
-{
- return (int16x1_t) __builtin_aarch64_sqmovunsi (__a);
-}
-
-__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
-vqmovund_s64 (int64x1_t __a)
-{
- return (int32x1_t) __builtin_aarch64_sqmovundi (__a);
-}
-
-/* vqneg */
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vqnegq_s64 (int64x2_t __a)
-{
- return (int64x2_t) __builtin_aarch64_sqnegv2di (__a);
-}
-
-__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
-vqnegb_s8 (int8x1_t __a)
-{
- return (int8x1_t) __builtin_aarch64_sqnegqi (__a);
-}
-
-__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
-vqnegh_s16 (int16x1_t __a)
-{
- return (int16x1_t) __builtin_aarch64_sqneghi (__a);
-}
-
-__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
-vqnegs_s32 (int32x1_t __a)
-{
- return (int32x1_t) __builtin_aarch64_sqnegsi (__a);
-}
-
-/* vqrdmulh */
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vqrdmulh_lane_s16 (int16x4_t __a, int16x4_t __b, const int __c)
-{
- return __builtin_aarch64_sqrdmulh_lanev4hi (__a, __b, __c);
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vqrdmulh_lane_s32 (int32x2_t __a, int32x2_t __b, const int __c)
-{
- return __builtin_aarch64_sqrdmulh_lanev2si (__a, __b, __c);
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vqrdmulhq_lane_s16 (int16x8_t __a, int16x4_t __b, const int __c)
-{
- return __builtin_aarch64_sqrdmulh_lanev8hi (__a, __b, __c);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vqrdmulhq_lane_s32 (int32x4_t __a, int32x2_t __b, const int __c)
-{
- return __builtin_aarch64_sqrdmulh_lanev4si (__a, __b, __c);
-}
-
-__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
-vqrdmulhh_s16 (int16x1_t __a, int16x1_t __b)
-{
- return (int16x1_t) __builtin_aarch64_sqrdmulhhi (__a, __b);
-}
-
-__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
-vqrdmulhh_lane_s16 (int16x1_t __a, int16x8_t __b, const int __c)
-{
- return __builtin_aarch64_sqrdmulh_lanehi (__a, __b, __c);
-}
-
-__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
-vqrdmulhs_s32 (int32x1_t __a, int32x1_t __b)
-{
- return (int32x1_t) __builtin_aarch64_sqrdmulhsi (__a, __b);
-}
-
-__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
-vqrdmulhs_lane_s32 (int32x1_t __a, int32x4_t __b, const int __c)
-{
- return __builtin_aarch64_sqrdmulh_lanesi (__a, __b, __c);
-}
-
-/* vqrshl */
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vqrshl_s8 (int8x8_t __a, int8x8_t __b)
-{
- return __builtin_aarch64_sqrshlv8qi (__a, __b);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vqrshl_s16 (int16x4_t __a, int16x4_t __b)
-{
- return __builtin_aarch64_sqrshlv4hi (__a, __b);
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vqrshl_s32 (int32x2_t __a, int32x2_t __b)
-{
- return __builtin_aarch64_sqrshlv2si (__a, __b);
-}
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vqrshl_s64 (int64x1_t __a, int64x1_t __b)
-{
- return __builtin_aarch64_sqrshldi (__a, __b);
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vqrshl_u8 (uint8x8_t __a, int8x8_t __b)
-{
- return (uint8x8_t) __builtin_aarch64_uqrshlv8qi ((int8x8_t) __a, __b);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vqrshl_u16 (uint16x4_t __a, int16x4_t __b)
-{
- return (uint16x4_t) __builtin_aarch64_uqrshlv4hi ((int16x4_t) __a, __b);
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vqrshl_u32 (uint32x2_t __a, int32x2_t __b)
-{
- return (uint32x2_t) __builtin_aarch64_uqrshlv2si ((int32x2_t) __a, __b);
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vqrshl_u64 (uint64x1_t __a, int64x1_t __b)
-{
- return (uint64x1_t) __builtin_aarch64_uqrshldi ((int64x1_t) __a, __b);
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vqrshlq_s8 (int8x16_t __a, int8x16_t __b)
-{
- return __builtin_aarch64_sqrshlv16qi (__a, __b);
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vqrshlq_s16 (int16x8_t __a, int16x8_t __b)
-{
- return __builtin_aarch64_sqrshlv8hi (__a, __b);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vqrshlq_s32 (int32x4_t __a, int32x4_t __b)
-{
- return __builtin_aarch64_sqrshlv4si (__a, __b);
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vqrshlq_s64 (int64x2_t __a, int64x2_t __b)
-{
- return __builtin_aarch64_sqrshlv2di (__a, __b);
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vqrshlq_u8 (uint8x16_t __a, int8x16_t __b)
-{
- return (uint8x16_t) __builtin_aarch64_uqrshlv16qi ((int8x16_t) __a, __b);
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vqrshlq_u16 (uint16x8_t __a, int16x8_t __b)
-{
- return (uint16x8_t) __builtin_aarch64_uqrshlv8hi ((int16x8_t) __a, __b);
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vqrshlq_u32 (uint32x4_t __a, int32x4_t __b)
-{
- return (uint32x4_t) __builtin_aarch64_uqrshlv4si ((int32x4_t) __a, __b);
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vqrshlq_u64 (uint64x2_t __a, int64x2_t __b)
-{
- return (uint64x2_t) __builtin_aarch64_uqrshlv2di ((int64x2_t) __a, __b);
-}
-
-__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
-vqrshlb_s8 (int8x1_t __a, int8x1_t __b)
-{
- return __builtin_aarch64_sqrshlqi (__a, __b);
-}
-
-__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
-vqrshlh_s16 (int16x1_t __a, int16x1_t __b)
-{
- return __builtin_aarch64_sqrshlhi (__a, __b);
-}
-
-__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
-vqrshls_s32 (int32x1_t __a, int32x1_t __b)
-{
- return __builtin_aarch64_sqrshlsi (__a, __b);
-}
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vqrshld_s64 (int64x1_t __a, int64x1_t __b)
-{
- return __builtin_aarch64_sqrshldi (__a, __b);
-}
-
-__extension__ static __inline uint8x1_t __attribute__ ((__always_inline__))
-vqrshlb_u8 (uint8x1_t __a, uint8x1_t __b)
-{
- return (uint8x1_t) __builtin_aarch64_uqrshlqi (__a, __b);
-}
-
-__extension__ static __inline uint16x1_t __attribute__ ((__always_inline__))
-vqrshlh_u16 (uint16x1_t __a, uint16x1_t __b)
-{
- return (uint16x1_t) __builtin_aarch64_uqrshlhi (__a, __b);
-}
-
-__extension__ static __inline uint32x1_t __attribute__ ((__always_inline__))
-vqrshls_u32 (uint32x1_t __a, uint32x1_t __b)
-{
- return (uint32x1_t) __builtin_aarch64_uqrshlsi (__a, __b);
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vqrshld_u64 (uint64x1_t __a, uint64x1_t __b)
-{
- return (uint64x1_t) __builtin_aarch64_uqrshldi (__a, __b);
-}
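-
-/* Usage sketch (illustrative only): vqrshl* shifts left by a signed,
-   per-lane amount and saturates on overflow; a negative count gives a
-   rounding shift right, e.g. vqrshlb_s8 (5, -1) is 3 and
-   vqrshlb_s8 (100, 2) saturates to 127.  */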
-
-/* vqrshrn */
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vqrshrn_n_s16 (int16x8_t __a, const int __b)
-{
- return (int8x8_t) __builtin_aarch64_sqrshrn_nv8hi (__a, __b);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vqrshrn_n_s32 (int32x4_t __a, const int __b)
-{
- return (int16x4_t) __builtin_aarch64_sqrshrn_nv4si (__a, __b);
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vqrshrn_n_s64 (int64x2_t __a, const int __b)
-{
- return (int32x2_t) __builtin_aarch64_sqrshrn_nv2di (__a, __b);
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vqrshrn_n_u16 (uint16x8_t __a, const int __b)
-{
- return (uint8x8_t) __builtin_aarch64_uqrshrn_nv8hi ((int16x8_t) __a, __b);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vqrshrn_n_u32 (uint32x4_t __a, const int __b)
-{
- return (uint16x4_t) __builtin_aarch64_uqrshrn_nv4si ((int32x4_t) __a, __b);
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vqrshrn_n_u64 (uint64x2_t __a, const int __b)
-{
- return (uint32x2_t) __builtin_aarch64_uqrshrn_nv2di ((int64x2_t) __a, __b);
-}
-
-__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
-vqrshrnh_n_s16 (int16x1_t __a, const int __b)
-{
- return (int8x1_t) __builtin_aarch64_sqrshrn_nhi (__a, __b);
-}
-
-__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
-vqrshrns_n_s32 (int32x1_t __a, const int __b)
-{
- return (int16x1_t) __builtin_aarch64_sqrshrn_nsi (__a, __b);
-}
-
-__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
-vqrshrnd_n_s64 (int64x1_t __a, const int __b)
-{
- return (int32x1_t) __builtin_aarch64_sqrshrn_ndi (__a, __b);
-}
-
-__extension__ static __inline uint8x1_t __attribute__ ((__always_inline__))
-vqrshrnh_n_u16 (uint16x1_t __a, const int __b)
-{
- return (uint8x1_t) __builtin_aarch64_uqrshrn_nhi (__a, __b);
-}
-
-__extension__ static __inline uint16x1_t __attribute__ ((__always_inline__))
-vqrshrns_n_u32 (uint32x1_t __a, const int __b)
-{
- return (uint16x1_t) __builtin_aarch64_uqrshrn_nsi (__a, __b);
-}
-
-__extension__ static __inline uint32x1_t __attribute__ ((__always_inline__))
-vqrshrnd_n_u64 (uint64x1_t __a, const int __b)
-{
- return (uint32x1_t) __builtin_aarch64_uqrshrn_ndi (__a, __b);
-}
-
-/* vqrshrun */
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vqrshrun_n_s16 (int16x8_t __a, const int __b)
-{
- return (uint8x8_t) __builtin_aarch64_sqrshrun_nv8hi (__a, __b);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vqrshrun_n_s32 (int32x4_t __a, const int __b)
-{
- return (uint16x4_t) __builtin_aarch64_sqrshrun_nv4si (__a, __b);
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vqrshrun_n_s64 (int64x2_t __a, const int __b)
-{
- return (uint32x2_t) __builtin_aarch64_sqrshrun_nv2di (__a, __b);
-}
-
-__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
-vqrshrunh_n_s16 (int16x1_t __a, const int __b)
-{
- return (int8x1_t) __builtin_aarch64_sqrshrun_nhi (__a, __b);
-}
-
-__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
-vqrshruns_n_s32 (int32x1_t __a, const int __b)
-{
- return (int16x1_t) __builtin_aarch64_sqrshrun_nsi (__a, __b);
-}
-
-__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
-vqrshrund_n_s64 (int64x1_t __a, const int __b)
-{
- return (int32x1_t) __builtin_aarch64_sqrshrun_ndi (__a, __b);
-}
-
-/* vqshl */
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vqshl_s8 (int8x8_t __a, int8x8_t __b)
-{
- return __builtin_aarch64_sqshlv8qi (__a, __b);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vqshl_s16 (int16x4_t __a, int16x4_t __b)
-{
- return __builtin_aarch64_sqshlv4hi (__a, __b);
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vqshl_s32 (int32x2_t __a, int32x2_t __b)
-{
- return __builtin_aarch64_sqshlv2si (__a, __b);
-}
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vqshl_s64 (int64x1_t __a, int64x1_t __b)
-{
- return __builtin_aarch64_sqshldi (__a, __b);
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vqshl_u8 (uint8x8_t __a, int8x8_t __b)
-{
- return (uint8x8_t) __builtin_aarch64_uqshlv8qi ((int8x8_t) __a, __b);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vqshl_u16 (uint16x4_t __a, int16x4_t __b)
-{
- return (uint16x4_t) __builtin_aarch64_uqshlv4hi ((int16x4_t) __a, __b);
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vqshl_u32 (uint32x2_t __a, int32x2_t __b)
-{
- return (uint32x2_t) __builtin_aarch64_uqshlv2si ((int32x2_t) __a, __b);
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vqshl_u64 (uint64x1_t __a, int64x1_t __b)
-{
- return (uint64x1_t) __builtin_aarch64_uqshldi ((int64x1_t) __a, __b);
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vqshlq_s8 (int8x16_t __a, int8x16_t __b)
-{
- return __builtin_aarch64_sqshlv16qi (__a, __b);
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vqshlq_s16 (int16x8_t __a, int16x8_t __b)
-{
- return __builtin_aarch64_sqshlv8hi (__a, __b);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vqshlq_s32 (int32x4_t __a, int32x4_t __b)
-{
- return __builtin_aarch64_sqshlv4si (__a, __b);
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vqshlq_s64 (int64x2_t __a, int64x2_t __b)
-{
- return __builtin_aarch64_sqshlv2di (__a, __b);
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vqshlq_u8 (uint8x16_t __a, int8x16_t __b)
-{
- return (uint8x16_t) __builtin_aarch64_uqshlv16qi ((int8x16_t) __a, __b);
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vqshlq_u16 (uint16x8_t __a, int16x8_t __b)
-{
- return (uint16x8_t) __builtin_aarch64_uqshlv8hi ((int16x8_t) __a, __b);
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vqshlq_u32 (uint32x4_t __a, int32x4_t __b)
-{
- return (uint32x4_t) __builtin_aarch64_uqshlv4si ((int32x4_t) __a, __b);
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vqshlq_u64 (uint64x2_t __a, int64x2_t __b)
-{
- return (uint64x2_t) __builtin_aarch64_uqshlv2di ((int64x2_t) __a, __b);
-}
-
-__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
-vqshlb_s8 (int8x1_t __a, int8x1_t __b)
-{
- return __builtin_aarch64_sqshlqi (__a, __b);
-}
-
-__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
-vqshlh_s16 (int16x1_t __a, int16x1_t __b)
-{
- return __builtin_aarch64_sqshlhi (__a, __b);
-}
-
-__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
-vqshls_s32 (int32x1_t __a, int32x1_t __b)
-{
- return __builtin_aarch64_sqshlsi (__a, __b);
-}
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vqshld_s64 (int64x1_t __a, int64x1_t __b)
-{
- return __builtin_aarch64_sqshldi (__a, __b);
-}
-
-__extension__ static __inline uint8x1_t __attribute__ ((__always_inline__))
-vqshlb_u8 (uint8x1_t __a, uint8x1_t __b)
-{
- return (uint8x1_t) __builtin_aarch64_uqshlqi (__a, __b);
-}
-
-__extension__ static __inline uint16x1_t __attribute__ ((__always_inline__))
-vqshlh_u16 (uint16x1_t __a, uint16x1_t __b)
-{
- return (uint16x1_t) __builtin_aarch64_uqshlhi (__a, __b);
-}
-
-__extension__ static __inline uint32x1_t __attribute__ ((__always_inline__))
-vqshls_u32 (uint32x1_t __a, uint32x1_t __b)
-{
- return (uint32x1_t) __builtin_aarch64_uqshlsi (__a, __b);
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vqshld_u64 (uint64x1_t __a, uint64x1_t __b)
-{
- return (uint64x1_t) __builtin_aarch64_uqshldi (__a, __b);
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vqshl_n_s8 (int8x8_t __a, const int __b)
-{
- return (int8x8_t) __builtin_aarch64_sqshl_nv8qi (__a, __b);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vqshl_n_s16 (int16x4_t __a, const int __b)
-{
- return (int16x4_t) __builtin_aarch64_sqshl_nv4hi (__a, __b);
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vqshl_n_s32 (int32x2_t __a, const int __b)
-{
- return (int32x2_t) __builtin_aarch64_sqshl_nv2si (__a, __b);
-}
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vqshl_n_s64 (int64x1_t __a, const int __b)
-{
- return (int64x1_t) __builtin_aarch64_sqshl_ndi (__a, __b);
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vqshl_n_u8 (uint8x8_t __a, const int __b)
-{
- return (uint8x8_t) __builtin_aarch64_uqshl_nv8qi ((int8x8_t) __a, __b);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vqshl_n_u16 (uint16x4_t __a, const int __b)
-{
- return (uint16x4_t) __builtin_aarch64_uqshl_nv4hi ((int16x4_t) __a, __b);
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vqshl_n_u32 (uint32x2_t __a, const int __b)
-{
- return (uint32x2_t) __builtin_aarch64_uqshl_nv2si ((int32x2_t) __a, __b);
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vqshl_n_u64 (uint64x1_t __a, const int __b)
-{
- return (uint64x1_t) __builtin_aarch64_uqshl_ndi ((int64x1_t) __a, __b);
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vqshlq_n_s8 (int8x16_t __a, const int __b)
-{
- return (int8x16_t) __builtin_aarch64_sqshl_nv16qi (__a, __b);
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vqshlq_n_s16 (int16x8_t __a, const int __b)
-{
- return (int16x8_t) __builtin_aarch64_sqshl_nv8hi (__a, __b);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vqshlq_n_s32 (int32x4_t __a, const int __b)
-{
- return (int32x4_t) __builtin_aarch64_sqshl_nv4si (__a, __b);
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vqshlq_n_s64 (int64x2_t __a, const int __b)
-{
- return (int64x2_t) __builtin_aarch64_sqshl_nv2di (__a, __b);
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vqshlq_n_u8 (uint8x16_t __a, const int __b)
-{
- return (uint8x16_t) __builtin_aarch64_uqshl_nv16qi ((int8x16_t) __a, __b);
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vqshlq_n_u16 (uint16x8_t __a, const int __b)
-{
- return (uint16x8_t) __builtin_aarch64_uqshl_nv8hi ((int16x8_t) __a, __b);
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vqshlq_n_u32 (uint32x4_t __a, const int __b)
-{
- return (uint32x4_t) __builtin_aarch64_uqshl_nv4si ((int32x4_t) __a, __b);
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vqshlq_n_u64 (uint64x2_t __a, const int __b)
-{
- return (uint64x2_t) __builtin_aarch64_uqshl_nv2di ((int64x2_t) __a, __b);
-}
-
-__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
-vqshlb_n_s8 (int8x1_t __a, const int __b)
-{
- return (int8x1_t) __builtin_aarch64_sqshl_nqi (__a, __b);
-}
-
-__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
-vqshlh_n_s16 (int16x1_t __a, const int __b)
-{
- return (int16x1_t) __builtin_aarch64_sqshl_nhi (__a, __b);
-}
-
-__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
-vqshls_n_s32 (int32x1_t __a, const int __b)
-{
- return (int32x1_t) __builtin_aarch64_sqshl_nsi (__a, __b);
-}
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vqshld_n_s64 (int64x1_t __a, const int __b)
-{
- return (int64x1_t) __builtin_aarch64_sqshl_ndi (__a, __b);
-}
-
-__extension__ static __inline uint8x1_t __attribute__ ((__always_inline__))
-vqshlb_n_u8 (uint8x1_t __a, const int __b)
-{
- return (uint8x1_t) __builtin_aarch64_uqshl_nqi (__a, __b);
-}
-
-__extension__ static __inline uint16x1_t __attribute__ ((__always_inline__))
-vqshlh_n_u16 (uint16x1_t __a, const int __b)
-{
- return (uint16x1_t) __builtin_aarch64_uqshl_nhi (__a, __b);
-}
-
-__extension__ static __inline uint32x1_t __attribute__ ((__always_inline__))
-vqshls_n_u32 (uint32x1_t __a, const int __b)
-{
- return (uint32x1_t) __builtin_aarch64_uqshl_nsi (__a, __b);
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vqshld_n_u64 (uint64x1_t __a, const int __b)
-{
- return (uint64x1_t) __builtin_aarch64_uqshl_ndi (__a, __b);
-}
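-
-/* Usage sketch (illustrative only): the vqshl*_n forms shift by an
-   immediate and saturate the result, e.g. vqshlb_n_s8 (64, 1) is 127
-   because 128 does not fit in a signed 8-bit lane.  */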
-
-/* vqshlu */
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vqshlu_n_s8 (int8x8_t __a, const int __b)
-{
- return (uint8x8_t) __builtin_aarch64_sqshlu_nv8qi (__a, __b);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vqshlu_n_s16 (int16x4_t __a, const int __b)
-{
- return (uint16x4_t) __builtin_aarch64_sqshlu_nv4hi (__a, __b);
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vqshlu_n_s32 (int32x2_t __a, const int __b)
-{
- return (uint32x2_t) __builtin_aarch64_sqshlu_nv2si (__a, __b);
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vqshlu_n_s64 (int64x1_t __a, const int __b)
-{
- return (uint64x1_t) __builtin_aarch64_sqshlu_ndi (__a, __b);
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vqshluq_n_s8 (int8x16_t __a, const int __b)
-{
- return (uint8x16_t) __builtin_aarch64_sqshlu_nv16qi (__a, __b);
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vqshluq_n_s16 (int16x8_t __a, const int __b)
-{
- return (uint16x8_t) __builtin_aarch64_sqshlu_nv8hi (__a, __b);
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vqshluq_n_s32 (int32x4_t __a, const int __b)
-{
- return (uint32x4_t) __builtin_aarch64_sqshlu_nv4si (__a, __b);
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vqshluq_n_s64 (int64x2_t __a, const int __b)
-{
- return (uint64x2_t) __builtin_aarch64_sqshlu_nv2di (__a, __b);
-}
-
-__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
-vqshlub_n_s8 (int8x1_t __a, const int __b)
-{
- return (int8x1_t) __builtin_aarch64_sqshlu_nqi (__a, __b);
-}
-
-__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
-vqshluh_n_s16 (int16x1_t __a, const int __b)
-{
- return (int16x1_t) __builtin_aarch64_sqshlu_nhi (__a, __b);
-}
-
-__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
-vqshlus_n_s32 (int32x1_t __a, const int __b)
-{
- return (int32x1_t) __builtin_aarch64_sqshlu_nsi (__a, __b);
-}
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vqshlud_n_s64 (int64x1_t __a, const int __b)
-{
- return (int64x1_t) __builtin_aarch64_sqshlu_ndi (__a, __b);
-}
-
-/* vqshrn */
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vqshrn_n_s16 (int16x8_t __a, const int __b)
-{
- return (int8x8_t) __builtin_aarch64_sqshrn_nv8hi (__a, __b);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vqshrn_n_s32 (int32x4_t __a, const int __b)
-{
- return (int16x4_t) __builtin_aarch64_sqshrn_nv4si (__a, __b);
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vqshrn_n_s64 (int64x2_t __a, const int __b)
-{
- return (int32x2_t) __builtin_aarch64_sqshrn_nv2di (__a, __b);
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vqshrn_n_u16 (uint16x8_t __a, const int __b)
-{
- return (uint8x8_t) __builtin_aarch64_uqshrn_nv8hi ((int16x8_t) __a, __b);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vqshrn_n_u32 (uint32x4_t __a, const int __b)
-{
- return (uint16x4_t) __builtin_aarch64_uqshrn_nv4si ((int32x4_t) __a, __b);
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vqshrn_n_u64 (uint64x2_t __a, const int __b)
-{
- return (uint32x2_t) __builtin_aarch64_uqshrn_nv2di ((int64x2_t) __a, __b);
-}
-
-__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
-vqshrnh_n_s16 (int16x1_t __a, const int __b)
-{
- return (int8x1_t) __builtin_aarch64_sqshrn_nhi (__a, __b);
-}
-
-__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
-vqshrns_n_s32 (int32x1_t __a, const int __b)
-{
- return (int16x1_t) __builtin_aarch64_sqshrn_nsi (__a, __b);
-}
-
-__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
-vqshrnd_n_s64 (int64x1_t __a, const int __b)
-{
- return (int32x1_t) __builtin_aarch64_sqshrn_ndi (__a, __b);
-}
-
-__extension__ static __inline uint8x1_t __attribute__ ((__always_inline__))
-vqshrnh_n_u16 (uint16x1_t __a, const int __b)
-{
- return (uint8x1_t) __builtin_aarch64_uqshrn_nhi (__a, __b);
-}
-
-__extension__ static __inline uint16x1_t __attribute__ ((__always_inline__))
-vqshrns_n_u32 (uint32x1_t __a, const int __b)
-{
- return (uint16x1_t) __builtin_aarch64_uqshrn_nsi (__a, __b);
-}
-
-__extension__ static __inline uint32x1_t __attribute__ ((__always_inline__))
-vqshrnd_n_u64 (uint64x1_t __a, const int __b)
-{
- return (uint32x1_t) __builtin_aarch64_uqshrn_ndi (__a, __b);
-}
-
-/* vqshrun */
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vqshrun_n_s16 (int16x8_t __a, const int __b)
-{
- return (uint8x8_t) __builtin_aarch64_sqshrun_nv8hi (__a, __b);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vqshrun_n_s32 (int32x4_t __a, const int __b)
-{
- return (uint16x4_t) __builtin_aarch64_sqshrun_nv4si (__a, __b);
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vqshrun_n_s64 (int64x2_t __a, const int __b)
-{
- return (uint32x2_t) __builtin_aarch64_sqshrun_nv2di (__a, __b);
-}
-
-__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
-vqshrunh_n_s16 (int16x1_t __a, const int __b)
-{
- return (int8x1_t) __builtin_aarch64_sqshrun_nhi (__a, __b);
-}
-
-__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
-vqshruns_n_s32 (int32x1_t __a, const int __b)
-{
- return (int16x1_t) __builtin_aarch64_sqshrun_nsi (__a, __b);
-}
-
-__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
-vqshrund_n_s64 (int64x1_t __a, const int __b)
-{
- return (int32x1_t) __builtin_aarch64_sqshrun_ndi (__a, __b);
-}
-
-/* vqsub */
-
-__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
-vqsubb_s8 (int8x1_t __a, int8x1_t __b)
-{
- return (int8x1_t) __builtin_aarch64_sqsubqi (__a, __b);
-}
-
-__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
-vqsubh_s16 (int16x1_t __a, int16x1_t __b)
-{
- return (int16x1_t) __builtin_aarch64_sqsubhi (__a, __b);
-}
-
-__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
-vqsubs_s32 (int32x1_t __a, int32x1_t __b)
-{
- return (int32x1_t) __builtin_aarch64_sqsubsi (__a, __b);
-}
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vqsubd_s64 (int64x1_t __a, int64x1_t __b)
-{
- return (int64x1_t) __builtin_aarch64_sqsubdi (__a, __b);
-}
-
-__extension__ static __inline uint8x1_t __attribute__ ((__always_inline__))
-vqsubb_u8 (uint8x1_t __a, uint8x1_t __b)
-{
- return (uint8x1_t) __builtin_aarch64_uqsubqi (__a, __b);
-}
-
-__extension__ static __inline uint16x1_t __attribute__ ((__always_inline__))
-vqsubh_u16 (uint16x1_t __a, uint16x1_t __b)
-{
- return (uint16x1_t) __builtin_aarch64_uqsubhi (__a, __b);
-}
-
-__extension__ static __inline uint32x1_t __attribute__ ((__always_inline__))
-vqsubs_u32 (uint32x1_t __a, uint32x1_t __b)
-{
- return (uint32x1_t) __builtin_aarch64_uqsubsi (__a, __b);
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vqsubd_u64 (uint64x1_t __a, uint64x1_t __b)
-{
- return (uint64x1_t) __builtin_aarch64_uqsubdi (__a, __b);
-}
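-
-/* Usage sketch (illustrative only): the scalar vqsub*_* forms saturate
-   rather than wrap, e.g. vqsubb_u8 (10, 20) is 0 and
-   vqsubb_s8 (-100, 100) is -128.  */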
-
-/* vrshl */
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vrshl_s8 (int8x8_t __a, int8x8_t __b)
-{
- return (int8x8_t) __builtin_aarch64_srshlv8qi (__a, __b);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vrshl_s16 (int16x4_t __a, int16x4_t __b)
-{
- return (int16x4_t) __builtin_aarch64_srshlv4hi (__a, __b);
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vrshl_s32 (int32x2_t __a, int32x2_t __b)
-{
- return (int32x2_t) __builtin_aarch64_srshlv2si (__a, __b);
-}
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vrshl_s64 (int64x1_t __a, int64x1_t __b)
-{
- return (int64x1_t) __builtin_aarch64_srshldi (__a, __b);
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vrshl_u8 (uint8x8_t __a, int8x8_t __b)
-{
- return (uint8x8_t) __builtin_aarch64_urshlv8qi ((int8x8_t) __a, __b);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vrshl_u16 (uint16x4_t __a, int16x4_t __b)
-{
- return (uint16x4_t) __builtin_aarch64_urshlv4hi ((int16x4_t) __a, __b);
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vrshl_u32 (uint32x2_t __a, int32x2_t __b)
-{
- return (uint32x2_t) __builtin_aarch64_urshlv2si ((int32x2_t) __a, __b);
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vrshl_u64 (uint64x1_t __a, int64x1_t __b)
-{
- return (uint64x1_t) __builtin_aarch64_urshldi ((int64x1_t) __a, __b);
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vrshlq_s8 (int8x16_t __a, int8x16_t __b)
-{
- return (int8x16_t) __builtin_aarch64_srshlv16qi (__a, __b);
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vrshlq_s16 (int16x8_t __a, int16x8_t __b)
-{
- return (int16x8_t) __builtin_aarch64_srshlv8hi (__a, __b);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vrshlq_s32 (int32x4_t __a, int32x4_t __b)
-{
- return (int32x4_t) __builtin_aarch64_srshlv4si (__a, __b);
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vrshlq_s64 (int64x2_t __a, int64x2_t __b)
-{
- return (int64x2_t) __builtin_aarch64_srshlv2di (__a, __b);
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vrshlq_u8 (uint8x16_t __a, int8x16_t __b)
-{
- return (uint8x16_t) __builtin_aarch64_urshlv16qi ((int8x16_t) __a, __b);
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vrshlq_u16 (uint16x8_t __a, int16x8_t __b)
-{
- return (uint16x8_t) __builtin_aarch64_urshlv8hi ((int16x8_t) __a, __b);
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vrshlq_u32 (uint32x4_t __a, int32x4_t __b)
-{
- return (uint32x4_t) __builtin_aarch64_urshlv4si ((int32x4_t) __a, __b);
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vrshlq_u64 (uint64x2_t __a, int64x2_t __b)
-{
- return (uint64x2_t) __builtin_aarch64_urshlv2di ((int64x2_t) __a, __b);
-}
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vrshld_s64 (int64x1_t __a, int64x1_t __b)
-{
- return (int64x1_t) __builtin_aarch64_srshldi (__a, __b);
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vrshld_u64 (uint64x1_t __a, uint64x1_t __b)
-{
- return (uint64x1_t) __builtin_aarch64_urshldi (__a, __b);
-}
-
-/* vrshr */
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vrshr_n_s8 (int8x8_t __a, const int __b)
-{
- return (int8x8_t) __builtin_aarch64_srshr_nv8qi (__a, __b);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vrshr_n_s16 (int16x4_t __a, const int __b)
-{
- return (int16x4_t) __builtin_aarch64_srshr_nv4hi (__a, __b);
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vrshr_n_s32 (int32x2_t __a, const int __b)
-{
- return (int32x2_t) __builtin_aarch64_srshr_nv2si (__a, __b);
-}
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vrshr_n_s64 (int64x1_t __a, const int __b)
-{
- return (int64x1_t) __builtin_aarch64_srshr_ndi (__a, __b);
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vrshr_n_u8 (uint8x8_t __a, const int __b)
-{
- return (uint8x8_t) __builtin_aarch64_urshr_nv8qi ((int8x8_t) __a, __b);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vrshr_n_u16 (uint16x4_t __a, const int __b)
-{
- return (uint16x4_t) __builtin_aarch64_urshr_nv4hi ((int16x4_t) __a, __b);
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vrshr_n_u32 (uint32x2_t __a, const int __b)
-{
- return (uint32x2_t) __builtin_aarch64_urshr_nv2si ((int32x2_t) __a, __b);
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vrshr_n_u64 (uint64x1_t __a, const int __b)
-{
- return (uint64x1_t) __builtin_aarch64_urshr_ndi ((int64x1_t) __a, __b);
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vrshrq_n_s8 (int8x16_t __a, const int __b)
-{
- return (int8x16_t) __builtin_aarch64_srshr_nv16qi (__a, __b);
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vrshrq_n_s16 (int16x8_t __a, const int __b)
-{
- return (int16x8_t) __builtin_aarch64_srshr_nv8hi (__a, __b);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vrshrq_n_s32 (int32x4_t __a, const int __b)
-{
- return (int32x4_t) __builtin_aarch64_srshr_nv4si (__a, __b);
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vrshrq_n_s64 (int64x2_t __a, const int __b)
-{
- return (int64x2_t) __builtin_aarch64_srshr_nv2di (__a, __b);
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vrshrq_n_u8 (uint8x16_t __a, const int __b)
-{
- return (uint8x16_t) __builtin_aarch64_urshr_nv16qi ((int8x16_t) __a, __b);
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vrshrq_n_u16 (uint16x8_t __a, const int __b)
-{
- return (uint16x8_t) __builtin_aarch64_urshr_nv8hi ((int16x8_t) __a, __b);
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vrshrq_n_u32 (uint32x4_t __a, const int __b)
-{
- return (uint32x4_t) __builtin_aarch64_urshr_nv4si ((int32x4_t) __a, __b);
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vrshrq_n_u64 (uint64x2_t __a, const int __b)
-{
- return (uint64x2_t) __builtin_aarch64_urshr_nv2di ((int64x2_t) __a, __b);
-}
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vrshrd_n_s64 (int64x1_t __a, const int __b)
-{
- return (int64x1_t) __builtin_aarch64_srshr_ndi (__a, __b);
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vrshrd_n_u64 (uint64x1_t __a, const int __b)
-{
- return (uint64x1_t) __builtin_aarch64_urshr_ndi (__a, __b);
-}
-
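/* Illustrative sketch (not from the original header): vrshr_n is a rounding
   right shift by an immediate, i.e. division by a power of two with
   round-to-nearest.  The function name below is a placeholder.  */
#include <arm_neon.h>

static uint16x8_t
samples_10bit_to_8bit (uint16x8_t s)
{
  /* Per lane: (s + 2) >> 2, mapping 10-bit samples onto an 8-bit range.  */
  return vrshrq_n_u16 (s, 2);
}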
-/* vrsra */
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vrsra_n_s8 (int8x8_t __a, int8x8_t __b, const int __c)
-{
- return (int8x8_t) __builtin_aarch64_srsra_nv8qi (__a, __b, __c);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vrsra_n_s16 (int16x4_t __a, int16x4_t __b, const int __c)
-{
- return (int16x4_t) __builtin_aarch64_srsra_nv4hi (__a, __b, __c);
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vrsra_n_s32 (int32x2_t __a, int32x2_t __b, const int __c)
-{
- return (int32x2_t) __builtin_aarch64_srsra_nv2si (__a, __b, __c);
-}
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vrsra_n_s64 (int64x1_t __a, int64x1_t __b, const int __c)
-{
- return (int64x1_t) __builtin_aarch64_srsra_ndi (__a, __b, __c);
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vrsra_n_u8 (uint8x8_t __a, uint8x8_t __b, const int __c)
-{
- return (uint8x8_t) __builtin_aarch64_ursra_nv8qi ((int8x8_t) __a,
- (int8x8_t) __b, __c);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vrsra_n_u16 (uint16x4_t __a, uint16x4_t __b, const int __c)
-{
- return (uint16x4_t) __builtin_aarch64_ursra_nv4hi ((int16x4_t) __a,
- (int16x4_t) __b, __c);
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vrsra_n_u32 (uint32x2_t __a, uint32x2_t __b, const int __c)
-{
- return (uint32x2_t) __builtin_aarch64_ursra_nv2si ((int32x2_t) __a,
- (int32x2_t) __b, __c);
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vrsra_n_u64 (uint64x1_t __a, uint64x1_t __b, const int __c)
-{
- return (uint64x1_t) __builtin_aarch64_ursra_ndi ((int64x1_t) __a,
- (int64x1_t) __b, __c);
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vrsraq_n_s8 (int8x16_t __a, int8x16_t __b, const int __c)
-{
- return (int8x16_t) __builtin_aarch64_srsra_nv16qi (__a, __b, __c);
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vrsraq_n_s16 (int16x8_t __a, int16x8_t __b, const int __c)
-{
- return (int16x8_t) __builtin_aarch64_srsra_nv8hi (__a, __b, __c);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vrsraq_n_s32 (int32x4_t __a, int32x4_t __b, const int __c)
-{
- return (int32x4_t) __builtin_aarch64_srsra_nv4si (__a, __b, __c);
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vrsraq_n_s64 (int64x2_t __a, int64x2_t __b, const int __c)
-{
- return (int64x2_t) __builtin_aarch64_srsra_nv2di (__a, __b, __c);
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vrsraq_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __c)
-{
- return (uint8x16_t) __builtin_aarch64_ursra_nv16qi ((int8x16_t) __a,
- (int8x16_t) __b, __c);
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vrsraq_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __c)
-{
- return (uint16x8_t) __builtin_aarch64_ursra_nv8hi ((int16x8_t) __a,
- (int16x8_t) __b, __c);
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vrsraq_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __c)
-{
- return (uint32x4_t) __builtin_aarch64_ursra_nv4si ((int32x4_t) __a,
- (int32x4_t) __b, __c);
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vrsraq_n_u64 (uint64x2_t __a, uint64x2_t __b, const int __c)
-{
- return (uint64x2_t) __builtin_aarch64_ursra_nv2di ((int64x2_t) __a,
- (int64x2_t) __b, __c);
-}
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vrsrad_n_s64 (int64x1_t __a, int64x1_t __b, const int __c)
-{
- return (int64x1_t) __builtin_aarch64_srsra_ndi (__a, __b, __c);
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vrsrad_n_u64 (uint64x1_t __a, uint64x1_t __b, const int __c)
-{
- return (uint64x1_t) __builtin_aarch64_ursra_ndi (__a, __b, __c);
-}
-
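/* Illustrative sketch (not from the original header): vrsra_n combines a
   rounding right shift with an accumulate, so a scaled correction can be
   added in one step.  Names are placeholders.  */
#include <arm_neon.h>

static int16x8_t
add_scaled_correction (int16x8_t acc, int16x8_t delta)
{
  /* Per lane: acc + ((delta + 8) >> 4), i.e. acc += round (delta / 16).  */
  return vrsraq_n_s16 (acc, delta, 4);
}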
-/* vshl */
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vshl_n_s8 (int8x8_t __a, const int __b)
-{
- return (int8x8_t) __builtin_aarch64_sshl_nv8qi (__a, __b);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vshl_n_s16 (int16x4_t __a, const int __b)
-{
- return (int16x4_t) __builtin_aarch64_sshl_nv4hi (__a, __b);
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vshl_n_s32 (int32x2_t __a, const int __b)
-{
- return (int32x2_t) __builtin_aarch64_sshl_nv2si (__a, __b);
-}
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vshl_n_s64 (int64x1_t __a, const int __b)
-{
- return (int64x1_t) __builtin_aarch64_sshl_ndi (__a, __b);
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vshl_n_u8 (uint8x8_t __a, const int __b)
-{
- return (uint8x8_t) __builtin_aarch64_ushl_nv8qi ((int8x8_t) __a, __b);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vshl_n_u16 (uint16x4_t __a, const int __b)
-{
- return (uint16x4_t) __builtin_aarch64_ushl_nv4hi ((int16x4_t) __a, __b);
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vshl_n_u32 (uint32x2_t __a, const int __b)
-{
- return (uint32x2_t) __builtin_aarch64_ushl_nv2si ((int32x2_t) __a, __b);
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vshl_n_u64 (uint64x1_t __a, const int __b)
-{
- return (uint64x1_t) __builtin_aarch64_ushl_ndi ((int64x1_t) __a, __b);
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vshlq_n_s8 (int8x16_t __a, const int __b)
-{
- return (int8x16_t) __builtin_aarch64_sshl_nv16qi (__a, __b);
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vshlq_n_s16 (int16x8_t __a, const int __b)
-{
- return (int16x8_t) __builtin_aarch64_sshl_nv8hi (__a, __b);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vshlq_n_s32 (int32x4_t __a, const int __b)
-{
- return (int32x4_t) __builtin_aarch64_sshl_nv4si (__a, __b);
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vshlq_n_s64 (int64x2_t __a, const int __b)
-{
- return (int64x2_t) __builtin_aarch64_sshl_nv2di (__a, __b);
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vshlq_n_u8 (uint8x16_t __a, const int __b)
-{
- return (uint8x16_t) __builtin_aarch64_ushl_nv16qi ((int8x16_t) __a, __b);
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vshlq_n_u16 (uint16x8_t __a, const int __b)
-{
- return (uint16x8_t) __builtin_aarch64_ushl_nv8hi ((int16x8_t) __a, __b);
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vshlq_n_u32 (uint32x4_t __a, const int __b)
-{
- return (uint32x4_t) __builtin_aarch64_ushl_nv4si ((int32x4_t) __a, __b);
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vshlq_n_u64 (uint64x2_t __a, const int __b)
-{
- return (uint64x2_t) __builtin_aarch64_ushl_nv2di ((int64x2_t) __a, __b);
-}
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vshld_n_s64 (int64x1_t __a, const int __b)
-{
- return (int64x1_t) __builtin_aarch64_sshl_ndi (__a, __b);
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vshld_n_u64 (uint64x1_t __a, const int __b)
-{
- return (uint64x1_t) __builtin_aarch64_ushl_ndi (__a, __b);
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vshl_s8 (int8x8_t __a, int8x8_t __b)
-{
- return (int8x8_t) __builtin_aarch64_sshlv8qi (__a, __b);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vshl_s16 (int16x4_t __a, int16x4_t __b)
-{
- return (int16x4_t) __builtin_aarch64_sshlv4hi (__a, __b);
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vshl_s32 (int32x2_t __a, int32x2_t __b)
-{
- return (int32x2_t) __builtin_aarch64_sshlv2si (__a, __b);
-}
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vshl_s64 (int64x1_t __a, int64x1_t __b)
-{
- return (int64x1_t) __builtin_aarch64_sshldi (__a, __b);
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vshl_u8 (uint8x8_t __a, int8x8_t __b)
-{
- return (uint8x8_t) __builtin_aarch64_ushlv8qi ((int8x8_t) __a, __b);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vshl_u16 (uint16x4_t __a, int16x4_t __b)
-{
- return (uint16x4_t) __builtin_aarch64_ushlv4hi ((int16x4_t) __a, __b);
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vshl_u32 (uint32x2_t __a, int32x2_t __b)
-{
- return (uint32x2_t) __builtin_aarch64_ushlv2si ((int32x2_t) __a, __b);
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vshl_u64 (uint64x1_t __a, int64x1_t __b)
-{
- return (uint64x1_t) __builtin_aarch64_ushldi ((int64x1_t) __a, __b);
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vshlq_s8 (int8x16_t __a, int8x16_t __b)
-{
- return (int8x16_t) __builtin_aarch64_sshlv16qi (__a, __b);
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vshlq_s16 (int16x8_t __a, int16x8_t __b)
-{
- return (int16x8_t) __builtin_aarch64_sshlv8hi (__a, __b);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vshlq_s32 (int32x4_t __a, int32x4_t __b)
-{
- return (int32x4_t) __builtin_aarch64_sshlv4si (__a, __b);
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vshlq_s64 (int64x2_t __a, int64x2_t __b)
-{
- return (int64x2_t) __builtin_aarch64_sshlv2di (__a, __b);
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vshlq_u8 (uint8x16_t __a, int8x16_t __b)
-{
- return (uint8x16_t) __builtin_aarch64_ushlv16qi ((int8x16_t) __a, __b);
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vshlq_u16 (uint16x8_t __a, int16x8_t __b)
-{
- return (uint16x8_t) __builtin_aarch64_ushlv8hi ((int16x8_t) __a, __b);
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vshlq_u32 (uint32x4_t __a, int32x4_t __b)
-{
- return (uint32x4_t) __builtin_aarch64_ushlv4si ((int32x4_t) __a, __b);
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vshlq_u64 (uint64x2_t __a, int64x2_t __b)
-{
- return (uint64x2_t) __builtin_aarch64_ushlv2di ((int64x2_t) __a, __b);
-}
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vshld_s64 (int64x1_t __a, int64x1_t __b)
-{
- return (int64x1_t) __builtin_aarch64_sshldi (__a, __b);
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vshld_u64 (uint64x1_t __a, uint64x1_t __b)
-{
- return (uint64x1_t) __builtin_aarch64_ushldi (__a, __b);
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vshll_high_n_s8 (int8x16_t __a, const int __b)
-{
- return __builtin_aarch64_sshll2_nv16qi (__a, __b);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vshll_high_n_s16 (int16x8_t __a, const int __b)
-{
- return __builtin_aarch64_sshll2_nv8hi (__a, __b);
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vshll_high_n_s32 (int32x4_t __a, const int __b)
-{
- return __builtin_aarch64_sshll2_nv4si (__a, __b);
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vshll_high_n_u8 (uint8x16_t __a, const int __b)
-{
- return (uint16x8_t) __builtin_aarch64_ushll2_nv16qi ((int8x16_t) __a, __b);
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vshll_high_n_u16 (uint16x8_t __a, const int __b)
-{
- return (uint32x4_t) __builtin_aarch64_ushll2_nv8hi ((int16x8_t) __a, __b);
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vshll_high_n_u32 (uint32x4_t __a, const int __b)
-{
- return (uint64x2_t) __builtin_aarch64_ushll2_nv4si ((int32x4_t) __a, __b);
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vshll_n_s8 (int8x8_t __a, const int __b)
-{
- return __builtin_aarch64_sshll_nv8qi (__a, __b);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vshll_n_s16 (int16x4_t __a, const int __b)
-{
- return __builtin_aarch64_sshll_nv4hi (__a, __b);
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vshll_n_s32 (int32x2_t __a, const int __b)
-{
- return __builtin_aarch64_sshll_nv2si (__a, __b);
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vshll_n_u8 (uint8x8_t __a, const int __b)
-{
- return (uint16x8_t) __builtin_aarch64_ushll_nv8qi ((int8x8_t) __a, __b);
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vshll_n_u16 (uint16x4_t __a, const int __b)
-{
- return (uint32x4_t) __builtin_aarch64_ushll_nv4hi ((int16x4_t) __a, __b);
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vshll_n_u32 (uint32x2_t __a, const int __b)
-{
- return (uint64x2_t) __builtin_aarch64_ushll_nv2si ((int32x2_t) __a, __b);
-}
-
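/* Illustrative sketch (not from the original header): vshll_n widens each
   lane to twice its width while shifting left, a common first step before
   accumulating bytes into 16-bit sums.  The function name is a placeholder. */
#include <arm_neon.h>

static uint16x8_t
widen_and_scale (uint8x8_t bytes)
{
  /* Per lane: (uint16_t) b << 3, without risk of overflow in 8 bits.  */
  return vshll_n_u8 (bytes, 3);
}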
-/* vshr */
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vshr_n_s8 (int8x8_t __a, const int __b)
-{
- return (int8x8_t) __builtin_aarch64_sshr_nv8qi (__a, __b);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vshr_n_s16 (int16x4_t __a, const int __b)
-{
- return (int16x4_t) __builtin_aarch64_sshr_nv4hi (__a, __b);
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vshr_n_s32 (int32x2_t __a, const int __b)
-{
- return (int32x2_t) __builtin_aarch64_sshr_nv2si (__a, __b);
-}
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vshr_n_s64 (int64x1_t __a, const int __b)
-{
- return (int64x1_t) __builtin_aarch64_sshr_ndi (__a, __b);
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vshr_n_u8 (uint8x8_t __a, const int __b)
-{
- return (uint8x8_t) __builtin_aarch64_ushr_nv8qi ((int8x8_t) __a, __b);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vshr_n_u16 (uint16x4_t __a, const int __b)
-{
- return (uint16x4_t) __builtin_aarch64_ushr_nv4hi ((int16x4_t) __a, __b);
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vshr_n_u32 (uint32x2_t __a, const int __b)
-{
- return (uint32x2_t) __builtin_aarch64_ushr_nv2si ((int32x2_t) __a, __b);
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vshr_n_u64 (uint64x1_t __a, const int __b)
-{
- return (uint64x1_t) __builtin_aarch64_ushr_ndi ((int64x1_t) __a, __b);
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vshrq_n_s8 (int8x16_t __a, const int __b)
-{
- return (int8x16_t) __builtin_aarch64_sshr_nv16qi (__a, __b);
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vshrq_n_s16 (int16x8_t __a, const int __b)
-{
- return (int16x8_t) __builtin_aarch64_sshr_nv8hi (__a, __b);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vshrq_n_s32 (int32x4_t __a, const int __b)
-{
- return (int32x4_t) __builtin_aarch64_sshr_nv4si (__a, __b);
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vshrq_n_s64 (int64x2_t __a, const int __b)
-{
- return (int64x2_t) __builtin_aarch64_sshr_nv2di (__a, __b);
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vshrq_n_u8 (uint8x16_t __a, const int __b)
-{
- return (uint8x16_t) __builtin_aarch64_ushr_nv16qi ((int8x16_t) __a, __b);
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vshrq_n_u16 (uint16x8_t __a, const int __b)
-{
- return (uint16x8_t) __builtin_aarch64_ushr_nv8hi ((int16x8_t) __a, __b);
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vshrq_n_u32 (uint32x4_t __a, const int __b)
-{
- return (uint32x4_t) __builtin_aarch64_ushr_nv4si ((int32x4_t) __a, __b);
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vshrq_n_u64 (uint64x2_t __a, const int __b)
-{
- return (uint64x2_t) __builtin_aarch64_ushr_nv2di ((int64x2_t) __a, __b);
-}
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vshrd_n_s64 (int64x1_t __a, const int __b)
-{
- return (int64x1_t) __builtin_aarch64_sshr_ndi (__a, __b);
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vshrd_n_u64 (uint64x1_t __a, const int __b)
-{
- return (uint64x1_t) __builtin_aarch64_ushr_ndi (__a, __b);
-}
-
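/* Illustrative sketch (not from the original header): vshr_n is the plain
   (non-rounding) right shift by an immediate; for signed lanes it is an
   arithmetic shift, i.e. it rounds toward minus infinity.  */
#include <arm_neon.h>

static int32x4_t
divide_by_256_floor (int32x4_t v)
{
  return vshrq_n_s32 (v, 8);
}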
-/* vsli */
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vsli_n_s8 (int8x8_t __a, int8x8_t __b, const int __c)
-{
- return (int8x8_t) __builtin_aarch64_ssli_nv8qi (__a, __b, __c);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vsli_n_s16 (int16x4_t __a, int16x4_t __b, const int __c)
-{
- return (int16x4_t) __builtin_aarch64_ssli_nv4hi (__a, __b, __c);
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vsli_n_s32 (int32x2_t __a, int32x2_t __b, const int __c)
-{
- return (int32x2_t) __builtin_aarch64_ssli_nv2si (__a, __b, __c);
-}
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vsli_n_s64 (int64x1_t __a, int64x1_t __b, const int __c)
-{
- return (int64x1_t) __builtin_aarch64_ssli_ndi (__a, __b, __c);
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vsli_n_u8 (uint8x8_t __a, uint8x8_t __b, const int __c)
-{
- return (uint8x8_t) __builtin_aarch64_usli_nv8qi ((int8x8_t) __a,
- (int8x8_t) __b, __c);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vsli_n_u16 (uint16x4_t __a, uint16x4_t __b, const int __c)
-{
- return (uint16x4_t) __builtin_aarch64_usli_nv4hi ((int16x4_t) __a,
- (int16x4_t) __b, __c);
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vsli_n_u32 (uint32x2_t __a, uint32x2_t __b, const int __c)
-{
- return (uint32x2_t) __builtin_aarch64_usli_nv2si ((int32x2_t) __a,
- (int32x2_t) __b, __c);
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vsli_n_u64 (uint64x1_t __a, uint64x1_t __b, const int __c)
-{
- return (uint64x1_t) __builtin_aarch64_usli_ndi ((int64x1_t) __a,
- (int64x1_t) __b, __c);
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vsliq_n_s8 (int8x16_t __a, int8x16_t __b, const int __c)
-{
- return (int8x16_t) __builtin_aarch64_ssli_nv16qi (__a, __b, __c);
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vsliq_n_s16 (int16x8_t __a, int16x8_t __b, const int __c)
-{
- return (int16x8_t) __builtin_aarch64_ssli_nv8hi (__a, __b, __c);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vsliq_n_s32 (int32x4_t __a, int32x4_t __b, const int __c)
-{
- return (int32x4_t) __builtin_aarch64_ssli_nv4si (__a, __b, __c);
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vsliq_n_s64 (int64x2_t __a, int64x2_t __b, const int __c)
-{
- return (int64x2_t) __builtin_aarch64_ssli_nv2di (__a, __b, __c);
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vsliq_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __c)
-{
- return (uint8x16_t) __builtin_aarch64_usli_nv16qi ((int8x16_t) __a,
- (int8x16_t) __b, __c);
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vsliq_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __c)
-{
- return (uint16x8_t) __builtin_aarch64_usli_nv8hi ((int16x8_t) __a,
- (int16x8_t) __b, __c);
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vsliq_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __c)
-{
- return (uint32x4_t) __builtin_aarch64_usli_nv4si ((int32x4_t) __a,
- (int32x4_t) __b, __c);
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vsliq_n_u64 (uint64x2_t __a, uint64x2_t __b, const int __c)
-{
- return (uint64x2_t) __builtin_aarch64_usli_nv2di ((int64x2_t) __a,
- (int64x2_t) __b, __c);
-}
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vslid_n_s64 (int64x1_t __a, int64x1_t __b, const int __c)
-{
- return (int64x1_t) __builtin_aarch64_ssli_ndi (__a, __b, __c);
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vslid_n_u64 (uint64x1_t __a, uint64x1_t __b, const int __c)
-{
- return (uint64x1_t) __builtin_aarch64_usli_ndi (__a, __b, __c);
-}
-
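/* Illustrative sketch (not from the original header): vsli_n shifts the
   second operand left and inserts it into the first, preserving only the
   destination bits below the shift point -- convenient for packing
   bit-fields.  Names are placeholders.  */
#include <arm_neon.h>

static uint8x8_t
pack_nibbles (uint8x8_t low, uint8x8_t high)
{
  /* Per lane: (high << 4) | (low & 0x0f).  */
  return vsli_n_u8 (low, high, 4);
}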
-/* vsqadd */
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vsqadd_u8 (uint8x8_t __a, int8x8_t __b)
-{
- return (uint8x8_t) __builtin_aarch64_usqaddv8qi ((int8x8_t) __a,
- (int8x8_t) __b);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vsqadd_u16 (uint16x4_t __a, int16x4_t __b)
-{
- return (uint16x4_t) __builtin_aarch64_usqaddv4hi ((int16x4_t) __a,
- (int16x4_t) __b);
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vsqadd_u32 (uint32x2_t __a, int32x2_t __b)
-{
- return (uint32x2_t) __builtin_aarch64_usqaddv2si ((int32x2_t) __a,
- (int32x2_t) __b);
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vsqadd_u64 (uint64x1_t __a, int64x1_t __b)
-{
- return (uint64x1_t) __builtin_aarch64_usqadddi ((int64x1_t) __a, __b);
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vsqaddq_u8 (uint8x16_t __a, int8x16_t __b)
-{
- return (uint8x16_t) __builtin_aarch64_usqaddv16qi ((int8x16_t) __a,
- (int8x16_t) __b);
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vsqaddq_u16 (uint16x8_t __a, int16x8_t __b)
-{
- return (uint16x8_t) __builtin_aarch64_usqaddv8hi ((int16x8_t) __a,
- (int16x8_t) __b);
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vsqaddq_u32 (uint32x4_t __a, int32x4_t __b)
-{
- return (uint32x4_t) __builtin_aarch64_usqaddv4si ((int32x4_t) __a,
- (int32x4_t) __b);
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vsqaddq_u64 (uint64x2_t __a, int64x2_t __b)
-{
- return (uint64x2_t) __builtin_aarch64_usqaddv2di ((int64x2_t) __a,
- (int64x2_t) __b);
-}
-
-__extension__ static __inline uint8x1_t __attribute__ ((__always_inline__))
-vsqaddb_u8 (uint8x1_t __a, int8x1_t __b)
-{
- return (uint8x1_t) __builtin_aarch64_usqaddqi ((int8x1_t) __a, __b);
-}
-
-__extension__ static __inline uint16x1_t __attribute__ ((__always_inline__))
-vsqaddh_u16 (uint16x1_t __a, int16x1_t __b)
-{
- return (uint16x1_t) __builtin_aarch64_usqaddhi ((int16x1_t) __a, __b);
-}
-
-__extension__ static __inline uint32x1_t __attribute__ ((__always_inline__))
-vsqadds_u32 (uint32x1_t __a, int32x1_t __b)
-{
- return (uint32x1_t) __builtin_aarch64_usqaddsi ((int32x1_t) __a, __b);
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vsqaddd_u64 (uint64x1_t __a, int64x1_t __b)
-{
- return (uint64x1_t) __builtin_aarch64_usqadddi ((int64x1_t) __a, __b);
-}
-
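/* Illustrative sketch (not from the original header): vsqadd adds a signed
   adjustment to unsigned data with saturation, so the result stays within
   the unsigned range.  Names are placeholders.  */
#include <arm_neon.h>

static uint8x16_t
apply_signed_bias (uint8x16_t pixels, int8x16_t bias)
{
  /* Per lane: clamp (pixels + bias, 0, 255).  */
  return vsqaddq_u8 (pixels, bias);
}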
-/* vsqrt */
-
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vsqrt_f32 (float32x2_t __a)
-{
-  return __builtin_aarch64_sqrtv2sf (__a);
-}
-
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vsqrtq_f32 (float32x4_t __a)
-{
-  return __builtin_aarch64_sqrtv4sf (__a);
-}
-
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
-vsqrtq_f64 (float64x2_t __a)
-{
-  return __builtin_aarch64_sqrtv2df (__a);
-}
-
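/* Illustrative sketch (not from the original header): the vsqrt intrinsics
   map directly onto the vector FSQRT instruction.  */
#include <arm_neon.h>

static float32x4_t
lane_wise_sqrt (float32x4_t sum_of_squares)
{
  return vsqrtq_f32 (sum_of_squares);
}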
-/* vsra */
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vsra_n_s8 (int8x8_t __a, int8x8_t __b, const int __c)
-{
- return (int8x8_t) __builtin_aarch64_ssra_nv8qi (__a, __b, __c);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vsra_n_s16 (int16x4_t __a, int16x4_t __b, const int __c)
-{
- return (int16x4_t) __builtin_aarch64_ssra_nv4hi (__a, __b, __c);
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vsra_n_s32 (int32x2_t __a, int32x2_t __b, const int __c)
-{
- return (int32x2_t) __builtin_aarch64_ssra_nv2si (__a, __b, __c);
-}
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vsra_n_s64 (int64x1_t __a, int64x1_t __b, const int __c)
-{
- return (int64x1_t) __builtin_aarch64_ssra_ndi (__a, __b, __c);
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vsra_n_u8 (uint8x8_t __a, uint8x8_t __b, const int __c)
-{
- return (uint8x8_t) __builtin_aarch64_usra_nv8qi ((int8x8_t) __a,
- (int8x8_t) __b, __c);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vsra_n_u16 (uint16x4_t __a, uint16x4_t __b, const int __c)
-{
- return (uint16x4_t) __builtin_aarch64_usra_nv4hi ((int16x4_t) __a,
- (int16x4_t) __b, __c);
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vsra_n_u32 (uint32x2_t __a, uint32x2_t __b, const int __c)
-{
- return (uint32x2_t) __builtin_aarch64_usra_nv2si ((int32x2_t) __a,
- (int32x2_t) __b, __c);
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vsra_n_u64 (uint64x1_t __a, uint64x1_t __b, const int __c)
-{
- return (uint64x1_t) __builtin_aarch64_usra_ndi ((int64x1_t) __a,
- (int64x1_t) __b, __c);
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vsraq_n_s8 (int8x16_t __a, int8x16_t __b, const int __c)
-{
- return (int8x16_t) __builtin_aarch64_ssra_nv16qi (__a, __b, __c);
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vsraq_n_s16 (int16x8_t __a, int16x8_t __b, const int __c)
-{
- return (int16x8_t) __builtin_aarch64_ssra_nv8hi (__a, __b, __c);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vsraq_n_s32 (int32x4_t __a, int32x4_t __b, const int __c)
-{
- return (int32x4_t) __builtin_aarch64_ssra_nv4si (__a, __b, __c);
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vsraq_n_s64 (int64x2_t __a, int64x2_t __b, const int __c)
-{
- return (int64x2_t) __builtin_aarch64_ssra_nv2di (__a, __b, __c);
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vsraq_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __c)
-{
- return (uint8x16_t) __builtin_aarch64_usra_nv16qi ((int8x16_t) __a,
- (int8x16_t) __b, __c);
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vsraq_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __c)
-{
- return (uint16x8_t) __builtin_aarch64_usra_nv8hi ((int16x8_t) __a,
- (int16x8_t) __b, __c);
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vsraq_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __c)
-{
- return (uint32x4_t) __builtin_aarch64_usra_nv4si ((int32x4_t) __a,
- (int32x4_t) __b, __c);
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vsraq_n_u64 (uint64x2_t __a, uint64x2_t __b, const int __c)
-{
- return (uint64x2_t) __builtin_aarch64_usra_nv2di ((int64x2_t) __a,
- (int64x2_t) __b, __c);
-}
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vsrad_n_s64 (int64x1_t __a, int64x1_t __b, const int __c)
-{
- return (int64x1_t) __builtin_aarch64_ssra_ndi (__a, __b, __c);
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vsrad_n_u64 (uint64x1_t __a, uint64x1_t __b, const int __c)
-{
- return (uint64x1_t) __builtin_aarch64_usra_ndi (__a, __b, __c);
-}
-
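/* Illustrative sketch (not from the original header): vsra_n is the
   non-rounding shift-right-and-accumulate, useful for summing the high
   parts of wider intermediate values.  Names are placeholders.  */
#include <arm_neon.h>

static uint32x4_t
accumulate_high_halves (uint32x4_t acc, uint32x4_t v)
{
  /* Per lane: acc + (v >> 16).  */
  return vsraq_n_u32 (acc, v, 16);
}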
-/* vsri */
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vsri_n_s8 (int8x8_t __a, int8x8_t __b, const int __c)
-{
- return (int8x8_t) __builtin_aarch64_ssri_nv8qi (__a, __b, __c);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vsri_n_s16 (int16x4_t __a, int16x4_t __b, const int __c)
-{
- return (int16x4_t) __builtin_aarch64_ssri_nv4hi (__a, __b, __c);
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vsri_n_s32 (int32x2_t __a, int32x2_t __b, const int __c)
-{
- return (int32x2_t) __builtin_aarch64_ssri_nv2si (__a, __b, __c);
-}
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vsri_n_s64 (int64x1_t __a, int64x1_t __b, const int __c)
-{
- return (int64x1_t) __builtin_aarch64_ssri_ndi (__a, __b, __c);
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vsri_n_u8 (uint8x8_t __a, uint8x8_t __b, const int __c)
-{
- return (uint8x8_t) __builtin_aarch64_usri_nv8qi ((int8x8_t) __a,
- (int8x8_t) __b, __c);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vsri_n_u16 (uint16x4_t __a, uint16x4_t __b, const int __c)
-{
- return (uint16x4_t) __builtin_aarch64_usri_nv4hi ((int16x4_t) __a,
- (int16x4_t) __b, __c);
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vsri_n_u32 (uint32x2_t __a, uint32x2_t __b, const int __c)
-{
- return (uint32x2_t) __builtin_aarch64_usri_nv2si ((int32x2_t) __a,
- (int32x2_t) __b, __c);
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vsri_n_u64 (uint64x1_t __a, uint64x1_t __b, const int __c)
-{
- return (uint64x1_t) __builtin_aarch64_usri_ndi ((int64x1_t) __a,
- (int64x1_t) __b, __c);
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vsriq_n_s8 (int8x16_t __a, int8x16_t __b, const int __c)
-{
- return (int8x16_t) __builtin_aarch64_ssri_nv16qi (__a, __b, __c);
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vsriq_n_s16 (int16x8_t __a, int16x8_t __b, const int __c)
-{
- return (int16x8_t) __builtin_aarch64_ssri_nv8hi (__a, __b, __c);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vsriq_n_s32 (int32x4_t __a, int32x4_t __b, const int __c)
-{
- return (int32x4_t) __builtin_aarch64_ssri_nv4si (__a, __b, __c);
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vsriq_n_s64 (int64x2_t __a, int64x2_t __b, const int __c)
-{
- return (int64x2_t) __builtin_aarch64_ssri_nv2di (__a, __b, __c);
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vsriq_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __c)
-{
- return (uint8x16_t) __builtin_aarch64_usri_nv16qi ((int8x16_t) __a,
- (int8x16_t) __b, __c);
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vsriq_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __c)
-{
- return (uint16x8_t) __builtin_aarch64_usri_nv8hi ((int16x8_t) __a,
- (int16x8_t) __b, __c);
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vsriq_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __c)
-{
- return (uint32x4_t) __builtin_aarch64_usri_nv4si ((int32x4_t) __a,
- (int32x4_t) __b, __c);
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vsriq_n_u64 (uint64x2_t __a, uint64x2_t __b, const int __c)
-{
- return (uint64x2_t) __builtin_aarch64_usri_nv2di ((int64x2_t) __a,
- (int64x2_t) __b, __c);
-}
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vsrid_n_s64 (int64x1_t __a, int64x1_t __b, const int __c)
-{
- return (int64x1_t) __builtin_aarch64_ssri_ndi (__a, __b, __c);
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vsrid_n_u64 (uint64x1_t __a, uint64x1_t __b, const int __c)
-{
- return (uint64x1_t) __builtin_aarch64_usri_ndi (__a, __b, __c);
-}
-
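/* Illustrative sketch (not from the original header): vsri_n shifts right
   and inserts, preserving the high bits of the destination, so a lane-wise
   rotate can be built from a left shift followed by an insert.  */
#include <arm_neon.h>

static uint32x4_t
rotate_right_by_7 (uint32x4_t v)
{
  uint32x4_t wrapped = vshlq_n_u32 (v, 25);   /* low 7 bits move to the top */
  return vsriq_n_u32 (wrapped, v, 7);         /* insert v >> 7 underneath   */
}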
-/* vstn */
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst2_s64 (int64_t * __a, int64x1x2_t val)
-{
- __builtin_aarch64_simd_oi __o;
- int64x2x2_t temp;
- temp.val[0] = vcombine_s64 (val.val[0], vcreate_s64 (INT64_C (0)));
- temp.val[1] = vcombine_s64 (val.val[1], vcreate_s64 (INT64_C (0)));
- __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) temp.val[0], 0);
- __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) temp.val[1], 1);
- __builtin_aarch64_st2di ((__builtin_aarch64_simd_di *) __a, __o);
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst2_u64 (uint64_t * __a, uint64x1x2_t val)
-{
- __builtin_aarch64_simd_oi __o;
- uint64x2x2_t temp;
- temp.val[0] = vcombine_u64 (val.val[0], vcreate_u64 (UINT64_C (0)));
- temp.val[1] = vcombine_u64 (val.val[1], vcreate_u64 (UINT64_C (0)));
- __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) temp.val[0], 0);
- __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) temp.val[1], 1);
- __builtin_aarch64_st2di ((__builtin_aarch64_simd_di *) __a, __o);
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst2_f64 (float64_t * __a, float64x1x2_t val)
-{
- __builtin_aarch64_simd_oi __o;
- float64x2x2_t temp;
- temp.val[0] = vcombine_f64 (val.val[0], vcreate_f64 (UINT64_C (0)));
- temp.val[1] = vcombine_f64 (val.val[1], vcreate_f64 (UINT64_C (0)));
- __o = __builtin_aarch64_set_qregoiv2df (__o, (float64x2_t) temp.val[0], 0);
- __o = __builtin_aarch64_set_qregoiv2df (__o, (float64x2_t) temp.val[1], 1);
- __builtin_aarch64_st2df ((__builtin_aarch64_simd_df *) __a, __o);
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst2_s8 (int8_t * __a, int8x8x2_t val)
-{
- __builtin_aarch64_simd_oi __o;
- int8x16x2_t temp;
- temp.val[0] = vcombine_s8 (val.val[0], vcreate_s8 (INT64_C (0)));
- temp.val[1] = vcombine_s8 (val.val[1], vcreate_s8 (INT64_C (0)));
- __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) temp.val[0], 0);
- __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) temp.val[1], 1);
- __builtin_aarch64_st2v8qi ((__builtin_aarch64_simd_qi *) __a, __o);
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst2_p8 (poly8_t * __a, poly8x8x2_t val)
-{
- __builtin_aarch64_simd_oi __o;
- poly8x16x2_t temp;
- temp.val[0] = vcombine_p8 (val.val[0], vcreate_p8 (UINT64_C (0)));
- temp.val[1] = vcombine_p8 (val.val[1], vcreate_p8 (UINT64_C (0)));
- __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) temp.val[0], 0);
- __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) temp.val[1], 1);
- __builtin_aarch64_st2v8qi ((__builtin_aarch64_simd_qi *) __a, __o);
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst2_s16 (int16_t * __a, int16x4x2_t val)
-{
- __builtin_aarch64_simd_oi __o;
- int16x8x2_t temp;
- temp.val[0] = vcombine_s16 (val.val[0], vcreate_s16 (INT64_C (0)));
- temp.val[1] = vcombine_s16 (val.val[1], vcreate_s16 (INT64_C (0)));
- __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) temp.val[0], 0);
- __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) temp.val[1], 1);
- __builtin_aarch64_st2v4hi ((__builtin_aarch64_simd_hi *) __a, __o);
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst2_p16 (poly16_t * __a, poly16x4x2_t val)
-{
- __builtin_aarch64_simd_oi __o;
- poly16x8x2_t temp;
- temp.val[0] = vcombine_p16 (val.val[0], vcreate_p16 (UINT64_C (0)));
- temp.val[1] = vcombine_p16 (val.val[1], vcreate_p16 (UINT64_C (0)));
- __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) temp.val[0], 0);
- __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) temp.val[1], 1);
- __builtin_aarch64_st2v4hi ((__builtin_aarch64_simd_hi *) __a, __o);
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst2_s32 (int32_t * __a, int32x2x2_t val)
-{
- __builtin_aarch64_simd_oi __o;
- int32x4x2_t temp;
- temp.val[0] = vcombine_s32 (val.val[0], vcreate_s32 (INT64_C (0)));
- temp.val[1] = vcombine_s32 (val.val[1], vcreate_s32 (INT64_C (0)));
- __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) temp.val[0], 0);
- __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) temp.val[1], 1);
- __builtin_aarch64_st2v2si ((__builtin_aarch64_simd_si *) __a, __o);
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst2_u8 (uint8_t * __a, uint8x8x2_t val)
-{
- __builtin_aarch64_simd_oi __o;
- uint8x16x2_t temp;
- temp.val[0] = vcombine_u8 (val.val[0], vcreate_u8 (UINT64_C (0)));
- temp.val[1] = vcombine_u8 (val.val[1], vcreate_u8 (UINT64_C (0)));
- __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) temp.val[0], 0);
- __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) temp.val[1], 1);
- __builtin_aarch64_st2v8qi ((__builtin_aarch64_simd_qi *) __a, __o);
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst2_u16 (uint16_t * __a, uint16x4x2_t val)
-{
- __builtin_aarch64_simd_oi __o;
- uint16x8x2_t temp;
- temp.val[0] = vcombine_u16 (val.val[0], vcreate_u16 (UINT64_C (0)));
- temp.val[1] = vcombine_u16 (val.val[1], vcreate_u16 (UINT64_C (0)));
- __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) temp.val[0], 0);
- __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) temp.val[1], 1);
- __builtin_aarch64_st2v4hi ((__builtin_aarch64_simd_hi *) __a, __o);
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst2_u32 (uint32_t * __a, uint32x2x2_t val)
-{
- __builtin_aarch64_simd_oi __o;
- uint32x4x2_t temp;
- temp.val[0] = vcombine_u32 (val.val[0], vcreate_u32 (UINT64_C (0)));
- temp.val[1] = vcombine_u32 (val.val[1], vcreate_u32 (UINT64_C (0)));
- __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) temp.val[0], 0);
- __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) temp.val[1], 1);
- __builtin_aarch64_st2v2si ((__builtin_aarch64_simd_si *) __a, __o);
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst2_f32 (float32_t * __a, float32x2x2_t val)
-{
- __builtin_aarch64_simd_oi __o;
- float32x4x2_t temp;
- temp.val[0] = vcombine_f32 (val.val[0], vcreate_f32 (UINT64_C (0)));
- temp.val[1] = vcombine_f32 (val.val[1], vcreate_f32 (UINT64_C (0)));
- __o = __builtin_aarch64_set_qregoiv4sf (__o, (float32x4_t) temp.val[0], 0);
- __o = __builtin_aarch64_set_qregoiv4sf (__o, (float32x4_t) temp.val[1], 1);
- __builtin_aarch64_st2v2sf ((__builtin_aarch64_simd_sf *) __a, __o);
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst2q_s8 (int8_t * __a, int8x16x2_t val)
-{
- __builtin_aarch64_simd_oi __o;
- __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) val.val[0], 0);
- __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) val.val[1], 1);
- __builtin_aarch64_st2v16qi ((__builtin_aarch64_simd_qi *) __a, __o);
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst2q_p8 (poly8_t * __a, poly8x16x2_t val)
-{
- __builtin_aarch64_simd_oi __o;
- __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) val.val[0], 0);
- __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) val.val[1], 1);
- __builtin_aarch64_st2v16qi ((__builtin_aarch64_simd_qi *) __a, __o);
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst2q_s16 (int16_t * __a, int16x8x2_t val)
-{
- __builtin_aarch64_simd_oi __o;
- __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) val.val[0], 0);
- __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) val.val[1], 1);
- __builtin_aarch64_st2v8hi ((__builtin_aarch64_simd_hi *) __a, __o);
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst2q_p16 (poly16_t * __a, poly16x8x2_t val)
-{
- __builtin_aarch64_simd_oi __o;
- __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) val.val[0], 0);
- __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) val.val[1], 1);
- __builtin_aarch64_st2v8hi ((__builtin_aarch64_simd_hi *) __a, __o);
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst2q_s32 (int32_t * __a, int32x4x2_t val)
-{
- __builtin_aarch64_simd_oi __o;
- __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) val.val[0], 0);
- __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) val.val[1], 1);
- __builtin_aarch64_st2v4si ((__builtin_aarch64_simd_si *) __a, __o);
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst2q_s64 (int64_t * __a, int64x2x2_t val)
-{
- __builtin_aarch64_simd_oi __o;
- __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) val.val[0], 0);
- __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) val.val[1], 1);
- __builtin_aarch64_st2v2di ((__builtin_aarch64_simd_di *) __a, __o);
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst2q_u8 (uint8_t * __a, uint8x16x2_t val)
-{
- __builtin_aarch64_simd_oi __o;
- __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) val.val[0], 0);
- __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) val.val[1], 1);
- __builtin_aarch64_st2v16qi ((__builtin_aarch64_simd_qi *) __a, __o);
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst2q_u16 (uint16_t * __a, uint16x8x2_t val)
-{
- __builtin_aarch64_simd_oi __o;
- __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) val.val[0], 0);
- __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) val.val[1], 1);
- __builtin_aarch64_st2v8hi ((__builtin_aarch64_simd_hi *) __a, __o);
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst2q_u32 (uint32_t * __a, uint32x4x2_t val)
-{
- __builtin_aarch64_simd_oi __o;
- __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) val.val[0], 0);
- __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) val.val[1], 1);
- __builtin_aarch64_st2v4si ((__builtin_aarch64_simd_si *) __a, __o);
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst2q_u64 (uint64_t * __a, uint64x2x2_t val)
-{
- __builtin_aarch64_simd_oi __o;
- __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) val.val[0], 0);
- __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) val.val[1], 1);
- __builtin_aarch64_st2v2di ((__builtin_aarch64_simd_di *) __a, __o);
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst2q_f32 (float32_t * __a, float32x4x2_t val)
-{
- __builtin_aarch64_simd_oi __o;
- __o = __builtin_aarch64_set_qregoiv4sf (__o, (float32x4_t) val.val[0], 0);
- __o = __builtin_aarch64_set_qregoiv4sf (__o, (float32x4_t) val.val[1], 1);
- __builtin_aarch64_st2v4sf ((__builtin_aarch64_simd_sf *) __a, __o);
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst2q_f64 (float64_t * __a, float64x2x2_t val)
-{
- __builtin_aarch64_simd_oi __o;
- __o = __builtin_aarch64_set_qregoiv2df (__o, (float64x2_t) val.val[0], 0);
- __o = __builtin_aarch64_set_qregoiv2df (__o, (float64x2_t) val.val[1], 1);
- __builtin_aarch64_st2v2df ((__builtin_aarch64_simd_df *) __a, __o);
-}
-
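/* Illustrative sketch (not from the original header): the vst2 intrinsics
   store the two member vectors interleaved in memory (a0, b0, a1, b1, ...),
   e.g. writing separate x/y planes back out as an array of 2-D points.
   `out' is assumed to have room for 8 floats; names are placeholders.  */
#include <arm_neon.h>

static void
store_xy_points (float *out, float32x4_t xs, float32x4_t ys)
{
  float32x4x2_t pair;
  pair.val[0] = xs;
  pair.val[1] = ys;
  vst2q_f32 (out, pair);
}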
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst3_s64 (int64_t * __a, int64x1x3_t val)
-{
- __builtin_aarch64_simd_ci __o;
- int64x2x3_t temp;
- temp.val[0] = vcombine_s64 (val.val[0], vcreate_s64 (INT64_C (0)));
- temp.val[1] = vcombine_s64 (val.val[1], vcreate_s64 (INT64_C (0)));
- temp.val[2] = vcombine_s64 (val.val[2], vcreate_s64 (INT64_C (0)));
- __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) temp.val[0], 0);
- __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) temp.val[1], 1);
- __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) temp.val[2], 2);
- __builtin_aarch64_st3di ((__builtin_aarch64_simd_di *) __a, __o);
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst3_u64 (uint64_t * __a, uint64x1x3_t val)
-{
- __builtin_aarch64_simd_ci __o;
- uint64x2x3_t temp;
- temp.val[0] = vcombine_u64 (val.val[0], vcreate_u64 (UINT64_C (0)));
- temp.val[1] = vcombine_u64 (val.val[1], vcreate_u64 (UINT64_C (0)));
- temp.val[2] = vcombine_u64 (val.val[2], vcreate_u64 (UINT64_C (0)));
- __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) temp.val[0], 0);
- __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) temp.val[1], 1);
- __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) temp.val[2], 2);
- __builtin_aarch64_st3di ((__builtin_aarch64_simd_di *) __a, __o);
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst3_f64 (float64_t * __a, float64x1x3_t val)
-{
- __builtin_aarch64_simd_ci __o;
- float64x2x3_t temp;
- temp.val[0] = vcombine_f64 (val.val[0], vcreate_f64 (UINT64_C (0)));
- temp.val[1] = vcombine_f64 (val.val[1], vcreate_f64 (UINT64_C (0)));
- temp.val[2] = vcombine_f64 (val.val[2], vcreate_f64 (UINT64_C (0)));
- __o = __builtin_aarch64_set_qregciv2df (__o, (float64x2_t) temp.val[0], 0);
- __o = __builtin_aarch64_set_qregciv2df (__o, (float64x2_t) temp.val[1], 1);
- __o = __builtin_aarch64_set_qregciv2df (__o, (float64x2_t) temp.val[2], 2);
- __builtin_aarch64_st3df ((__builtin_aarch64_simd_df *) __a, __o);
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst3_s8 (int8_t * __a, int8x8x3_t val)
-{
- __builtin_aarch64_simd_ci __o;
- int8x16x3_t temp;
- temp.val[0] = vcombine_s8 (val.val[0], vcreate_s8 (INT64_C (0)));
- temp.val[1] = vcombine_s8 (val.val[1], vcreate_s8 (INT64_C (0)));
- temp.val[2] = vcombine_s8 (val.val[2], vcreate_s8 (INT64_C (0)));
- __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) temp.val[0], 0);
- __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) temp.val[1], 1);
- __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) temp.val[2], 2);
- __builtin_aarch64_st3v8qi ((__builtin_aarch64_simd_qi *) __a, __o);
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst3_p8 (poly8_t * __a, poly8x8x3_t val)
-{
- __builtin_aarch64_simd_ci __o;
- poly8x16x3_t temp;
- temp.val[0] = vcombine_p8 (val.val[0], vcreate_p8 (UINT64_C (0)));
- temp.val[1] = vcombine_p8 (val.val[1], vcreate_p8 (UINT64_C (0)));
- temp.val[2] = vcombine_p8 (val.val[2], vcreate_p8 (UINT64_C (0)));
- __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) temp.val[0], 0);
- __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) temp.val[1], 1);
- __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) temp.val[2], 2);
- __builtin_aarch64_st3v8qi ((__builtin_aarch64_simd_qi *) __a, __o);
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst3_s16 (int16_t * __a, int16x4x3_t val)
-{
- __builtin_aarch64_simd_ci __o;
- int16x8x3_t temp;
- temp.val[0] = vcombine_s16 (val.val[0], vcreate_s16 (INT64_C (0)));
- temp.val[1] = vcombine_s16 (val.val[1], vcreate_s16 (INT64_C (0)));
- temp.val[2] = vcombine_s16 (val.val[2], vcreate_s16 (INT64_C (0)));
- __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) temp.val[0], 0);
- __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) temp.val[1], 1);
- __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) temp.val[2], 2);
- __builtin_aarch64_st3v4hi ((__builtin_aarch64_simd_hi *) __a, __o);
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst3_p16 (poly16_t * __a, poly16x4x3_t val)
-{
- __builtin_aarch64_simd_ci __o;
- poly16x8x3_t temp;
- temp.val[0] = vcombine_p16 (val.val[0], vcreate_p16 (UINT64_C (0)));
- temp.val[1] = vcombine_p16 (val.val[1], vcreate_p16 (UINT64_C (0)));
- temp.val[2] = vcombine_p16 (val.val[2], vcreate_p16 (UINT64_C (0)));
- __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) temp.val[0], 0);
- __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) temp.val[1], 1);
- __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) temp.val[2], 2);
- __builtin_aarch64_st3v4hi ((__builtin_aarch64_simd_hi *) __a, __o);
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst3_s32 (int32_t * __a, int32x2x3_t val)
-{
- __builtin_aarch64_simd_ci __o;
- int32x4x3_t temp;
- temp.val[0] = vcombine_s32 (val.val[0], vcreate_s32 (INT64_C (0)));
- temp.val[1] = vcombine_s32 (val.val[1], vcreate_s32 (INT64_C (0)));
- temp.val[2] = vcombine_s32 (val.val[2], vcreate_s32 (INT64_C (0)));
- __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) temp.val[0], 0);
- __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) temp.val[1], 1);
- __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) temp.val[2], 2);
- __builtin_aarch64_st3v2si ((__builtin_aarch64_simd_si *) __a, __o);
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst3_u8 (uint8_t * __a, uint8x8x3_t val)
-{
- __builtin_aarch64_simd_ci __o;
- uint8x16x3_t temp;
- temp.val[0] = vcombine_u8 (val.val[0], vcreate_u8 (UINT64_C (0)));
- temp.val[1] = vcombine_u8 (val.val[1], vcreate_u8 (UINT64_C (0)));
- temp.val[2] = vcombine_u8 (val.val[2], vcreate_u8 (UINT64_C (0)));
- __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) temp.val[0], 0);
- __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) temp.val[1], 1);
- __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) temp.val[2], 2);
- __builtin_aarch64_st3v8qi ((__builtin_aarch64_simd_qi *) __a, __o);
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst3_u16 (uint16_t * __a, uint16x4x3_t val)
-{
- __builtin_aarch64_simd_ci __o;
- uint16x8x3_t temp;
- temp.val[0] = vcombine_u16 (val.val[0], vcreate_u16 (UINT64_C (0)));
- temp.val[1] = vcombine_u16 (val.val[1], vcreate_u16 (UINT64_C (0)));
- temp.val[2] = vcombine_u16 (val.val[2], vcreate_u16 (UINT64_C (0)));
- __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) temp.val[0], 0);
- __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) temp.val[1], 1);
- __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) temp.val[2], 2);
- __builtin_aarch64_st3v4hi ((__builtin_aarch64_simd_hi *) __a, __o);
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst3_u32 (uint32_t * __a, uint32x2x3_t val)
-{
- __builtin_aarch64_simd_ci __o;
- uint32x4x3_t temp;
- temp.val[0] = vcombine_u32 (val.val[0], vcreate_u32 (UINT64_C (0)));
- temp.val[1] = vcombine_u32 (val.val[1], vcreate_u32 (UINT64_C (0)));
- temp.val[2] = vcombine_u32 (val.val[2], vcreate_u32 (UINT64_C (0)));
- __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) temp.val[0], 0);
- __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) temp.val[1], 1);
- __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) temp.val[2], 2);
- __builtin_aarch64_st3v2si ((__builtin_aarch64_simd_si *) __a, __o);
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst3_f32 (float32_t * __a, float32x2x3_t val)
-{
- __builtin_aarch64_simd_ci __o;
- float32x4x3_t temp;
- temp.val[0] = vcombine_f32 (val.val[0], vcreate_f32 (UINT64_C (0)));
- temp.val[1] = vcombine_f32 (val.val[1], vcreate_f32 (UINT64_C (0)));
- temp.val[2] = vcombine_f32 (val.val[2], vcreate_f32 (UINT64_C (0)));
- __o = __builtin_aarch64_set_qregciv4sf (__o, (float32x4_t) temp.val[0], 0);
- __o = __builtin_aarch64_set_qregciv4sf (__o, (float32x4_t) temp.val[1], 1);
- __o = __builtin_aarch64_set_qregciv4sf (__o, (float32x4_t) temp.val[2], 2);
- __builtin_aarch64_st3v2sf ((__builtin_aarch64_simd_sf *) __a, __o);
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst3q_s8 (int8_t * __a, int8x16x3_t val)
-{
- __builtin_aarch64_simd_ci __o;
- __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) val.val[0], 0);
- __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) val.val[1], 1);
- __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) val.val[2], 2);
- __builtin_aarch64_st3v16qi ((__builtin_aarch64_simd_qi *) __a, __o);
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst3q_p8 (poly8_t * __a, poly8x16x3_t val)
-{
- __builtin_aarch64_simd_ci __o;
- __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) val.val[0], 0);
- __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) val.val[1], 1);
- __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) val.val[2], 2);
- __builtin_aarch64_st3v16qi ((__builtin_aarch64_simd_qi *) __a, __o);
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst3q_s16 (int16_t * __a, int16x8x3_t val)
-{
- __builtin_aarch64_simd_ci __o;
- __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) val.val[0], 0);
- __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) val.val[1], 1);
- __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) val.val[2], 2);
- __builtin_aarch64_st3v8hi ((__builtin_aarch64_simd_hi *) __a, __o);
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst3q_p16 (poly16_t * __a, poly16x8x3_t val)
-{
- __builtin_aarch64_simd_ci __o;
- __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) val.val[0], 0);
- __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) val.val[1], 1);
- __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) val.val[2], 2);
- __builtin_aarch64_st3v8hi ((__builtin_aarch64_simd_hi *) __a, __o);
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst3q_s32 (int32_t * __a, int32x4x3_t val)
-{
- __builtin_aarch64_simd_ci __o;
- __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) val.val[0], 0);
- __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) val.val[1], 1);
- __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) val.val[2], 2);
- __builtin_aarch64_st3v4si ((__builtin_aarch64_simd_si *) __a, __o);
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst3q_s64 (int64_t * __a, int64x2x3_t val)
-{
- __builtin_aarch64_simd_ci __o;
- __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) val.val[0], 0);
- __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) val.val[1], 1);
- __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) val.val[2], 2);
- __builtin_aarch64_st3v2di ((__builtin_aarch64_simd_di *) __a, __o);
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst3q_u8 (uint8_t * __a, uint8x16x3_t val)
-{
- __builtin_aarch64_simd_ci __o;
- __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) val.val[0], 0);
- __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) val.val[1], 1);
- __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) val.val[2], 2);
- __builtin_aarch64_st3v16qi ((__builtin_aarch64_simd_qi *) __a, __o);
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst3q_u16 (uint16_t * __a, uint16x8x3_t val)
-{
- __builtin_aarch64_simd_ci __o;
- __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) val.val[0], 0);
- __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) val.val[1], 1);
- __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) val.val[2], 2);
- __builtin_aarch64_st3v8hi ((__builtin_aarch64_simd_hi *) __a, __o);
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst3q_u32 (uint32_t * __a, uint32x4x3_t val)
-{
- __builtin_aarch64_simd_ci __o;
- __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) val.val[0], 0);
- __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) val.val[1], 1);
- __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) val.val[2], 2);
- __builtin_aarch64_st3v4si ((__builtin_aarch64_simd_si *) __a, __o);
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst3q_u64 (uint64_t * __a, uint64x2x3_t val)
-{
- __builtin_aarch64_simd_ci __o;
- __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) val.val[0], 0);
- __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) val.val[1], 1);
- __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) val.val[2], 2);
- __builtin_aarch64_st3v2di ((__builtin_aarch64_simd_di *) __a, __o);
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst3q_f32 (float32_t * __a, float32x4x3_t val)
-{
- __builtin_aarch64_simd_ci __o;
- __o = __builtin_aarch64_set_qregciv4sf (__o, (float32x4_t) val.val[0], 0);
- __o = __builtin_aarch64_set_qregciv4sf (__o, (float32x4_t) val.val[1], 1);
- __o = __builtin_aarch64_set_qregciv4sf (__o, (float32x4_t) val.val[2], 2);
- __builtin_aarch64_st3v4sf ((__builtin_aarch64_simd_sf *) __a, __o);
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst3q_f64 (float64_t * __a, float64x2x3_t val)
-{
- __builtin_aarch64_simd_ci __o;
- __o = __builtin_aarch64_set_qregciv2df (__o, (float64x2_t) val.val[0], 0);
- __o = __builtin_aarch64_set_qregciv2df (__o, (float64x2_t) val.val[1], 1);
- __o = __builtin_aarch64_set_qregciv2df (__o, (float64x2_t) val.val[2], 2);
- __builtin_aarch64_st3v2df ((__builtin_aarch64_simd_df *) __a, __o);
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst4_s64 (int64_t * __a, int64x1x4_t val)
-{
- __builtin_aarch64_simd_xi __o;
- int64x2x4_t temp;
- temp.val[0] = vcombine_s64 (val.val[0], vcreate_s64 (INT64_C (0)));
- temp.val[1] = vcombine_s64 (val.val[1], vcreate_s64 (INT64_C (0)));
- temp.val[2] = vcombine_s64 (val.val[2], vcreate_s64 (INT64_C (0)));
- temp.val[3] = vcombine_s64 (val.val[3], vcreate_s64 (INT64_C (0)));
- __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) temp.val[0], 0);
- __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) temp.val[1], 1);
- __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) temp.val[2], 2);
- __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) temp.val[3], 3);
- __builtin_aarch64_st4di ((__builtin_aarch64_simd_di *) __a, __o);
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst4_u64 (uint64_t * __a, uint64x1x4_t val)
-{
- __builtin_aarch64_simd_xi __o;
- uint64x2x4_t temp;
- temp.val[0] = vcombine_u64 (val.val[0], vcreate_u64 (UINT64_C (0)));
- temp.val[1] = vcombine_u64 (val.val[1], vcreate_u64 (UINT64_C (0)));
- temp.val[2] = vcombine_u64 (val.val[2], vcreate_u64 (UINT64_C (0)));
- temp.val[3] = vcombine_u64 (val.val[3], vcreate_u64 (UINT64_C (0)));
- __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) temp.val[0], 0);
- __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) temp.val[1], 1);
- __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) temp.val[2], 2);
- __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) temp.val[3], 3);
- __builtin_aarch64_st4di ((__builtin_aarch64_simd_di *) __a, __o);
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst4_f64 (float64_t * __a, float64x1x4_t val)
-{
- __builtin_aarch64_simd_xi __o;
- float64x2x4_t temp;
- temp.val[0] = vcombine_f64 (val.val[0], vcreate_f64 (UINT64_C (0)));
- temp.val[1] = vcombine_f64 (val.val[1], vcreate_f64 (UINT64_C (0)));
- temp.val[2] = vcombine_f64 (val.val[2], vcreate_f64 (UINT64_C (0)));
- temp.val[3] = vcombine_f64 (val.val[3], vcreate_f64 (UINT64_C (0)));
- __o = __builtin_aarch64_set_qregxiv2df (__o, (float64x2_t) temp.val[0], 0);
- __o = __builtin_aarch64_set_qregxiv2df (__o, (float64x2_t) temp.val[1], 1);
- __o = __builtin_aarch64_set_qregxiv2df (__o, (float64x2_t) temp.val[2], 2);
- __o = __builtin_aarch64_set_qregxiv2df (__o, (float64x2_t) temp.val[3], 3);
- __builtin_aarch64_st4df ((__builtin_aarch64_simd_df *) __a, __o);
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst4_s8 (int8_t * __a, int8x8x4_t val)
-{
- __builtin_aarch64_simd_xi __o;
- int8x16x4_t temp;
- temp.val[0] = vcombine_s8 (val.val[0], vcreate_s8 (INT64_C (0)));
- temp.val[1] = vcombine_s8 (val.val[1], vcreate_s8 (INT64_C (0)));
- temp.val[2] = vcombine_s8 (val.val[2], vcreate_s8 (INT64_C (0)));
- temp.val[3] = vcombine_s8 (val.val[3], vcreate_s8 (INT64_C (0)));
- __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[0], 0);
- __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[1], 1);
- __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[2], 2);
- __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[3], 3);
- __builtin_aarch64_st4v8qi ((__builtin_aarch64_simd_qi *) __a, __o);
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst4_p8 (poly8_t * __a, poly8x8x4_t val)
-{
- __builtin_aarch64_simd_xi __o;
- poly8x16x4_t temp;
- temp.val[0] = vcombine_p8 (val.val[0], vcreate_p8 (UINT64_C (0)));
- temp.val[1] = vcombine_p8 (val.val[1], vcreate_p8 (UINT64_C (0)));
- temp.val[2] = vcombine_p8 (val.val[2], vcreate_p8 (UINT64_C (0)));
- temp.val[3] = vcombine_p8 (val.val[3], vcreate_p8 (UINT64_C (0)));
- __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[0], 0);
- __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[1], 1);
- __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[2], 2);
- __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[3], 3);
- __builtin_aarch64_st4v8qi ((__builtin_aarch64_simd_qi *) __a, __o);
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst4_s16 (int16_t * __a, int16x4x4_t val)
-{
- __builtin_aarch64_simd_xi __o;
- int16x8x4_t temp;
- temp.val[0] = vcombine_s16 (val.val[0], vcreate_s16 (INT64_C (0)));
- temp.val[1] = vcombine_s16 (val.val[1], vcreate_s16 (INT64_C (0)));
- temp.val[2] = vcombine_s16 (val.val[2], vcreate_s16 (INT64_C (0)));
- temp.val[3] = vcombine_s16 (val.val[3], vcreate_s16 (INT64_C (0)));
- __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[0], 0);
- __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[1], 1);
- __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[2], 2);
- __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[3], 3);
- __builtin_aarch64_st4v4hi ((__builtin_aarch64_simd_hi *) __a, __o);
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst4_p16 (poly16_t * __a, poly16x4x4_t val)
-{
- __builtin_aarch64_simd_xi __o;
- poly16x8x4_t temp;
- temp.val[0] = vcombine_p16 (val.val[0], vcreate_p16 (UINT64_C (0)));
- temp.val[1] = vcombine_p16 (val.val[1], vcreate_p16 (UINT64_C (0)));
- temp.val[2] = vcombine_p16 (val.val[2], vcreate_p16 (UINT64_C (0)));
- temp.val[3] = vcombine_p16 (val.val[3], vcreate_p16 (UINT64_C (0)));
- __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[0], 0);
- __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[1], 1);
- __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[2], 2);
- __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[3], 3);
- __builtin_aarch64_st4v4hi ((__builtin_aarch64_simd_hi *) __a, __o);
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst4_s32 (int32_t * __a, int32x2x4_t val)
-{
- __builtin_aarch64_simd_xi __o;
- int32x4x4_t temp;
- temp.val[0] = vcombine_s32 (val.val[0], vcreate_s32 (INT64_C (0)));
- temp.val[1] = vcombine_s32 (val.val[1], vcreate_s32 (INT64_C (0)));
- temp.val[2] = vcombine_s32 (val.val[2], vcreate_s32 (INT64_C (0)));
- temp.val[3] = vcombine_s32 (val.val[3], vcreate_s32 (INT64_C (0)));
- __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) temp.val[0], 0);
- __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) temp.val[1], 1);
- __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) temp.val[2], 2);
- __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) temp.val[3], 3);
- __builtin_aarch64_st4v2si ((__builtin_aarch64_simd_si *) __a, __o);
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst4_u8 (uint8_t * __a, uint8x8x4_t val)
-{
- __builtin_aarch64_simd_xi __o;
- uint8x16x4_t temp;
- temp.val[0] = vcombine_u8 (val.val[0], vcreate_u8 (UINT64_C (0)));
- temp.val[1] = vcombine_u8 (val.val[1], vcreate_u8 (UINT64_C (0)));
- temp.val[2] = vcombine_u8 (val.val[2], vcreate_u8 (UINT64_C (0)));
- temp.val[3] = vcombine_u8 (val.val[3], vcreate_u8 (UINT64_C (0)));
- __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[0], 0);
- __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[1], 1);
- __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[2], 2);
- __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[3], 3);
- __builtin_aarch64_st4v8qi ((__builtin_aarch64_simd_qi *) __a, __o);
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst4_u16 (uint16_t * __a, uint16x4x4_t val)
-{
- __builtin_aarch64_simd_xi __o;
- uint16x8x4_t temp;
- temp.val[0] = vcombine_u16 (val.val[0], vcreate_u16 (UINT64_C (0)));
- temp.val[1] = vcombine_u16 (val.val[1], vcreate_u16 (UINT64_C (0)));
- temp.val[2] = vcombine_u16 (val.val[2], vcreate_u16 (UINT64_C (0)));
- temp.val[3] = vcombine_u16 (val.val[3], vcreate_u16 (UINT64_C (0)));
- __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[0], 0);
- __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[1], 1);
- __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[2], 2);
- __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[3], 3);
- __builtin_aarch64_st4v4hi ((__builtin_aarch64_simd_hi *) __a, __o);
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst4_u32 (uint32_t * __a, uint32x2x4_t val)
-{
- __builtin_aarch64_simd_xi __o;
- uint32x4x4_t temp;
- temp.val[0] = vcombine_u32 (val.val[0], vcreate_u32 (UINT64_C (0)));
- temp.val[1] = vcombine_u32 (val.val[1], vcreate_u32 (UINT64_C (0)));
- temp.val[2] = vcombine_u32 (val.val[2], vcreate_u32 (UINT64_C (0)));
- temp.val[3] = vcombine_u32 (val.val[3], vcreate_u32 (UINT64_C (0)));
- __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) temp.val[0], 0);
- __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) temp.val[1], 1);
- __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) temp.val[2], 2);
- __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) temp.val[3], 3);
- __builtin_aarch64_st4v2si ((__builtin_aarch64_simd_si *) __a, __o);
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst4_f32 (float32_t * __a, float32x2x4_t val)
-{
- __builtin_aarch64_simd_xi __o;
- float32x4x4_t temp;
- temp.val[0] = vcombine_f32 (val.val[0], vcreate_f32 (UINT64_C (0)));
- temp.val[1] = vcombine_f32 (val.val[1], vcreate_f32 (UINT64_C (0)));
- temp.val[2] = vcombine_f32 (val.val[2], vcreate_f32 (UINT64_C (0)));
- temp.val[3] = vcombine_f32 (val.val[3], vcreate_f32 (UINT64_C (0)));
- __o = __builtin_aarch64_set_qregxiv4sf (__o, (float32x4_t) temp.val[0], 0);
- __o = __builtin_aarch64_set_qregxiv4sf (__o, (float32x4_t) temp.val[1], 1);
- __o = __builtin_aarch64_set_qregxiv4sf (__o, (float32x4_t) temp.val[2], 2);
- __o = __builtin_aarch64_set_qregxiv4sf (__o, (float32x4_t) temp.val[3], 3);
- __builtin_aarch64_st4v2sf ((__builtin_aarch64_simd_sf *) __a, __o);
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst4q_s8 (int8_t * __a, int8x16x4_t val)
-{
- __builtin_aarch64_simd_xi __o;
- __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) val.val[0], 0);
- __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) val.val[1], 1);
- __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) val.val[2], 2);
- __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) val.val[3], 3);
- __builtin_aarch64_st4v16qi ((__builtin_aarch64_simd_qi *) __a, __o);
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst4q_p8 (poly8_t * __a, poly8x16x4_t val)
-{
- __builtin_aarch64_simd_xi __o;
- __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) val.val[0], 0);
- __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) val.val[1], 1);
- __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) val.val[2], 2);
- __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) val.val[3], 3);
- __builtin_aarch64_st4v16qi ((__builtin_aarch64_simd_qi *) __a, __o);
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst4q_s16 (int16_t * __a, int16x8x4_t val)
-{
- __builtin_aarch64_simd_xi __o;
- __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) val.val[0], 0);
- __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) val.val[1], 1);
- __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) val.val[2], 2);
- __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) val.val[3], 3);
- __builtin_aarch64_st4v8hi ((__builtin_aarch64_simd_hi *) __a, __o);
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst4q_p16 (poly16_t * __a, poly16x8x4_t val)
-{
- __builtin_aarch64_simd_xi __o;
- __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) val.val[0], 0);
- __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) val.val[1], 1);
- __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) val.val[2], 2);
- __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) val.val[3], 3);
- __builtin_aarch64_st4v8hi ((__builtin_aarch64_simd_hi *) __a, __o);
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst4q_s32 (int32_t * __a, int32x4x4_t val)
-{
- __builtin_aarch64_simd_xi __o;
- __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) val.val[0], 0);
- __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) val.val[1], 1);
- __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) val.val[2], 2);
- __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) val.val[3], 3);
- __builtin_aarch64_st4v4si ((__builtin_aarch64_simd_si *) __a, __o);
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst4q_s64 (int64_t * __a, int64x2x4_t val)
-{
- __builtin_aarch64_simd_xi __o;
- __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) val.val[0], 0);
- __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) val.val[1], 1);
- __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) val.val[2], 2);
- __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) val.val[3], 3);
- __builtin_aarch64_st4v2di ((__builtin_aarch64_simd_di *) __a, __o);
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst4q_u8 (uint8_t * __a, uint8x16x4_t val)
-{
- __builtin_aarch64_simd_xi __o;
- __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) val.val[0], 0);
- __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) val.val[1], 1);
- __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) val.val[2], 2);
- __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) val.val[3], 3);
- __builtin_aarch64_st4v16qi ((__builtin_aarch64_simd_qi *) __a, __o);
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst4q_u16 (uint16_t * __a, uint16x8x4_t val)
-{
- __builtin_aarch64_simd_xi __o;
- __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) val.val[0], 0);
- __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) val.val[1], 1);
- __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) val.val[2], 2);
- __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) val.val[3], 3);
- __builtin_aarch64_st4v8hi ((__builtin_aarch64_simd_hi *) __a, __o);
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst4q_u32 (uint32_t * __a, uint32x4x4_t val)
-{
- __builtin_aarch64_simd_xi __o;
- __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) val.val[0], 0);
- __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) val.val[1], 1);
- __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) val.val[2], 2);
- __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) val.val[3], 3);
- __builtin_aarch64_st4v4si ((__builtin_aarch64_simd_si *) __a, __o);
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst4q_u64 (uint64_t * __a, uint64x2x4_t val)
-{
- __builtin_aarch64_simd_xi __o;
- __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) val.val[0], 0);
- __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) val.val[1], 1);
- __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) val.val[2], 2);
- __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) val.val[3], 3);
- __builtin_aarch64_st4v2di ((__builtin_aarch64_simd_di *) __a, __o);
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst4q_f32 (float32_t * __a, float32x4x4_t val)
-{
- __builtin_aarch64_simd_xi __o;
- __o = __builtin_aarch64_set_qregxiv4sf (__o, (float32x4_t) val.val[0], 0);
- __o = __builtin_aarch64_set_qregxiv4sf (__o, (float32x4_t) val.val[1], 1);
- __o = __builtin_aarch64_set_qregxiv4sf (__o, (float32x4_t) val.val[2], 2);
- __o = __builtin_aarch64_set_qregxiv4sf (__o, (float32x4_t) val.val[3], 3);
- __builtin_aarch64_st4v4sf ((__builtin_aarch64_simd_sf *) __a, __o);
-}
-
-__extension__ static __inline void __attribute__ ((__always_inline__))
-vst4q_f64 (float64_t * __a, float64x2x4_t val)
-{
- __builtin_aarch64_simd_xi __o;
- __o = __builtin_aarch64_set_qregxiv2df (__o, (float64x2_t) val.val[0], 0);
- __o = __builtin_aarch64_set_qregxiv2df (__o, (float64x2_t) val.val[1], 1);
- __o = __builtin_aarch64_set_qregxiv2df (__o, (float64x2_t) val.val[2], 2);
- __o = __builtin_aarch64_set_qregxiv2df (__o, (float64x2_t) val.val[3], 3);
- __builtin_aarch64_st4v2df ((__builtin_aarch64_simd_df *) __a, __o);
-}
-
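As a usage illustration (not part of the header), a minimal sketch of how the vst4 family above is typically called; the function name store_rgba and the RGBA channel layout are assumptions for the example:

#include <arm_neon.h>

/* Store four planar channel vectors to memory in interleaved
   r,g,b,a order; vst4q_f32 performs the interleaving (ST4),
   writing 16 floats: r0,g0,b0,a0, r1,g1,b1,a1, ...  */
void
store_rgba (float *dst, float32x4_t r, float32x4_t g,
            float32x4_t b, float32x4_t a)
{
  float32x4x4_t pix;
  pix.val[0] = r;
  pix.val[1] = g;
  pix.val[2] = b;
  pix.val[3] = a;
  vst4q_f32 (dst, pix);
}
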
-/* vsub */
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vsubd_s64 (int64x1_t __a, int64x1_t __b)
-{
- return __a - __b;
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vsubd_u64 (uint64x1_t __a, uint64x1_t __b)
-{
- return __a - __b;
-}
-
-/* vtrn */
-
-__extension__ static __inline float32x2x2_t __attribute__ ((__always_inline__))
-vtrn_f32 (float32x2_t a, float32x2_t b)
-{
- return (float32x2x2_t) {vtrn1_f32 (a, b), vtrn2_f32 (a, b)};
-}
-
-__extension__ static __inline poly8x8x2_t __attribute__ ((__always_inline__))
-vtrn_p8 (poly8x8_t a, poly8x8_t b)
-{
- return (poly8x8x2_t) {vtrn1_p8 (a, b), vtrn2_p8 (a, b)};
-}
-
-__extension__ static __inline poly16x4x2_t __attribute__ ((__always_inline__))
-vtrn_p16 (poly16x4_t a, poly16x4_t b)
-{
- return (poly16x4x2_t) {vtrn1_p16 (a, b), vtrn2_p16 (a, b)};
-}
-
-__extension__ static __inline int8x8x2_t __attribute__ ((__always_inline__))
-vtrn_s8 (int8x8_t a, int8x8_t b)
-{
- return (int8x8x2_t) {vtrn1_s8 (a, b), vtrn2_s8 (a, b)};
-}
-
-__extension__ static __inline int16x4x2_t __attribute__ ((__always_inline__))
-vtrn_s16 (int16x4_t a, int16x4_t b)
-{
- return (int16x4x2_t) {vtrn1_s16 (a, b), vtrn2_s16 (a, b)};
-}
-
-__extension__ static __inline int32x2x2_t __attribute__ ((__always_inline__))
-vtrn_s32 (int32x2_t a, int32x2_t b)
-{
- return (int32x2x2_t) {vtrn1_s32 (a, b), vtrn2_s32 (a, b)};
-}
-
-__extension__ static __inline uint8x8x2_t __attribute__ ((__always_inline__))
-vtrn_u8 (uint8x8_t a, uint8x8_t b)
-{
- return (uint8x8x2_t) {vtrn1_u8 (a, b), vtrn2_u8 (a, b)};
-}
-
-__extension__ static __inline uint16x4x2_t __attribute__ ((__always_inline__))
-vtrn_u16 (uint16x4_t a, uint16x4_t b)
-{
- return (uint16x4x2_t) {vtrn1_u16 (a, b), vtrn2_u16 (a, b)};
-}
-
-__extension__ static __inline uint32x2x2_t __attribute__ ((__always_inline__))
-vtrn_u32 (uint32x2_t a, uint32x2_t b)
-{
- return (uint32x2x2_t) {vtrn1_u32 (a, b), vtrn2_u32 (a, b)};
-}
-
-__extension__ static __inline float32x4x2_t __attribute__ ((__always_inline__))
-vtrnq_f32 (float32x4_t a, float32x4_t b)
-{
- return (float32x4x2_t) {vtrn1q_f32 (a, b), vtrn2q_f32 (a, b)};
-}
-
-__extension__ static __inline poly8x16x2_t __attribute__ ((__always_inline__))
-vtrnq_p8 (poly8x16_t a, poly8x16_t b)
-{
- return (poly8x16x2_t) {vtrn1q_p8 (a, b), vtrn2q_p8 (a, b)};
-}
-
-__extension__ static __inline poly16x8x2_t __attribute__ ((__always_inline__))
-vtrnq_p16 (poly16x8_t a, poly16x8_t b)
-{
- return (poly16x8x2_t) {vtrn1q_p16 (a, b), vtrn2q_p16 (a, b)};
-}
-
-__extension__ static __inline int8x16x2_t __attribute__ ((__always_inline__))
-vtrnq_s8 (int8x16_t a, int8x16_t b)
-{
- return (int8x16x2_t) {vtrn1q_s8 (a, b), vtrn2q_s8 (a, b)};
-}
-
-__extension__ static __inline int16x8x2_t __attribute__ ((__always_inline__))
-vtrnq_s16 (int16x8_t a, int16x8_t b)
-{
- return (int16x8x2_t) {vtrn1q_s16 (a, b), vtrn2q_s16 (a, b)};
-}
-
-__extension__ static __inline int32x4x2_t __attribute__ ((__always_inline__))
-vtrnq_s32 (int32x4_t a, int32x4_t b)
-{
- return (int32x4x2_t) {vtrn1q_s32 (a, b), vtrn2q_s32 (a, b)};
-}
-
-__extension__ static __inline uint8x16x2_t __attribute__ ((__always_inline__))
-vtrnq_u8 (uint8x16_t a, uint8x16_t b)
-{
- return (uint8x16x2_t) {vtrn1q_u8 (a, b), vtrn2q_u8 (a, b)};
-}
-
-__extension__ static __inline uint16x8x2_t __attribute__ ((__always_inline__))
-vtrnq_u16 (uint16x8_t a, uint16x8_t b)
-{
- return (uint16x8x2_t) {vtrn1q_u16 (a, b), vtrn2q_u16 (a, b)};
-}
-
-__extension__ static __inline uint32x4x2_t __attribute__ ((__always_inline__))
-vtrnq_u32 (uint32x4_t a, uint32x4_t b)
-{
- return (uint32x4x2_t) {vtrn1q_u32 (a, b), vtrn2q_u32 (a, b)};
-}
-
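A small, self-contained example of the vtrn pair above (transpose_pairs is an illustrative name): vtrn1 gathers the even-indexed lanes and vtrn2 the odd-indexed lanes, interleaved from both inputs.

#include <arm_neon.h>

/* For a = {a0,a1,a2,a3} and b = {b0,b1,b2,b3}, the result holds
   val[0] = {a0,b0,a2,b2} and val[1] = {a1,b1,a3,b3}.  */
int16x4x2_t
transpose_pairs (int16x4_t a, int16x4_t b)
{
  return vtrn_s16 (a, b);
}
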
-/* vtst */
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vtst_s8 (int8x8_t __a, int8x8_t __b)
-{
- return (uint8x8_t) __builtin_aarch64_cmtstv8qi (__a, __b);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vtst_s16 (int16x4_t __a, int16x4_t __b)
-{
- return (uint16x4_t) __builtin_aarch64_cmtstv4hi (__a, __b);
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vtst_s32 (int32x2_t __a, int32x2_t __b)
-{
- return (uint32x2_t) __builtin_aarch64_cmtstv2si (__a, __b);
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vtst_s64 (int64x1_t __a, int64x1_t __b)
-{
- return (uint64x1_t) __builtin_aarch64_cmtstdi (__a, __b);
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vtst_u8 (uint8x8_t __a, uint8x8_t __b)
-{
- return (uint8x8_t) __builtin_aarch64_cmtstv8qi ((int8x8_t) __a,
- (int8x8_t) __b);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vtst_u16 (uint16x4_t __a, uint16x4_t __b)
-{
- return (uint16x4_t) __builtin_aarch64_cmtstv4hi ((int16x4_t) __a,
- (int16x4_t) __b);
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vtst_u32 (uint32x2_t __a, uint32x2_t __b)
-{
- return (uint32x2_t) __builtin_aarch64_cmtstv2si ((int32x2_t) __a,
- (int32x2_t) __b);
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vtst_u64 (uint64x1_t __a, uint64x1_t __b)
-{
- return (uint64x1_t) __builtin_aarch64_cmtstdi ((int64x1_t) __a,
- (int64x1_t) __b);
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vtstq_s8 (int8x16_t __a, int8x16_t __b)
-{
- return (uint8x16_t) __builtin_aarch64_cmtstv16qi (__a, __b);
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vtstq_s16 (int16x8_t __a, int16x8_t __b)
-{
- return (uint16x8_t) __builtin_aarch64_cmtstv8hi (__a, __b);
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vtstq_s32 (int32x4_t __a, int32x4_t __b)
-{
- return (uint32x4_t) __builtin_aarch64_cmtstv4si (__a, __b);
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vtstq_s64 (int64x2_t __a, int64x2_t __b)
-{
- return (uint64x2_t) __builtin_aarch64_cmtstv2di (__a, __b);
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vtstq_u8 (uint8x16_t __a, uint8x16_t __b)
-{
- return (uint8x16_t) __builtin_aarch64_cmtstv16qi ((int8x16_t) __a,
- (int8x16_t) __b);
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vtstq_u16 (uint16x8_t __a, uint16x8_t __b)
-{
- return (uint16x8_t) __builtin_aarch64_cmtstv8hi ((int16x8_t) __a,
- (int16x8_t) __b);
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vtstq_u32 (uint32x4_t __a, uint32x4_t __b)
-{
- return (uint32x4_t) __builtin_aarch64_cmtstv4si ((int32x4_t) __a,
- (int32x4_t) __b);
-}
-
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vtstq_u64 (uint64x2_t __a, uint64x2_t __b)
-{
- return (uint64x2_t) __builtin_aarch64_cmtstv2di ((int64x2_t) __a,
- (int64x2_t) __b);
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vtstd_s64 (int64x1_t __a, int64x1_t __b)
-{
- return (uint64x1_t) __builtin_aarch64_cmtstdi (__a, __b);
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vtstd_u64 (uint64x1_t __a, uint64x1_t __b)
-{
- return (uint64x1_t) __builtin_aarch64_cmtstdi ((int64x1_t) __a,
- (int64x1_t) __b);
-}
-
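The vtst family maps to CMTST, which sets a lane to all-ones when the bitwise AND of the two inputs is non-zero. A minimal sketch (flag_mask is an illustrative name):

#include <arm_neon.h>

/* Build a per-lane mask that is all-ones wherever the given flag bit
   is set and zero elsewhere.  */
uint8x8_t
flag_mask (uint8x8_t flags, uint8_t bit)
{
  return vtst_u8 (flags, vdup_n_u8 (bit));
}
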
-/* vuqadd */
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vuqadd_s8 (int8x8_t __a, uint8x8_t __b)
-{
- return (int8x8_t) __builtin_aarch64_suqaddv8qi (__a, (int8x8_t) __b);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vuqadd_s16 (int16x4_t __a, uint16x4_t __b)
-{
- return (int16x4_t) __builtin_aarch64_suqaddv4hi (__a, (int16x4_t) __b);
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vuqadd_s32 (int32x2_t __a, uint32x2_t __b)
-{
- return (int32x2_t) __builtin_aarch64_suqaddv2si (__a, (int32x2_t) __b);
-}
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vuqadd_s64 (int64x1_t __a, uint64x1_t __b)
-{
- return (int64x1_t) __builtin_aarch64_suqadddi (__a, (int64x1_t) __b);
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vuqaddq_s8 (int8x16_t __a, uint8x16_t __b)
-{
- return (int8x16_t) __builtin_aarch64_suqaddv16qi (__a, (int8x16_t) __b);
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vuqaddq_s16 (int16x8_t __a, uint16x8_t __b)
-{
- return (int16x8_t) __builtin_aarch64_suqaddv8hi (__a, (int16x8_t) __b);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vuqaddq_s32 (int32x4_t __a, uint32x4_t __b)
-{
- return (int32x4_t) __builtin_aarch64_suqaddv4si (__a, (int32x4_t) __b);
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vuqaddq_s64 (int64x2_t __a, uint64x2_t __b)
-{
- return (int64x2_t) __builtin_aarch64_suqaddv2di (__a, (int64x2_t) __b);
-}
-
-__extension__ static __inline int8x1_t __attribute__ ((__always_inline__))
-vuqaddb_s8 (int8x1_t __a, uint8x1_t __b)
-{
- return (int8x1_t) __builtin_aarch64_suqaddqi (__a, (int8x1_t) __b);
-}
-
-__extension__ static __inline int16x1_t __attribute__ ((__always_inline__))
-vuqaddh_s16 (int16x1_t __a, uint16x1_t __b)
-{
- return (int16x1_t) __builtin_aarch64_suqaddhi (__a, (int16x1_t) __b);
-}
-
-__extension__ static __inline int32x1_t __attribute__ ((__always_inline__))
-vuqadds_s32 (int32x1_t __a, uint32x1_t __b)
-{
- return (int32x1_t) __builtin_aarch64_suqaddsi (__a, (int32x1_t) __b);
-}
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vuqaddd_s64 (int64x1_t __a, uint64x1_t __b)
-{
- return (int64x1_t) __builtin_aarch64_suqadddi (__a, (int64x1_t) __b);
-}
-
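The vuqadd family maps to SUQADD: an unsigned operand is added to a signed accumulator with signed saturation. A minimal sketch (accumulate_deltas is an illustrative name):

#include <arm_neon.h>

/* Add unsigned deltas to a signed accumulator, saturating at
   INT32_MAX instead of wrapping.  */
int32x4_t
accumulate_deltas (int32x4_t acc, uint32x4_t delta)
{
  return vuqaddq_s32 (acc, delta);
}
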
-#define __DEFINTERLEAVE(op, rettype, intype, funcsuffix, Q) \
- __extension__ static __inline rettype \
- __attribute__ ((__always_inline__)) \
- v ## op ## Q ## _ ## funcsuffix (intype a, intype b) \
- { \
- return (rettype) {v ## op ## 1 ## Q ## _ ## funcsuffix (a, b), \
- v ## op ## 2 ## Q ## _ ## funcsuffix (a, b)}; \
- }
-
-#define __INTERLEAVE_LIST(op) \
- __DEFINTERLEAVE (op, float32x2x2_t, float32x2_t, f32,) \
- __DEFINTERLEAVE (op, poly8x8x2_t, poly8x8_t, p8,) \
- __DEFINTERLEAVE (op, poly16x4x2_t, poly16x4_t, p16,) \
- __DEFINTERLEAVE (op, int8x8x2_t, int8x8_t, s8,) \
- __DEFINTERLEAVE (op, int16x4x2_t, int16x4_t, s16,) \
- __DEFINTERLEAVE (op, int32x2x2_t, int32x2_t, s32,) \
- __DEFINTERLEAVE (op, uint8x8x2_t, uint8x8_t, u8,) \
- __DEFINTERLEAVE (op, uint16x4x2_t, uint16x4_t, u16,) \
- __DEFINTERLEAVE (op, uint32x2x2_t, uint32x2_t, u32,) \
- __DEFINTERLEAVE (op, float32x4x2_t, float32x4_t, f32, q) \
- __DEFINTERLEAVE (op, poly8x16x2_t, poly8x16_t, p8, q) \
- __DEFINTERLEAVE (op, poly16x8x2_t, poly16x8_t, p16, q) \
- __DEFINTERLEAVE (op, int8x16x2_t, int8x16_t, s8, q) \
- __DEFINTERLEAVE (op, int16x8x2_t, int16x8_t, s16, q) \
- __DEFINTERLEAVE (op, int32x4x2_t, int32x4_t, s32, q) \
- __DEFINTERLEAVE (op, uint8x16x2_t, uint8x16_t, u8, q) \
- __DEFINTERLEAVE (op, uint16x8x2_t, uint16x8_t, u16, q) \
- __DEFINTERLEAVE (op, uint32x4x2_t, uint32x4_t, u32, q)
-
-/* vuzp */
-
-__INTERLEAVE_LIST (uzp)
-
-/* vzip */
-
-__INTERLEAVE_LIST (zip)
-
-#undef __INTERLEAVE_LIST
-#undef __DEFINTERLEAVE
-
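The two macro invocations above generate the full vuzp*/vzip* families, e.g. vzipq_u8 as {vzip1q_u8 (a, b), vzip2q_u8 (a, b)}. A minimal usage sketch (interleave_bytes is an illustrative name):

#include <arm_neon.h>

/* Interleave two byte vectors, e.g. to merge a plane of low bytes and
   a plane of high bytes; val[0] interleaves the low halves of the
   inputs, val[1] the high halves.  */
uint8x16x2_t
interleave_bytes (uint8x16_t lo, uint8x16_t hi)
{
  return vzipq_u8 (lo, hi);
}
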
-/* End of optimal implementations in approved order. */
-
-#endif
diff --git a/gcc-4.8.1/gcc/config/aarch64/atomics.md b/gcc-4.8.1/gcc/config/aarch64/atomics.md
deleted file mode 100644
index e576166c9..000000000
--- a/gcc-4.8.1/gcc/config/aarch64/atomics.md
+++ /dev/null
@@ -1,382 +0,0 @@
-;; Machine description for AArch64 processor synchronization primitives.
-;; Copyright (C) 2009-2013 Free Software Foundation, Inc.
-;; Contributed by ARM Ltd.
-;;
-;; This file is part of GCC.
-;;
-;; GCC is free software; you can redistribute it and/or modify it
-;; under the terms of the GNU General Public License as published by
-;; the Free Software Foundation; either version 3, or (at your option)
-;; any later version.
-;;
-;; GCC is distributed in the hope that it will be useful, but
-;; WITHOUT ANY WARRANTY; without even the implied warranty of
-;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-;; General Public License for more details.
-;;
-;; You should have received a copy of the GNU General Public License
-;; along with GCC; see the file COPYING3. If not see
-;; <http://www.gnu.org/licenses/>.
-
-(define_c_enum "unspecv"
- [
- UNSPECV_LX ; Represent a load-exclusive.
- UNSPECV_SX ; Represent a store-exclusive.
- UNSPECV_LDA ; Represent an atomic load or load-acquire.
- UNSPECV_STL ; Represent an atomic store or store-release.
- UNSPECV_ATOMIC_CMPSW ; Represent an atomic compare swap.
- UNSPECV_ATOMIC_EXCHG ; Represent an atomic exchange.
- UNSPECV_ATOMIC_OP ; Represent an atomic operation.
-])
-
-(define_expand "atomic_compare_and_swap<mode>"
- [(match_operand:SI 0 "register_operand" "") ;; bool out
- (match_operand:ALLI 1 "register_operand" "") ;; val out
- (match_operand:ALLI 2 "aarch64_sync_memory_operand" "") ;; memory
- (match_operand:ALLI 3 "general_operand" "") ;; expected
- (match_operand:ALLI 4 "register_operand" "") ;; desired
- (match_operand:SI 5 "const_int_operand") ;; is_weak
- (match_operand:SI 6 "const_int_operand") ;; mod_s
- (match_operand:SI 7 "const_int_operand")] ;; mod_f
- ""
- {
- aarch64_expand_compare_and_swap (operands);
- DONE;
- }
-)
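The expander above is what GCC's __atomic_compare_exchange_n builtin lands on for integer modes up to DImode; aarch64_expand_compare_and_swap then emits the insn_and_split patterns that follow. A minimal sketch of a caller (try_bump is an illustrative name):

#include <stdbool.h>
#include <stdint.h>

/* Compare-and-swap based increment; on AArch64 this becomes an
   exclusive load/store retry loop after the split.  */
bool
try_bump (int32_t *counter, int32_t expected)
{
  return __atomic_compare_exchange_n (counter, &expected, expected + 1,
                                      /* weak */ false,
                                      __ATOMIC_SEQ_CST, __ATOMIC_RELAXED);
}
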
-
-(define_insn_and_split "atomic_compare_and_swap<mode>_1"
- [(set (reg:CC CC_REGNUM) ;; bool out
- (unspec_volatile:CC [(const_int 0)] UNSPECV_ATOMIC_CMPSW))
- (set (match_operand:SI 0 "register_operand" "=&r") ;; val out
- (zero_extend:SI
- (match_operand:SHORT 1 "aarch64_sync_memory_operand" "+Q"))) ;; memory
- (set (match_dup 1)
- (unspec_volatile:SHORT
- [(match_operand:SI 2 "aarch64_plus_operand" "rI") ;; expected
- (match_operand:SHORT 3 "register_operand" "r") ;; desired
- (match_operand:SI 4 "const_int_operand") ;; is_weak
- (match_operand:SI 5 "const_int_operand") ;; mod_s
- (match_operand:SI 6 "const_int_operand")] ;; mod_f
- UNSPECV_ATOMIC_CMPSW))
- (clobber (match_scratch:SI 7 "=&r"))]
- ""
- "#"
- "&& reload_completed"
- [(const_int 0)]
- {
- aarch64_split_compare_and_swap (operands);
- DONE;
- }
-)
-
-(define_insn_and_split "atomic_compare_and_swap<mode>_1"
- [(set (reg:CC CC_REGNUM) ;; bool out
- (unspec_volatile:CC [(const_int 0)] UNSPECV_ATOMIC_CMPSW))
- (set (match_operand:GPI 0 "register_operand" "=&r") ;; val out
- (match_operand:GPI 1 "aarch64_sync_memory_operand" "+Q")) ;; memory
- (set (match_dup 1)
- (unspec_volatile:GPI
- [(match_operand:GPI 2 "aarch64_plus_operand" "rI") ;; expect
- (match_operand:GPI 3 "register_operand" "r") ;; desired
- (match_operand:SI 4 "const_int_operand") ;; is_weak
- (match_operand:SI 5 "const_int_operand") ;; mod_s
- (match_operand:SI 6 "const_int_operand")] ;; mod_f
- UNSPECV_ATOMIC_CMPSW))
- (clobber (match_scratch:SI 7 "=&r"))]
- ""
- "#"
- "&& reload_completed"
- [(const_int 0)]
- {
- aarch64_split_compare_and_swap (operands);
- DONE;
- }
-)
-
-(define_insn_and_split "atomic_exchange<mode>"
- [(set (match_operand:ALLI 0 "register_operand" "=&r") ;; output
- (match_operand:ALLI 1 "aarch64_sync_memory_operand" "+Q")) ;; memory
- (set (match_dup 1)
- (unspec_volatile:ALLI
- [(match_operand:ALLI 2 "register_operand" "r") ;; input
- (match_operand:SI 3 "const_int_operand" "")] ;; model
- UNSPECV_ATOMIC_EXCHG))
- (clobber (reg:CC CC_REGNUM))
- (clobber (match_scratch:SI 4 "=&r"))]
- ""
- "#"
- "&& reload_completed"
- [(const_int 0)]
- {
- aarch64_split_atomic_op (SET, operands[0], NULL, operands[1],
- operands[2], operands[3], operands[4]);
- DONE;
- }
-)
-
-(define_insn_and_split "atomic_<atomic_optab><mode>"
- [(set (match_operand:ALLI 0 "aarch64_sync_memory_operand" "+Q")
- (unspec_volatile:ALLI
- [(atomic_op:ALLI (match_dup 0)
- (match_operand:ALLI 1 "<atomic_op_operand>" "rn"))
- (match_operand:SI 2 "const_int_operand")] ;; model
- UNSPECV_ATOMIC_OP))
- (clobber (reg:CC CC_REGNUM))
- (clobber (match_scratch:ALLI 3 "=&r"))
- (clobber (match_scratch:SI 4 "=&r"))]
- ""
- "#"
- "&& reload_completed"
- [(const_int 0)]
- {
- aarch64_split_atomic_op (<CODE>, NULL, operands[3], operands[0],
- operands[1], operands[2], operands[4]);
- DONE;
- }
-)
-
-(define_insn_and_split "atomic_nand<mode>"
- [(set (match_operand:ALLI 0 "aarch64_sync_memory_operand" "+Q")
- (unspec_volatile:ALLI
- [(not:ALLI
- (and:ALLI (match_dup 0)
- (match_operand:ALLI 1 "aarch64_logical_operand" "rn")))
- (match_operand:SI 2 "const_int_operand")] ;; model
- UNSPECV_ATOMIC_OP))
- (clobber (reg:CC CC_REGNUM))
- (clobber (match_scratch:ALLI 3 "=&r"))
- (clobber (match_scratch:SI 4 "=&r"))]
- ""
- "#"
- "&& reload_completed"
- [(const_int 0)]
- {
- aarch64_split_atomic_op (NOT, NULL, operands[3], operands[0],
- operands[1], operands[2], operands[4]);
- DONE;
- }
-)
-
-(define_insn_and_split "atomic_fetch_<atomic_optab><mode>"
- [(set (match_operand:ALLI 0 "register_operand" "=&r")
- (match_operand:ALLI 1 "aarch64_sync_memory_operand" "+Q"))
- (set (match_dup 1)
- (unspec_volatile:ALLI
- [(atomic_op:ALLI (match_dup 1)
- (match_operand:ALLI 2 "<atomic_op_operand>" "rn"))
- (match_operand:SI 3 "const_int_operand")] ;; model
- UNSPECV_ATOMIC_OP))
- (clobber (reg:CC CC_REGNUM))
- (clobber (match_scratch:ALLI 4 "=&r"))
- (clobber (match_scratch:SI 5 "=&r"))]
- ""
- "#"
- "&& reload_completed"
- [(const_int 0)]
- {
- aarch64_split_atomic_op (<CODE>, operands[0], operands[4], operands[1],
- operands[2], operands[3], operands[5]);
- DONE;
- }
-)
-
-(define_insn_and_split "atomic_fetch_nand<mode>"
- [(set (match_operand:ALLI 0 "register_operand" "=&r")
- (match_operand:ALLI 1 "aarch64_sync_memory_operand" "+Q"))
- (set (match_dup 1)
- (unspec_volatile:ALLI
- [(not:ALLI
- (and:ALLI (match_dup 1)
- (match_operand:ALLI 2 "aarch64_logical_operand" "rn")))
- (match_operand:SI 3 "const_int_operand")] ;; model
- UNSPECV_ATOMIC_OP))
- (clobber (reg:CC CC_REGNUM))
- (clobber (match_scratch:ALLI 4 "=&r"))
- (clobber (match_scratch:SI 5 "=&r"))]
- ""
- "#"
- "&& reload_completed"
- [(const_int 0)]
- {
- aarch64_split_atomic_op (NOT, operands[0], operands[4], operands[1],
- operands[2], operands[3], operands[5]);
- DONE;
- }
-)
-
-(define_insn_and_split "atomic_<atomic_optab>_fetch<mode>"
- [(set (match_operand:ALLI 0 "register_operand" "=&r")
- (atomic_op:ALLI
- (match_operand:ALLI 1 "aarch64_sync_memory_operand" "+Q")
- (match_operand:ALLI 2 "<atomic_op_operand>" "rn")))
- (set (match_dup 1)
- (unspec_volatile:ALLI
- [(match_dup 1) (match_dup 2)
- (match_operand:SI 3 "const_int_operand")] ;; model
- UNSPECV_ATOMIC_OP))
- (clobber (reg:CC CC_REGNUM))
- (clobber (match_scratch:SI 4 "=&r"))]
- ""
- "#"
- "&& reload_completed"
- [(const_int 0)]
- {
- aarch64_split_atomic_op (<CODE>, NULL, operands[0], operands[1],
- operands[2], operands[3], operands[4]);
- DONE;
- }
-)
-
-(define_insn_and_split "atomic_nand_fetch<mode>"
- [(set (match_operand:ALLI 0 "register_operand" "=&r")
- (not:ALLI
- (and:ALLI
- (match_operand:ALLI 1 "aarch64_sync_memory_operand" "+Q")
- (match_operand:ALLI 2 "aarch64_logical_operand" "rn"))))
- (set (match_dup 1)
- (unspec_volatile:ALLI
- [(match_dup 1) (match_dup 2)
- (match_operand:SI 3 "const_int_operand")] ;; model
- UNSPECV_ATOMIC_OP))
- (clobber (reg:CC CC_REGNUM))
- (clobber (match_scratch:SI 4 "=&r"))]
- ""
- "#"
- "&& reload_completed"
- [(const_int 0)]
- {
- aarch64_split_atomic_op (NOT, NULL, operands[0], operands[1],
- operands[2], operands[3], operands[4]);
- DONE;
- }
-)
-
-(define_insn "atomic_load<mode>"
- [(set (match_operand:ALLI 0 "register_operand" "=r")
- (unspec_volatile:ALLI
- [(match_operand:ALLI 1 "aarch64_sync_memory_operand" "Q")
- (match_operand:SI 2 "const_int_operand")] ;; model
- UNSPECV_LDA))]
- ""
- {
- enum memmodel model = (enum memmodel) INTVAL (operands[2]);
- if (model == MEMMODEL_RELAXED
- || model == MEMMODEL_CONSUME
- || model == MEMMODEL_RELEASE)
- return "ldr<atomic_sfx>\t%<w>0, %1";
- else
- return "ldar<atomic_sfx>\t%<w>0, %1";
- }
-)
-
-(define_insn "atomic_store<mode>"
- [(set (match_operand:ALLI 0 "memory_operand" "=Q")
- (unspec_volatile:ALLI
- [(match_operand:ALLI 1 "general_operand" "rZ")
- (match_operand:SI 2 "const_int_operand")] ;; model
- UNSPECV_STL))]
- ""
- {
- enum memmodel model = (enum memmodel) INTVAL (operands[2]);
- if (model == MEMMODEL_RELAXED
- || model == MEMMODEL_CONSUME
- || model == MEMMODEL_ACQUIRE)
- return "str<atomic_sfx>\t%<w>1, %0";
- else
- return "stlr<atomic_sfx>\t%<w>1, %0";
- }
-)
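The atomic_load and atomic_store patterns above pick plain LDR/STR for relaxed accesses and LDAR/STLR for acquire/release ones, so the following helpers (illustrative names) compile to a single load-acquire and store-release respectively:

#include <stdint.h>

uint64_t
load_acquire (const uint64_t *p)
{
  return __atomic_load_n (p, __ATOMIC_ACQUIRE);   /* ldar */
}

void
store_release (uint64_t *p, uint64_t v)
{
  __atomic_store_n (p, v, __ATOMIC_RELEASE);      /* stlr */
}
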
-
-(define_insn "aarch64_load_exclusive<mode>"
- [(set (match_operand:SI 0 "register_operand" "=r")
- (zero_extend:SI
- (unspec_volatile:SHORT
- [(match_operand:SHORT 1 "aarch64_sync_memory_operand" "Q")
- (match_operand:SI 2 "const_int_operand")]
- UNSPECV_LX)))]
- ""
- {
- enum memmodel model = (enum memmodel) INTVAL (operands[2]);
- if (model == MEMMODEL_RELAXED
- || model == MEMMODEL_CONSUME
- || model == MEMMODEL_RELEASE)
- return "ldxr<atomic_sfx>\t%w0, %1";
- else
- return "ldaxr<atomic_sfx>\t%w0, %1";
- }
-)
-
-(define_insn "aarch64_load_exclusive<mode>"
- [(set (match_operand:GPI 0 "register_operand" "=r")
- (unspec_volatile:GPI
- [(match_operand:GPI 1 "aarch64_sync_memory_operand" "Q")
- (match_operand:SI 2 "const_int_operand")]
- UNSPECV_LX))]
- ""
- {
- enum memmodel model = (enum memmodel) INTVAL (operands[2]);
- if (model == MEMMODEL_RELAXED
- || model == MEMMODEL_CONSUME
- || model == MEMMODEL_RELEASE)
- return "ldxr\t%<w>0, %1";
- else
- return "ldaxr\t%<w>0, %1";
- }
-)
-
-(define_insn "aarch64_store_exclusive<mode>"
- [(set (match_operand:SI 0 "register_operand" "=r")
- (unspec_volatile:SI [(const_int 0)] UNSPECV_SX))
- (set (match_operand:ALLI 1 "aarch64_sync_memory_operand" "=Q")
- (unspec_volatile:ALLI
- [(match_operand:ALLI 2 "register_operand" "r")
- (match_operand:SI 3 "const_int_operand")]
- UNSPECV_SX))]
- ""
- {
- enum memmodel model = (enum memmodel) INTVAL (operands[3]);
- if (model == MEMMODEL_RELAXED
- || model == MEMMODEL_CONSUME
- || model == MEMMODEL_ACQUIRE)
- return "stxr<atomic_sfx>\t%w0, %<w>2, %1";
- else
- return "stlxr<atomic_sfx>\t%w0, %<w>2, %1";
- }
-)
-
-(define_expand "mem_thread_fence"
- [(match_operand:SI 0 "const_int_operand" "")]
- ""
- {
- enum memmodel model = (enum memmodel) INTVAL (operands[0]);
- if (model != MEMMODEL_RELAXED && model != MEMMODEL_CONSUME)
- emit_insn (gen_dmb (operands[0]));
- DONE;
- }
-)
-
-(define_expand "dmb"
- [(set (match_dup 1)
- (unspec:BLK [(match_dup 1) (match_operand:SI 0 "const_int_operand")]
- UNSPEC_MB))]
- ""
- {
- operands[1] = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
- MEM_VOLATILE_P (operands[1]) = 1;
- }
-)
-
-(define_insn "*dmb"
- [(set (match_operand:BLK 0 "" "")
- (unspec:BLK [(match_dup 0) (match_operand:SI 1 "const_int_operand")]
- UNSPEC_MB))]
- ""
- {
- enum memmodel model = (enum memmodel) INTVAL (operands[1]);
- if (model == MEMMODEL_ACQUIRE)
- return "dmb\\tishld";
- else
- return "dmb\\tish";
- }
-)
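The mem_thread_fence expander emits a DMB for any model stronger than relaxed/consume, and the *dmb pattern prints "dmb ishld" only for an acquire fence. A minimal sketch of a caller (publish_flag is an illustrative name):

/* A release fence followed by a plain store; the fence becomes
   "dmb ish" on AArch64.  */
void
publish_flag (int *flag)
{
  __atomic_thread_fence (__ATOMIC_RELEASE);
  *flag = 1;
}
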
diff --git a/gcc-4.8.1/gcc/config/aarch64/constraints.md b/gcc-4.8.1/gcc/config/aarch64/constraints.md
deleted file mode 100644
index 917b93922..000000000
--- a/gcc-4.8.1/gcc/config/aarch64/constraints.md
+++ /dev/null
@@ -1,178 +0,0 @@
-;; Machine description for AArch64 architecture.
-;; Copyright (C) 2009-2013 Free Software Foundation, Inc.
-;; Contributed by ARM Ltd.
-;;
-;; This file is part of GCC.
-;;
-;; GCC is free software; you can redistribute it and/or modify it
-;; under the terms of the GNU General Public License as published by
-;; the Free Software Foundation; either version 3, or (at your option)
-;; any later version.
-;;
-;; GCC is distributed in the hope that it will be useful, but
-;; WITHOUT ANY WARRANTY; without even the implied warranty of
-;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-;; General Public License for more details.
-;;
-;; You should have received a copy of the GNU General Public License
-;; along with GCC; see the file COPYING3. If not see
-;; <http://www.gnu.org/licenses/>.
-
-(define_register_constraint "k" "STACK_REG"
- "@internal The stack register.")
-
-(define_register_constraint "w" "FP_REGS"
- "Floating point and SIMD vector registers.")
-
-(define_register_constraint "x" "FP_LO_REGS"
- "Floating point and SIMD vector registers V0 - V15.")
-
-(define_constraint "I"
- "A constant that can be used with an ADD operation."
- (and (match_code "const_int")
- (match_test "aarch64_uimm12_shift (ival)")))
-
-(define_constraint "J"
- "A constant that can be used with a SUB operation (once negated)."
- (and (match_code "const_int")
- (match_test "aarch64_uimm12_shift (-ival)")))
-
-;; We can't use the mode of a CONST_INT to determine the context in
-;; which it is being used, so we must have a separate constraint for
-;; each context.
-
-(define_constraint "K"
- "A constant that can be used with a 32-bit logical operation."
- (and (match_code "const_int")
- (match_test "aarch64_bitmask_imm (ival, SImode)")))
-
-(define_constraint "L"
- "A constant that can be used with a 64-bit logical operation."
- (and (match_code "const_int")
- (match_test "aarch64_bitmask_imm (ival, DImode)")))
-
-(define_constraint "M"
- "A constant that can be used with a 32-bit MOV immediate operation."
- (and (match_code "const_int")
- (match_test "aarch64_move_imm (ival, SImode)")))
-
-(define_constraint "N"
- "A constant that can be used with a 64-bit MOV immediate operation."
- (and (match_code "const_int")
- (match_test "aarch64_move_imm (ival, DImode)")))
-
-(define_constraint "S"
- "A constraint that matches an absolute symbolic address."
- (and (match_code "const,symbol_ref,label_ref")
- (match_test "aarch64_symbolic_address_p (op)")))
-
-(define_constraint "Y"
- "Floating point constant zero."
- (and (match_code "const_double")
- (match_test "aarch64_float_const_zero_rtx_p (op)")))
-
-(define_constraint "Z"
- "Integer constant zero."
- (match_test "op == const0_rtx"))
-
-(define_constraint "Usa"
- "A constraint that matches an absolute symbolic address."
- (and (match_code "const,symbol_ref")
- (match_test "aarch64_symbolic_address_p (op)")))
-
-(define_constraint "Ush"
- "A constraint that matches an absolute symbolic address high part."
- (and (match_code "high")
- (match_test "aarch64_valid_symref (XEXP (op, 0), GET_MODE (XEXP (op, 0)))")))
-
-(define_constraint "Uss"
- "@internal
- A constraint that matches an immediate shift constant in SImode."
- (and (match_code "const_int")
- (match_test "(unsigned HOST_WIDE_INT) ival < 32")))
-
-(define_constraint "Usd"
- "@internal
- A constraint that matches an immediate shift constant in DImode."
- (and (match_code "const_int")
- (match_test "(unsigned HOST_WIDE_INT) ival < 64")))
-
-(define_constraint "UsM"
- "@internal
- A constraint that matches the immediate constant -1."
- (match_test "op == constm1_rtx"))
-
-(define_constraint "Ui1"
- "@internal
- A constraint that matches the immediate constant +1."
- (match_test "op == const1_rtx"))
-
-(define_constraint "Ui3"
- "@internal
- A constraint that matches the integers 0...4."
- (and (match_code "const_int")
- (match_test "(unsigned HOST_WIDE_INT) ival <= 4")))
-
-(define_constraint "Up3"
- "@internal
- A constraint that matches the integers 2^(0...4)."
- (and (match_code "const_int")
- (match_test "(unsigned) exact_log2 (ival) <= 4")))
-
-(define_memory_constraint "Q"
- "A memory address which uses a single base register with no offset."
- (and (match_code "mem")
- (match_test "REG_P (XEXP (op, 0))")))
-
-(define_memory_constraint "Ump"
- "@internal
- A memory address suitable for a load/store pair operation."
- (and (match_code "mem")
- (match_test "aarch64_legitimate_address_p (GET_MODE (op), XEXP (op, 0),
- PARALLEL, 1)")))
-
-(define_memory_constraint "Utv"
- "@internal
- An address valid for loading/storing opaque structure
- types wider than TImode."
- (and (match_code "mem")
- (match_test "aarch64_simd_mem_operand_p (op)")))
-
-(define_constraint "Ufc"
- "A floating point constant which can be used with an\
- FMOV immediate operation."
- (and (match_code "const_double")
- (match_test "aarch64_float_const_representable_p (op)")))
-
-(define_constraint "Dn"
- "@internal
- A constraint that matches vector of immediates."
- (and (match_code "const_vector")
- (match_test "aarch64_simd_immediate_valid_for_move (op, GET_MODE (op),
- NULL, NULL, NULL,
- NULL, NULL) != 0")))
-
-(define_constraint "Dl"
- "@internal
- A constraint that matches vector of immediates for left shifts."
- (and (match_code "const_vector")
- (match_test "aarch64_simd_shift_imm_p (op, GET_MODE (op),
- true)")))
-
-(define_constraint "Dr"
- "@internal
- A constraint that matches vector of immediates for right shifts."
- (and (match_code "const_vector")
- (match_test "aarch64_simd_shift_imm_p (op, GET_MODE (op),
- false)")))
-(define_constraint "Dz"
- "@internal
- A constraint that matches vector of immediate zero."
- (and (match_code "const_vector")
- (match_test "aarch64_simd_imm_zero_p (op, GET_MODE (op))")))
-
-(define_constraint "Dd"
- "@internal
- A constraint that matches an immediate operand valid for AdvSIMD scalar."
- (and (match_code "const_int")
- (match_test "aarch64_simd_imm_scalar_p (op, GET_MODE (op))")))
diff --git a/gcc-4.8.1/gcc/config/aarch64/gentune.sh b/gcc-4.8.1/gcc/config/aarch64/gentune.sh
deleted file mode 100644
index 1d414bc8b..000000000
--- a/gcc-4.8.1/gcc/config/aarch64/gentune.sh
+++ /dev/null
@@ -1,32 +0,0 @@
-#!/bin/sh
-#
-# Copyright (C) 2011-2013 Free Software Foundation, Inc.
-# Contributed by ARM Ltd.
-#
-# This file is part of GCC.
-#
-# GCC is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 3, or (at your option)
-# any later version.
-#
-# GCC is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with GCC; see the file COPYING3. If not see
-# <http://www.gnu.org/licenses/>.
-
-# Generate aarch64-tune.md, a file containing the tune attribute from the list of
-# CPUs in aarch64-cores.def
-
-echo ";; -*- buffer-read-only: t -*-"
-echo ";; Generated automatically by gentune.sh from aarch64-cores.def"
-
-allcores=`awk -F'[(, ]+' '/^AARCH64_CORE/ { cores = cores$3"," } END { print cores } ' $1`
-
-echo "(define_attr \"tune\""
-echo " \"$allcores\"" | sed -e 's/,"$/"/'
-echo " (const (symbol_ref \"((enum attr_tune) aarch64_tune)\")))"
diff --git a/gcc-4.8.1/gcc/config/aarch64/iterators.md b/gcc-4.8.1/gcc/config/aarch64/iterators.md
deleted file mode 100644
index ce81ac5ce..000000000
--- a/gcc-4.8.1/gcc/config/aarch64/iterators.md
+++ /dev/null
@@ -1,802 +0,0 @@
-;; Machine description for AArch64 architecture.
-;; Copyright (C) 2009-2013 Free Software Foundation, Inc.
-;; Contributed by ARM Ltd.
-;;
-;; This file is part of GCC.
-;;
-;; GCC is free software; you can redistribute it and/or modify it
-;; under the terms of the GNU General Public License as published by
-;; the Free Software Foundation; either version 3, or (at your option)
-;; any later version.
-;;
-;; GCC is distributed in the hope that it will be useful, but
-;; WITHOUT ANY WARRANTY; without even the implied warranty of
-;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-;; General Public License for more details.
-;;
-;; You should have received a copy of the GNU General Public License
-;; along with GCC; see the file COPYING3. If not see
-;; <http://www.gnu.org/licenses/>.
-
-;; -------------------------------------------------------------------
-;; Mode Iterators
-;; -------------------------------------------------------------------
-
-
-;; Iterator for General Purpose Integer registers (32- and 64-bit modes)
-(define_mode_iterator GPI [SI DI])
-
-;; Iterator for QI and HI modes
-(define_mode_iterator SHORT [QI HI])
-
-;; Iterator for all integer modes (up to 64-bit)
-(define_mode_iterator ALLI [QI HI SI DI])
-
-;; Iterator for scalar modes (up to 64-bit)
-(define_mode_iterator SDQ_I [QI HI SI DI])
-
-;; Iterator for all integer modes that can be extended (up to 64-bit)
-(define_mode_iterator ALLX [QI HI SI])
-
-;; Iterator for General Purpose Floating-point registers (32- and 64-bit modes)
-(define_mode_iterator GPF [SF DF])
-
-;; Integer vector modes.
-(define_mode_iterator VDQ [V8QI V16QI V4HI V8HI V2SI V4SI V2DI])
-
-;; Integer vector modes.
-(define_mode_iterator VDQ_I [V8QI V16QI V4HI V8HI V2SI V4SI V2DI])
-
-;; vector and scalar, 64 & 128-bit container, all integer modes
-(define_mode_iterator VSDQ_I [V8QI V16QI V4HI V8HI V2SI V4SI V2DI QI HI SI DI])
-
-;; vector and scalar, 64 & 128-bit container: all vector integer modes;
-;; 64-bit scalar integer mode
-(define_mode_iterator VSDQ_I_DI [V8QI V16QI V4HI V8HI V2SI V4SI V2DI DI])
-
-;; Double vector modes.
-(define_mode_iterator VD [V8QI V4HI V2SI V2SF])
-
-;; vector, 64-bit container, all integer modes
-(define_mode_iterator VD_BHSI [V8QI V4HI V2SI])
-
-;; 128 and 64-bit container; 8, 16, 32-bit vector integer modes
-(define_mode_iterator VDQ_BHSI [V8QI V16QI V4HI V8HI V2SI V4SI])
-
-;; Quad vector modes.
-(define_mode_iterator VQ [V16QI V8HI V4SI V2DI V4SF V2DF])
-
-;; All vector modes, except double.
-(define_mode_iterator VQ_S [V8QI V16QI V4HI V8HI V2SI V4SI])
-
-;; Vector and scalar, 64 & 128-bit container: all vector integer mode;
-;; 8, 16, 32-bit scalar integer modes
-(define_mode_iterator VSDQ_I_BHSI [V8QI V16QI V4HI V8HI V2SI V4SI V2DI QI HI SI])
-
-;; Vector modes for moves.
-(define_mode_iterator VDQM [V8QI V16QI V4HI V8HI V2SI V4SI])
-
-;; This mode iterator allows :PTR to be used for patterns that operate on
-;; pointer-sized quantities. Exactly one of the two alternatives will match.
-(define_mode_iterator PTR [(SI "Pmode == SImode") (DI "Pmode == DImode")])
-
-;; Vector Float modes.
-(define_mode_iterator VDQF [V2SF V4SF V2DF])
-
-;; Vector Float modes with 2 elements.
-(define_mode_iterator V2F [V2SF V2DF])
-
-;; All modes.
-(define_mode_iterator VALL [V8QI V16QI V4HI V8HI V2SI V4SI V2DI V2SF V4SF V2DF])
-
-;; All vector modes and DI.
-(define_mode_iterator VALLDI [V8QI V16QI V4HI V8HI V2SI V4SI V2DI V2SF V4SF V2DF DI])
-
-;; Vector modes for Integer reduction across lanes.
-(define_mode_iterator VDQV [V8QI V16QI V4HI V8HI V4SI])
-
-;; All double integer narrow-able modes.
-(define_mode_iterator VDN [V4HI V2SI DI])
-
-;; All quad integer narrow-able modes.
-(define_mode_iterator VQN [V8HI V4SI V2DI])
-
-;; All double integer widen-able modes.
-(define_mode_iterator VDW [V8QI V4HI V2SI])
-
-;; Vector and scalar 128-bit container: narrowable 16, 32, 64-bit integer modes
-(define_mode_iterator VSQN_HSDI [V8HI V4SI V2DI HI SI DI])
-
-;; All quad integer widen-able modes.
-(define_mode_iterator VQW [V16QI V8HI V4SI])
-
-;; Double vector modes for combines.
-(define_mode_iterator VDC [V8QI V4HI V2SI V2SF DI DF])
-
-;; Double vector modes for combines.
-(define_mode_iterator VDIC [V8QI V4HI V2SI])
-
-;; Double vector modes.
-(define_mode_iterator VD_RE [V8QI V4HI V2SI DI DF V2SF])
-
-;; Vector modes except double int.
-(define_mode_iterator VDQIF [V8QI V16QI V4HI V8HI V2SI V4SI V2SF V4SF V2DF])
-
-;; Vector modes for H and S types.
-(define_mode_iterator VDQHS [V4HI V8HI V2SI V4SI])
-
-;; Vector and scalar integer modes for H and S
-(define_mode_iterator VSDQ_HSI [V4HI V8HI V2SI V4SI HI SI])
-
-;; Vector and scalar 64-bit container: 16, 32-bit integer modes
-(define_mode_iterator VSD_HSI [V4HI V2SI HI SI])
-
-;; Vector 64-bit container: 16, 32-bit integer modes
-(define_mode_iterator VD_HSI [V4HI V2SI])
-
-;; Scalar 64-bit container: 16, 32-bit integer modes
-(define_mode_iterator SD_HSI [HI SI])
-
-;; Vector 64-bit container: 16, 32-bit integer modes
-(define_mode_iterator VQ_HSI [V8HI V4SI])
-
-;; All byte modes.
-(define_mode_iterator VB [V8QI V16QI])
-
-(define_mode_iterator TX [TI TF])
-
-;; Opaque structure modes.
-(define_mode_iterator VSTRUCT [OI CI XI])
-
-;; Double scalar modes
-(define_mode_iterator DX [DI DF])
-
-;; ------------------------------------------------------------------
-;; Unspec enumerations for Advanced SIMD. These could well go into
-;; aarch64.md but for their use in int_iterators here.
-;; ------------------------------------------------------------------
-
-(define_c_enum "unspec"
- [
- UNSPEC_ASHIFT_SIGNED ; Used in aarch64-simd.md.
- UNSPEC_ASHIFT_UNSIGNED ; Used in aarch64-simd.md.
- UNSPEC_FMAXV ; Used in aarch64-simd.md.
- UNSPEC_FMINV ; Used in aarch64-simd.md.
- UNSPEC_FADDV ; Used in aarch64-simd.md.
- UNSPEC_ADDV ; Used in aarch64-simd.md.
- UNSPEC_SMAXV ; Used in aarch64-simd.md.
- UNSPEC_SMINV ; Used in aarch64-simd.md.
- UNSPEC_UMAXV ; Used in aarch64-simd.md.
- UNSPEC_UMINV ; Used in aarch64-simd.md.
- UNSPEC_SHADD ; Used in aarch64-simd.md.
- UNSPEC_UHADD ; Used in aarch64-simd.md.
- UNSPEC_SRHADD ; Used in aarch64-simd.md.
- UNSPEC_URHADD ; Used in aarch64-simd.md.
- UNSPEC_SHSUB ; Used in aarch64-simd.md.
- UNSPEC_UHSUB ; Used in aarch64-simd.md.
- UNSPEC_SRHSUB ; Used in aarch64-simd.md.
- UNSPEC_URHSUB ; Used in aarch64-simd.md.
- UNSPEC_ADDHN ; Used in aarch64-simd.md.
- UNSPEC_RADDHN ; Used in aarch64-simd.md.
- UNSPEC_SUBHN ; Used in aarch64-simd.md.
- UNSPEC_RSUBHN ; Used in aarch64-simd.md.
- UNSPEC_ADDHN2 ; Used in aarch64-simd.md.
- UNSPEC_RADDHN2 ; Used in aarch64-simd.md.
- UNSPEC_SUBHN2 ; Used in aarch64-simd.md.
- UNSPEC_RSUBHN2 ; Used in aarch64-simd.md.
- UNSPEC_SQDMULH ; Used in aarch64-simd.md.
- UNSPEC_SQRDMULH ; Used in aarch64-simd.md.
- UNSPEC_PMUL ; Used in aarch64-simd.md.
- UNSPEC_USQADD ; Used in aarch64-simd.md.
- UNSPEC_SUQADD ; Used in aarch64-simd.md.
- UNSPEC_SQXTUN ; Used in aarch64-simd.md.
- UNSPEC_SQXTN ; Used in aarch64-simd.md.
- UNSPEC_UQXTN ; Used in aarch64-simd.md.
- UNSPEC_SSRA ; Used in aarch64-simd.md.
- UNSPEC_USRA ; Used in aarch64-simd.md.
- UNSPEC_SRSRA ; Used in aarch64-simd.md.
- UNSPEC_URSRA ; Used in aarch64-simd.md.
- UNSPEC_SRSHR ; Used in aarch64-simd.md.
- UNSPEC_URSHR ; Used in aarch64-simd.md.
- UNSPEC_SQSHLU ; Used in aarch64-simd.md.
- UNSPEC_SQSHL ; Used in aarch64-simd.md.
- UNSPEC_UQSHL ; Used in aarch64-simd.md.
- UNSPEC_SQSHRUN ; Used in aarch64-simd.md.
- UNSPEC_SQRSHRUN ; Used in aarch64-simd.md.
- UNSPEC_SQSHRN ; Used in aarch64-simd.md.
- UNSPEC_UQSHRN ; Used in aarch64-simd.md.
- UNSPEC_SQRSHRN ; Used in aarch64-simd.md.
- UNSPEC_UQRSHRN ; Used in aarch64-simd.md.
- UNSPEC_SSHL ; Used in aarch64-simd.md.
- UNSPEC_USHL ; Used in aarch64-simd.md.
- UNSPEC_SRSHL ; Used in aarch64-simd.md.
- UNSPEC_URSHL ; Used in aarch64-simd.md.
- UNSPEC_SQRSHL ; Used in aarch64-simd.md.
- UNSPEC_UQRSHL ; Used in aarch64-simd.md.
- UNSPEC_CMEQ ; Used in aarch64-simd.md.
- UNSPEC_CMLE ; Used in aarch64-simd.md.
- UNSPEC_CMLT ; Used in aarch64-simd.md.
- UNSPEC_CMGE ; Used in aarch64-simd.md.
- UNSPEC_CMGT ; Used in aarch64-simd.md.
- UNSPEC_CMHS ; Used in aarch64-simd.md.
- UNSPEC_CMHI ; Used in aarch64-simd.md.
- UNSPEC_SSLI ; Used in aarch64-simd.md.
- UNSPEC_USLI ; Used in aarch64-simd.md.
- UNSPEC_SSRI ; Used in aarch64-simd.md.
- UNSPEC_USRI ; Used in aarch64-simd.md.
- UNSPEC_SSHLL ; Used in aarch64-simd.md.
- UNSPEC_USHLL ; Used in aarch64-simd.md.
- UNSPEC_ADDP ; Used in aarch64-simd.md.
- UNSPEC_CMTST ; Used in aarch64-simd.md.
- UNSPEC_FMAX ; Used in aarch64-simd.md.
- UNSPEC_FMIN ; Used in aarch64-simd.md.
- UNSPEC_BSL ; Used in aarch64-simd.md.
- UNSPEC_TBL ; Used in vector permute patterns.
- UNSPEC_CONCAT ; Used in vector permute patterns.
- UNSPEC_ZIP1 ; Used in vector permute patterns.
- UNSPEC_ZIP2 ; Used in vector permute patterns.
- UNSPEC_UZP1 ; Used in vector permute patterns.
- UNSPEC_UZP2 ; Used in vector permute patterns.
- UNSPEC_TRN1 ; Used in vector permute patterns.
- UNSPEC_TRN2 ; Used in vector permute patterns.
-])
-
-;; -------------------------------------------------------------------
-;; Mode attributes
-;; -------------------------------------------------------------------
-
-;; In GPI templates, a string like "%<w>0" will expand to "%w0" in the
-;; 32-bit version and "%x0" in the 64-bit version.
-(define_mode_attr w [(QI "w") (HI "w") (SI "w") (DI "x") (SF "s") (DF "d")])
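To make the substitution concrete, here is a minimal sketch of how such an attribute is consumed. It is not part of the original file: the pattern name is invented and the GPI mode iterator (covering SI and DI) is assumed to be defined earlier in iterators.md.

;; Sketch only: expanded once for SImode and once for DImode, this
;; template prints "add w0, w1, w2" for the 32-bit variant and
;; "add x0, x1, x2" for the 64-bit variant.
(define_insn "*add<mode>3_sketch"
  [(set (match_operand:GPI 0 "register_operand" "=r")
        (plus:GPI (match_operand:GPI 1 "register_operand" "r")
                  (match_operand:GPI 2 "register_operand" "r")))]
  ""
  "add\\t%<w>0, %<w>1, %<w>2"
)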
-
-;; For scalar usage of vector/FP registers
-(define_mode_attr v [(QI "b") (HI "h") (SI "s") (DI "d")
- (V8QI "") (V16QI "")
- (V4HI "") (V8HI "")
- (V2SI "") (V4SI "")
- (V2DI "") (V2SF "")
- (V4SF "") (V2DF "")])
-
-;; For scalar usage of vector/FP registers, narrowing
-(define_mode_attr vn2 [(QI "") (HI "b") (SI "h") (DI "s")
- (V8QI "") (V16QI "")
- (V4HI "") (V8HI "")
- (V2SI "") (V4SI "")
- (V2DI "") (V2SF "")
- (V4SF "") (V2DF "")])
-
-;; For scalar usage of vector/FP registers, widening
-(define_mode_attr vw2 [(DI "") (QI "h") (HI "s") (SI "d")
- (V8QI "") (V16QI "")
- (V4HI "") (V8HI "")
- (V2SI "") (V4SI "")
- (V2DI "") (V2SF "")
- (V4SF "") (V2DF "")])
-
-;; Map a floating point mode to the appropriate register name prefix
-(define_mode_attr s [(SF "s") (DF "d")])
-
-;; Give the length suffix letter for a sign- or zero-extension.
-(define_mode_attr size [(QI "b") (HI "h") (SI "w")])
-
-;; Give the number of bits in the mode
-(define_mode_attr sizen [(QI "8") (HI "16") (SI "32") (DI "64")])
-
-;; Give the ordinal of the MSB in the mode
-(define_mode_attr sizem1 [(QI "#7") (HI "#15") (SI "#31") (DI "#63")])
-
-;; Attribute to describe constants acceptable in logical operations
-(define_mode_attr lconst [(SI "K") (DI "L")])
-
-;; Map a mode to a specific constraint character.
-(define_mode_attr cmode [(QI "q") (HI "h") (SI "s") (DI "d")])
-
-(define_mode_attr Vtype [(V8QI "8b") (V16QI "16b")
- (V4HI "4h") (V8HI "8h")
- (V2SI "2s") (V4SI "4s")
- (DI "1d") (DF "1d")
- (V2DI "2d") (V2SF "2s")
- (V4SF "4s") (V2DF "2d")])
-
-(define_mode_attr Vmtype [(V8QI ".8b") (V16QI ".16b")
- (V4HI ".4h") (V8HI ".8h")
- (V2SI ".2s") (V4SI ".4s")
- (V2DI ".2d") (V2SF ".2s")
- (V4SF ".4s") (V2DF ".2d")
- (DI "") (SI "")
- (HI "") (QI "")
- (TI "")])
-
-;; Register suffix (with leading '.') for the narrowed modes of VQN.
-(define_mode_attr Vmntype [(V8HI ".8b") (V4SI ".4h")
- (V2DI ".2s")
- (DI "") (SI "")
- (HI "")])
-
-;; Mode-to-individual element type mapping.
-(define_mode_attr Vetype [(V8QI "b") (V16QI "b")
- (V4HI "h") (V8HI "h")
- (V2SI "s") (V4SI "s")
- (V2DI "d") (V2SF "s")
- (V4SF "s") (V2DF "d")
- (QI "b") (HI "h")
- (SI "s") (DI "d")])
-
-;; Mode-to-bitwise operation type mapping.
-(define_mode_attr Vbtype [(V8QI "8b") (V16QI "16b")
- (V4HI "8b") (V8HI "16b")
- (V2SI "8b") (V4SI "16b")
- (V2DI "16b") (V2SF "8b")
- (V4SF "16b") (V2DF "16b")])
-
-;; Define element mode for each vector mode.
-(define_mode_attr VEL [(V8QI "QI") (V16QI "QI")
- (V4HI "HI") (V8HI "HI")
- (V2SI "SI") (V4SI "SI")
- (DI "DI") (V2DI "DI")
- (V2SF "SF") (V4SF "SF")
- (V2DF "DF")
- (SI "SI") (HI "HI")
- (QI "QI")])
-
-;; Define container mode for lane selection.
-(define_mode_attr VCOND [(V4HI "V4HI") (V8HI "V4HI")
- (V2SI "V2SI") (V4SI "V2SI")
- (DI "DI") (V2DI "DI")
- (V2SF "V2SF") (V4SF "V2SF")
- (V2DF "DF")])
-
-;; Define container mode for lane selection.
-(define_mode_attr VCONQ [(V8QI "V16QI") (V16QI "V16QI")
- (V4HI "V8HI") (V8HI "V8HI")
- (V2SI "V4SI") (V4SI "V4SI")
- (DI "V2DI") (V2DI "V2DI")
- (V2SF "V2SF") (V4SF "V4SF")
- (V2DF "V2DF") (SI "V4SI")
- (HI "V8HI") (QI "V16QI")])
-
-;; Define container mode for lane selection.
-(define_mode_attr VCON [(V8QI "V16QI") (V16QI "V16QI")
- (V4HI "V8HI") (V8HI "V8HI")
- (V2SI "V4SI") (V4SI "V4SI")
- (DI "V2DI") (V2DI "V2DI")
- (V2SF "V2SF") (V4SF "V4SF")
- (V2DF "V2DF") (SI "V4SI")
- (HI "V8HI") (QI "V16QI")])
-
-;; Half modes of all vector modes.
-(define_mode_attr VHALF [(V8QI "V4QI") (V16QI "V8QI")
- (V4HI "V2HI") (V8HI "V4HI")
- (V2SI "SI") (V4SI "V2SI")
- (V2DI "DI") (V2SF "SF")
- (V4SF "V2SF") (V2DF "DF")])
-
-;; Double modes of vector modes.
-(define_mode_attr VDBL [(V8QI "V16QI") (V4HI "V8HI")
- (V2SI "V4SI") (V2SF "V4SF")
- (SI "V2SI") (DI "V2DI")
- (DF "V2DF")])
-
-;; Double modes of vector modes (lower case).
-(define_mode_attr Vdbl [(V8QI "v16qi") (V4HI "v8hi")
- (V2SI "v4si") (V2SF "v4sf")
- (SI "v2si") (DI "v2di")])
-
-;; Narrowed modes for VDN.
-(define_mode_attr VNARROWD [(V4HI "V8QI") (V2SI "V4HI")
- (DI "V2SI")])
-
-;; Narrowed double-modes for VQN (Used for XTN).
-(define_mode_attr VNARROWQ [(V8HI "V8QI") (V4SI "V4HI")
- (V2DI "V2SI")
- (DI "SI") (SI "HI")
- (HI "QI")])
-
-;; Narrowed quad-modes for VQN (Used for XTN2).
-(define_mode_attr VNARROWQ2 [(V8HI "V16QI") (V4SI "V8HI")
- (V2DI "V4SI")])
-
-;; Register suffix for the narrowed modes of VQN.
-(define_mode_attr Vntype [(V8HI "8b") (V4SI "4h")
- (V2DI "2s")])
-
-;; Register suffix for the full-width narrowed modes of VQN (used by XTN2 etc.).
-(define_mode_attr V2ntype [(V8HI "16b") (V4SI "8h")
- (V2DI "4s")])
-
-;; Widened modes of vector modes.
-(define_mode_attr VWIDE [(V8QI "V8HI") (V4HI "V4SI")
- (V2SI "V2DI") (V16QI "V8HI")
- (V8HI "V4SI") (V4SI "V2DI")
- (HI "SI") (SI "DI")])
-
-;; Widened mode register suffixes for VDW/VQW.
-(define_mode_attr Vwtype [(V8QI "8h") (V4HI "4s")
- (V2SI "2d") (V16QI "8h")
- (V8HI "4s") (V4SI "2d")])
-
-;; Widened mode register suffixes for VDW/VQW.
-(define_mode_attr Vmwtype [(V8QI ".8h") (V4HI ".4s")
- (V2SI ".2d") (V16QI ".8h")
- (V8HI ".4s") (V4SI ".2d")
- (SI "") (HI "")])
-
-;; Lower part register suffixes for VQW.
-(define_mode_attr Vhalftype [(V16QI "8b") (V8HI "4h")
- (V4SI "2s")])
-
-;; Define corresponding core/FP element mode for each vector mode.
-(define_mode_attr vw [(V8QI "w") (V16QI "w")
- (V4HI "w") (V8HI "w")
- (V2SI "w") (V4SI "w")
- (DI "x") (V2DI "x")
- (V2SF "s") (V4SF "s")
- (V2DF "d")])
-
-;; Double vector types for ALLX.
-(define_mode_attr Vallxd [(QI "8b") (HI "4h") (SI "2s")])
-
-;; Mode of result of comparison operations.
-(define_mode_attr V_cmp_result [(V8QI "V8QI") (V16QI "V16QI")
- (V4HI "V4HI") (V8HI "V8HI")
- (V2SI "V2SI") (V4SI "V4SI")
- (DI "DI") (V2DI "V2DI")
- (V2SF "V2SI") (V4SF "V4SI")
- (V2DF "V2DI")])
-
-;; Lower case mode of results of comparison operations.
-(define_mode_attr v_cmp_result [(V8QI "v8qi") (V16QI "v16qi")
- (V4HI "v4hi") (V8HI "v8hi")
- (V2SI "v2si") (V4SI "v4si")
- (DI "di") (V2DI "v2di")
- (V2SF "v2si") (V4SF "v4si")
- (V2DF "v2di")])
-
-;; Vm for lane instructions is restricted to FP_LO_REGS.
-(define_mode_attr vwx [(V4HI "x") (V8HI "x") (HI "x")
- (V2SI "w") (V4SI "w") (SI "w")])
-
-(define_mode_attr Vendreg [(OI "T") (CI "U") (XI "V")])
-
-(define_mode_attr nregs [(OI "2") (CI "3") (XI "4")])
-
-(define_mode_attr VRL2 [(V8QI "V32QI") (V4HI "V16HI")
- (V2SI "V8SI") (V2SF "V8SF")
- (DI "V4DI") (DF "V4DF")
- (V16QI "V32QI") (V8HI "V16HI")
- (V4SI "V8SI") (V4SF "V8SF")
- (V2DI "V4DI") (V2DF "V4DF")])
-
-(define_mode_attr VRL3 [(V8QI "V48QI") (V4HI "V24HI")
- (V2SI "V12SI") (V2SF "V12SF")
- (DI "V6DI") (DF "V6DF")
- (V16QI "V48QI") (V8HI "V24HI")
- (V4SI "V12SI") (V4SF "V12SF")
- (V2DI "V6DI") (V2DF "V6DF")])
-
-(define_mode_attr VRL4 [(V8QI "V64QI") (V4HI "V32HI")
- (V2SI "V16SI") (V2SF "V16SF")
- (DI "V8DI") (DF "V8DF")
- (V16QI "V64QI") (V8HI "V32HI")
- (V4SI "V16SI") (V4SF "V16SF")
- (V2DI "V8DI") (V2DF "V8DF")])
-
-(define_mode_attr VSTRUCT_DREG [(OI "TI") (CI "EI") (XI "OI")])
-
-;; Mode for atomic operation suffixes
-(define_mode_attr atomic_sfx
- [(QI "b") (HI "h") (SI "") (DI "")])
-
-(define_mode_attr fcvt_target [(V2DF "v2di") (V4SF "v4si") (V2SF "v2si")])
-(define_mode_attr FCVT_TARGET [(V2DF "V2DI") (V4SF "V4SI") (V2SF "V2SI")])
-
-;; -------------------------------------------------------------------
-;; Code Iterators
-;; -------------------------------------------------------------------
-
-;; This code iterator allows the various shifts supported on the core
-(define_code_iterator SHIFT [ashift ashiftrt lshiftrt rotatert])
-
-;; This code iterator allows the shifts supported in arithmetic instructions
-(define_code_iterator ASHIFT [ashift ashiftrt lshiftrt])
-
-;; Code iterator for logical operations
-(define_code_iterator LOGICAL [and ior xor])
-
-;; Code iterator for sign/zero extension
-(define_code_iterator ANY_EXTEND [sign_extend zero_extend])
-
-;; All division operations (signed/unsigned)
-(define_code_iterator ANY_DIV [div udiv])
-
-;; Code iterator for sign/zero extraction
-(define_code_iterator ANY_EXTRACT [sign_extract zero_extract])
-
-;; Code iterator for equality comparisons
-(define_code_iterator EQL [eq ne])
-
-;; Code iterator for less-than and greater-than-or-equal-to comparisons
-(define_code_iterator LTGE [lt ge])
-
-;; Iterator for __sync_<op> operations where the operation can be
-;; represented directly in RTL.  This is all of the sync operations bar
-;; nand.
-(define_code_iterator atomic_op [plus minus ior xor and])
-
-;; Iterator for integer conversions
-(define_code_iterator FIXUORS [fix unsigned_fix])
-
-;; Code iterator for variants of vector max and min.
-(define_code_iterator MAXMIN [smax smin umax umin])
-
-;; Code iterator for plus/minus, used for vector add and sub variants.
-(define_code_iterator ADDSUB [plus minus])
-
-;; Code iterator for variants of vector saturating binary ops.
-(define_code_iterator BINQOPS [ss_plus us_plus ss_minus us_minus])
-
-;; Code iterator for variants of vector saturating unary ops.
-(define_code_iterator UNQOPS [ss_neg ss_abs])
-
-;; Code iterator for signed variants of vector saturating binary ops.
-(define_code_iterator SBINQOPS [ss_plus ss_minus])
-
-;; -------------------------------------------------------------------
-;; Code Attributes
-;; -------------------------------------------------------------------
-;; Map rtl objects to optab names
-(define_code_attr optab [(ashift "ashl")
- (ashiftrt "ashr")
- (lshiftrt "lshr")
- (rotatert "rotr")
- (sign_extend "extend")
- (zero_extend "zero_extend")
- (sign_extract "extv")
- (zero_extract "extzv")
- (and "and")
- (ior "ior")
- (xor "xor")
- (not "one_cmpl")
- (neg "neg")
- (plus "add")
- (minus "sub")
- (ss_plus "qadd")
- (us_plus "qadd")
- (ss_minus "qsub")
- (us_minus "qsub")
- (ss_neg "qneg")
- (ss_abs "qabs")
- (eq "eq")
- (ne "ne")
- (lt "lt")
- (ge "ge")])
-
-;; Optab prefix distinguishing the signed and unsigned variants of an operation
-(define_code_attr su_optab [(sign_extend "") (zero_extend "u")
- (div "") (udiv "u")
- (fix "") (unsigned_fix "u")
- (ss_plus "s") (us_plus "u")
- (ss_minus "s") (us_minus "u")])
-
-;; Map shift rtl codes to the corresponding instruction mnemonics
-(define_code_attr shift [(ashift "lsl") (ashiftrt "asr")
- (lshiftrt "lsr") (rotatert "ror")])
-
-;; Map shift operators onto underlying bit-field instructions
-(define_code_attr bfshift [(ashift "ubfiz") (ashiftrt "sbfx")
- (lshiftrt "ubfx") (rotatert "extr")])
-
-;; Logical operator instruction mnemonics
-(define_code_attr logical [(and "and") (ior "orr") (xor "eor")])
-
-;; Similar, but when not(op)
-(define_code_attr nlogical [(and "bic") (ior "orn") (xor "eon")])
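Code iterators and their attributes combine in the same way as the mode iterators above; the following sketch (hypothetical pattern name, GPI again assumed from earlier in the file) shows one template standing in for and/orr/eor in both SImode and DImode.

;; Sketch only: <optab> supplies the pattern name (and/ior/xor) and
;; <logical> the mnemonic (and/orr/eor), so this generates six insns.
(define_insn "*<optab><mode>3_sketch"
  [(set (match_operand:GPI 0 "register_operand" "=r")
        (LOGICAL:GPI (match_operand:GPI 1 "register_operand" "r")
                     (match_operand:GPI 2 "register_operand" "r")))]
  ""
  "<logical>\\t%<w>0, %<w>1, %<w>2"
)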
-
-;; Sign- or zero-extending load
-(define_code_attr ldrxt [(sign_extend "ldrs") (zero_extend "ldr")])
-
-;; Sign- or zero-extending data-op
-(define_code_attr su [(sign_extend "s") (zero_extend "u")
- (sign_extract "s") (zero_extract "u")
- (fix "s") (unsigned_fix "u")
- (div "s") (udiv "u")])
-
-;; Emit cbz/cbnz depending on comparison type.
-(define_code_attr cbz [(eq "cbz") (ne "cbnz") (lt "cbnz") (ge "cbz")])
-
-;; Emit tbz/tbnz depending on comparison type.
-(define_code_attr tbz [(eq "tbz") (ne "tbnz") (lt "tbnz") (ge "tbz")])
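A sketch of how the <cbz> attribute is typically used in a compare-and-branch pattern follows; the pattern name is invented and GPI is assumed from earlier in the file.

;; Sketch only: with EQL = [eq ne], an equality test against zero emits
;; cbz and an inequality test emits cbnz, in either register width.
(define_insn "*cb<optab><mode>_sketch"
  [(set (pc) (if_then_else
               (EQL (match_operand:GPI 0 "register_operand" "r")
                    (const_int 0))
               (label_ref (match_operand 1 "" ""))
               (pc)))]
  ""
  "<cbz>\\t%<w>0, %l1"
)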
-
-;; Max/min attributes.
-(define_code_attr maxmin [(smax "smax")
- (smin "smin")
- (umax "umax")
- (umin "umin")])
-
-;; MLA/MLS attributes.
-(define_code_attr as [(ss_plus "a") (ss_minus "s")])
-
-;; Atomic operations
-(define_code_attr atomic_optab
- [(ior "or") (xor "xor") (and "and") (plus "add") (minus "sub")])
-
-(define_code_attr atomic_op_operand
- [(ior "aarch64_logical_operand")
- (xor "aarch64_logical_operand")
- (and "aarch64_logical_operand")
- (plus "aarch64_plus_operand")
- (minus "aarch64_plus_operand")])
-
-;; -------------------------------------------------------------------
-;; Int Iterators.
-;; -------------------------------------------------------------------
-(define_int_iterator MAXMINV [UNSPEC_UMAXV UNSPEC_UMINV
- UNSPEC_SMAXV UNSPEC_SMINV])
-
-(define_int_iterator FMAXMINV [UNSPEC_FMAXV UNSPEC_FMINV])
-
-(define_int_iterator HADDSUB [UNSPEC_SHADD UNSPEC_UHADD
- UNSPEC_SRHADD UNSPEC_URHADD
- UNSPEC_SHSUB UNSPEC_UHSUB
- UNSPEC_SRHSUB UNSPEC_URHSUB])
-
-
-(define_int_iterator ADDSUBHN [UNSPEC_ADDHN UNSPEC_RADDHN
- UNSPEC_SUBHN UNSPEC_RSUBHN])
-
-(define_int_iterator ADDSUBHN2 [UNSPEC_ADDHN2 UNSPEC_RADDHN2
- UNSPEC_SUBHN2 UNSPEC_RSUBHN2])
-
-(define_int_iterator FMAXMIN [UNSPEC_FMAX UNSPEC_FMIN])
-
-(define_int_iterator VQDMULH [UNSPEC_SQDMULH UNSPEC_SQRDMULH])
-
-(define_int_iterator USSUQADD [UNSPEC_SUQADD UNSPEC_USQADD])
-
-(define_int_iterator SUQMOVN [UNSPEC_SQXTN UNSPEC_UQXTN])
-
-(define_int_iterator VSHL [UNSPEC_SSHL UNSPEC_USHL
- UNSPEC_SRSHL UNSPEC_URSHL])
-
-(define_int_iterator VSHLL [UNSPEC_SSHLL UNSPEC_USHLL])
-
-(define_int_iterator VQSHL [UNSPEC_SQSHL UNSPEC_UQSHL
- UNSPEC_SQRSHL UNSPEC_UQRSHL])
-
-(define_int_iterator VSRA [UNSPEC_SSRA UNSPEC_USRA
- UNSPEC_SRSRA UNSPEC_URSRA])
-
-(define_int_iterator VSLRI [UNSPEC_SSLI UNSPEC_USLI
- UNSPEC_SSRI UNSPEC_USRI])
-
-
-(define_int_iterator VRSHR_N [UNSPEC_SRSHR UNSPEC_URSHR])
-
-(define_int_iterator VQSHL_N [UNSPEC_SQSHLU UNSPEC_SQSHL UNSPEC_UQSHL])
-
-(define_int_iterator VQSHRN_N [UNSPEC_SQSHRUN UNSPEC_SQRSHRUN
- UNSPEC_SQSHRN UNSPEC_UQSHRN
- UNSPEC_SQRSHRN UNSPEC_UQRSHRN])
-
-(define_int_iterator VCMP_S [UNSPEC_CMEQ UNSPEC_CMGE UNSPEC_CMGT
- UNSPEC_CMLE UNSPEC_CMLT])
-
-(define_int_iterator VCMP_U [UNSPEC_CMHS UNSPEC_CMHI UNSPEC_CMTST])
-
-(define_int_iterator PERMUTE [UNSPEC_ZIP1 UNSPEC_ZIP2
- UNSPEC_TRN1 UNSPEC_TRN2
- UNSPEC_UZP1 UNSPEC_UZP2])
-
-(define_int_iterator FRINT [UNSPEC_FRINTZ UNSPEC_FRINTP UNSPEC_FRINTM
- UNSPEC_FRINTI UNSPEC_FRINTX UNSPEC_FRINTA])
-
-(define_int_iterator FCVT [UNSPEC_FRINTZ UNSPEC_FRINTP UNSPEC_FRINTM
- UNSPEC_FRINTA])
-
-;; -------------------------------------------------------------------
-;; Int Iterators Attributes.
-;; -------------------------------------------------------------------
-(define_int_attr maxminv [(UNSPEC_UMAXV "umax")
- (UNSPEC_UMINV "umin")
- (UNSPEC_SMAXV "smax")
- (UNSPEC_SMINV "smin")])
-
-(define_int_attr fmaxminv [(UNSPEC_FMAXV "max")
- (UNSPEC_FMINV "min")])
-
-(define_int_attr fmaxmin [(UNSPEC_FMAX "fmax")
- (UNSPEC_FMIN "fmin")])
-
-(define_int_attr sur [(UNSPEC_SHADD "s") (UNSPEC_UHADD "u")
- (UNSPEC_SRHADD "sr") (UNSPEC_URHADD "ur")
- (UNSPEC_SHSUB "s") (UNSPEC_UHSUB "u")
- (UNSPEC_SRHSUB "sr") (UNSPEC_URHSUB "ur")
- (UNSPEC_ADDHN "") (UNSPEC_RADDHN "r")
- (UNSPEC_SUBHN "") (UNSPEC_RSUBHN "r")
- (UNSPEC_ADDHN2 "") (UNSPEC_RADDHN2 "r")
- (UNSPEC_SUBHN2 "") (UNSPEC_RSUBHN2 "r")
- (UNSPEC_SQXTN "s") (UNSPEC_UQXTN "u")
- (UNSPEC_USQADD "us") (UNSPEC_SUQADD "su")
- (UNSPEC_SSLI "s") (UNSPEC_USLI "u")
- (UNSPEC_SSRI "s") (UNSPEC_USRI "u")
- (UNSPEC_USRA "u") (UNSPEC_SSRA "s")
- (UNSPEC_URSRA "ur") (UNSPEC_SRSRA "sr")
- (UNSPEC_URSHR "ur") (UNSPEC_SRSHR "sr")
- (UNSPEC_SQSHLU "s") (UNSPEC_SQSHL "s")
- (UNSPEC_UQSHL "u")
- (UNSPEC_SQSHRUN "s") (UNSPEC_SQRSHRUN "s")
- (UNSPEC_SQSHRN "s") (UNSPEC_UQSHRN "u")
- (UNSPEC_SQRSHRN "s") (UNSPEC_UQRSHRN "u")
- (UNSPEC_USHL "u") (UNSPEC_SSHL "s")
- (UNSPEC_USHLL "u") (UNSPEC_SSHLL "s")
- (UNSPEC_URSHL "ur") (UNSPEC_SRSHL "sr")
- (UNSPEC_UQRSHL "u") (UNSPEC_SQRSHL "s")
-])
-
-(define_int_attr r [(UNSPEC_SQDMULH "") (UNSPEC_SQRDMULH "r")
- (UNSPEC_SQSHRUN "") (UNSPEC_SQRSHRUN "r")
- (UNSPEC_SQSHRN "") (UNSPEC_UQSHRN "")
- (UNSPEC_SQRSHRN "r") (UNSPEC_UQRSHRN "r")
- (UNSPEC_SQSHL "") (UNSPEC_UQSHL "")
- (UNSPEC_SQRSHL "r") (UNSPEC_UQRSHL "r")
-])
-
-(define_int_attr lr [(UNSPEC_SSLI "l") (UNSPEC_USLI "l")
- (UNSPEC_SSRI "r") (UNSPEC_USRI "r")])
-
-(define_int_attr u [(UNSPEC_SQSHLU "u") (UNSPEC_SQSHL "") (UNSPEC_UQSHL "")
- (UNSPEC_SQSHRUN "u") (UNSPEC_SQRSHRUN "u")
- (UNSPEC_SQSHRN "") (UNSPEC_UQSHRN "")
- (UNSPEC_SQRSHRN "") (UNSPEC_UQRSHRN "")])
-
-(define_int_attr addsub [(UNSPEC_SHADD "add")
- (UNSPEC_UHADD "add")
- (UNSPEC_SRHADD "add")
- (UNSPEC_URHADD "add")
- (UNSPEC_SHSUB "sub")
- (UNSPEC_UHSUB "sub")
- (UNSPEC_SRHSUB "sub")
- (UNSPEC_URHSUB "sub")
- (UNSPEC_ADDHN "add")
- (UNSPEC_SUBHN "sub")
- (UNSPEC_RADDHN "add")
- (UNSPEC_RSUBHN "sub")
- (UNSPEC_ADDHN2 "add")
- (UNSPEC_SUBHN2 "sub")
- (UNSPEC_RADDHN2 "add")
- (UNSPEC_RSUBHN2 "sub")])
-
-(define_int_attr cmp [(UNSPEC_CMGE "ge") (UNSPEC_CMGT "gt")
- (UNSPEC_CMLE "le") (UNSPEC_CMLT "lt")
- (UNSPEC_CMEQ "eq")
- (UNSPEC_CMHS "hs") (UNSPEC_CMHI "hi")
- (UNSPEC_CMTST "tst")])
-
-(define_int_attr offsetlr [(UNSPEC_SSLI "1") (UNSPEC_USLI "1")
- (UNSPEC_SSRI "0") (UNSPEC_USRI "0")])
-
-;; Standard pattern names for floating-point rounding instructions.
-(define_int_attr frint_pattern [(UNSPEC_FRINTZ "btrunc")
- (UNSPEC_FRINTP "ceil")
- (UNSPEC_FRINTM "floor")
- (UNSPEC_FRINTI "nearbyint")
- (UNSPEC_FRINTX "rint")
- (UNSPEC_FRINTA "round")])
-
-;; frint suffix for floating-point rounding instructions.
-(define_int_attr frint_suffix [(UNSPEC_FRINTZ "z") (UNSPEC_FRINTP "p")
- (UNSPEC_FRINTM "m") (UNSPEC_FRINTI "i")
- (UNSPEC_FRINTX "x") (UNSPEC_FRINTA "a")])
-
-(define_int_attr fcvt_pattern [(UNSPEC_FRINTZ "btrunc") (UNSPEC_FRINTA "round")
- (UNSPEC_FRINTP "ceil") (UNSPEC_FRINTM "floor")])
-
-(define_int_attr perm_insn [(UNSPEC_ZIP1 "zip") (UNSPEC_ZIP2 "zip")
- (UNSPEC_TRN1 "trn") (UNSPEC_TRN2 "trn")
- (UNSPEC_UZP1 "uzp") (UNSPEC_UZP2 "uzp")])
-
-(define_int_attr perm_hilo [(UNSPEC_ZIP1 "1") (UNSPEC_ZIP2 "2")
- (UNSPEC_TRN1 "1") (UNSPEC_TRN2 "2")
- (UNSPEC_UZP1 "1") (UNSPEC_UZP2 "2")])
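Closing out the iterators file, here is a sketch of how an int iterator and its attributes drive a single SIMD pattern. It is closely modelled on the permute pattern in aarch64-simd.md, but the name is invented here and the VALL mode iterator is assumed from earlier in this file.

;; Sketch only: PERMUTE crossed with perm_insn/perm_hilo yields
;; zip1, zip2, trn1, trn2, uzp1 and uzp2 from one template.
(define_insn "*aarch64_<perm_insn><perm_hilo><mode>_sketch"
  [(set (match_operand:VALL 0 "register_operand" "=w")
        (unspec:VALL [(match_operand:VALL 1 "register_operand" "w")
                      (match_operand:VALL 2 "register_operand" "w")]
                     PERMUTE))]
  "TARGET_SIMD"
  "<perm_insn><perm_hilo>\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
)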
diff --git a/gcc-4.8.1/gcc/config/aarch64/large.md b/gcc-4.8.1/gcc/config/aarch64/large.md
deleted file mode 100644
index 4316cc7df..000000000
--- a/gcc-4.8.1/gcc/config/aarch64/large.md
+++ /dev/null
@@ -1,312 +0,0 @@
-;; Copyright (C) 2012-2013 Free Software Foundation, Inc.
-;;
-;; Contributed by ARM Ltd.
-;;
-;; This file is part of GCC.
-;;
-;; GCC is free software; you can redistribute it and/or modify it
-;; under the terms of the GNU General Public License as published by
-;; the Free Software Foundation; either version 3, or (at your option)
-;; any later version.
-;;
-;; GCC is distributed in the hope that it will be useful, but
-;; WITHOUT ANY WARRANTY; without even the implied warranty of
-;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-;; General Public License for more details.
-;;
-;; You should have received a copy of the GNU General Public License
-;; along with GCC; see the file COPYING3. If not see
-;; <http://www.gnu.org/licenses/>.
-
-;; In the absence of any ARMv8-A implementations, two pipeline models
-;; derived from ARM's most recent ARMv7-A cores (Cortex-A7 and Cortex-A15)
-;; are included by way of example. This is a temporary measure.
-
-;; Example pipeline description for a 'large' core implementing AArch64.
-
-;;-------------------------------------------------------
-;; General Description
-;;-------------------------------------------------------
-
-(define_automaton "large_cpu")
-
-;; The core is modelled as a triple issue pipeline that has
-;; the following dispatch units.
-;; 1. Two pipelines for simple integer operations: int1, int2
-;; 2. Two pipelines for SIMD and FP data-processing operations: fpsimd1, fpsimd2
-;; 3. One pipeline for branch operations: br
-;; 4. One pipeline for integer multiply and divide operations: multdiv
-;; 5. Two pipelines for load and store operations: ls1, ls2
-;;
-;; We can issue into three pipelines per cycle.
-;;
-;; We assume that, where we have unit pairs, xxx1 is always filled before xxx2.
-
-;;-------------------------------------------------------
-;; CPU Units and Reservations
-;;-------------------------------------------------------
-
-;; The three issue units
-(define_cpu_unit "large_cpu_unit_i1, large_cpu_unit_i2, large_cpu_unit_i3" "large_cpu")
-
-(define_reservation "large_cpu_resv_i1"
- "(large_cpu_unit_i1 | large_cpu_unit_i2 | large_cpu_unit_i3)")
-
-(define_reservation "large_cpu_resv_i2"
- "((large_cpu_unit_i1 + large_cpu_unit_i2) | (large_cpu_unit_i2 + large_cpu_unit_i3))")
-
-(define_reservation "large_cpu_resv_i3"
- "(large_cpu_unit_i1 + large_cpu_unit_i2 + large_cpu_unit_i3)")
-
-(final_presence_set "large_cpu_unit_i2" "large_cpu_unit_i1")
-(final_presence_set "large_cpu_unit_i3" "large_cpu_unit_i2")
-
-;; The main dispatch units
-(define_cpu_unit "large_cpu_unit_int1, large_cpu_unit_int2" "large_cpu")
-(define_cpu_unit "large_cpu_unit_fpsimd1, large_cpu_unit_fpsimd2" "large_cpu")
-(define_cpu_unit "large_cpu_unit_ls1, large_cpu_unit_ls2" "large_cpu")
-(define_cpu_unit "large_cpu_unit_br" "large_cpu")
-(define_cpu_unit "large_cpu_unit_multdiv" "large_cpu")
-
-(define_reservation "large_cpu_resv_ls" "(large_cpu_unit_ls1 | large_cpu_unit_ls2)")
-
-;; The extended load-store pipeline
-(define_cpu_unit "large_cpu_unit_load, large_cpu_unit_store" "large_cpu")
-
-;; The extended ALU pipeline
-(define_cpu_unit "large_cpu_unit_int1_alu, large_cpu_unit_int2_alu" "large_cpu")
-(define_cpu_unit "large_cpu_unit_int1_shf, large_cpu_unit_int2_shf" "large_cpu")
-(define_cpu_unit "large_cpu_unit_int1_sat, large_cpu_unit_int2_sat" "large_cpu")
-
-
-;;-------------------------------------------------------
-;; Simple ALU Instructions
-;;-------------------------------------------------------
-
-;; Simple ALU operations without shift
-(define_insn_reservation "large_cpu_alu" 2
- (and (eq_attr "tune" "large") (eq_attr "v8type" "adc,alu,alu_ext"))
- "large_cpu_resv_i1, \
- (large_cpu_unit_int1, large_cpu_unit_int1_alu) |\
- (large_cpu_unit_int2, large_cpu_unit_int2_alu)")
-
-(define_insn_reservation "large_cpu_logic" 2
- (and (eq_attr "tune" "large") (eq_attr "v8type" "logic,logic_imm"))
- "large_cpu_resv_i1, \
- (large_cpu_unit_int1, large_cpu_unit_int1_alu) |\
- (large_cpu_unit_int2, large_cpu_unit_int2_alu)")
-
-(define_insn_reservation "large_cpu_shift" 2
- (and (eq_attr "tune" "large") (eq_attr "v8type" "shift,shift_imm"))
- "large_cpu_resv_i1, \
- (large_cpu_unit_int1, large_cpu_unit_int1_shf) |\
- (large_cpu_unit_int2, large_cpu_unit_int2_shf)")
-
-;; Simple ALU operations with immediate shift
-(define_insn_reservation "large_cpu_alu_shift" 3
- (and (eq_attr "tune" "large") (eq_attr "v8type" "alu_shift"))
- "large_cpu_resv_i1, \
- (large_cpu_unit_int1,
- large_cpu_unit_int1 + large_cpu_unit_int1_shf, large_cpu_unit_int1_alu) | \
- (large_cpu_unit_int2,
- large_cpu_unit_int2 + large_cpu_unit_int2_shf, large_cpu_unit_int2_alu)")
-
-(define_insn_reservation "large_cpu_logic_shift" 3
- (and (eq_attr "tune" "large") (eq_attr "v8type" "logic_shift"))
- "large_cpu_resv_i1, \
- (large_cpu_unit_int1, large_cpu_unit_int1_alu) |\
- (large_cpu_unit_int2, large_cpu_unit_int2_alu)")
-
-
-;;-------------------------------------------------------
-;; Multiplication/Division
-;;-------------------------------------------------------
-
-;; Simple multiplication
-(define_insn_reservation "large_cpu_mult_single" 3
- (and (eq_attr "tune" "large")
- (and (eq_attr "v8type" "mult,madd") (eq_attr "mode" "SI")))
- "large_cpu_resv_i1, large_cpu_unit_multdiv")
-
-(define_insn_reservation "large_cpu_mult_double" 4
- (and (eq_attr "tune" "large")
- (and (eq_attr "v8type" "mult,madd") (eq_attr "mode" "DI")))
- "large_cpu_resv_i1, large_cpu_unit_multdiv")
-
-;; 64-bit multiplication
-(define_insn_reservation "large_cpu_mull" 4
- (and (eq_attr "tune" "large") (eq_attr "v8type" "mull,mulh,maddl"))
- "large_cpu_resv_i1, large_cpu_unit_multdiv * 2")
-
-;; Division
-(define_insn_reservation "large_cpu_udiv_single" 9
- (and (eq_attr "tune" "large")
- (and (eq_attr "v8type" "udiv") (eq_attr "mode" "SI")))
- "large_cpu_resv_i1, large_cpu_unit_multdiv")
-
-(define_insn_reservation "large_cpu_udiv_double" 18
- (and (eq_attr "tune" "large")
- (and (eq_attr "v8type" "udiv") (eq_attr "mode" "DI")))
- "large_cpu_resv_i1, large_cpu_unit_multdiv")
-
-(define_insn_reservation "large_cpu_sdiv_single" 10
- (and (eq_attr "tune" "large")
- (and (eq_attr "v8type" "sdiv") (eq_attr "mode" "SI")))
- "large_cpu_resv_i1, large_cpu_unit_multdiv")
-
-(define_insn_reservation "large_cpu_sdiv_double" 20
- (and (eq_attr "tune" "large")
- (and (eq_attr "v8type" "sdiv") (eq_attr "mode" "DI")))
- "large_cpu_resv_i1, large_cpu_unit_multdiv")
-
-
-;;-------------------------------------------------------
-;; Branches
-;;-------------------------------------------------------
-
-;; Branches take one issue slot.
-;; No latency as there is no result
-(define_insn_reservation "large_cpu_branch" 0
- (and (eq_attr "tune" "large") (eq_attr "v8type" "branch"))
- "large_cpu_resv_i1, large_cpu_unit_br")
-
-
-;; Calls take up all issue slots, and form a block in the
-;; pipeline.  The result, however, is available the next cycle.
-;; Adding new units requires this reservation to be updated.
-(define_insn_reservation "large_cpu_call" 1
- (and (eq_attr "tune" "large") (eq_attr "v8type" "call"))
- "large_cpu_resv_i3 | large_cpu_resv_i2, \
- large_cpu_unit_int1 + large_cpu_unit_int2 + large_cpu_unit_br + \
- large_cpu_unit_multdiv + large_cpu_unit_fpsimd1 + large_cpu_unit_fpsimd2 + \
- large_cpu_unit_ls1 + large_cpu_unit_ls2,\
- large_cpu_unit_int1_alu + large_cpu_unit_int1_shf + large_cpu_unit_int1_sat + \
- large_cpu_unit_int2_alu + large_cpu_unit_int2_shf + \
- large_cpu_unit_int2_sat + large_cpu_unit_load + large_cpu_unit_store")
-
-
-;;-------------------------------------------------------
-;; Load/Store Instructions
-;;-------------------------------------------------------
-
-;; Loads of up to two words.
-(define_insn_reservation "large_cpu_load1" 4
- (and (eq_attr "tune" "large") (eq_attr "v8type" "load_acq,load1,load2"))
- "large_cpu_resv_i1, large_cpu_resv_ls, large_cpu_unit_load, nothing")
-
-;; Stores of up to two words.
-(define_insn_reservation "large_cpu_store1" 0
- (and (eq_attr "tune" "large") (eq_attr "v8type" "store_rel,store1,store2"))
- "large_cpu_resv_i1, large_cpu_resv_ls, large_cpu_unit_store")
-
-
-;;-------------------------------------------------------
-;; Floating-point arithmetic.
-;;-------------------------------------------------------
-
-(define_insn_reservation "large_cpu_fpalu" 4
- (and (eq_attr "tune" "large")
- (eq_attr "v8type" "ffarith,fadd,fccmp,fcvt,fcmp"))
- "large_cpu_resv_i1 + large_cpu_unit_fpsimd1")
-
-(define_insn_reservation "large_cpu_fconst" 3
- (and (eq_attr "tune" "large")
- (eq_attr "v8type" "fconst"))
- "large_cpu_resv_i1 + large_cpu_unit_fpsimd1")
-
-(define_insn_reservation "large_cpu_fpmuls" 4
- (and (eq_attr "tune" "large")
- (and (eq_attr "v8type" "fmul,fmadd") (eq_attr "mode" "SF")))
- "large_cpu_resv_i1 + large_cpu_unit_fpsimd1")
-
-(define_insn_reservation "large_cpu_fpmuld" 7
- (and (eq_attr "tune" "large")
- (and (eq_attr "v8type" "fmul,fmadd") (eq_attr "mode" "DF")))
- "large_cpu_resv_i1 + large_cpu_unit_fpsimd1, large_cpu_unit_fpsimd1 * 2,\
- large_cpu_resv_i1 + large_cpu_unit_fpsimd1")
-
-
-;;-------------------------------------------------------
-;; Floating-point Division
-;;-------------------------------------------------------
-
-;; Single-precision divide takes 14 cycles to complete, including the
-;; time taken for the special result-collecting instruction to travel
-;; down the multiply pipeline.
-
-(define_insn_reservation "large_cpu_fdivs" 14
- (and (eq_attr "tune" "large")
- (and (eq_attr "v8type" "fdiv,fsqrt") (eq_attr "mode" "SF")))
- "large_cpu_resv_i1, large_cpu_unit_fpsimd1 * 13")
-
-(define_insn_reservation "large_cpu_fdivd" 29
- (and (eq_attr "tune" "large")
- (and (eq_attr "v8type" "fdiv,fsqrt") (eq_attr "mode" "DF")))
- "large_cpu_resv_i1, large_cpu_unit_fpsimd1 * 28")
-
-
-
-;;-------------------------------------------------------
-;; Floating-point Transfers
-;;-------------------------------------------------------
-
-(define_insn_reservation "large_cpu_i2f" 4
- (and (eq_attr "tune" "large")
- (eq_attr "v8type" "fmovi2f"))
- "large_cpu_resv_i1")
-
-(define_insn_reservation "large_cpu_f2i" 2
- (and (eq_attr "tune" "large")
- (eq_attr "v8type" "fmovf2i"))
- "large_cpu_resv_i1")
-
-
-;;-------------------------------------------------------
-;; Floating-point Load/Store
-;;-------------------------------------------------------
-
-(define_insn_reservation "large_cpu_floads" 4
- (and (eq_attr "tune" "large")
- (and (eq_attr "v8type" "fpsimd_load,fpsimd_load2") (eq_attr "mode" "SF")))
- "large_cpu_resv_i1")
-
-(define_insn_reservation "large_cpu_floadd" 5
- (and (eq_attr "tune" "large")
- (and (eq_attr "v8type" "fpsimd_load,fpsimd_load2") (eq_attr "mode" "DF")))
- "large_cpu_resv_i1 + large_cpu_unit_br, large_cpu_resv_i1")
-
-(define_insn_reservation "large_cpu_fstores" 0
- (and (eq_attr "tune" "large")
- (and (eq_attr "v8type" "fpsimd_store,fpsimd_store2") (eq_attr "mode" "SF")))
- "large_cpu_resv_i1")
-
-(define_insn_reservation "large_cpu_fstored" 0
- (and (eq_attr "tune" "large")
- (and (eq_attr "v8type" "fpsimd_store,fpsimd_store2") (eq_attr "mode" "DF")))
- "large_cpu_resv_i1 + large_cpu_unit_br, large_cpu_resv_i1")
-
-
-;;-------------------------------------------------------
-;; Bypasses
-;;-------------------------------------------------------
-
-(define_bypass 1 "large_cpu_alu, large_cpu_logic, large_cpu_shift"
- "large_cpu_alu, large_cpu_alu_shift, large_cpu_logic, large_cpu_logic_shift, large_cpu_shift")
-
-(define_bypass 2 "large_cpu_alu_shift, large_cpu_logic_shift"
- "large_cpu_alu, large_cpu_alu_shift, large_cpu_logic, large_cpu_logic_shift, large_cpu_shift")
-
-(define_bypass 1 "large_cpu_alu, large_cpu_logic, large_cpu_shift" "large_cpu_load1")
-
-(define_bypass 2 "large_cpu_alu_shift, large_cpu_logic_shift" "large_cpu_load1")
-
-(define_bypass 2 "large_cpu_floads"
- "large_cpu_fpalu, large_cpu_fpmuld,\
- large_cpu_fdivs, large_cpu_fdivd,\
- large_cpu_f2i")
-
-(define_bypass 3 "large_cpu_floadd"
- "large_cpu_fpalu, large_cpu_fpmuld,\
- large_cpu_fdivs, large_cpu_fdivd,\
- large_cpu_f2i")
diff --git a/gcc-4.8.1/gcc/config/aarch64/predicates.md b/gcc-4.8.1/gcc/config/aarch64/predicates.md
deleted file mode 100644
index 8f80b2028..000000000
--- a/gcc-4.8.1/gcc/config/aarch64/predicates.md
+++ /dev/null
@@ -1,298 +0,0 @@
-;; Machine description for AArch64 architecture.
-;; Copyright (C) 2009-2013 Free Software Foundation, Inc.
-;; Contributed by ARM Ltd.
-;;
-;; This file is part of GCC.
-;;
-;; GCC is free software; you can redistribute it and/or modify it
-;; under the terms of the GNU General Public License as published by
-;; the Free Software Foundation; either version 3, or (at your option)
-;; any later version.
-;;
-;; GCC is distributed in the hope that it will be useful, but
-;; WITHOUT ANY WARRANTY; without even the implied warranty of
-;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-;; General Public License for more details.
-;;
-;; You should have received a copy of the GNU General Public License
-;; along with GCC; see the file COPYING3. If not see
-;; <http://www.gnu.org/licenses/>.
-
-(define_special_predicate "cc_register"
- (and (match_code "reg")
- (and (match_test "REGNO (op) == CC_REGNUM")
- (ior (match_test "mode == GET_MODE (op)")
- (match_test "mode == VOIDmode
- && GET_MODE_CLASS (GET_MODE (op)) == MODE_CC"))))
-)
-
-(define_predicate "aarch64_reg_or_zero"
- (and (match_code "reg,subreg,const_int")
- (ior (match_operand 0 "register_operand")
- (match_test "op == const0_rtx"))))
-
-(define_predicate "aarch64_reg_zero_or_m1_or_1"
- (and (match_code "reg,subreg,const_int")
- (ior (match_operand 0 "register_operand")
- (ior (match_test "op == const0_rtx")
- (ior (match_test "op == constm1_rtx")
- (match_test "op == const1_rtx"))))))
-
-(define_predicate "aarch64_fp_compare_operand"
- (ior (match_operand 0 "register_operand")
- (and (match_code "const_double")
- (match_test "aarch64_float_const_zero_rtx_p (op)"))))
-
-(define_predicate "aarch64_plus_immediate"
- (and (match_code "const_int")
- (ior (match_test "aarch64_uimm12_shift (INTVAL (op))")
- (match_test "aarch64_uimm12_shift (-INTVAL (op))"))))
-
-(define_predicate "aarch64_plus_operand"
- (ior (match_operand 0 "register_operand")
- (match_operand 0 "aarch64_plus_immediate")))
-
-(define_predicate "aarch64_pluslong_immediate"
- (and (match_code "const_int")
- (match_test "(INTVAL (op) < 0xffffff && INTVAL (op) > -0xffffff)")))
-
-(define_predicate "aarch64_pluslong_operand"
- (ior (match_operand 0 "register_operand")
- (match_operand 0 "aarch64_pluslong_immediate")))
-
-(define_predicate "aarch64_logical_immediate"
- (and (match_code "const_int")
- (match_test "aarch64_bitmask_imm (INTVAL (op), mode)")))
-
-(define_predicate "aarch64_logical_operand"
- (ior (match_operand 0 "register_operand")
- (match_operand 0 "aarch64_logical_immediate")))
-
-(define_predicate "aarch64_shift_imm_si"
- (and (match_code "const_int")
- (match_test "(unsigned HOST_WIDE_INT) INTVAL (op) < 32")))
-
-(define_predicate "aarch64_shift_imm_di"
- (and (match_code "const_int")
- (match_test "(unsigned HOST_WIDE_INT) INTVAL (op) < 64")))
-
-(define_predicate "aarch64_reg_or_shift_imm_si"
- (ior (match_operand 0 "register_operand")
- (match_operand 0 "aarch64_shift_imm_si")))
-
-(define_predicate "aarch64_reg_or_shift_imm_di"
- (ior (match_operand 0 "register_operand")
- (match_operand 0 "aarch64_shift_imm_di")))
-
-;; The imm3 field is a 3-bit field that only accepts immediates in the
-;; range 0..4.
-(define_predicate "aarch64_imm3"
- (and (match_code "const_int")
- (match_test "(unsigned HOST_WIDE_INT) INTVAL (op) <= 4")))
-
-(define_predicate "aarch64_pwr_imm3"
- (and (match_code "const_int")
- (match_test "INTVAL (op) != 0
- && (unsigned) exact_log2 (INTVAL (op)) <= 4")))
-
-(define_predicate "aarch64_pwr_2_si"
- (and (match_code "const_int")
- (match_test "INTVAL (op) != 0
- && (unsigned) exact_log2 (INTVAL (op)) < 32")))
-
-(define_predicate "aarch64_pwr_2_di"
- (and (match_code "const_int")
- (match_test "INTVAL (op) != 0
- && (unsigned) exact_log2 (INTVAL (op)) < 64")))
-
-(define_predicate "aarch64_mem_pair_operand"
- (and (match_code "mem")
- (match_test "aarch64_legitimate_address_p (mode, XEXP (op, 0), PARALLEL,
- 0)")))
-
-(define_predicate "aarch64_const_address"
- (and (match_code "symbol_ref")
- (match_test "mode == DImode && CONSTANT_ADDRESS_P (op)")))
-
-(define_predicate "aarch64_valid_symref"
- (match_code "const, symbol_ref, label_ref")
-{
- enum aarch64_symbol_type symbol_type;
- return (aarch64_symbolic_constant_p (op, SYMBOL_CONTEXT_ADR, &symbol_type)
- && symbol_type != SYMBOL_FORCE_TO_MEM);
-})
-
-(define_predicate "aarch64_tls_ie_symref"
- (match_code "const, symbol_ref, label_ref")
-{
- switch (GET_CODE (op))
- {
- case CONST:
- op = XEXP (op, 0);
- if (GET_CODE (op) != PLUS
- || GET_CODE (XEXP (op, 0)) != SYMBOL_REF
- || GET_CODE (XEXP (op, 1)) != CONST_INT)
- return false;
- op = XEXP (op, 0);
-
- case SYMBOL_REF:
- return SYMBOL_REF_TLS_MODEL (op) == TLS_MODEL_INITIAL_EXEC;
-
- default:
- gcc_unreachable ();
- }
-})
-
-(define_predicate "aarch64_tls_le_symref"
- (match_code "const, symbol_ref, label_ref")
-{
- switch (GET_CODE (op))
- {
- case CONST:
- op = XEXP (op, 0);
- if (GET_CODE (op) != PLUS
- || GET_CODE (XEXP (op, 0)) != SYMBOL_REF
- || GET_CODE (XEXP (op, 1)) != CONST_INT)
- return false;
- op = XEXP (op, 0);
-
- case SYMBOL_REF:
- return SYMBOL_REF_TLS_MODEL (op) == TLS_MODEL_LOCAL_EXEC;
-
- default:
- gcc_unreachable ();
- }
-})
-
-(define_predicate "aarch64_mov_operand"
- (and (match_code "reg,subreg,mem,const_int,symbol_ref,high")
- (ior (match_operand 0 "register_operand")
- (ior (match_operand 0 "memory_operand")
- (ior (match_test "GET_CODE (op) == HIGH
- && aarch64_valid_symref (XEXP (op, 0),
- GET_MODE (XEXP (op, 0)))")
- (ior (match_test "CONST_INT_P (op)
- && aarch64_move_imm (INTVAL (op), mode)")
- (match_test "aarch64_const_address (op, mode)")))))))
-
-(define_predicate "aarch64_movti_operand"
- (and (match_code "reg,subreg,mem,const_int")
- (ior (match_operand 0 "register_operand")
- (ior (match_operand 0 "memory_operand")
- (match_operand 0 "const_int_operand")))))
-
-(define_predicate "aarch64_reg_or_imm"
- (and (match_code "reg,subreg,const_int")
- (ior (match_operand 0 "register_operand")
- (match_operand 0 "const_int_operand"))))
-
-;; True for integer comparisons and for FP comparisons other than LTGT or UNEQ.
-(define_special_predicate "aarch64_comparison_operator"
- (match_code "eq,ne,le,lt,ge,gt,geu,gtu,leu,ltu,unordered,ordered,unlt,unle,unge,ungt"))
-
-;; True if the operand is a memory reference suitable for a load/store exclusive.
-(define_predicate "aarch64_sync_memory_operand"
- (and (match_operand 0 "memory_operand")
- (match_code "reg" "0")))
-
-;; Predicates for parallel expanders based on mode.
-(define_special_predicate "vect_par_cnst_hi_half"
- (match_code "parallel")
-{
- HOST_WIDE_INT count = XVECLEN (op, 0);
- int nunits = GET_MODE_NUNITS (mode);
- int i;
-
- if (count < 1
- || count != nunits / 2)
- return false;
-
- if (!VECTOR_MODE_P (mode))
- return false;
-
- for (i = 0; i < count; i++)
- {
- rtx elt = XVECEXP (op, 0, i);
- int val;
-
- if (GET_CODE (elt) != CONST_INT)
- return false;
-
- val = INTVAL (elt);
- if (val != (nunits / 2) + i)
- return false;
- }
- return true;
-})
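As a concrete illustration (not from the original file) of what this predicate accepts:

;; For V4SImode (nunits == 4) the predicate matches exactly
;;   (parallel [(const_int 2) (const_int 3)])
;; i.e. a lane selector naming the high half of the vector;
;; vect_par_cnst_lo_half below matches
;;   (parallel [(const_int 0) (const_int 1)]).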
-
-(define_special_predicate "vect_par_cnst_lo_half"
- (match_code "parallel")
-{
- HOST_WIDE_INT count = XVECLEN (op, 0);
- int nunits = GET_MODE_NUNITS (mode);
- int i;
-
- if (count < 1
- || count != nunits / 2)
- return false;
-
- if (!VECTOR_MODE_P (mode))
- return false;
-
- for (i = 0; i < count; i++)
- {
- rtx elt = XVECEXP (op, 0, i);
- int val;
-
- if (GET_CODE (elt) != CONST_INT)
- return false;
-
- val = INTVAL (elt);
- if (val != i)
- return false;
- }
- return true;
-})
-
-
-(define_special_predicate "aarch64_simd_lshift_imm"
- (match_code "const_vector")
-{
- return aarch64_simd_shift_imm_p (op, mode, true);
-})
-
-(define_special_predicate "aarch64_simd_rshift_imm"
- (match_code "const_vector")
-{
- return aarch64_simd_shift_imm_p (op, mode, false);
-})
-
-(define_predicate "aarch64_simd_reg_or_zero"
- (and (match_code "reg,subreg,const_int,const_vector")
- (ior (match_operand 0 "register_operand")
- (ior (match_test "op == const0_rtx")
- (match_test "aarch64_simd_imm_zero_p (op, mode)")))))
-
-(define_predicate "aarch64_simd_struct_operand"
- (and (match_code "mem")
- (match_test "TARGET_SIMD && aarch64_simd_mem_operand_p (op)")))
-
-;; Like general_operand but allow only valid SIMD addressing modes.
-(define_predicate "aarch64_simd_general_operand"
- (and (match_operand 0 "general_operand")
- (match_test "!MEM_P (op)
- || GET_CODE (XEXP (op, 0)) == POST_INC
- || GET_CODE (XEXP (op, 0)) == REG")))
-
-;; Like nonimmediate_operand but allow only valid SIMD addressing modes.
-(define_predicate "aarch64_simd_nonimmediate_operand"
- (and (match_operand 0 "nonimmediate_operand")
- (match_test "!MEM_P (op)
- || GET_CODE (XEXP (op, 0)) == POST_INC
- || GET_CODE (XEXP (op, 0)) == REG")))
-
-(define_special_predicate "aarch64_simd_imm_zero"
- (match_code "const_vector")
-{
- return aarch64_simd_imm_zero_p (op, mode);
-})
diff --git a/gcc-4.8.1/gcc/config/aarch64/small.md b/gcc-4.8.1/gcc/config/aarch64/small.md
deleted file mode 100644
index a19083ccf..000000000
--- a/gcc-4.8.1/gcc/config/aarch64/small.md
+++ /dev/null
@@ -1,287 +0,0 @@
-;; Copyright (C) 2012-2013 Free Software Foundation, Inc.
-;;
-;; Contributed by ARM Ltd.
-;;
-;; This file is part of GCC.
-;;
-;; GCC is free software; you can redistribute it and/or modify it
-;; under the terms of the GNU General Public License as published by
-;; the Free Software Foundation; either version 3, or (at your option)
-;; any later version.
-;;
-;; GCC is distributed in the hope that it will be useful, but
-;; WITHOUT ANY WARRANTY; without even the implied warranty of
-;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-;; General Public License for more details.
-;;
-;; You should have received a copy of the GNU General Public License
-;; along with GCC; see the file COPYING3. If not see
-;; <http://www.gnu.org/licenses/>.
-
-;; In the absence of any ARMv8-A implementations, two pipeline models
-;; derived from ARM's most recent ARMv7-A cores (Cortex-A7 and Cortex-A15)
-;; are included by way of example. This is a temporary measure.
-
-;; Example pipeline description for a 'small' core implementing AArch64.
-
-;;-------------------------------------------------------
-;; General Description
-;;-------------------------------------------------------
-
-(define_automaton "small_cpu")
-
-;; The core is modelled as a single issue pipeline with the following
-;; dispatch units.
-;; 1. One pipeline for simple instructions.
-;; 2. One pipeline for branch instructions.
-;;
-;; There are five pipeline stages.
-;; The decode/issue stages operate the same for all instructions.
-;; Instructions always advance one stage per cycle in order.
-;; Only branch instructions may dual-issue with other instructions, except
-;; when those instructions take multiple cycles to issue.
-
-
-;;-------------------------------------------------------
-;; CPU Units and Reservations
-;;-------------------------------------------------------
-
-(define_cpu_unit "small_cpu_unit_i" "small_cpu")
-(define_cpu_unit "small_cpu_unit_br" "small_cpu")
-
-;; Pseudo-unit for blocking the multiply pipeline when a double-precision
-;; multiply is in progress.
-(define_cpu_unit "small_cpu_unit_fpmul_pipe" "small_cpu")
-
-;; The floating-point add pipeline, used to model the usage
-;; of the add pipeline by fp alu instructions.
-(define_cpu_unit "small_cpu_unit_fpadd_pipe" "small_cpu")
-
-;; Floating-point division pipeline (long latency, out-of-order completion).
-(define_cpu_unit "small_cpu_unit_fpdiv" "small_cpu")
-
-
-;;-------------------------------------------------------
-;; Simple ALU Instructions
-;;-------------------------------------------------------
-
-;; Simple ALU operations without shift
-(define_insn_reservation "small_cpu_alu" 2
- (and (eq_attr "tune" "small")
- (eq_attr "v8type" "adc,alu,alu_ext"))
- "small_cpu_unit_i")
-
-(define_insn_reservation "small_cpu_logic" 2
- (and (eq_attr "tune" "small")
- (eq_attr "v8type" "logic,logic_imm"))
- "small_cpu_unit_i")
-
-(define_insn_reservation "small_cpu_shift" 2
- (and (eq_attr "tune" "small")
- (eq_attr "v8type" "shift,shift_imm"))
- "small_cpu_unit_i")
-
-;; Simple ALU operations with immediate shift
-(define_insn_reservation "small_cpu_alu_shift" 2
- (and (eq_attr "tune" "small")
- (eq_attr "v8type" "alu_shift"))
- "small_cpu_unit_i")
-
-(define_insn_reservation "small_cpu_logic_shift" 2
- (and (eq_attr "tune" "small")
- (eq_attr "v8type" "logic_shift"))
- "small_cpu_unit_i")
-
-
-;;-------------------------------------------------------
-;; Multiplication/Division
-;;-------------------------------------------------------
-
-;; Simple multiplication
-(define_insn_reservation "small_cpu_mult_single" 2
- (and (eq_attr "tune" "small")
- (and (eq_attr "v8type" "mult,madd") (eq_attr "mode" "SI")))
- "small_cpu_unit_i")
-
-(define_insn_reservation "small_cpu_mult_double" 3
- (and (eq_attr "tune" "small")
- (and (eq_attr "v8type" "mult,madd") (eq_attr "mode" "DI")))
- "small_cpu_unit_i")
-
-;; 64-bit multiplication
-(define_insn_reservation "small_cpu_mull" 3
- (and (eq_attr "tune" "small") (eq_attr "v8type" "mull,mulh,maddl"))
- "small_cpu_unit_i * 2")
-
-;; Division
-(define_insn_reservation "small_cpu_udiv_single" 5
- (and (eq_attr "tune" "small")
- (and (eq_attr "v8type" "udiv") (eq_attr "mode" "SI")))
- "small_cpu_unit_i")
-
-(define_insn_reservation "small_cpu_udiv_double" 10
- (and (eq_attr "tune" "small")
- (and (eq_attr "v8type" "udiv") (eq_attr "mode" "DI")))
- "small_cpu_unit_i")
-
-(define_insn_reservation "small_cpu_sdiv_single" 6
- (and (eq_attr "tune" "small")
- (and (eq_attr "v8type" "sdiv") (eq_attr "mode" "SI")))
- "small_cpu_unit_i")
-
-(define_insn_reservation "small_cpu_sdiv_double" 12
- (and (eq_attr "tune" "small")
- (and (eq_attr "v8type" "sdiv") (eq_attr "mode" "DI")))
- "small_cpu_unit_i")
-
-
-;;-------------------------------------------------------
-;; Load/Store Instructions
-;;-------------------------------------------------------
-
-(define_insn_reservation "small_cpu_load1" 2
- (and (eq_attr "tune" "small")
- (eq_attr "v8type" "load_acq,load1"))
- "small_cpu_unit_i")
-
-(define_insn_reservation "small_cpu_store1" 0
- (and (eq_attr "tune" "small")
- (eq_attr "v8type" "store_rel,store1"))
- "small_cpu_unit_i")
-
-(define_insn_reservation "small_cpu_load2" 3
- (and (eq_attr "tune" "small")
- (eq_attr "v8type" "load2"))
- "small_cpu_unit_i + small_cpu_unit_br, small_cpu_unit_i")
-
-(define_insn_reservation "small_cpu_store2" 0
- (and (eq_attr "tune" "small")
- (eq_attr "v8type" "store2"))
- "small_cpu_unit_i + small_cpu_unit_br, small_cpu_unit_i")
-
-
-;;-------------------------------------------------------
-;; Branches
-;;-------------------------------------------------------
-
-;; Direct branches are the only instructions that can dual-issue.
-;; The latency here represents when the branch actually takes place.
-
-(define_insn_reservation "small_cpu_unit_br" 3
- (and (eq_attr "tune" "small")
- (eq_attr "v8type" "branch,call"))
- "small_cpu_unit_br")
-
-
-;;-------------------------------------------------------
-;; Floating-point arithmetic.
-;;-------------------------------------------------------
-
-(define_insn_reservation "small_cpu_fpalu" 4
- (and (eq_attr "tune" "small")
- (eq_attr "v8type" "ffarith,fadd,fccmp,fcvt,fcmp"))
- "small_cpu_unit_i + small_cpu_unit_fpadd_pipe")
-
-(define_insn_reservation "small_cpu_fconst" 3
- (and (eq_attr "tune" "small")
- (eq_attr "v8type" "fconst"))
- "small_cpu_unit_i + small_cpu_unit_fpadd_pipe")
-
-(define_insn_reservation "small_cpu_fpmuls" 4
- (and (eq_attr "tune" "small")
- (and (eq_attr "v8type" "fmul") (eq_attr "mode" "SF")))
- "small_cpu_unit_i + small_cpu_unit_fpmul_pipe")
-
-(define_insn_reservation "small_cpu_fpmuld" 7
- (and (eq_attr "tune" "small")
- (and (eq_attr "v8type" "fmul") (eq_attr "mode" "DF")))
- "small_cpu_unit_i + small_cpu_unit_fpmul_pipe, small_cpu_unit_fpmul_pipe * 2,\
- small_cpu_unit_i + small_cpu_unit_fpmul_pipe")
-
-
-;;-------------------------------------------------------
-;; Floating-point Division
-;;-------------------------------------------------------
-
-;; Single-precision divide takes 14 cycles to complete, including the
-;; time taken for the special result-collecting instruction to travel
-;; down the multiply pipeline.
-
-(define_insn_reservation "small_cpu_fdivs" 14
- (and (eq_attr "tune" "small")
- (and (eq_attr "v8type" "fdiv,fsqrt") (eq_attr "mode" "SF")))
- "small_cpu_unit_i, small_cpu_unit_fpdiv * 13")
-
-(define_insn_reservation "small_cpu_fdivd" 29
- (and (eq_attr "tune" "small")
- (and (eq_attr "v8type" "fdiv,fsqrt") (eq_attr "mode" "DF")))
- "small_cpu_unit_i, small_cpu_unit_fpdiv * 28")
-
-
-;;-------------------------------------------------------
-;; Floating-point Transfers
-;;-------------------------------------------------------
-
-(define_insn_reservation "small_cpu_i2f" 4
- (and (eq_attr "tune" "small")
- (eq_attr "v8type" "fmovi2f"))
- "small_cpu_unit_i")
-
-(define_insn_reservation "small_cpu_f2i" 2
- (and (eq_attr "tune" "small")
- (eq_attr "v8type" "fmovf2i"))
- "small_cpu_unit_i")
-
-
-;;-------------------------------------------------------
-;; Floating-point Load/Store
-;;-------------------------------------------------------
-
-(define_insn_reservation "small_cpu_floads" 4
- (and (eq_attr "tune" "small")
- (and (eq_attr "v8type" "fpsimd_load") (eq_attr "mode" "SF")))
- "small_cpu_unit_i")
-
-(define_insn_reservation "small_cpu_floadd" 5
- (and (eq_attr "tune" "small")
- (and (eq_attr "v8type" "fpsimd_load") (eq_attr "mode" "DF")))
- "small_cpu_unit_i + small_cpu_unit_br, small_cpu_unit_i")
-
-(define_insn_reservation "small_cpu_fstores" 0
- (and (eq_attr "tune" "small")
- (and (eq_attr "v8type" "fpsimd_store") (eq_attr "mode" "SF")))
- "small_cpu_unit_i")
-
-(define_insn_reservation "small_cpu_fstored" 0
- (and (eq_attr "tune" "small")
- (and (eq_attr "v8type" "fpsimd_store") (eq_attr "mode" "DF")))
- "small_cpu_unit_i + small_cpu_unit_br, small_cpu_unit_i")
-
-
-;;-------------------------------------------------------
-;; Bypasses
-;;-------------------------------------------------------
-
-;; Forwarding path for unshifted operands.
-
-(define_bypass 1 "small_cpu_alu, small_cpu_alu_shift"
- "small_cpu_alu, small_cpu_alu_shift, small_cpu_logic, small_cpu_logic_shift, small_cpu_shift")
-
-(define_bypass 1 "small_cpu_logic, small_cpu_logic_shift"
- "small_cpu_alu, small_cpu_alu_shift, small_cpu_logic, small_cpu_logic_shift, small_cpu_shift")
-
-(define_bypass 1 "small_cpu_shift"
- "small_cpu_alu, small_cpu_alu_shift, small_cpu_logic, small_cpu_logic_shift, small_cpu_shift")
-
-;; Load-to-use for floating-point values has a penalty of one cycle.
-
-(define_bypass 2 "small_cpu_floads"
- "small_cpu_fpalu, small_cpu_fpmuld,\
- small_cpu_fdivs, small_cpu_fdivd,\
- small_cpu_f2i")
-
-(define_bypass 3 "small_cpu_floadd"
- "small_cpu_fpalu, small_cpu_fpmuld,\
- small_cpu_fdivs, small_cpu_fdivd,\
- small_cpu_f2i")
diff --git a/gcc-4.8.1/gcc/config/aarch64/t-aarch64 b/gcc-4.8.1/gcc/config/aarch64/t-aarch64
deleted file mode 100644
index 4c265ebba..000000000
--- a/gcc-4.8.1/gcc/config/aarch64/t-aarch64
+++ /dev/null
@@ -1,36 +0,0 @@
-# Machine description for AArch64 architecture.
-# Copyright (C) 2009-2013 Free Software Foundation, Inc.
-# Contributed by ARM Ltd.
-#
-# This file is part of GCC.
-#
-# GCC is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 3, or (at your option)
-# any later version.
-#
-# GCC is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-# General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with GCC; see the file COPYING3. If not see
-# <http://www.gnu.org/licenses/>.
-
-TM_H += $(srcdir)/config/aarch64/aarch64-cores.def
-OPTIONS_H_EXTRA += $(srcdir)/config/aarch64/aarch64-cores.def
-
-$(srcdir)/config/aarch64/aarch64-tune.md: $(srcdir)/config/aarch64/gentune.sh \
- $(srcdir)/config/aarch64/aarch64-cores.def
- $(SHELL) $(srcdir)/config/aarch64/gentune.sh \
- $(srcdir)/config/aarch64/aarch64-cores.def > \
- $(srcdir)/config/aarch64/aarch64-tune.md
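For context, the generated aarch64-tune.md holds a single tune attribute. The sketch below shows its rough shape; the value list is produced from aarch64-cores.def, and the core names here are placeholders, not taken from this diff.

;; Sketch only; the real value list is generated from aarch64-cores.def
;; and the names below are placeholders.
(define_attr "tune"
  "generic,large,small"
  (const (symbol_ref "((enum attr_tune) aarch64_tune)")))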
-
-aarch64-builtins.o: $(srcdir)/config/aarch64/aarch64-builtins.c $(CONFIG_H) \
- $(SYSTEM_H) coretypes.h $(TM_H) \
- $(RTL_H) $(TREE_H) expr.h $(TM_P_H) $(RECOG_H) langhooks.h \
- $(DIAGNOSTIC_CORE_H) $(OPTABS_H) \
- $(srcdir)/config/aarch64/aarch64-simd-builtins.def
- $(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) \
- $(srcdir)/config/aarch64/aarch64-builtins.c
diff --git a/gcc-4.8.1/gcc/config/aarch64/t-aarch64-linux b/gcc-4.8.1/gcc/config/aarch64/t-aarch64-linux
deleted file mode 100644
index a7a0a8836..000000000
--- a/gcc-4.8.1/gcc/config/aarch64/t-aarch64-linux
+++ /dev/null
@@ -1,25 +0,0 @@
-# Machine description for AArch64 architecture.
-# Copyright (C) 2009-2013 Free Software Foundation, Inc.
-# Contributed by ARM Ltd.
-#
-# This file is part of GCC.
-#
-# GCC is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 3, or (at your option)
-# any later version.
-#
-# GCC is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-# General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with GCC; see the file COPYING3. If not see
-# <http://www.gnu.org/licenses/>.
-
-LIB1ASMSRC = aarch64/lib1funcs.asm
-LIB1ASMFUNCS = _aarch64_sync_cache_range
-
-AARCH_BE = $(if $(findstring TARGET_BIG_ENDIAN_DEFAULT=1, $(tm_defines)),_be)
-MULTILIB_OSDIRNAMES = .=../lib64$(call if_multiarch,:aarch64$(AARCH_BE)-linux-gnu)