| author | Ben Cheng <bccheng@google.com> | 2014-03-25 22:37:19 -0700 |
|---|---|---|
| committer | Ben Cheng <bccheng@google.com> | 2014-03-25 22:37:19 -0700 |
| commit | 1bc5aee63eb72b341f506ad058502cd0361f0d10 (patch) | |
| tree | c607e8252f3405424ff15bc2d00aa38dadbb2518 /gcc-4.9/gcc/config/aarch64 | |
| parent | 283a0bf58fcf333c58a2a92c3ebbc41fb9eb1fdb (diff) | |
| download | toolchain_gcc-1bc5aee63eb72b341f506ad058502cd0361f0d10.tar.gz, toolchain_gcc-1bc5aee63eb72b341f506ad058502cd0361f0d10.tar.bz2, toolchain_gcc-1bc5aee63eb72b341f506ad058502cd0361f0d10.zip | |
Initial checkin of GCC 4.9.0 from trunk (r208799).
Change-Id: I48a3c08bb98542aa215912a75f03c0890e497dba
Diffstat (limited to 'gcc-4.9/gcc/config/aarch64')
27 files changed, 47392 insertions, 0 deletions
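
The .def files added in this diff (aarch64-arches.def, aarch64-cores.def, aarch64-option-extensions.def) follow GCC's X-macro convention documented in their own headers: the including file defines AARCH64_ARCH / AARCH64_CORE / AARCH64_OPT_EXTENSION before the #include, so the same entry list can be expanded into enums, name tables, or feature-flag tables (aarch64-opts.h further down does exactly this to build enum aarch64_processor from aarch64-cores.def). The following is a minimal, self-contained sketch of that pattern, not code from this commit: the two-argument AARCH64_CORE and the inline CORES_DEF list are simplifications standing in for the real six-argument macro and for the #include of aarch64-cores.def.

/* Sketch of the .def X-macro pattern, under the assumptions above.
   CORES_DEF plays the role of `#include "aarch64-cores.def"`; the
   scheduler, architecture, flag, and cost arguments are elided.  */
#include <stdio.h>

#define CORES_DEF \
  AARCH64_CORE ("cortex-a53", cortexa53) \
  AARCH64_CORE ("cortex-a57", cortexa57)

/* First expansion: one enumerator per core entry.  */
#define AARCH64_CORE(NAME, IDENT) IDENT,
enum processor { CORES_DEF processor_end };
#undef AARCH64_CORE

/* Second expansion of the same list: a parallel table of core names.  */
#define AARCH64_CORE(NAME, IDENT) NAME,
static const char *const core_names[] = { CORES_DEF };
#undef AARCH64_CORE

int
main (void)
{
  for (int i = 0; i < processor_end; i++)
    printf ("%d -> %s\n", i, core_names[i]);
  return 0;
}

The point of the convention is that adding one line to the .def file updates every expansion site consistently, which is why the diff below can register new cores, architectures, and ISA extensions without touching the enum or table definitions by hand.
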
diff --git a/gcc-4.9/gcc/config/aarch64/aarch64-arches.def b/gcc-4.9/gcc/config/aarch64/aarch64-arches.def new file mode 100644 index 000000000..4b796d8c9 --- /dev/null +++ b/gcc-4.9/gcc/config/aarch64/aarch64-arches.def @@ -0,0 +1,29 @@ +/* Copyright (C) 2011-2014 Free Software Foundation, Inc. + Contributed by ARM Ltd. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published + by the Free Software Foundation; either version 3, or (at your + option) any later version. + + GCC is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public + License for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING3. If not see + <http://www.gnu.org/licenses/>. */ + +/* Before using #include to read this file, define a macro: + + AARCH64_ARCH(NAME, CORE, ARCH, FLAGS) + + The NAME is the name of the architecture, represented as a string + constant. The CORE is the identifier for a core representative of + this architecture. ARCH is the architecture revision. FLAGS are + the flags implied by the architecture. */ + +AARCH64_ARCH("armv8-a", generic, 8, AARCH64_FL_FOR_ARCH8) diff --git a/gcc-4.9/gcc/config/aarch64/aarch64-builtins.c b/gcc-4.9/gcc/config/aarch64/aarch64-builtins.c new file mode 100644 index 000000000..55cfe0ab2 --- /dev/null +++ b/gcc-4.9/gcc/config/aarch64/aarch64-builtins.c @@ -0,0 +1,1253 @@ +/* Builtins' description for AArch64 SIMD architecture. + Copyright (C) 2011-2014 Free Software Foundation, Inc. + Contributed by ARM Ltd. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING3. If not see + <http://www.gnu.org/licenses/>. 
*/ + +#include "config.h" +#include "system.h" +#include "coretypes.h" +#include "tm.h" +#include "rtl.h" +#include "tree.h" +#include "stor-layout.h" +#include "stringpool.h" +#include "calls.h" +#include "expr.h" +#include "tm_p.h" +#include "recog.h" +#include "langhooks.h" +#include "diagnostic-core.h" +#include "optabs.h" +#include "pointer-set.h" +#include "hash-table.h" +#include "vec.h" +#include "ggc.h" +#include "basic-block.h" +#include "tree-ssa-alias.h" +#include "internal-fn.h" +#include "gimple-fold.h" +#include "tree-eh.h" +#include "gimple-expr.h" +#include "is-a.h" +#include "gimple.h" +#include "gimple-iterator.h" + +enum aarch64_simd_builtin_type_mode +{ + T_V8QI, + T_V4HI, + T_V2SI, + T_V2SF, + T_DI, + T_DF, + T_V16QI, + T_V8HI, + T_V4SI, + T_V4SF, + T_V2DI, + T_V2DF, + T_TI, + T_EI, + T_OI, + T_XI, + T_SI, + T_SF, + T_HI, + T_QI, + T_MAX +}; + +#define v8qi_UP T_V8QI +#define v4hi_UP T_V4HI +#define v2si_UP T_V2SI +#define v2sf_UP T_V2SF +#define di_UP T_DI +#define df_UP T_DF +#define v16qi_UP T_V16QI +#define v8hi_UP T_V8HI +#define v4si_UP T_V4SI +#define v4sf_UP T_V4SF +#define v2di_UP T_V2DI +#define v2df_UP T_V2DF +#define ti_UP T_TI +#define ei_UP T_EI +#define oi_UP T_OI +#define xi_UP T_XI +#define si_UP T_SI +#define sf_UP T_SF +#define hi_UP T_HI +#define qi_UP T_QI + +#define UP(X) X##_UP + +#define SIMD_MAX_BUILTIN_ARGS 5 + +enum aarch64_type_qualifiers +{ + /* T foo. */ + qualifier_none = 0x0, + /* unsigned T foo. */ + qualifier_unsigned = 0x1, /* 1 << 0 */ + /* const T foo. */ + qualifier_const = 0x2, /* 1 << 1 */ + /* T *foo. */ + qualifier_pointer = 0x4, /* 1 << 2 */ + /* const T *foo. */ + qualifier_const_pointer = 0x6, /* qualifier_const | qualifier_pointer */ + /* Used when expanding arguments if an operand could + be an immediate. */ + qualifier_immediate = 0x8, /* 1 << 3 */ + qualifier_maybe_immediate = 0x10, /* 1 << 4 */ + /* void foo (...). */ + qualifier_void = 0x20, /* 1 << 5 */ + /* Some patterns may have internal operands, this qualifier is an + instruction to the initialisation code to skip this operand. */ + qualifier_internal = 0x40, /* 1 << 6 */ + /* Some builtins should use the T_*mode* encoded in a simd_builtin_datum + rather than using the type of the operand. */ + qualifier_map_mode = 0x80, /* 1 << 7 */ + /* qualifier_pointer | qualifier_map_mode */ + qualifier_pointer_map_mode = 0x84, + /* qualifier_const_pointer | qualifier_map_mode */ + qualifier_const_pointer_map_mode = 0x86, + /* Polynomial types. 
*/ + qualifier_poly = 0x100 +}; + +typedef struct +{ + const char *name; + enum aarch64_simd_builtin_type_mode mode; + const enum insn_code code; + unsigned int fcode; + enum aarch64_type_qualifiers *qualifiers; +} aarch64_simd_builtin_datum; + +static enum aarch64_type_qualifiers +aarch64_types_unop_qualifiers[SIMD_MAX_BUILTIN_ARGS] + = { qualifier_none, qualifier_none }; +#define TYPES_UNOP (aarch64_types_unop_qualifiers) +static enum aarch64_type_qualifiers +aarch64_types_unopu_qualifiers[SIMD_MAX_BUILTIN_ARGS] + = { qualifier_unsigned, qualifier_unsigned }; +#define TYPES_UNOPU (aarch64_types_unopu_qualifiers) +#define TYPES_CREATE (aarch64_types_unop_qualifiers) +#define TYPES_REINTERP (aarch64_types_unop_qualifiers) +static enum aarch64_type_qualifiers +aarch64_types_binop_qualifiers[SIMD_MAX_BUILTIN_ARGS] + = { qualifier_none, qualifier_none, qualifier_maybe_immediate }; +#define TYPES_BINOP (aarch64_types_binop_qualifiers) +static enum aarch64_type_qualifiers +aarch64_types_binopu_qualifiers[SIMD_MAX_BUILTIN_ARGS] + = { qualifier_unsigned, qualifier_unsigned, qualifier_unsigned }; +#define TYPES_BINOPU (aarch64_types_binopu_qualifiers) +static enum aarch64_type_qualifiers +aarch64_types_binopp_qualifiers[SIMD_MAX_BUILTIN_ARGS] + = { qualifier_poly, qualifier_poly, qualifier_poly }; +#define TYPES_BINOPP (aarch64_types_binopp_qualifiers) + +static enum aarch64_type_qualifiers +aarch64_types_ternop_qualifiers[SIMD_MAX_BUILTIN_ARGS] + = { qualifier_none, qualifier_none, qualifier_none, qualifier_none }; +#define TYPES_TERNOP (aarch64_types_ternop_qualifiers) +static enum aarch64_type_qualifiers +aarch64_types_ternopu_qualifiers[SIMD_MAX_BUILTIN_ARGS] + = { qualifier_unsigned, qualifier_unsigned, + qualifier_unsigned, qualifier_unsigned }; +#define TYPES_TERNOPU (aarch64_types_ternopu_qualifiers) + +static enum aarch64_type_qualifiers +aarch64_types_quadop_qualifiers[SIMD_MAX_BUILTIN_ARGS] + = { qualifier_none, qualifier_none, qualifier_none, + qualifier_none, qualifier_none }; +#define TYPES_QUADOP (aarch64_types_quadop_qualifiers) + +static enum aarch64_type_qualifiers +aarch64_types_getlane_qualifiers[SIMD_MAX_BUILTIN_ARGS] + = { qualifier_none, qualifier_none, qualifier_immediate }; +#define TYPES_GETLANE (aarch64_types_getlane_qualifiers) +#define TYPES_SHIFTIMM (aarch64_types_getlane_qualifiers) +static enum aarch64_type_qualifiers +aarch64_types_unsigned_shift_qualifiers[SIMD_MAX_BUILTIN_ARGS] + = { qualifier_unsigned, qualifier_unsigned, qualifier_immediate }; +#define TYPES_USHIFTIMM (aarch64_types_unsigned_shift_qualifiers) +static enum aarch64_type_qualifiers +aarch64_types_setlane_qualifiers[SIMD_MAX_BUILTIN_ARGS] + = { qualifier_none, qualifier_none, qualifier_none, qualifier_immediate }; +#define TYPES_SETLANE (aarch64_types_setlane_qualifiers) +#define TYPES_SHIFTINSERT (aarch64_types_setlane_qualifiers) +#define TYPES_SHIFTACC (aarch64_types_setlane_qualifiers) + +static enum aarch64_type_qualifiers +aarch64_types_combine_qualifiers[SIMD_MAX_BUILTIN_ARGS] + = { qualifier_none, qualifier_none, qualifier_none }; +#define TYPES_COMBINE (aarch64_types_combine_qualifiers) + +static enum aarch64_type_qualifiers +aarch64_types_load1_qualifiers[SIMD_MAX_BUILTIN_ARGS] + = { qualifier_none, qualifier_const_pointer_map_mode }; +#define TYPES_LOAD1 (aarch64_types_load1_qualifiers) +#define TYPES_LOADSTRUCT (aarch64_types_load1_qualifiers) + +static enum aarch64_type_qualifiers +aarch64_types_bsl_p_qualifiers[SIMD_MAX_BUILTIN_ARGS] + = { qualifier_poly, qualifier_unsigned, + 
qualifier_poly, qualifier_poly }; +#define TYPES_BSL_P (aarch64_types_bsl_p_qualifiers) +static enum aarch64_type_qualifiers +aarch64_types_bsl_s_qualifiers[SIMD_MAX_BUILTIN_ARGS] + = { qualifier_none, qualifier_unsigned, + qualifier_none, qualifier_none }; +#define TYPES_BSL_S (aarch64_types_bsl_s_qualifiers) +static enum aarch64_type_qualifiers +aarch64_types_bsl_u_qualifiers[SIMD_MAX_BUILTIN_ARGS] + = { qualifier_unsigned, qualifier_unsigned, + qualifier_unsigned, qualifier_unsigned }; +#define TYPES_BSL_U (aarch64_types_bsl_u_qualifiers) + +/* The first argument (return type) of a store should be void type, + which we represent with qualifier_void. Their first operand will be + a DImode pointer to the location to store to, so we must use + qualifier_map_mode | qualifier_pointer to build a pointer to the + element type of the vector. */ +static enum aarch64_type_qualifiers +aarch64_types_store1_qualifiers[SIMD_MAX_BUILTIN_ARGS] + = { qualifier_void, qualifier_pointer_map_mode, qualifier_none }; +#define TYPES_STORE1 (aarch64_types_store1_qualifiers) +#define TYPES_STORESTRUCT (aarch64_types_store1_qualifiers) + +#define CF0(N, X) CODE_FOR_aarch64_##N##X +#define CF1(N, X) CODE_FOR_##N##X##1 +#define CF2(N, X) CODE_FOR_##N##X##2 +#define CF3(N, X) CODE_FOR_##N##X##3 +#define CF4(N, X) CODE_FOR_##N##X##4 +#define CF10(N, X) CODE_FOR_##N##X + +#define VAR1(T, N, MAP, A) \ + {#N, UP (A), CF##MAP (N, A), 0, TYPES_##T}, +#define VAR2(T, N, MAP, A, B) \ + VAR1 (T, N, MAP, A) \ + VAR1 (T, N, MAP, B) +#define VAR3(T, N, MAP, A, B, C) \ + VAR2 (T, N, MAP, A, B) \ + VAR1 (T, N, MAP, C) +#define VAR4(T, N, MAP, A, B, C, D) \ + VAR3 (T, N, MAP, A, B, C) \ + VAR1 (T, N, MAP, D) +#define VAR5(T, N, MAP, A, B, C, D, E) \ + VAR4 (T, N, MAP, A, B, C, D) \ + VAR1 (T, N, MAP, E) +#define VAR6(T, N, MAP, A, B, C, D, E, F) \ + VAR5 (T, N, MAP, A, B, C, D, E) \ + VAR1 (T, N, MAP, F) +#define VAR7(T, N, MAP, A, B, C, D, E, F, G) \ + VAR6 (T, N, MAP, A, B, C, D, E, F) \ + VAR1 (T, N, MAP, G) +#define VAR8(T, N, MAP, A, B, C, D, E, F, G, H) \ + VAR7 (T, N, MAP, A, B, C, D, E, F, G) \ + VAR1 (T, N, MAP, H) +#define VAR9(T, N, MAP, A, B, C, D, E, F, G, H, I) \ + VAR8 (T, N, MAP, A, B, C, D, E, F, G, H) \ + VAR1 (T, N, MAP, I) +#define VAR10(T, N, MAP, A, B, C, D, E, F, G, H, I, J) \ + VAR9 (T, N, MAP, A, B, C, D, E, F, G, H, I) \ + VAR1 (T, N, MAP, J) +#define VAR11(T, N, MAP, A, B, C, D, E, F, G, H, I, J, K) \ + VAR10 (T, N, MAP, A, B, C, D, E, F, G, H, I, J) \ + VAR1 (T, N, MAP, K) +#define VAR12(T, N, MAP, A, B, C, D, E, F, G, H, I, J, K, L) \ + VAR11 (T, N, MAP, A, B, C, D, E, F, G, H, I, J, K) \ + VAR1 (T, N, MAP, L) + +/* BUILTIN_<ITERATOR> macros should expand to cover the same range of + modes as is given for each define_mode_iterator in + config/aarch64/iterators.md. 
*/ + +#define BUILTIN_DX(T, N, MAP) \ + VAR2 (T, N, MAP, di, df) +#define BUILTIN_GPF(T, N, MAP) \ + VAR2 (T, N, MAP, sf, df) +#define BUILTIN_SDQ_I(T, N, MAP) \ + VAR4 (T, N, MAP, qi, hi, si, di) +#define BUILTIN_SD_HSI(T, N, MAP) \ + VAR2 (T, N, MAP, hi, si) +#define BUILTIN_V2F(T, N, MAP) \ + VAR2 (T, N, MAP, v2sf, v2df) +#define BUILTIN_VALL(T, N, MAP) \ + VAR10 (T, N, MAP, v8qi, v16qi, v4hi, v8hi, v2si, \ + v4si, v2di, v2sf, v4sf, v2df) +#define BUILTIN_VALLDI(T, N, MAP) \ + VAR11 (T, N, MAP, v8qi, v16qi, v4hi, v8hi, v2si, \ + v4si, v2di, v2sf, v4sf, v2df, di) +#define BUILTIN_VALLDIF(T, N, MAP) \ + VAR12 (T, N, MAP, v8qi, v16qi, v4hi, v8hi, v2si, \ + v4si, v2di, v2sf, v4sf, v2df, di, df) +#define BUILTIN_VB(T, N, MAP) \ + VAR2 (T, N, MAP, v8qi, v16qi) +#define BUILTIN_VD(T, N, MAP) \ + VAR4 (T, N, MAP, v8qi, v4hi, v2si, v2sf) +#define BUILTIN_VDC(T, N, MAP) \ + VAR6 (T, N, MAP, v8qi, v4hi, v2si, v2sf, di, df) +#define BUILTIN_VDIC(T, N, MAP) \ + VAR3 (T, N, MAP, v8qi, v4hi, v2si) +#define BUILTIN_VDN(T, N, MAP) \ + VAR3 (T, N, MAP, v4hi, v2si, di) +#define BUILTIN_VDQ(T, N, MAP) \ + VAR7 (T, N, MAP, v8qi, v16qi, v4hi, v8hi, v2si, v4si, v2di) +#define BUILTIN_VDQF(T, N, MAP) \ + VAR3 (T, N, MAP, v2sf, v4sf, v2df) +#define BUILTIN_VDQH(T, N, MAP) \ + VAR2 (T, N, MAP, v4hi, v8hi) +#define BUILTIN_VDQHS(T, N, MAP) \ + VAR4 (T, N, MAP, v4hi, v8hi, v2si, v4si) +#define BUILTIN_VDQIF(T, N, MAP) \ + VAR9 (T, N, MAP, v8qi, v16qi, v4hi, v8hi, v2si, v4si, v2sf, v4sf, v2df) +#define BUILTIN_VDQM(T, N, MAP) \ + VAR6 (T, N, MAP, v8qi, v16qi, v4hi, v8hi, v2si, v4si) +#define BUILTIN_VDQV(T, N, MAP) \ + VAR5 (T, N, MAP, v8qi, v16qi, v4hi, v8hi, v4si) +#define BUILTIN_VDQQH(T, N, MAP) \ + VAR4 (T, N, MAP, v8qi, v16qi, v4hi, v8hi) +#define BUILTIN_VDQ_BHSI(T, N, MAP) \ + VAR6 (T, N, MAP, v8qi, v16qi, v4hi, v8hi, v2si, v4si) +#define BUILTIN_VDQ_I(T, N, MAP) \ + VAR7 (T, N, MAP, v8qi, v16qi, v4hi, v8hi, v2si, v4si, v2di) +#define BUILTIN_VDW(T, N, MAP) \ + VAR3 (T, N, MAP, v8qi, v4hi, v2si) +#define BUILTIN_VD_BHSI(T, N, MAP) \ + VAR3 (T, N, MAP, v8qi, v4hi, v2si) +#define BUILTIN_VD_HSI(T, N, MAP) \ + VAR2 (T, N, MAP, v4hi, v2si) +#define BUILTIN_VD_RE(T, N, MAP) \ + VAR6 (T, N, MAP, v8qi, v4hi, v2si, v2sf, di, df) +#define BUILTIN_VQ(T, N, MAP) \ + VAR6 (T, N, MAP, v16qi, v8hi, v4si, v2di, v4sf, v2df) +#define BUILTIN_VQN(T, N, MAP) \ + VAR3 (T, N, MAP, v8hi, v4si, v2di) +#define BUILTIN_VQW(T, N, MAP) \ + VAR3 (T, N, MAP, v16qi, v8hi, v4si) +#define BUILTIN_VQ_HSI(T, N, MAP) \ + VAR2 (T, N, MAP, v8hi, v4si) +#define BUILTIN_VQ_S(T, N, MAP) \ + VAR6 (T, N, MAP, v8qi, v16qi, v4hi, v8hi, v2si, v4si) +#define BUILTIN_VSDQ_HSI(T, N, MAP) \ + VAR6 (T, N, MAP, v4hi, v8hi, v2si, v4si, hi, si) +#define BUILTIN_VSDQ_I(T, N, MAP) \ + VAR11 (T, N, MAP, v8qi, v16qi, v4hi, v8hi, v2si, v4si, v2di, qi, hi, si, di) +#define BUILTIN_VSDQ_I_BHSI(T, N, MAP) \ + VAR10 (T, N, MAP, v8qi, v16qi, v4hi, v8hi, v2si, v4si, v2di, qi, hi, si) +#define BUILTIN_VSDQ_I_DI(T, N, MAP) \ + VAR8 (T, N, MAP, v8qi, v16qi, v4hi, v8hi, v2si, v4si, v2di, di) +#define BUILTIN_VSD_HSI(T, N, MAP) \ + VAR4 (T, N, MAP, v4hi, v2si, hi, si) +#define BUILTIN_VSQN_HSDI(T, N, MAP) \ + VAR6 (T, N, MAP, v8hi, v4si, v2di, hi, si, di) +#define BUILTIN_VSTRUCT(T, N, MAP) \ + VAR3 (T, N, MAP, oi, ci, xi) + +static aarch64_simd_builtin_datum aarch64_simd_builtin_data[] = { +#include "aarch64-simd-builtins.def" +}; + +#undef VAR1 +#define VAR1(T, N, MAP, A) \ + AARCH64_SIMD_BUILTIN_##T##_##N##A, + +enum aarch64_builtins +{ + AARCH64_BUILTIN_MIN, + 
AARCH64_SIMD_BUILTIN_BASE, +#include "aarch64-simd-builtins.def" + AARCH64_SIMD_BUILTIN_MAX = AARCH64_SIMD_BUILTIN_BASE + + ARRAY_SIZE (aarch64_simd_builtin_data), + AARCH64_BUILTIN_MAX +}; + +static GTY(()) tree aarch64_builtin_decls[AARCH64_BUILTIN_MAX]; + +#define NUM_DREG_TYPES 6 +#define NUM_QREG_TYPES 6 + +/* Return a tree for a signed or unsigned argument of either + the mode specified by MODE, or the inner mode of MODE. */ +tree +aarch64_build_scalar_type (enum machine_mode mode, + bool unsigned_p, + bool poly_p) +{ +#undef INT_TYPES +#define INT_TYPES \ + AARCH64_TYPE_BUILDER (QI) \ + AARCH64_TYPE_BUILDER (HI) \ + AARCH64_TYPE_BUILDER (SI) \ + AARCH64_TYPE_BUILDER (DI) \ + AARCH64_TYPE_BUILDER (EI) \ + AARCH64_TYPE_BUILDER (OI) \ + AARCH64_TYPE_BUILDER (CI) \ + AARCH64_TYPE_BUILDER (XI) \ + AARCH64_TYPE_BUILDER (TI) \ + +/* Statically declare all the possible types we might need. */ +#undef AARCH64_TYPE_BUILDER +#define AARCH64_TYPE_BUILDER(X) \ + static tree X##_aarch64_type_node_p = NULL; \ + static tree X##_aarch64_type_node_s = NULL; \ + static tree X##_aarch64_type_node_u = NULL; + + INT_TYPES + + static tree float_aarch64_type_node = NULL; + static tree double_aarch64_type_node = NULL; + + gcc_assert (!VECTOR_MODE_P (mode)); + +/* If we've already initialised this type, don't initialise it again, + otherwise ask for a new type of the correct size. */ +#undef AARCH64_TYPE_BUILDER +#define AARCH64_TYPE_BUILDER(X) \ + case X##mode: \ + if (unsigned_p) \ + return (X##_aarch64_type_node_u \ + ? X##_aarch64_type_node_u \ + : X##_aarch64_type_node_u \ + = make_unsigned_type (GET_MODE_PRECISION (mode))); \ + else if (poly_p) \ + return (X##_aarch64_type_node_p \ + ? X##_aarch64_type_node_p \ + : X##_aarch64_type_node_p \ + = make_unsigned_type (GET_MODE_PRECISION (mode))); \ + else \ + return (X##_aarch64_type_node_s \ + ? X##_aarch64_type_node_s \ + : X##_aarch64_type_node_s \ + = make_signed_type (GET_MODE_PRECISION (mode))); \ + break; + + switch (mode) + { + INT_TYPES + case SFmode: + if (!float_aarch64_type_node) + { + float_aarch64_type_node = make_node (REAL_TYPE); + TYPE_PRECISION (float_aarch64_type_node) = FLOAT_TYPE_SIZE; + layout_type (float_aarch64_type_node); + } + return float_aarch64_type_node; + break; + case DFmode: + if (!double_aarch64_type_node) + { + double_aarch64_type_node = make_node (REAL_TYPE); + TYPE_PRECISION (double_aarch64_type_node) = DOUBLE_TYPE_SIZE; + layout_type (double_aarch64_type_node); + } + return double_aarch64_type_node; + break; + default: + gcc_unreachable (); + } +} + +tree +aarch64_build_vector_type (enum machine_mode mode, + bool unsigned_p, + bool poly_p) +{ + tree eltype; + +#define VECTOR_TYPES \ + AARCH64_TYPE_BUILDER (V16QI) \ + AARCH64_TYPE_BUILDER (V8HI) \ + AARCH64_TYPE_BUILDER (V4SI) \ + AARCH64_TYPE_BUILDER (V2DI) \ + AARCH64_TYPE_BUILDER (V8QI) \ + AARCH64_TYPE_BUILDER (V4HI) \ + AARCH64_TYPE_BUILDER (V2SI) \ + \ + AARCH64_TYPE_BUILDER (V4SF) \ + AARCH64_TYPE_BUILDER (V2DF) \ + AARCH64_TYPE_BUILDER (V2SF) \ +/* Declare our "cache" of values. */ +#undef AARCH64_TYPE_BUILDER +#define AARCH64_TYPE_BUILDER(X) \ + static tree X##_aarch64_type_node_s = NULL; \ + static tree X##_aarch64_type_node_u = NULL; \ + static tree X##_aarch64_type_node_p = NULL; + + VECTOR_TYPES + + gcc_assert (VECTOR_MODE_P (mode)); + +#undef AARCH64_TYPE_BUILDER +#define AARCH64_TYPE_BUILDER(X) \ + case X##mode: \ + if (unsigned_p) \ + return X##_aarch64_type_node_u \ + ? 
X##_aarch64_type_node_u \ + : X##_aarch64_type_node_u \ + = build_vector_type_for_mode (aarch64_build_scalar_type \ + (GET_MODE_INNER (mode), \ + unsigned_p, poly_p), mode); \ + else if (poly_p) \ + return X##_aarch64_type_node_p \ + ? X##_aarch64_type_node_p \ + : X##_aarch64_type_node_p \ + = build_vector_type_for_mode (aarch64_build_scalar_type \ + (GET_MODE_INNER (mode), \ + unsigned_p, poly_p), mode); \ + else \ + return X##_aarch64_type_node_s \ + ? X##_aarch64_type_node_s \ + : X##_aarch64_type_node_s \ + = build_vector_type_for_mode (aarch64_build_scalar_type \ + (GET_MODE_INNER (mode), \ + unsigned_p, poly_p), mode); \ + break; + + switch (mode) + { + default: + eltype = aarch64_build_scalar_type (GET_MODE_INNER (mode), + unsigned_p, poly_p); + return build_vector_type_for_mode (eltype, mode); + break; + VECTOR_TYPES + } +} + +tree +aarch64_build_type (enum machine_mode mode, bool unsigned_p, bool poly_p) +{ + if (VECTOR_MODE_P (mode)) + return aarch64_build_vector_type (mode, unsigned_p, poly_p); + else + return aarch64_build_scalar_type (mode, unsigned_p, poly_p); +} + +tree +aarch64_build_signed_type (enum machine_mode mode) +{ + return aarch64_build_type (mode, false, false); +} + +tree +aarch64_build_unsigned_type (enum machine_mode mode) +{ + return aarch64_build_type (mode, true, false); +} + +tree +aarch64_build_poly_type (enum machine_mode mode) +{ + return aarch64_build_type (mode, false, true); +} + +static void +aarch64_init_simd_builtins (void) +{ + unsigned int i, fcode = AARCH64_SIMD_BUILTIN_BASE + 1; + + /* Signed scalar type nodes. */ + tree aarch64_simd_intQI_type_node = aarch64_build_signed_type (QImode); + tree aarch64_simd_intHI_type_node = aarch64_build_signed_type (HImode); + tree aarch64_simd_intSI_type_node = aarch64_build_signed_type (SImode); + tree aarch64_simd_intDI_type_node = aarch64_build_signed_type (DImode); + tree aarch64_simd_intTI_type_node = aarch64_build_signed_type (TImode); + tree aarch64_simd_intEI_type_node = aarch64_build_signed_type (EImode); + tree aarch64_simd_intOI_type_node = aarch64_build_signed_type (OImode); + tree aarch64_simd_intCI_type_node = aarch64_build_signed_type (CImode); + tree aarch64_simd_intXI_type_node = aarch64_build_signed_type (XImode); + + /* Unsigned scalar type nodes. */ + tree aarch64_simd_intUQI_type_node = aarch64_build_unsigned_type (QImode); + tree aarch64_simd_intUHI_type_node = aarch64_build_unsigned_type (HImode); + tree aarch64_simd_intUSI_type_node = aarch64_build_unsigned_type (SImode); + tree aarch64_simd_intUDI_type_node = aarch64_build_unsigned_type (DImode); + + /* Poly scalar type nodes. */ + tree aarch64_simd_polyQI_type_node = aarch64_build_poly_type (QImode); + tree aarch64_simd_polyHI_type_node = aarch64_build_poly_type (HImode); + tree aarch64_simd_polyDI_type_node = aarch64_build_poly_type (DImode); + tree aarch64_simd_polyTI_type_node = aarch64_build_poly_type (TImode); + + /* Float type nodes. */ + tree aarch64_simd_float_type_node = aarch64_build_signed_type (SFmode); + tree aarch64_simd_double_type_node = aarch64_build_signed_type (DFmode); + + /* Define typedefs which exactly correspond to the modes we are basing vector + types on. If you change these names you'll need to change + the table used by aarch64_mangle_type too. 
*/ + (*lang_hooks.types.register_builtin_type) (aarch64_simd_intQI_type_node, + "__builtin_aarch64_simd_qi"); + (*lang_hooks.types.register_builtin_type) (aarch64_simd_intHI_type_node, + "__builtin_aarch64_simd_hi"); + (*lang_hooks.types.register_builtin_type) (aarch64_simd_intSI_type_node, + "__builtin_aarch64_simd_si"); + (*lang_hooks.types.register_builtin_type) (aarch64_simd_float_type_node, + "__builtin_aarch64_simd_sf"); + (*lang_hooks.types.register_builtin_type) (aarch64_simd_intDI_type_node, + "__builtin_aarch64_simd_di"); + (*lang_hooks.types.register_builtin_type) (aarch64_simd_double_type_node, + "__builtin_aarch64_simd_df"); + (*lang_hooks.types.register_builtin_type) (aarch64_simd_polyQI_type_node, + "__builtin_aarch64_simd_poly8"); + (*lang_hooks.types.register_builtin_type) (aarch64_simd_polyHI_type_node, + "__builtin_aarch64_simd_poly16"); + (*lang_hooks.types.register_builtin_type) (aarch64_simd_polyDI_type_node, + "__builtin_aarch64_simd_poly64"); + (*lang_hooks.types.register_builtin_type) (aarch64_simd_polyTI_type_node, + "__builtin_aarch64_simd_poly128"); + (*lang_hooks.types.register_builtin_type) (aarch64_simd_intTI_type_node, + "__builtin_aarch64_simd_ti"); + (*lang_hooks.types.register_builtin_type) (aarch64_simd_intEI_type_node, + "__builtin_aarch64_simd_ei"); + (*lang_hooks.types.register_builtin_type) (aarch64_simd_intOI_type_node, + "__builtin_aarch64_simd_oi"); + (*lang_hooks.types.register_builtin_type) (aarch64_simd_intCI_type_node, + "__builtin_aarch64_simd_ci"); + (*lang_hooks.types.register_builtin_type) (aarch64_simd_intXI_type_node, + "__builtin_aarch64_simd_xi"); + + /* Unsigned integer types for various mode sizes. */ + (*lang_hooks.types.register_builtin_type) (aarch64_simd_intUQI_type_node, + "__builtin_aarch64_simd_uqi"); + (*lang_hooks.types.register_builtin_type) (aarch64_simd_intUHI_type_node, + "__builtin_aarch64_simd_uhi"); + (*lang_hooks.types.register_builtin_type) (aarch64_simd_intUSI_type_node, + "__builtin_aarch64_simd_usi"); + (*lang_hooks.types.register_builtin_type) (aarch64_simd_intUDI_type_node, + "__builtin_aarch64_simd_udi"); + + for (i = 0; i < ARRAY_SIZE (aarch64_simd_builtin_data); i++, fcode++) + { + bool print_type_signature_p = false; + char type_signature[SIMD_MAX_BUILTIN_ARGS] = { 0 }; + aarch64_simd_builtin_datum *d = &aarch64_simd_builtin_data[i]; + const char *const modenames[] = + { + "v8qi", "v4hi", "v2si", "v2sf", "di", "df", + "v16qi", "v8hi", "v4si", "v4sf", "v2di", "v2df", + "ti", "ei", "oi", "xi", "si", "sf", "hi", "qi" + }; + const enum machine_mode modes[] = + { + V8QImode, V4HImode, V2SImode, V2SFmode, DImode, DFmode, + V16QImode, V8HImode, V4SImode, V4SFmode, V2DImode, + V2DFmode, TImode, EImode, OImode, XImode, SImode, + SFmode, HImode, QImode + }; + char namebuf[60]; + tree ftype = NULL; + tree fndecl = NULL; + + gcc_assert (ARRAY_SIZE (modenames) == T_MAX); + + d->fcode = fcode; + + /* We must track two variables here. op_num is + the operand number as in the RTL pattern. This is + required to access the mode (e.g. V4SF mode) of the + argument, from which the base type can be derived. + arg_num is an index in to the qualifiers data, which + gives qualifiers to the type (e.g. const unsigned). + The reason these two variables may differ by one is the + void return type. While all return types take the 0th entry + in the qualifiers array, there is no operand for them in the + RTL pattern. */ + int op_num = insn_data[d->code].n_operands - 1; + int arg_num = d->qualifiers[0] & qualifier_void + ? 
op_num + 1 + : op_num; + tree return_type = void_type_node, args = void_list_node; + tree eltype; + + /* Build a function type directly from the insn_data for this + builtin. The build_function_type () function takes care of + removing duplicates for us. */ + for (; op_num >= 0; arg_num--, op_num--) + { + enum machine_mode op_mode = insn_data[d->code].operand[op_num].mode; + enum aarch64_type_qualifiers qualifiers = d->qualifiers[arg_num]; + + if (qualifiers & qualifier_unsigned) + { + type_signature[arg_num] = 'u'; + print_type_signature_p = true; + } + else if (qualifiers & qualifier_poly) + { + type_signature[arg_num] = 'p'; + print_type_signature_p = true; + } + else + type_signature[arg_num] = 's'; + + /* Skip an internal operand for vget_{low, high}. */ + if (qualifiers & qualifier_internal) + continue; + + /* Some builtins have different user-facing types + for certain arguments, encoded in d->mode. */ + if (qualifiers & qualifier_map_mode) + op_mode = modes[d->mode]; + + /* For pointers, we want a pointer to the basic type + of the vector. */ + if (qualifiers & qualifier_pointer && VECTOR_MODE_P (op_mode)) + op_mode = GET_MODE_INNER (op_mode); + + eltype = aarch64_build_type (op_mode, + qualifiers & qualifier_unsigned, + qualifiers & qualifier_poly); + + /* Add qualifiers. */ + if (qualifiers & qualifier_const) + eltype = build_qualified_type (eltype, TYPE_QUAL_CONST); + + if (qualifiers & qualifier_pointer) + eltype = build_pointer_type (eltype); + + /* If we have reached arg_num == 0, we are at a non-void + return type. Otherwise, we are still processing + arguments. */ + if (arg_num == 0) + return_type = eltype; + else + args = tree_cons (NULL_TREE, eltype, args); + } + + ftype = build_function_type (return_type, args); + + gcc_assert (ftype != NULL); + + if (print_type_signature_p) + snprintf (namebuf, sizeof (namebuf), "__builtin_aarch64_%s%s_%s", + d->name, modenames[d->mode], type_signature); + else + snprintf (namebuf, sizeof (namebuf), "__builtin_aarch64_%s%s", + d->name, modenames[d->mode]); + + fndecl = add_builtin_function (namebuf, ftype, fcode, BUILT_IN_MD, + NULL, NULL_TREE); + aarch64_builtin_decls[fcode] = fndecl; + } +} + +void +aarch64_init_builtins (void) +{ + if (TARGET_SIMD) + aarch64_init_simd_builtins (); +} + +tree +aarch64_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED) +{ + if (code >= AARCH64_BUILTIN_MAX) + return error_mark_node; + + return aarch64_builtin_decls[code]; +} + +typedef enum +{ + SIMD_ARG_COPY_TO_REG, + SIMD_ARG_CONSTANT, + SIMD_ARG_STOP +} builtin_simd_arg; + +static rtx +aarch64_simd_expand_args (rtx target, int icode, int have_retval, + tree exp, ...) 
+{ + va_list ap; + rtx pat; + tree arg[SIMD_MAX_BUILTIN_ARGS]; + rtx op[SIMD_MAX_BUILTIN_ARGS]; + enum machine_mode tmode = insn_data[icode].operand[0].mode; + enum machine_mode mode[SIMD_MAX_BUILTIN_ARGS]; + int argc = 0; + + if (have_retval + && (!target + || GET_MODE (target) != tmode + || !(*insn_data[icode].operand[0].predicate) (target, tmode))) + target = gen_reg_rtx (tmode); + + va_start (ap, exp); + + for (;;) + { + builtin_simd_arg thisarg = (builtin_simd_arg) va_arg (ap, int); + + if (thisarg == SIMD_ARG_STOP) + break; + else + { + arg[argc] = CALL_EXPR_ARG (exp, argc); + op[argc] = expand_normal (arg[argc]); + mode[argc] = insn_data[icode].operand[argc + have_retval].mode; + + switch (thisarg) + { + case SIMD_ARG_COPY_TO_REG: + if (POINTER_TYPE_P (TREE_TYPE (arg[argc]))) + op[argc] = convert_memory_address (Pmode, op[argc]); + /*gcc_assert (GET_MODE (op[argc]) == mode[argc]); */ + if (!(*insn_data[icode].operand[argc + have_retval].predicate) + (op[argc], mode[argc])) + op[argc] = copy_to_mode_reg (mode[argc], op[argc]); + break; + + case SIMD_ARG_CONSTANT: + if (!(*insn_data[icode].operand[argc + have_retval].predicate) + (op[argc], mode[argc])) + error_at (EXPR_LOCATION (exp), "incompatible type for argument %d, " + "expected %<const int%>", argc + 1); + break; + + case SIMD_ARG_STOP: + gcc_unreachable (); + } + + argc++; + } + } + + va_end (ap); + + if (have_retval) + switch (argc) + { + case 1: + pat = GEN_FCN (icode) (target, op[0]); + break; + + case 2: + pat = GEN_FCN (icode) (target, op[0], op[1]); + break; + + case 3: + pat = GEN_FCN (icode) (target, op[0], op[1], op[2]); + break; + + case 4: + pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3]); + break; + + case 5: + pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3], op[4]); + break; + + default: + gcc_unreachable (); + } + else + switch (argc) + { + case 1: + pat = GEN_FCN (icode) (op[0]); + break; + + case 2: + pat = GEN_FCN (icode) (op[0], op[1]); + break; + + case 3: + pat = GEN_FCN (icode) (op[0], op[1], op[2]); + break; + + case 4: + pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]); + break; + + case 5: + pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4]); + break; + + default: + gcc_unreachable (); + } + + if (!pat) + return 0; + + emit_insn (pat); + + return target; +} + +/* Expand an AArch64 AdvSIMD builtin(intrinsic). */ +rtx +aarch64_simd_expand_builtin (int fcode, tree exp, rtx target) +{ + aarch64_simd_builtin_datum *d = + &aarch64_simd_builtin_data[fcode - (AARCH64_SIMD_BUILTIN_BASE + 1)]; + enum insn_code icode = d->code; + builtin_simd_arg args[SIMD_MAX_BUILTIN_ARGS]; + int num_args = insn_data[d->code].n_operands; + int is_void = 0; + int k; + + is_void = !!(d->qualifiers[0] & qualifier_void); + + num_args += is_void; + + for (k = 1; k < num_args; k++) + { + /* We have four arrays of data, each indexed in a different fashion. + qualifiers - element 0 always describes the function return type. + operands - element 0 is either the operand for return value (if + the function has a non-void return type) or the operand for the + first argument. + expr_args - element 0 always holds the first argument. + args - element 0 is always used for the return type. 
*/ + int qualifiers_k = k; + int operands_k = k - is_void; + int expr_args_k = k - 1; + + if (d->qualifiers[qualifiers_k] & qualifier_immediate) + args[k] = SIMD_ARG_CONSTANT; + else if (d->qualifiers[qualifiers_k] & qualifier_maybe_immediate) + { + rtx arg + = expand_normal (CALL_EXPR_ARG (exp, + (expr_args_k))); + /* Handle constants only if the predicate allows it. */ + bool op_const_int_p = + (CONST_INT_P (arg) + && (*insn_data[icode].operand[operands_k].predicate) + (arg, insn_data[icode].operand[operands_k].mode)); + args[k] = op_const_int_p ? SIMD_ARG_CONSTANT : SIMD_ARG_COPY_TO_REG; + } + else + args[k] = SIMD_ARG_COPY_TO_REG; + + } + args[k] = SIMD_ARG_STOP; + + /* The interface to aarch64_simd_expand_args expects a 0 if + the function is void, and a 1 if it is not. */ + return aarch64_simd_expand_args + (target, icode, !is_void, exp, + args[1], + args[2], + args[3], + args[4], + SIMD_ARG_STOP); +} + +/* Expand an expression EXP that calls a built-in function, + with result going to TARGET if that's convenient. */ +rtx +aarch64_expand_builtin (tree exp, + rtx target, + rtx subtarget ATTRIBUTE_UNUSED, + enum machine_mode mode ATTRIBUTE_UNUSED, + int ignore ATTRIBUTE_UNUSED) +{ + tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0); + int fcode = DECL_FUNCTION_CODE (fndecl); + + if (fcode >= AARCH64_SIMD_BUILTIN_BASE) + return aarch64_simd_expand_builtin (fcode, exp, target); + + return NULL_RTX; +} + +tree +aarch64_builtin_vectorized_function (tree fndecl, tree type_out, tree type_in) +{ + enum machine_mode in_mode, out_mode; + int in_n, out_n; + + if (TREE_CODE (type_out) != VECTOR_TYPE + || TREE_CODE (type_in) != VECTOR_TYPE) + return NULL_TREE; + + out_mode = TYPE_MODE (TREE_TYPE (type_out)); + out_n = TYPE_VECTOR_SUBPARTS (type_out); + in_mode = TYPE_MODE (TREE_TYPE (type_in)); + in_n = TYPE_VECTOR_SUBPARTS (type_in); + +#undef AARCH64_CHECK_BUILTIN_MODE +#define AARCH64_CHECK_BUILTIN_MODE(C, N) 1 +#define AARCH64_FIND_FRINT_VARIANT(N) \ + (AARCH64_CHECK_BUILTIN_MODE (2, D) \ + ? aarch64_builtin_decls[AARCH64_SIMD_BUILTIN_UNOP_##N##v2df] \ + : (AARCH64_CHECK_BUILTIN_MODE (4, S) \ + ? aarch64_builtin_decls[AARCH64_SIMD_BUILTIN_UNOP_##N##v4sf] \ + : (AARCH64_CHECK_BUILTIN_MODE (2, S) \ + ? 
aarch64_builtin_decls[AARCH64_SIMD_BUILTIN_UNOP_##N##v2sf] \ + : NULL_TREE))) + if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL) + { + enum built_in_function fn = DECL_FUNCTION_CODE (fndecl); + switch (fn) + { +#undef AARCH64_CHECK_BUILTIN_MODE +#define AARCH64_CHECK_BUILTIN_MODE(C, N) \ + (out_mode == N##Fmode && out_n == C \ + && in_mode == N##Fmode && in_n == C) + case BUILT_IN_FLOOR: + case BUILT_IN_FLOORF: + return AARCH64_FIND_FRINT_VARIANT (floor); + case BUILT_IN_CEIL: + case BUILT_IN_CEILF: + return AARCH64_FIND_FRINT_VARIANT (ceil); + case BUILT_IN_TRUNC: + case BUILT_IN_TRUNCF: + return AARCH64_FIND_FRINT_VARIANT (btrunc); + case BUILT_IN_ROUND: + case BUILT_IN_ROUNDF: + return AARCH64_FIND_FRINT_VARIANT (round); + case BUILT_IN_NEARBYINT: + case BUILT_IN_NEARBYINTF: + return AARCH64_FIND_FRINT_VARIANT (nearbyint); + case BUILT_IN_SQRT: + case BUILT_IN_SQRTF: + return AARCH64_FIND_FRINT_VARIANT (sqrt); +#undef AARCH64_CHECK_BUILTIN_MODE +#define AARCH64_CHECK_BUILTIN_MODE(C, N) \ + (out_mode == SImode && out_n == C \ + && in_mode == N##Imode && in_n == C) + case BUILT_IN_CLZ: + { + if (AARCH64_CHECK_BUILTIN_MODE (4, S)) + return aarch64_builtin_decls[AARCH64_SIMD_BUILTIN_UNOP_clzv4si]; + return NULL_TREE; + } +#undef AARCH64_CHECK_BUILTIN_MODE +#define AARCH64_CHECK_BUILTIN_MODE(C, N) \ + (out_mode == N##Imode && out_n == C \ + && in_mode == N##Fmode && in_n == C) + case BUILT_IN_LFLOOR: + case BUILT_IN_LFLOORF: + case BUILT_IN_LLFLOOR: + case BUILT_IN_IFLOORF: + { + enum aarch64_builtins builtin; + if (AARCH64_CHECK_BUILTIN_MODE (2, D)) + builtin = AARCH64_SIMD_BUILTIN_UNOP_lfloorv2dfv2di; + else if (AARCH64_CHECK_BUILTIN_MODE (4, S)) + builtin = AARCH64_SIMD_BUILTIN_UNOP_lfloorv4sfv4si; + else if (AARCH64_CHECK_BUILTIN_MODE (2, S)) + builtin = AARCH64_SIMD_BUILTIN_UNOP_lfloorv2sfv2si; + else + return NULL_TREE; + + return aarch64_builtin_decls[builtin]; + } + case BUILT_IN_LCEIL: + case BUILT_IN_LCEILF: + case BUILT_IN_LLCEIL: + case BUILT_IN_ICEILF: + { + enum aarch64_builtins builtin; + if (AARCH64_CHECK_BUILTIN_MODE (2, D)) + builtin = AARCH64_SIMD_BUILTIN_UNOP_lceilv2dfv2di; + else if (AARCH64_CHECK_BUILTIN_MODE (4, S)) + builtin = AARCH64_SIMD_BUILTIN_UNOP_lceilv4sfv4si; + else if (AARCH64_CHECK_BUILTIN_MODE (2, S)) + builtin = AARCH64_SIMD_BUILTIN_UNOP_lceilv2sfv2si; + else + return NULL_TREE; + + return aarch64_builtin_decls[builtin]; + } + case BUILT_IN_LROUND: + case BUILT_IN_IROUNDF: + { + enum aarch64_builtins builtin; + if (AARCH64_CHECK_BUILTIN_MODE (2, D)) + builtin = AARCH64_SIMD_BUILTIN_UNOP_lroundv2dfv2di; + else if (AARCH64_CHECK_BUILTIN_MODE (4, S)) + builtin = AARCH64_SIMD_BUILTIN_UNOP_lroundv4sfv4si; + else if (AARCH64_CHECK_BUILTIN_MODE (2, S)) + builtin = AARCH64_SIMD_BUILTIN_UNOP_lroundv2sfv2si; + else + return NULL_TREE; + + return aarch64_builtin_decls[builtin]; + } + + default: + return NULL_TREE; + } + } + + return NULL_TREE; +} + +#undef VAR1 +#define VAR1(T, N, MAP, A) \ + case AARCH64_SIMD_BUILTIN_##T##_##N##A: + +tree +aarch64_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED, tree *args, + bool ignore ATTRIBUTE_UNUSED) +{ + int fcode = DECL_FUNCTION_CODE (fndecl); + tree type = TREE_TYPE (TREE_TYPE (fndecl)); + + switch (fcode) + { + BUILTIN_VALLDI (UNOP, abs, 2) + return fold_build1 (ABS_EXPR, type, args[0]); + break; + BUILTIN_VALLDI (BINOP, cmge, 0) + return fold_build2 (GE_EXPR, type, args[0], args[1]); + break; + BUILTIN_VALLDI (BINOP, cmgt, 0) + return fold_build2 (GT_EXPR, type, args[0], args[1]); + break; + BUILTIN_VALLDI 
(BINOP, cmeq, 0) + return fold_build2 (EQ_EXPR, type, args[0], args[1]); + break; + BUILTIN_VSDQ_I_DI (BINOP, cmtst, 0) + { + tree and_node = fold_build2 (BIT_AND_EXPR, type, args[0], args[1]); + tree vec_zero_node = build_zero_cst (type); + return fold_build2 (NE_EXPR, type, and_node, vec_zero_node); + break; + } + VAR1 (UNOP, floatv2si, 2, v2sf) + VAR1 (UNOP, floatv4si, 2, v4sf) + VAR1 (UNOP, floatv2di, 2, v2df) + return fold_build1 (FLOAT_EXPR, type, args[0]); + default: + break; + } + + return NULL_TREE; +} + +bool +aarch64_gimple_fold_builtin (gimple_stmt_iterator *gsi) +{ + bool changed = false; + gimple stmt = gsi_stmt (*gsi); + tree call = gimple_call_fn (stmt); + tree fndecl; + gimple new_stmt = NULL; + if (call) + { + fndecl = gimple_call_fndecl (stmt); + if (fndecl) + { + int fcode = DECL_FUNCTION_CODE (fndecl); + int nargs = gimple_call_num_args (stmt); + tree *args = (nargs > 0 + ? gimple_call_arg_ptr (stmt, 0) + : &error_mark_node); + + switch (fcode) + { + BUILTIN_VALL (UNOP, reduc_splus_, 10) + new_stmt = gimple_build_assign_with_ops ( + REDUC_PLUS_EXPR, + gimple_call_lhs (stmt), + args[0], + NULL_TREE); + break; + BUILTIN_VDQIF (UNOP, reduc_smax_, 10) + new_stmt = gimple_build_assign_with_ops ( + REDUC_MAX_EXPR, + gimple_call_lhs (stmt), + args[0], + NULL_TREE); + break; + BUILTIN_VDQIF (UNOP, reduc_smin_, 10) + new_stmt = gimple_build_assign_with_ops ( + REDUC_MIN_EXPR, + gimple_call_lhs (stmt), + args[0], + NULL_TREE); + break; + + default: + break; + } + } + } + + if (new_stmt) + { + gsi_replace (gsi, new_stmt, true); + changed = true; + } + + return changed; +} + +#undef AARCH64_CHECK_BUILTIN_MODE +#undef AARCH64_FIND_FRINT_VARIANT +#undef BUILTIN_DX +#undef BUILTIN_SDQ_I +#undef BUILTIN_SD_HSI +#undef BUILTIN_V2F +#undef BUILTIN_VALL +#undef BUILTIN_VB +#undef BUILTIN_VD +#undef BUILTIN_VDC +#undef BUILTIN_VDIC +#undef BUILTIN_VDN +#undef BUILTIN_VDQ +#undef BUILTIN_VDQF +#undef BUILTIN_VDQH +#undef BUILTIN_VDQHS +#undef BUILTIN_VDQIF +#undef BUILTIN_VDQM +#undef BUILTIN_VDQV +#undef BUILTIN_VDQ_BHSI +#undef BUILTIN_VDQ_I +#undef BUILTIN_VDW +#undef BUILTIN_VD_BHSI +#undef BUILTIN_VD_HSI +#undef BUILTIN_VD_RE +#undef BUILTIN_VQ +#undef BUILTIN_VQN +#undef BUILTIN_VQW +#undef BUILTIN_VQ_HSI +#undef BUILTIN_VQ_S +#undef BUILTIN_VSDQ_HSI +#undef BUILTIN_VSDQ_I +#undef BUILTIN_VSDQ_I_BHSI +#undef BUILTIN_VSDQ_I_DI +#undef BUILTIN_VSD_HSI +#undef BUILTIN_VSQN_HSDI +#undef BUILTIN_VSTRUCT +#undef CF0 +#undef CF1 +#undef CF2 +#undef CF3 +#undef CF4 +#undef CF10 +#undef VAR1 +#undef VAR2 +#undef VAR3 +#undef VAR4 +#undef VAR5 +#undef VAR6 +#undef VAR7 +#undef VAR8 +#undef VAR9 +#undef VAR10 +#undef VAR11 + diff --git a/gcc-4.9/gcc/config/aarch64/aarch64-cores.def b/gcc-4.9/gcc/config/aarch64/aarch64-cores.def new file mode 100644 index 000000000..9319249e6 --- /dev/null +++ b/gcc-4.9/gcc/config/aarch64/aarch64-cores.def @@ -0,0 +1,42 @@ +/* Copyright (C) 2011-2014 Free Software Foundation, Inc. + Contributed by ARM Ltd. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING3. If not see + <http://www.gnu.org/licenses/>. */ + +/* This is a list of cores that implement AArch64. + + Before using #include to read this file, define a macro: + + AARCH64_CORE(CORE_NAME, CORE_IDENT, SCHEDULER_IDENT, ARCH, FLAGS, COSTS) + + The CORE_NAME is the name of the core, represented as a string constant. + The CORE_IDENT is the name of the core, represented as an identifier. + The SCHEDULER_IDENT is the name of the core for which scheduling decisions + will be made, represented as an identifier. + ARCH is the architecture revision implemented by the chip. + FLAGS are the bitwise-or of the traits that apply to that core. + This need not include flags implied by the architecture. + COSTS is the name of the rtx_costs routine to use. */ + +/* V8 Architecture Processors. */ + +AARCH64_CORE("cortex-a53", cortexa53, cortexa53, 8, AARCH64_FL_FPSIMD | AARCH64_FL_CRC | AARCH64_FL_CRYPTO, cortexa53) +AARCH64_CORE("cortex-a57", cortexa15, cortexa15, 8, AARCH64_FL_FPSIMD | AARCH64_FL_CRC | AARCH64_FL_CRYPTO, cortexa57) + +/* V8 big.LITTLE implementations. */ + +AARCH64_CORE("cortex-a57.cortex-a53", cortexa57cortexa53, cortexa53, 8, AARCH64_FL_FPSIMD | AARCH64_FL_CRC | AARCH64_FL_CRYPTO, cortexa57) diff --git a/gcc-4.9/gcc/config/aarch64/aarch64-elf-raw.h b/gcc-4.9/gcc/config/aarch64/aarch64-elf-raw.h new file mode 100644 index 000000000..adec7e7ba --- /dev/null +++ b/gcc-4.9/gcc/config/aarch64/aarch64-elf-raw.h @@ -0,0 +1,33 @@ +/* Machine description for AArch64 architecture. + Copyright (C) 2009-2014 Free Software Foundation, Inc. + Contributed by ARM Ltd. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING3. If not see + <http://www.gnu.org/licenses/>. */ + +/* Support for bare-metal builds. */ +#ifndef GCC_AARCH64_ELF_RAW_H +#define GCC_AARCH64_ELF_RAW_H + +#define STARTFILE_SPEC " crti%O%s crtbegin%O%s crt0%O%s" +#define ENDFILE_SPEC " crtend%O%s crtn%O%s" + +#ifndef LINK_SPEC +#define LINK_SPEC "%{mbig-endian:-EB} %{mlittle-endian:-EL} -X \ + -maarch64elf%{mabi=ilp32*:32}%{mbig-endian:b}" +#endif + +#endif /* GCC_AARCH64_ELF_RAW_H */ diff --git a/gcc-4.9/gcc/config/aarch64/aarch64-elf.h b/gcc-4.9/gcc/config/aarch64/aarch64-elf.h new file mode 100644 index 000000000..15ab630de --- /dev/null +++ b/gcc-4.9/gcc/config/aarch64/aarch64-elf.h @@ -0,0 +1,161 @@ +/* Machine description for AArch64 architecture. + Copyright (C) 2009-2014 Free Software Foundation, Inc. + Contributed by ARM Ltd. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + General Public License for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING3. If not see + <http://www.gnu.org/licenses/>. */ + +#ifndef GCC_AARCH64_ELF_H +#define GCC_AARCH64_ELF_H + + +#define ASM_OUTPUT_LABELREF(FILE, NAME) \ + aarch64_asm_output_labelref (FILE, NAME) + +#define ASM_OUTPUT_DEF(FILE, NAME1, NAME2) \ + do \ + { \ + assemble_name (FILE, NAME1); \ + fputs (" = ", FILE); \ + assemble_name (FILE, NAME2); \ + fputc ('\n', FILE); \ + } while (0) + +#define TEXT_SECTION_ASM_OP "\t.text" +#define DATA_SECTION_ASM_OP "\t.data" +#define BSS_SECTION_ASM_OP "\t.bss" + +#define CTORS_SECTION_ASM_OP "\t.section\t.init_array,\"aw\",%init_array" +#define DTORS_SECTION_ASM_OP "\t.section\t.fini_array,\"aw\",%fini_array" + +#undef INIT_SECTION_ASM_OP +#undef FINI_SECTION_ASM_OP +#define INIT_ARRAY_SECTION_ASM_OP CTORS_SECTION_ASM_OP +#define FINI_ARRAY_SECTION_ASM_OP DTORS_SECTION_ASM_OP + +/* Since we use .init_array/.fini_array we don't need the markers at + the start and end of the ctors/dtors arrays. */ +#define CTOR_LIST_BEGIN asm (CTORS_SECTION_ASM_OP) +#define CTOR_LIST_END /* empty */ +#define DTOR_LIST_BEGIN asm (DTORS_SECTION_ASM_OP) +#define DTOR_LIST_END /* empty */ + +#undef TARGET_ASM_CONSTRUCTOR +#define TARGET_ASM_CONSTRUCTOR aarch64_elf_asm_constructor + +#undef TARGET_ASM_DESTRUCTOR +#define TARGET_ASM_DESTRUCTOR aarch64_elf_asm_destructor + +#ifdef HAVE_GAS_MAX_SKIP_P2ALIGN +/* Support for -falign-* switches. Use .p2align to ensure that code + sections are padded with NOP instructions, rather than zeros. */ +#define ASM_OUTPUT_MAX_SKIP_ALIGN(FILE, LOG, MAX_SKIP) \ + do \ + { \ + if ((LOG) != 0) \ + { \ + if ((MAX_SKIP) == 0) \ + fprintf ((FILE), "\t.p2align %d\n", (int) (LOG)); \ + else \ + fprintf ((FILE), "\t.p2align %d,,%d\n", \ + (int) (LOG), (int) (MAX_SKIP)); \ + } \ + } while (0) + +#endif /* HAVE_GAS_MAX_SKIP_P2ALIGN */ + +#define JUMP_TABLES_IN_TEXT_SECTION 0 + +#define ASM_OUTPUT_ADDR_DIFF_ELT(STREAM, BODY, VALUE, REL) \ + do { \ + switch (GET_MODE (BODY)) \ + { \ + case QImode: \ + asm_fprintf (STREAM, "\t.byte\t(%LL%d - %LLrtx%d) / 4\n", \ + VALUE, REL); \ + break; \ + case HImode: \ + asm_fprintf (STREAM, "\t.2byte\t(%LL%d - %LLrtx%d) / 4\n", \ + VALUE, REL); \ + break; \ + case SImode: \ + case DImode: /* See comment in aarch64_output_casesi. */ \ + asm_fprintf (STREAM, "\t.word\t(%LL%d - %LLrtx%d) / 4\n", \ + VALUE, REL); \ + break; \ + default: \ + gcc_unreachable (); \ + } \ + } while (0) + +#define ASM_OUTPUT_ALIGN(STREAM, POWER) \ + fprintf(STREAM, "\t.align\t%d\n", (int)POWER) + +#define ASM_COMMENT_START "//" + +#define LOCAL_LABEL_PREFIX "." +#define USER_LABEL_PREFIX "" + +#define GLOBAL_ASM_OP "\t.global\t" + +#ifdef TARGET_BIG_ENDIAN_DEFAULT +#define ENDIAN_SPEC "-mbig-endian" +#else +#define ENDIAN_SPEC "-mlittle-endian" +#endif + +#if TARGET_DATA_MODEL == 1 +#define ABI_SPEC "-mabi=lp64" +#define MULTILIB_DEFAULTS { "mabi=lp64" } +#elif TARGET_DATA_MODEL == 2 +#define ABI_SPEC "-mabi=ilp32" +#define MULTILIB_DEFAULTS { "mabi=ilp32" } +#else +#error "Unknown or undefined TARGET_DATA_MODEL!" +#endif + +/* Force the default endianness and ABI flags onto the command line + in order to make the other specs easier to write. 
*/ +#undef DRIVER_SELF_SPECS +#define DRIVER_SELF_SPECS \ + " %{!mbig-endian:%{!mlittle-endian:" ENDIAN_SPEC "}}" \ + " %{!mabi=*:" ABI_SPEC "}" + +#ifdef HAVE_AS_MABI_OPTION +#define ASM_MABI_SPEC "%{mabi=*:-mabi=%*}" +#else +#define ASM_MABI_SPEC "%{mabi=lp64:}" +#endif + +#ifndef ASM_SPEC +#define ASM_SPEC "\ +%{mbig-endian:-EB} \ +%{mlittle-endian:-EL} \ +%{march=*:-march=%*} \ +%(asm_cpu_spec)" \ +ASM_MABI_SPEC +#endif + +#undef TYPE_OPERAND_FMT +#define TYPE_OPERAND_FMT "%%%s" + +#undef TARGET_ASM_NAMED_SECTION +#define TARGET_ASM_NAMED_SECTION aarch64_elf_asm_named_section + +/* Stabs debug not required. */ +#undef DBX_DEBUGGING_INFO + +#endif /* GCC_AARCH64_ELF_H */ diff --git a/gcc-4.9/gcc/config/aarch64/aarch64-linux.h b/gcc-4.9/gcc/config/aarch64/aarch64-linux.h new file mode 100644 index 000000000..a8f077156 --- /dev/null +++ b/gcc-4.9/gcc/config/aarch64/aarch64-linux.h @@ -0,0 +1,47 @@ +/* Machine description for AArch64 architecture. + Copyright (C) 2009-2014 Free Software Foundation, Inc. + Contributed by ARM Ltd. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING3. If not see + <http://www.gnu.org/licenses/>. */ + +#ifndef GCC_AARCH64_LINUX_H +#define GCC_AARCH64_LINUX_H + +#define GLIBC_DYNAMIC_LINKER "/lib/ld-linux-aarch64%{mbig-endian:_be}.so.1" + +#define CPP_SPEC "%{pthread:-D_REENTRANT}" + +#define LINUX_TARGET_LINK_SPEC "%{h*} \ + %{static:-Bstatic} \ + %{shared:-shared} \ + %{symbolic:-Bsymbolic} \ + %{rdynamic:-export-dynamic} \ + -dynamic-linker " GNU_USER_DYNAMIC_LINKER " \ + -X \ + %{mbig-endian:-EB} %{mlittle-endian:-EL} \ + -maarch64linux%{mbig-endian:b}" + +#define LINK_SPEC LINUX_TARGET_LINK_SPEC + +#define TARGET_OS_CPP_BUILTINS() \ + do \ + { \ + GNU_USER_TARGET_OS_CPP_BUILTINS(); \ + } \ + while (0) + +#endif /* GCC_AARCH64_LINUX_H */ diff --git a/gcc-4.9/gcc/config/aarch64/aarch64-modes.def b/gcc-4.9/gcc/config/aarch64/aarch64-modes.def new file mode 100644 index 000000000..1d2cc7679 --- /dev/null +++ b/gcc-4.9/gcc/config/aarch64/aarch64-modes.def @@ -0,0 +1,55 @@ +/* Machine description for AArch64 architecture. + Copyright (C) 2009-2014 Free Software Foundation, Inc. + Contributed by ARM Ltd. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING3. If not see + <http://www.gnu.org/licenses/>. */ + +CC_MODE (CCFP); +CC_MODE (CCFPE); +CC_MODE (CC_SWP); +CC_MODE (CC_ZESWP); /* zero-extend LHS (but swap to make it RHS). 
*/ +CC_MODE (CC_SESWP); /* sign-extend LHS (but swap to make it RHS). */ +CC_MODE (CC_NZ); /* Only N and Z bits of condition flags are valid. */ +CC_MODE (CC_Z); /* Only Z bit of condition flags is valid. */ + +/* Vector modes. */ +VECTOR_MODES (INT, 8); /* V8QI V4HI V2SI. */ +VECTOR_MODES (INT, 16); /* V16QI V8HI V4SI V2DI. */ +VECTOR_MODES (FLOAT, 8); /* V2SF. */ +VECTOR_MODES (FLOAT, 16); /* V4SF V2DF. */ + +/* Oct Int: 256-bit integer mode needed for 32-byte vector arguments. */ +INT_MODE (OI, 32); + +/* Opaque integer modes for 3, 6 or 8 Neon double registers (2 is + TImode). */ +INT_MODE (EI, 24); +INT_MODE (CI, 48); +INT_MODE (XI, 64); + +/* Vector modes for register lists. */ +VECTOR_MODES (INT, 32); /* V32QI V16HI V8SI V4DI. */ +VECTOR_MODES (FLOAT, 32); /* V8SF V4DF. */ + +VECTOR_MODES (INT, 48); /* V32QI V16HI V8SI V4DI. */ +VECTOR_MODES (FLOAT, 48); /* V8SF V4DF. */ + +VECTOR_MODES (INT, 64); /* V32QI V16HI V8SI V4DI. */ +VECTOR_MODES (FLOAT, 64); /* V8SF V4DF. */ + +/* Quad float: 128-bit floating mode for long doubles. */ +FLOAT_MODE (TF, 16, ieee_quad_format); diff --git a/gcc-4.9/gcc/config/aarch64/aarch64-option-extensions.def b/gcc-4.9/gcc/config/aarch64/aarch64-option-extensions.def new file mode 100644 index 000000000..1aa65d32a --- /dev/null +++ b/gcc-4.9/gcc/config/aarch64/aarch64-option-extensions.def @@ -0,0 +1,38 @@ +/* Copyright (C) 2012-2014 Free Software Foundation, Inc. + Contributed by ARM Ltd. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING3. If not see + <http://www.gnu.org/licenses/>. */ + +/* This is a list of ISA extentsions in AArch64. + + Before using #include to read this file, define a macro: + + AARCH64_OPT_EXTENSION(EXT_NAME, FLAGS_ON, FLAGS_OFF) + + EXT_NAME is the name of the extension, represented as a string constant. + FLAGS_ON are the bitwise-or of the features that the extension adds. + FLAGS_OFF are the bitwise-or of the features that the extension removes. */ + +/* V8 Architecture Extensions. + This list currently contains example extensions for CPUs that implement + AArch64, and therefore serves as a template for adding more CPUs in the + future. */ + +AARCH64_OPT_EXTENSION("fp", AARCH64_FL_FP, AARCH64_FL_FPSIMD | AARCH64_FL_CRYPTO) +AARCH64_OPT_EXTENSION("simd", AARCH64_FL_FPSIMD, AARCH64_FL_SIMD | AARCH64_FL_CRYPTO) +AARCH64_OPT_EXTENSION("crypto", AARCH64_FL_CRYPTO | AARCH64_FL_FPSIMD, AARCH64_FL_CRYPTO) +AARCH64_OPT_EXTENSION("crc", AARCH64_FL_CRC, AARCH64_FL_CRC) diff --git a/gcc-4.9/gcc/config/aarch64/aarch64-opts.h b/gcc-4.9/gcc/config/aarch64/aarch64-opts.h new file mode 100644 index 000000000..370931536 --- /dev/null +++ b/gcc-4.9/gcc/config/aarch64/aarch64-opts.h @@ -0,0 +1,64 @@ +/* Copyright (C) 2011-2014 Free Software Foundation, Inc. + Contributed by ARM Ltd. + + This file is part of GCC. 
+ + GCC is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published + by the Free Software Foundation; either version 3, or (at your + option) any later version. + + GCC is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public + License for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING3. If not see + <http://www.gnu.org/licenses/>. */ + +/* Definitions for option handling for AArch64. */ + +#ifndef GCC_AARCH64_OPTS_H +#define GCC_AARCH64_OPTS_H + +/* The various cores that implement AArch64. */ +enum aarch64_processor +{ +#define AARCH64_CORE(NAME, INTERNAL_IDENT, IDENT, ARCH, FLAGS, COSTS) \ + INTERNAL_IDENT, +#include "aarch64-cores.def" +#undef AARCH64_CORE + /* Used to indicate that no processor has been specified. */ + generic, + /* Used to mark the end of the processor table. */ + aarch64_none +}; + +/* TLS types. */ +enum aarch64_tls_type { + TLS_TRADITIONAL, + TLS_DESCRIPTORS +}; + +/* The code model defines the address generation strategy. + Most have a PIC and non-PIC variant. */ +enum aarch64_code_model { + /* Static code and data fit within a 1MB region. + Not fully implemented, mostly treated as SMALL. */ + AARCH64_CMODEL_TINY, + /* Static code, data and GOT/PLT fit within a 1MB region. + Not fully implemented, mostly treated as SMALL_PIC. */ + AARCH64_CMODEL_TINY_PIC, + /* Static code and data fit within a 4GB region. + The default non-PIC code model. */ + AARCH64_CMODEL_SMALL, + /* Static code, data and GOT/PLT fit within a 4GB region. + The default PIC code model. */ + AARCH64_CMODEL_SMALL_PIC, + /* No assumptions about addresses of code and data. + The PIC variant is not yet implemented. */ + AARCH64_CMODEL_LARGE +}; + +#endif diff --git a/gcc-4.9/gcc/config/aarch64/aarch64-protos.h b/gcc-4.9/gcc/config/aarch64/aarch64-protos.h new file mode 100644 index 000000000..5542f023b --- /dev/null +++ b/gcc-4.9/gcc/config/aarch64/aarch64-protos.h @@ -0,0 +1,292 @@ +/* Machine description for AArch64 architecture. + Copyright (C) 2009-2014 Free Software Foundation, Inc. + Contributed by ARM Ltd. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING3. If not see + <http://www.gnu.org/licenses/>. */ + + +#ifndef GCC_AARCH64_PROTOS_H +#define GCC_AARCH64_PROTOS_H + +/* + SYMBOL_CONTEXT_ADR + The symbol is used in a load-address operation. + SYMBOL_CONTEXT_MEM + The symbol is used as the address in a MEM. + */ +enum aarch64_symbol_context +{ + SYMBOL_CONTEXT_MEM, + SYMBOL_CONTEXT_ADR +}; + +/* SYMBOL_SMALL_ABSOLUTE: Generate symbol accesses through + high and lo relocs that calculate the base address using a PC + relative reloc. 
+ So to get the address of foo, we generate + adrp x0, foo + add x0, x0, :lo12:foo + + To load or store something to foo, we could use the corresponding + load store variants that generate an + ldr x0, [x0,:lo12:foo] + or + str x1, [x0, :lo12:foo] + + This corresponds to the small code model of the compiler. + + SYMBOL_SMALL_GOT: Similar to the one above but this + gives us the GOT entry of the symbol being referred to : + Thus calculating the GOT entry for foo is done using the + following sequence of instructions. The ADRP instruction + gets us to the page containing the GOT entry of the symbol + and the got_lo12 gets us the actual offset in it. + + adrp x0, :got:foo + ldr x0, [x0, :gotoff_lo12:foo] + + This corresponds to the small PIC model of the compiler. + + SYMBOL_SMALL_TLSGD + SYMBOL_SMALL_TLSDESC + SYMBOL_SMALL_GOTTPREL + SYMBOL_SMALL_TPREL + Each of of these represents a thread-local symbol, and corresponds to the + thread local storage relocation operator for the symbol being referred to. + + SYMBOL_TINY_ABSOLUTE + + Generate symbol accesses as a PC relative address using a single + instruction. To compute the address of symbol foo, we generate: + + ADR x0, foo + + SYMBOL_TINY_GOT + + Generate symbol accesses via the GOT using a single PC relative + instruction. To compute the address of symbol foo, we generate: + + ldr t0, :got:foo + + The value of foo can subsequently read using: + + ldrb t0, [t0] + + SYMBOL_FORCE_TO_MEM : Global variables are addressed using + constant pool. All variable addresses are spilled into constant + pools. The constant pools themselves are addressed using PC + relative accesses. This only works for the large code model. + */ +enum aarch64_symbol_type +{ + SYMBOL_SMALL_ABSOLUTE, + SYMBOL_SMALL_GOT, + SYMBOL_SMALL_TLSGD, + SYMBOL_SMALL_TLSDESC, + SYMBOL_SMALL_GOTTPREL, + SYMBOL_SMALL_TPREL, + SYMBOL_TINY_ABSOLUTE, + SYMBOL_TINY_GOT, + SYMBOL_FORCE_TO_MEM +}; + +/* A set of tuning parameters contains references to size and time + cost models and vectors for address cost calculations, register + move costs and memory move costs. */ + +/* Additional cost for addresses. */ +struct cpu_addrcost_table +{ + const int pre_modify; + const int post_modify; + const int register_offset; + const int register_extend; + const int imm_offset; +}; + +/* Additional costs for register copies. Cost is for one register. */ +struct cpu_regmove_cost +{ + const int GP2GP; + const int GP2FP; + const int FP2GP; + const int FP2FP; +}; + +/* Cost for vector insn classes. */ +struct cpu_vector_cost +{ + const int scalar_stmt_cost; /* Cost of any scalar operation, + excluding load and store. */ + const int scalar_load_cost; /* Cost of scalar load. */ + const int scalar_store_cost; /* Cost of scalar store. */ + const int vec_stmt_cost; /* Cost of any vector operation, + excluding load, store, + vector-to-scalar and + scalar-to-vector operation. */ + const int vec_to_scalar_cost; /* Cost of vec-to-scalar operation. */ + const int scalar_to_vec_cost; /* Cost of scalar-to-vector + operation. */ + const int vec_align_load_cost; /* Cost of aligned vector load. */ + const int vec_unalign_load_cost; /* Cost of unaligned vector load. */ + const int vec_unalign_store_cost; /* Cost of unaligned vector store. */ + const int vec_store_cost; /* Cost of vector store. */ + const int cond_taken_branch_cost; /* Cost of taken branch. */ + const int cond_not_taken_branch_cost; /* Cost of not taken branch. 
*/ +}; + +struct tune_params +{ + const struct cpu_cost_table *const insn_extra_cost; + const struct cpu_addrcost_table *const addr_cost; + const struct cpu_regmove_cost *const regmove_cost; + const struct cpu_vector_cost *const vec_costs; + const int memmov_cost; + const int issue_rate; +}; + +HOST_WIDE_INT aarch64_initial_elimination_offset (unsigned, unsigned); +bool aarch64_bitmask_imm (HOST_WIDE_INT val, enum machine_mode); +bool aarch64_cannot_change_mode_class (enum machine_mode, + enum machine_mode, + enum reg_class); +enum aarch64_symbol_type +aarch64_classify_symbolic_expression (rtx, enum aarch64_symbol_context); +bool aarch64_constant_address_p (rtx); +bool aarch64_float_const_zero_rtx_p (rtx); +bool aarch64_function_arg_regno_p (unsigned); +bool aarch64_gen_movmemqi (rtx *); +bool aarch64_gimple_fold_builtin (gimple_stmt_iterator *); +bool aarch64_is_extend_from_extract (enum machine_mode, rtx, rtx); +bool aarch64_is_long_call_p (rtx); +bool aarch64_label_mentioned_p (rtx); +bool aarch64_legitimate_pic_operand_p (rtx); +bool aarch64_move_imm (HOST_WIDE_INT, enum machine_mode); +bool aarch64_mov_operand_p (rtx, enum aarch64_symbol_context, + enum machine_mode); +char *aarch64_output_scalar_simd_mov_immediate (rtx, enum machine_mode); +char *aarch64_output_simd_mov_immediate (rtx, enum machine_mode, unsigned); +bool aarch64_pad_arg_upward (enum machine_mode, const_tree); +bool aarch64_pad_reg_upward (enum machine_mode, const_tree, bool); +bool aarch64_regno_ok_for_base_p (int, bool); +bool aarch64_regno_ok_for_index_p (int, bool); +bool aarch64_simd_imm_scalar_p (rtx x, enum machine_mode mode); +bool aarch64_simd_imm_zero_p (rtx, enum machine_mode); +bool aarch64_simd_scalar_immediate_valid_for_move (rtx, enum machine_mode); +bool aarch64_simd_shift_imm_p (rtx, enum machine_mode, bool); +bool aarch64_simd_valid_immediate (rtx, enum machine_mode, bool, + struct simd_immediate_info *); +bool aarch64_symbolic_address_p (rtx); +bool aarch64_uimm12_shift (HOST_WIDE_INT); +const char *aarch64_output_casesi (rtx *); +const char *aarch64_rewrite_selected_cpu (const char *name); + +enum aarch64_symbol_type aarch64_classify_symbol (rtx, + enum aarch64_symbol_context); +enum aarch64_symbol_type aarch64_classify_tls_symbol (rtx); +enum reg_class aarch64_regno_regclass (unsigned); +int aarch64_asm_preferred_eh_data_format (int, int); +int aarch64_hard_regno_mode_ok (unsigned, enum machine_mode); +int aarch64_hard_regno_nregs (unsigned, enum machine_mode); +int aarch64_simd_attr_length_move (rtx); +int aarch64_uxt_size (int, HOST_WIDE_INT); +rtx aarch64_final_eh_return_addr (void); +rtx aarch64_legitimize_reload_address (rtx *, enum machine_mode, int, int, int); +const char *aarch64_output_move_struct (rtx *operands); +rtx aarch64_return_addr (int, rtx); +rtx aarch64_simd_gen_const_vector_dup (enum machine_mode, int); +bool aarch64_simd_mem_operand_p (rtx); +rtx aarch64_simd_vect_par_cnst_half (enum machine_mode, bool); +rtx aarch64_tls_get_addr (void); +tree aarch64_fold_builtin (tree, int, tree *, bool); +unsigned aarch64_dbx_register_number (unsigned); +unsigned aarch64_trampoline_size (void); +void aarch64_asm_output_labelref (FILE *, const char *); +void aarch64_elf_asm_named_section (const char *, unsigned, tree); +void aarch64_expand_epilogue (bool); +void aarch64_expand_mov_immediate (rtx, rtx); +void aarch64_expand_prologue (void); +void aarch64_expand_vector_init (rtx, rtx); +void aarch64_function_profiler (FILE *, int); +void aarch64_init_cumulative_args (CUMULATIVE_ARGS *, 
const_tree, rtx, + const_tree, unsigned); +void aarch64_init_expanders (void); +void aarch64_print_operand (FILE *, rtx, char); +void aarch64_print_operand_address (FILE *, rtx); + +/* Initialize builtins for SIMD intrinsics. */ +void init_aarch64_simd_builtins (void); + +void aarch64_simd_const_bounds (rtx, HOST_WIDE_INT, HOST_WIDE_INT); +void aarch64_simd_disambiguate_copy (rtx *, rtx *, rtx *, unsigned int); + +/* Emit code to place a AdvSIMD pair result in memory locations (with equal + registers). */ +void aarch64_simd_emit_pair_result_insn (enum machine_mode, + rtx (*intfn) (rtx, rtx, rtx), rtx, + rtx); + +/* Expand builtins for SIMD intrinsics. */ +rtx aarch64_simd_expand_builtin (int, tree, rtx); + +void aarch64_simd_lane_bounds (rtx, HOST_WIDE_INT, HOST_WIDE_INT); + +/* Emit code for reinterprets. */ +void aarch64_simd_reinterpret (rtx, rtx); + +void aarch64_split_128bit_move (rtx, rtx); + +bool aarch64_split_128bit_move_p (rtx, rtx); + +void aarch64_split_simd_combine (rtx, rtx, rtx); + +void aarch64_split_simd_move (rtx, rtx); + +/* Check for a legitimate floating point constant for FMOV. */ +bool aarch64_float_const_representable_p (rtx); + +#if defined (RTX_CODE) + +bool aarch64_legitimate_address_p (enum machine_mode, rtx, RTX_CODE, bool); +enum machine_mode aarch64_select_cc_mode (RTX_CODE, rtx, rtx); +rtx aarch64_gen_compare_reg (RTX_CODE, rtx, rtx); +rtx aarch64_load_tp (rtx); + +void aarch64_expand_compare_and_swap (rtx op[]); +void aarch64_split_compare_and_swap (rtx op[]); +void aarch64_split_atomic_op (enum rtx_code, rtx, rtx, rtx, rtx, rtx, rtx); + +#endif /* RTX_CODE */ + +void aarch64_init_builtins (void); +rtx aarch64_expand_builtin (tree exp, + rtx target, + rtx subtarget ATTRIBUTE_UNUSED, + enum machine_mode mode ATTRIBUTE_UNUSED, + int ignore ATTRIBUTE_UNUSED); +tree aarch64_builtin_decl (unsigned, bool ATTRIBUTE_UNUSED); + +tree +aarch64_builtin_vectorized_function (tree fndecl, + tree type_out, + tree type_in); + +extern void aarch64_split_combinev16qi (rtx operands[3]); +extern void aarch64_expand_vec_perm (rtx target, rtx op0, rtx op1, rtx sel); +extern bool +aarch64_expand_vec_perm_const (rtx target, rtx op0, rtx op1, rtx sel); +#endif /* GCC_AARCH64_PROTOS_H */ diff --git a/gcc-4.9/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc-4.9/gcc/config/aarch64/aarch64-simd-builtins.def new file mode 100644 index 000000000..c9b7570e5 --- /dev/null +++ b/gcc-4.9/gcc/config/aarch64/aarch64-simd-builtins.def @@ -0,0 +1,395 @@ +/* Machine description for AArch64 architecture. + Copyright (C) 2012-2014 Free Software Foundation, Inc. + Contributed by ARM Ltd. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING3. If not see + <http://www.gnu.org/licenses/>. */ + +/* In the list below, the BUILTIN_<ITERATOR> macros expand to create + builtins for each of the modes described by <ITERATOR>. When adding + new builtins to this list, a helpful idiom to follow is to add + a line for each pattern in the md file. 
Thus, ADDP, which has one + pattern defined for the VD_BHSI iterator, and one for DImode, has two + entries below. + + Parameter 1 is the 'type' of the intrinsic. This is used to + describe the type modifiers (for example; unsigned) applied to + each of the parameters to the intrinsic function. + + Parameter 2 is the name of the intrinsic. This is appended + to `__builtin_aarch64_<name><mode>` to give the intrinsic name + as exported to the front-ends. + + Parameter 3 describes how to map from the name to the CODE_FOR_ + macro holding the RTL pattern for the intrinsic. This mapping is: + 0 - CODE_FOR_aarch64_<name><mode> + 1-9 - CODE_FOR_<name><mode><1-9> + 10 - CODE_FOR_<name><mode>. */ + + BUILTIN_VD_RE (CREATE, create, 0) + BUILTIN_VDC (COMBINE, combine, 0) + BUILTIN_VB (BINOP, pmul, 0) + BUILTIN_VDQF (UNOP, sqrt, 2) + BUILTIN_VD_BHSI (BINOP, addp, 0) + VAR1 (UNOP, addp, 0, di) + BUILTIN_VDQ_BHSI (UNOP, clz, 2) + + BUILTIN_VALL (GETLANE, get_lane, 0) + VAR1 (GETLANE, get_lane, 0, di) + BUILTIN_VALL (GETLANE, be_checked_get_lane, 0) + + BUILTIN_VD_RE (REINTERP, reinterpretdi, 0) + BUILTIN_VDC (REINTERP, reinterpretv8qi, 0) + BUILTIN_VDC (REINTERP, reinterpretv4hi, 0) + BUILTIN_VDC (REINTERP, reinterpretv2si, 0) + BUILTIN_VDC (REINTERP, reinterpretv2sf, 0) + BUILTIN_VQ (REINTERP, reinterpretv16qi, 0) + BUILTIN_VQ (REINTERP, reinterpretv8hi, 0) + BUILTIN_VQ (REINTERP, reinterpretv4si, 0) + BUILTIN_VQ (REINTERP, reinterpretv4sf, 0) + BUILTIN_VQ (REINTERP, reinterpretv2di, 0) + BUILTIN_VQ (REINTERP, reinterpretv2df, 0) + + BUILTIN_VDQ_I (BINOP, dup_lane, 0) + /* Implemented by aarch64_<sur>q<r>shl<mode>. */ + BUILTIN_VSDQ_I (BINOP, sqshl, 0) + BUILTIN_VSDQ_I (BINOP, uqshl, 0) + BUILTIN_VSDQ_I (BINOP, sqrshl, 0) + BUILTIN_VSDQ_I (BINOP, uqrshl, 0) + /* Implemented by aarch64_<su_optab><optab><mode>. */ + BUILTIN_VSDQ_I (BINOP, sqadd, 0) + BUILTIN_VSDQ_I (BINOP, uqadd, 0) + BUILTIN_VSDQ_I (BINOP, sqsub, 0) + BUILTIN_VSDQ_I (BINOP, uqsub, 0) + /* Implemented by aarch64_<sur>qadd<mode>. */ + BUILTIN_VSDQ_I (BINOP, suqadd, 0) + BUILTIN_VSDQ_I (BINOP, usqadd, 0) + + /* Implemented by aarch64_get_dreg<VSTRUCT:mode><VDC:mode>. */ + BUILTIN_VDC (GETLANE, get_dregoi, 0) + BUILTIN_VDC (GETLANE, get_dregci, 0) + BUILTIN_VDC (GETLANE, get_dregxi, 0) + /* Implemented by aarch64_get_qreg<VSTRUCT:mode><VQ:mode>. */ + BUILTIN_VQ (GETLANE, get_qregoi, 0) + BUILTIN_VQ (GETLANE, get_qregci, 0) + BUILTIN_VQ (GETLANE, get_qregxi, 0) + /* Implemented by aarch64_set_qreg<VSTRUCT:mode><VQ:mode>. */ + BUILTIN_VQ (SETLANE, set_qregoi, 0) + BUILTIN_VQ (SETLANE, set_qregci, 0) + BUILTIN_VQ (SETLANE, set_qregxi, 0) + /* Implemented by aarch64_ld<VSTRUCT:nregs><VDC:mode>. */ + BUILTIN_VDC (LOADSTRUCT, ld2, 0) + BUILTIN_VDC (LOADSTRUCT, ld3, 0) + BUILTIN_VDC (LOADSTRUCT, ld4, 0) + /* Implemented by aarch64_ld<VSTRUCT:nregs><VQ:mode>. */ + BUILTIN_VQ (LOADSTRUCT, ld2, 0) + BUILTIN_VQ (LOADSTRUCT, ld3, 0) + BUILTIN_VQ (LOADSTRUCT, ld4, 0) + /* Implemented by aarch64_st<VSTRUCT:nregs><VDC:mode>. */ + BUILTIN_VDC (STORESTRUCT, st2, 0) + BUILTIN_VDC (STORESTRUCT, st3, 0) + BUILTIN_VDC (STORESTRUCT, st4, 0) + /* Implemented by aarch64_st<VSTRUCT:nregs><VQ:mode>. 
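+   For example, following the mapping rule given at the top of this file,
+   the quad-register st2 entry below uses map value 0 and so is expected
+   to resolve to CODE_FOR_aarch64_st2v16qi, CODE_FOR_aarch64_st2v4si and
+   so on, one insn code for each mode covered by the VQ iterator.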
*/ + BUILTIN_VQ (STORESTRUCT, st2, 0) + BUILTIN_VQ (STORESTRUCT, st3, 0) + BUILTIN_VQ (STORESTRUCT, st4, 0) + + BUILTIN_VQW (BINOP, saddl2, 0) + BUILTIN_VQW (BINOP, uaddl2, 0) + BUILTIN_VQW (BINOP, ssubl2, 0) + BUILTIN_VQW (BINOP, usubl2, 0) + BUILTIN_VQW (BINOP, saddw2, 0) + BUILTIN_VQW (BINOP, uaddw2, 0) + BUILTIN_VQW (BINOP, ssubw2, 0) + BUILTIN_VQW (BINOP, usubw2, 0) + /* Implemented by aarch64_<ANY_EXTEND:su><ADDSUB:optab>l<mode>. */ + BUILTIN_VDW (BINOP, saddl, 0) + BUILTIN_VDW (BINOP, uaddl, 0) + BUILTIN_VDW (BINOP, ssubl, 0) + BUILTIN_VDW (BINOP, usubl, 0) + /* Implemented by aarch64_<ANY_EXTEND:su><ADDSUB:optab>w<mode>. */ + BUILTIN_VDW (BINOP, saddw, 0) + BUILTIN_VDW (BINOP, uaddw, 0) + BUILTIN_VDW (BINOP, ssubw, 0) + BUILTIN_VDW (BINOP, usubw, 0) + /* Implemented by aarch64_<sur>h<addsub><mode>. */ + BUILTIN_VQ_S (BINOP, shadd, 0) + BUILTIN_VQ_S (BINOP, uhadd, 0) + BUILTIN_VQ_S (BINOP, srhadd, 0) + BUILTIN_VQ_S (BINOP, urhadd, 0) + /* Implemented by aarch64_<sur><addsub>hn<mode>. */ + BUILTIN_VQN (BINOP, addhn, 0) + BUILTIN_VQN (BINOP, raddhn, 0) + /* Implemented by aarch64_<sur><addsub>hn2<mode>. */ + BUILTIN_VQN (TERNOP, addhn2, 0) + BUILTIN_VQN (TERNOP, raddhn2, 0) + + BUILTIN_VSQN_HSDI (UNOP, sqmovun, 0) + /* Implemented by aarch64_<sur>qmovn<mode>. */ + BUILTIN_VSQN_HSDI (UNOP, sqmovn, 0) + BUILTIN_VSQN_HSDI (UNOP, uqmovn, 0) + /* Implemented by aarch64_s<optab><mode>. */ + BUILTIN_VSDQ_I_BHSI (UNOP, sqabs, 0) + BUILTIN_VSDQ_I_BHSI (UNOP, sqneg, 0) + + BUILTIN_VSD_HSI (QUADOP, sqdmlal_lane, 0) + BUILTIN_VSD_HSI (QUADOP, sqdmlsl_lane, 0) + BUILTIN_VSD_HSI (QUADOP, sqdmlal_laneq, 0) + BUILTIN_VSD_HSI (QUADOP, sqdmlsl_laneq, 0) + BUILTIN_VQ_HSI (TERNOP, sqdmlal2, 0) + BUILTIN_VQ_HSI (TERNOP, sqdmlsl2, 0) + BUILTIN_VQ_HSI (QUADOP, sqdmlal2_lane, 0) + BUILTIN_VQ_HSI (QUADOP, sqdmlsl2_lane, 0) + BUILTIN_VQ_HSI (QUADOP, sqdmlal2_laneq, 0) + BUILTIN_VQ_HSI (QUADOP, sqdmlsl2_laneq, 0) + BUILTIN_VQ_HSI (TERNOP, sqdmlal2_n, 0) + BUILTIN_VQ_HSI (TERNOP, sqdmlsl2_n, 0) + /* Implemented by aarch64_sqdml<SBINQOPS:as>l<mode>. */ + BUILTIN_VSD_HSI (TERNOP, sqdmlal, 0) + BUILTIN_VSD_HSI (TERNOP, sqdmlsl, 0) + /* Implemented by aarch64_sqdml<SBINQOPS:as>l_n<mode>. */ + BUILTIN_VD_HSI (TERNOP, sqdmlal_n, 0) + BUILTIN_VD_HSI (TERNOP, sqdmlsl_n, 0) + + BUILTIN_VSD_HSI (BINOP, sqdmull, 0) + BUILTIN_VSD_HSI (TERNOP, sqdmull_lane, 0) + BUILTIN_VD_HSI (TERNOP, sqdmull_laneq, 0) + BUILTIN_VD_HSI (BINOP, sqdmull_n, 0) + BUILTIN_VQ_HSI (BINOP, sqdmull2, 0) + BUILTIN_VQ_HSI (TERNOP, sqdmull2_lane, 0) + BUILTIN_VQ_HSI (TERNOP, sqdmull2_laneq, 0) + BUILTIN_VQ_HSI (BINOP, sqdmull2_n, 0) + /* Implemented by aarch64_sq<r>dmulh<mode>. */ + BUILTIN_VSDQ_HSI (BINOP, sqdmulh, 0) + BUILTIN_VSDQ_HSI (BINOP, sqrdmulh, 0) + /* Implemented by aarch64_sq<r>dmulh_lane<q><mode>. */ + BUILTIN_VDQHS (TERNOP, sqdmulh_lane, 0) + BUILTIN_VDQHS (TERNOP, sqdmulh_laneq, 0) + BUILTIN_VDQHS (TERNOP, sqrdmulh_lane, 0) + BUILTIN_VDQHS (TERNOP, sqrdmulh_laneq, 0) + BUILTIN_SD_HSI (TERNOP, sqdmulh_lane, 0) + BUILTIN_SD_HSI (TERNOP, sqrdmulh_lane, 0) + + BUILTIN_VSDQ_I_DI (BINOP, ashl, 3) + /* Implemented by aarch64_<sur>shl<mode>. */ + BUILTIN_VSDQ_I_DI (BINOP, sshl, 0) + BUILTIN_VSDQ_I_DI (BINOP, ushl, 0) + BUILTIN_VSDQ_I_DI (BINOP, srshl, 0) + BUILTIN_VSDQ_I_DI (BINOP, urshl, 0) + + BUILTIN_VDQ_I (SHIFTIMM, ashr, 3) + VAR1 (SHIFTIMM, ashr_simd, 0, di) + BUILTIN_VDQ_I (SHIFTIMM, lshr, 3) + VAR1 (USHIFTIMM, lshr_simd, 0, di) + /* Implemented by aarch64_<sur>shr_n<mode>. 
*/ + BUILTIN_VSDQ_I_DI (SHIFTIMM, srshr_n, 0) + BUILTIN_VSDQ_I_DI (SHIFTIMM, urshr_n, 0) + /* Implemented by aarch64_<sur>sra_n<mode>. */ + BUILTIN_VSDQ_I_DI (SHIFTACC, ssra_n, 0) + BUILTIN_VSDQ_I_DI (SHIFTACC, usra_n, 0) + BUILTIN_VSDQ_I_DI (SHIFTACC, srsra_n, 0) + BUILTIN_VSDQ_I_DI (SHIFTACC, ursra_n, 0) + /* Implemented by aarch64_<sur>shll_n<mode>. */ + BUILTIN_VDW (SHIFTIMM, sshll_n, 0) + BUILTIN_VDW (SHIFTIMM, ushll_n, 0) + /* Implemented by aarch64_<sur>shll2_n<mode>. */ + BUILTIN_VQW (SHIFTIMM, sshll2_n, 0) + BUILTIN_VQW (SHIFTIMM, ushll2_n, 0) + /* Implemented by aarch64_<sur>q<r>shr<u>n_n<mode>. */ + BUILTIN_VSQN_HSDI (SHIFTIMM, sqshrun_n, 0) + BUILTIN_VSQN_HSDI (SHIFTIMM, sqrshrun_n, 0) + BUILTIN_VSQN_HSDI (SHIFTIMM, sqshrn_n, 0) + BUILTIN_VSQN_HSDI (SHIFTIMM, uqshrn_n, 0) + BUILTIN_VSQN_HSDI (SHIFTIMM, sqrshrn_n, 0) + BUILTIN_VSQN_HSDI (SHIFTIMM, uqrshrn_n, 0) + /* Implemented by aarch64_<sur>s<lr>i_n<mode>. */ + BUILTIN_VSDQ_I_DI (SHIFTINSERT, ssri_n, 0) + BUILTIN_VSDQ_I_DI (SHIFTINSERT, usri_n, 0) + BUILTIN_VSDQ_I_DI (SHIFTINSERT, ssli_n, 0) + BUILTIN_VSDQ_I_DI (SHIFTINSERT, usli_n, 0) + /* Implemented by aarch64_<sur>qshl<u>_n<mode>. */ + BUILTIN_VSDQ_I (SHIFTIMM, sqshlu_n, 0) + BUILTIN_VSDQ_I (SHIFTIMM, sqshl_n, 0) + BUILTIN_VSDQ_I (SHIFTIMM, uqshl_n, 0) + + /* Implemented by aarch64_cm<cmp><mode>. */ + BUILTIN_VALLDI (BINOP, cmeq, 0) + BUILTIN_VALLDI (BINOP, cmge, 0) + BUILTIN_VALLDI (BINOP, cmgt, 0) + BUILTIN_VALLDI (BINOP, cmle, 0) + BUILTIN_VALLDI (BINOP, cmlt, 0) + /* Implemented by aarch64_cm<cmp><mode>. */ + BUILTIN_VSDQ_I_DI (BINOP, cmgeu, 0) + BUILTIN_VSDQ_I_DI (BINOP, cmgtu, 0) + BUILTIN_VSDQ_I_DI (BINOP, cmtst, 0) + + /* Implemented by reduc_<sur>plus_<mode>. */ + BUILTIN_VALL (UNOP, reduc_splus_, 10) + BUILTIN_VDQ (UNOP, reduc_uplus_, 10) + + /* Implemented by reduc_<maxmin_uns>_<mode>. */ + BUILTIN_VDQIF (UNOP, reduc_smax_, 10) + BUILTIN_VDQIF (UNOP, reduc_smin_, 10) + BUILTIN_VDQ_BHSI (UNOP, reduc_umax_, 10) + BUILTIN_VDQ_BHSI (UNOP, reduc_umin_, 10) + BUILTIN_VDQF (UNOP, reduc_smax_nan_, 10) + BUILTIN_VDQF (UNOP, reduc_smin_nan_, 10) + + /* Implemented by <maxmin><mode>3. + smax variants map to fmaxnm, + smax_nan variants map to fmax. */ + BUILTIN_VDQIF (BINOP, smax, 3) + BUILTIN_VDQIF (BINOP, smin, 3) + BUILTIN_VDQ_BHSI (BINOP, umax, 3) + BUILTIN_VDQ_BHSI (BINOP, umin, 3) + BUILTIN_VDQF (BINOP, smax_nan, 3) + BUILTIN_VDQF (BINOP, smin_nan, 3) + + /* Implemented by <frint_pattern><mode>2. */ + BUILTIN_VDQF (UNOP, btrunc, 2) + BUILTIN_VDQF (UNOP, ceil, 2) + BUILTIN_VDQF (UNOP, floor, 2) + BUILTIN_VDQF (UNOP, nearbyint, 2) + BUILTIN_VDQF (UNOP, rint, 2) + BUILTIN_VDQF (UNOP, round, 2) + BUILTIN_VDQF (UNOP, frintn, 2) + + /* Implemented by l<fcvt_pattern><su_optab><VQDF:mode><vcvt_target>2. */ + VAR1 (UNOP, lbtruncv2sf, 2, v2si) + VAR1 (UNOP, lbtruncv4sf, 2, v4si) + VAR1 (UNOP, lbtruncv2df, 2, v2di) + + VAR1 (UNOP, lbtruncuv2sf, 2, v2si) + VAR1 (UNOP, lbtruncuv4sf, 2, v4si) + VAR1 (UNOP, lbtruncuv2df, 2, v2di) + + VAR1 (UNOP, lroundv2sf, 2, v2si) + VAR1 (UNOP, lroundv4sf, 2, v4si) + VAR1 (UNOP, lroundv2df, 2, v2di) + /* Implemented by l<fcvt_pattern><su_optab><GPF:mode><GPI:mode>2. 
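+   Here the map value is 2, so by the naming rule at the top of this file
+   the lroundsf entry below is expected to resolve to CODE_FOR_lroundsfsi2
+   and the lrounddf entry to CODE_FOR_lrounddfdi2.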
*/ + VAR1 (UNOP, lroundsf, 2, si) + VAR1 (UNOP, lrounddf, 2, di) + + VAR1 (UNOP, lrounduv2sf, 2, v2si) + VAR1 (UNOP, lrounduv4sf, 2, v4si) + VAR1 (UNOP, lrounduv2df, 2, v2di) + VAR1 (UNOP, lroundusf, 2, si) + VAR1 (UNOP, lroundudf, 2, di) + + VAR1 (UNOP, lceilv2sf, 2, v2si) + VAR1 (UNOP, lceilv4sf, 2, v4si) + VAR1 (UNOP, lceilv2df, 2, v2di) + + VAR1 (UNOP, lceiluv2sf, 2, v2si) + VAR1 (UNOP, lceiluv4sf, 2, v4si) + VAR1 (UNOP, lceiluv2df, 2, v2di) + VAR1 (UNOP, lceilusf, 2, si) + VAR1 (UNOP, lceiludf, 2, di) + + VAR1 (UNOP, lfloorv2sf, 2, v2si) + VAR1 (UNOP, lfloorv4sf, 2, v4si) + VAR1 (UNOP, lfloorv2df, 2, v2di) + + VAR1 (UNOP, lflooruv2sf, 2, v2si) + VAR1 (UNOP, lflooruv4sf, 2, v4si) + VAR1 (UNOP, lflooruv2df, 2, v2di) + VAR1 (UNOP, lfloorusf, 2, si) + VAR1 (UNOP, lfloorudf, 2, di) + + VAR1 (UNOP, lfrintnv2sf, 2, v2si) + VAR1 (UNOP, lfrintnv4sf, 2, v4si) + VAR1 (UNOP, lfrintnv2df, 2, v2di) + VAR1 (UNOP, lfrintnsf, 2, si) + VAR1 (UNOP, lfrintndf, 2, di) + + VAR1 (UNOP, lfrintnuv2sf, 2, v2si) + VAR1 (UNOP, lfrintnuv4sf, 2, v4si) + VAR1 (UNOP, lfrintnuv2df, 2, v2di) + VAR1 (UNOP, lfrintnusf, 2, si) + VAR1 (UNOP, lfrintnudf, 2, di) + + /* Implemented by <optab><fcvt_target><VDQF:mode>2. */ + VAR1 (UNOP, floatv2si, 2, v2sf) + VAR1 (UNOP, floatv4si, 2, v4sf) + VAR1 (UNOP, floatv2di, 2, v2df) + + VAR1 (UNOP, floatunsv2si, 2, v2sf) + VAR1 (UNOP, floatunsv4si, 2, v4sf) + VAR1 (UNOP, floatunsv2di, 2, v2df) + + /* Implemented by + aarch64_<PERMUTE:perm_insn><PERMUTE:perm_hilo><mode>. */ + BUILTIN_VALL (BINOP, zip1, 0) + BUILTIN_VALL (BINOP, zip2, 0) + BUILTIN_VALL (BINOP, uzp1, 0) + BUILTIN_VALL (BINOP, uzp2, 0) + BUILTIN_VALL (BINOP, trn1, 0) + BUILTIN_VALL (BINOP, trn2, 0) + + /* Implemented by + aarch64_frecp<FRECP:frecp_suffix><mode>. */ + BUILTIN_GPF (UNOP, frecpe, 0) + BUILTIN_GPF (BINOP, frecps, 0) + BUILTIN_GPF (UNOP, frecpx, 0) + + BUILTIN_VDQF (UNOP, frecpe, 0) + BUILTIN_VDQF (BINOP, frecps, 0) + + BUILTIN_VALLDI (UNOP, abs, 2) + + VAR1 (UNOP, vec_unpacks_hi_, 10, v4sf) + VAR1 (BINOP, float_truncate_hi_, 0, v4sf) + + VAR1 (UNOP, float_extend_lo_, 0, v2df) + VAR1 (UNOP, float_truncate_lo_, 0, v2sf) + + /* Implemented by aarch64_ld1<VALL:mode>. */ + BUILTIN_VALL (LOAD1, ld1, 0) + + /* Implemented by aarch64_st1<VALL:mode>. */ + BUILTIN_VALL (STORE1, st1, 0) + + /* Implemented by fma<mode>4. */ + BUILTIN_VDQF (TERNOP, fma, 4) + + /* Implemented by aarch64_simd_bsl<mode>. */ + BUILTIN_VDQQH (BSL_P, simd_bsl, 0) + BUILTIN_VSDQ_I_DI (BSL_U, simd_bsl, 0) + BUILTIN_VALLDIF (BSL_S, simd_bsl, 0) + + /* Implemented by aarch64_crypto_aes<op><mode>. */ + VAR1 (BINOPU, crypto_aese, 0, v16qi) + VAR1 (BINOPU, crypto_aesd, 0, v16qi) + VAR1 (UNOPU, crypto_aesmc, 0, v16qi) + VAR1 (UNOPU, crypto_aesimc, 0, v16qi) + + /* Implemented by aarch64_crypto_sha1<op><mode>. */ + VAR1 (UNOPU, crypto_sha1h, 0, si) + VAR1 (BINOPU, crypto_sha1su1, 0, v4si) + VAR1 (TERNOPU, crypto_sha1c, 0, v4si) + VAR1 (TERNOPU, crypto_sha1m, 0, v4si) + VAR1 (TERNOPU, crypto_sha1p, 0, v4si) + VAR1 (TERNOPU, crypto_sha1su0, 0, v4si) + + /* Implemented by aarch64_crypto_sha256<op><mode>. */ + VAR1 (TERNOPU, crypto_sha256h, 0, v4si) + VAR1 (TERNOPU, crypto_sha256h2, 0, v4si) + VAR1 (BINOPU, crypto_sha256su0, 0, v4si) + VAR1 (TERNOPU, crypto_sha256su1, 0, v4si) + + /* Implemented by aarch64_crypto_pmull<mode>. 
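+   These are the 64-bit polynomial multiplies (PMULL / PMULL2, producing a
+   128-bit product); the arm_neon.h intrinsics vmull_p64 and vmull_high_p64
+   are the expected users of the di and v2di entries below.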
*/ + VAR1 (BINOPP, crypto_pmull, 0, di) + VAR1 (BINOPP, crypto_pmull, 0, v2di) diff --git a/gcc-4.9/gcc/config/aarch64/aarch64-simd.md b/gcc-4.9/gcc/config/aarch64/aarch64-simd.md new file mode 100644 index 000000000..6048d605c --- /dev/null +++ b/gcc-4.9/gcc/config/aarch64/aarch64-simd.md @@ -0,0 +1,4363 @@ +;; Machine description for AArch64 AdvSIMD architecture. +;; Copyright (C) 2011-2014 Free Software Foundation, Inc. +;; Contributed by ARM Ltd. +;; +;; This file is part of GCC. +;; +;; GCC is free software; you can redistribute it and/or modify it +;; under the terms of the GNU General Public License as published by +;; the Free Software Foundation; either version 3, or (at your option) +;; any later version. +;; +;; GCC is distributed in the hope that it will be useful, but +;; WITHOUT ANY WARRANTY; without even the implied warranty of +;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +;; General Public License for more details. +;; +;; You should have received a copy of the GNU General Public License +;; along with GCC; see the file COPYING3. If not see +;; <http://www.gnu.org/licenses/>. + +(define_expand "mov<mode>" + [(set (match_operand:VALL 0 "aarch64_simd_nonimmediate_operand" "") + (match_operand:VALL 1 "aarch64_simd_general_operand" ""))] + "TARGET_SIMD" + " + if (GET_CODE (operands[0]) == MEM) + operands[1] = force_reg (<MODE>mode, operands[1]); + " +) + +(define_expand "movmisalign<mode>" + [(set (match_operand:VALL 0 "aarch64_simd_nonimmediate_operand" "") + (match_operand:VALL 1 "aarch64_simd_general_operand" ""))] + "TARGET_SIMD" +{ + /* This pattern is not permitted to fail during expansion: if both arguments + are non-registers (e.g. memory := constant, which can be created by the + auto-vectorizer), force operand 1 into a register. 
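+     Forcing operand 1 into a register leaves a plain register-to-memory
+     move, which the *aarch64_simd_mov<mode> patterns below can then
+     match.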
*/ + if (!register_operand (operands[0], <MODE>mode) + && !register_operand (operands[1], <MODE>mode)) + operands[1] = force_reg (<MODE>mode, operands[1]); +}) + +(define_insn "aarch64_simd_dup<mode>" + [(set (match_operand:VDQ 0 "register_operand" "=w, w") + (vec_duplicate:VDQ (match_operand:<VEL> 1 "register_operand" "r, w")))] + "TARGET_SIMD" + "@ + dup\\t%0.<Vtype>, %<vw>1 + dup\\t%0.<Vtype>, %1.<Vetype>[0]" + [(set_attr "type" "neon_from_gp<q>, neon_dup<q>")] +) + +(define_insn "aarch64_simd_dup<mode>" + [(set (match_operand:VDQF 0 "register_operand" "=w") + (vec_duplicate:VDQF (match_operand:<VEL> 1 "register_operand" "w")))] + "TARGET_SIMD" + "dup\\t%0.<Vtype>, %1.<Vetype>[0]" + [(set_attr "type" "neon_dup<q>")] +) + +(define_insn "aarch64_dup_lane<mode>" + [(set (match_operand:VALL 0 "register_operand" "=w") + (vec_duplicate:VALL + (vec_select:<VEL> + (match_operand:VALL 1 "register_operand" "w") + (parallel [(match_operand:SI 2 "immediate_operand" "i")]) + )))] + "TARGET_SIMD" + { + operands[2] = GEN_INT (ENDIAN_LANE_N (<MODE>mode, INTVAL (operands[2]))); + return "dup\\t%0.<Vtype>, %1.<Vetype>[%2]"; + } + [(set_attr "type" "neon_dup<q>")] +) + +(define_insn "aarch64_dup_lane_<vswap_width_name><mode>" + [(set (match_operand:VALL 0 "register_operand" "=w") + (vec_duplicate:VALL + (vec_select:<VEL> + (match_operand:<VSWAP_WIDTH> 1 "register_operand" "w") + (parallel [(match_operand:SI 2 "immediate_operand" "i")]) + )))] + "TARGET_SIMD" + { + operands[2] = GEN_INT (ENDIAN_LANE_N (<VSWAP_WIDTH>mode, + INTVAL (operands[2]))); + return "dup\\t%0.<Vtype>, %1.<Vetype>[%2]"; + } + [(set_attr "type" "neon_dup<q>")] +) + +(define_insn "*aarch64_simd_mov<mode>" + [(set (match_operand:VD 0 "aarch64_simd_nonimmediate_operand" + "=w, m, w, ?r, ?w, ?r, w") + (match_operand:VD 1 "aarch64_simd_general_operand" + "m, w, w, w, r, r, Dn"))] + "TARGET_SIMD + && (register_operand (operands[0], <MODE>mode) + || register_operand (operands[1], <MODE>mode))" +{ + switch (which_alternative) + { + case 0: return "ldr\\t%d0, %1"; + case 1: return "str\\t%d1, %0"; + case 2: return "orr\t%0.<Vbtype>, %1.<Vbtype>, %1.<Vbtype>"; + case 3: return "umov\t%0, %1.d[0]"; + case 4: return "ins\t%0.d[0], %1"; + case 5: return "mov\t%0, %1"; + case 6: + return aarch64_output_simd_mov_immediate (operands[1], + <MODE>mode, 64); + default: gcc_unreachable (); + } +} + [(set_attr "type" "neon_load1_1reg<q>, neon_store1_1reg<q>,\ + neon_logic<q>, neon_to_gp<q>, neon_from_gp<q>,\ + mov_reg, neon_move<q>")] +) + +(define_insn "*aarch64_simd_mov<mode>" + [(set (match_operand:VQ 0 "aarch64_simd_nonimmediate_operand" + "=w, m, w, ?r, ?w, ?r, w") + (match_operand:VQ 1 "aarch64_simd_general_operand" + "m, w, w, w, r, r, Dn"))] + "TARGET_SIMD + && (register_operand (operands[0], <MODE>mode) + || register_operand (operands[1], <MODE>mode))" +{ + switch (which_alternative) + { + case 0: + return "ldr\\t%q0, %1"; + case 1: + return "str\\t%q1, %0"; + case 2: + return "orr\t%0.<Vbtype>, %1.<Vbtype>, %1.<Vbtype>"; + case 3: + case 4: + case 5: + return "#"; + case 6: + return aarch64_output_simd_mov_immediate (operands[1], <MODE>mode, 128); + default: + gcc_unreachable (); + } +} + [(set_attr "type" "neon_load1_1reg<q>, neon_store1_1reg<q>,\ + neon_logic<q>, multiple, multiple, multiple,\ + neon_move<q>") + (set_attr "length" "4,4,4,8,8,8,4")] +) + +(define_split + [(set (match_operand:VQ 0 "register_operand" "") + (match_operand:VQ 1 "register_operand" ""))] + "TARGET_SIMD && reload_completed + && GP_REGNUM_P (REGNO (operands[0])) + && 
GP_REGNUM_P (REGNO (operands[1]))" + [(set (match_dup 0) (match_dup 1)) + (set (match_dup 2) (match_dup 3))] +{ + int rdest = REGNO (operands[0]); + int rsrc = REGNO (operands[1]); + rtx dest[2], src[2]; + + dest[0] = gen_rtx_REG (DImode, rdest); + src[0] = gen_rtx_REG (DImode, rsrc); + dest[1] = gen_rtx_REG (DImode, rdest + 1); + src[1] = gen_rtx_REG (DImode, rsrc + 1); + + aarch64_simd_disambiguate_copy (operands, dest, src, 2); +}) + +(define_split + [(set (match_operand:VQ 0 "register_operand" "") + (match_operand:VQ 1 "register_operand" ""))] + "TARGET_SIMD && reload_completed + && ((FP_REGNUM_P (REGNO (operands[0])) && GP_REGNUM_P (REGNO (operands[1]))) + || (GP_REGNUM_P (REGNO (operands[0])) && FP_REGNUM_P (REGNO (operands[1]))))" + [(const_int 0)] +{ + aarch64_split_simd_move (operands[0], operands[1]); + DONE; +}) + +(define_expand "aarch64_split_simd_mov<mode>" + [(set (match_operand:VQ 0) + (match_operand:VQ 1))] + "TARGET_SIMD" + { + rtx dst = operands[0]; + rtx src = operands[1]; + + if (GP_REGNUM_P (REGNO (src))) + { + rtx src_low_part = gen_lowpart (<VHALF>mode, src); + rtx src_high_part = gen_highpart (<VHALF>mode, src); + + emit_insn + (gen_move_lo_quad_<mode> (dst, src_low_part)); + emit_insn + (gen_move_hi_quad_<mode> (dst, src_high_part)); + } + + else + { + rtx dst_low_part = gen_lowpart (<VHALF>mode, dst); + rtx dst_high_part = gen_highpart (<VHALF>mode, dst); + rtx lo = aarch64_simd_vect_par_cnst_half (<MODE>mode, false); + rtx hi = aarch64_simd_vect_par_cnst_half (<MODE>mode, true); + + emit_insn + (gen_aarch64_simd_mov_from_<mode>low (dst_low_part, src, lo)); + emit_insn + (gen_aarch64_simd_mov_from_<mode>high (dst_high_part, src, hi)); + } + DONE; + } +) + +(define_insn "aarch64_simd_mov_from_<mode>low" + [(set (match_operand:<VHALF> 0 "register_operand" "=r") + (vec_select:<VHALF> + (match_operand:VQ 1 "register_operand" "w") + (match_operand:VQ 2 "vect_par_cnst_lo_half" "")))] + "TARGET_SIMD && reload_completed" + "umov\t%0, %1.d[0]" + [(set_attr "type" "neon_to_gp<q>") + (set_attr "length" "4") + ]) + +(define_insn "aarch64_simd_mov_from_<mode>high" + [(set (match_operand:<VHALF> 0 "register_operand" "=r") + (vec_select:<VHALF> + (match_operand:VQ 1 "register_operand" "w") + (match_operand:VQ 2 "vect_par_cnst_hi_half" "")))] + "TARGET_SIMD && reload_completed" + "umov\t%0, %1.d[1]" + [(set_attr "type" "neon_to_gp<q>") + (set_attr "length" "4") + ]) + +(define_insn "orn<mode>3" + [(set (match_operand:VDQ 0 "register_operand" "=w") + (ior:VDQ (not:VDQ (match_operand:VDQ 1 "register_operand" "w")) + (match_operand:VDQ 2 "register_operand" "w")))] + "TARGET_SIMD" + "orn\t%0.<Vbtype>, %2.<Vbtype>, %1.<Vbtype>" + [(set_attr "type" "neon_logic<q>")] +) + +(define_insn "bic<mode>3" + [(set (match_operand:VDQ 0 "register_operand" "=w") + (and:VDQ (not:VDQ (match_operand:VDQ 1 "register_operand" "w")) + (match_operand:VDQ 2 "register_operand" "w")))] + "TARGET_SIMD" + "bic\t%0.<Vbtype>, %2.<Vbtype>, %1.<Vbtype>" + [(set_attr "type" "neon_logic<q>")] +) + +(define_insn "add<mode>3" + [(set (match_operand:VDQ 0 "register_operand" "=w") + (plus:VDQ (match_operand:VDQ 1 "register_operand" "w") + (match_operand:VDQ 2 "register_operand" "w")))] + "TARGET_SIMD" + "add\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>" + [(set_attr "type" "neon_add<q>")] +) + +(define_insn "sub<mode>3" + [(set (match_operand:VDQ 0 "register_operand" "=w") + (minus:VDQ (match_operand:VDQ 1 "register_operand" "w") + (match_operand:VDQ 2 "register_operand" "w")))] + "TARGET_SIMD" + "sub\t%0.<Vtype>, %1.<Vtype>, 
%2.<Vtype>" + [(set_attr "type" "neon_sub<q>")] +) + +(define_insn "mul<mode>3" + [(set (match_operand:VDQM 0 "register_operand" "=w") + (mult:VDQM (match_operand:VDQM 1 "register_operand" "w") + (match_operand:VDQM 2 "register_operand" "w")))] + "TARGET_SIMD" + "mul\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>" + [(set_attr "type" "neon_mul_<Vetype><q>")] +) + +(define_insn "*aarch64_mul3_elt<mode>" + [(set (match_operand:VMUL 0 "register_operand" "=w") + (mult:VMUL + (vec_duplicate:VMUL + (vec_select:<VEL> + (match_operand:VMUL 1 "register_operand" "<h_con>") + (parallel [(match_operand:SI 2 "immediate_operand")]))) + (match_operand:VMUL 3 "register_operand" "w")))] + "TARGET_SIMD" + { + operands[2] = GEN_INT (ENDIAN_LANE_N (<MODE>mode, INTVAL (operands[2]))); + return "<f>mul\\t%0.<Vtype>, %3.<Vtype>, %1.<Vetype>[%2]"; + } + [(set_attr "type" "neon<fp>_mul_<Vetype>_scalar<q>")] +) + +(define_insn "*aarch64_mul3_elt_<vswap_width_name><mode>" + [(set (match_operand:VMUL_CHANGE_NLANES 0 "register_operand" "=w") + (mult:VMUL_CHANGE_NLANES + (vec_duplicate:VMUL_CHANGE_NLANES + (vec_select:<VEL> + (match_operand:<VSWAP_WIDTH> 1 "register_operand" "<h_con>") + (parallel [(match_operand:SI 2 "immediate_operand")]))) + (match_operand:VMUL_CHANGE_NLANES 3 "register_operand" "w")))] + "TARGET_SIMD" + { + operands[2] = GEN_INT (ENDIAN_LANE_N (<VSWAP_WIDTH>mode, + INTVAL (operands[2]))); + return "<f>mul\\t%0.<Vtype>, %3.<Vtype>, %1.<Vetype>[%2]"; + } + [(set_attr "type" "neon<fp>_mul_<Vetype>_scalar<q>")] +) + +(define_insn "*aarch64_mul3_elt_to_128df" + [(set (match_operand:V2DF 0 "register_operand" "=w") + (mult:V2DF + (vec_duplicate:V2DF + (match_operand:DF 2 "register_operand" "w")) + (match_operand:V2DF 1 "register_operand" "w")))] + "TARGET_SIMD" + "fmul\\t%0.2d, %1.2d, %2.d[0]" + [(set_attr "type" "neon_fp_mul_d_scalar_q")] +) + +(define_insn "*aarch64_mul3_elt_to_64v2df" + [(set (match_operand:DF 0 "register_operand" "=w") + (mult:DF + (vec_select:DF + (match_operand:V2DF 1 "register_operand" "w") + (parallel [(match_operand:SI 2 "immediate_operand")])) + (match_operand:DF 3 "register_operand" "w")))] + "TARGET_SIMD" + { + operands[2] = GEN_INT (ENDIAN_LANE_N (V2DFmode, INTVAL (operands[2]))); + return "fmul\\t%0.2d, %3.2d, %1.d[%2]"; + } + [(set_attr "type" "neon_fp_mul_d_scalar_q")] +) + +(define_insn "neg<mode>2" + [(set (match_operand:VDQ 0 "register_operand" "=w") + (neg:VDQ (match_operand:VDQ 1 "register_operand" "w")))] + "TARGET_SIMD" + "neg\t%0.<Vtype>, %1.<Vtype>" + [(set_attr "type" "neon_neg<q>")] +) + +(define_insn "abs<mode>2" + [(set (match_operand:VDQ 0 "register_operand" "=w") + (abs:VDQ (match_operand:VDQ 1 "register_operand" "w")))] + "TARGET_SIMD" + "abs\t%0.<Vtype>, %1.<Vtype>" + [(set_attr "type" "neon_abs<q>")] +) + +(define_insn "abd<mode>_3" + [(set (match_operand:VDQ_BHSI 0 "register_operand" "=w") + (abs:VDQ_BHSI (minus:VDQ_BHSI + (match_operand:VDQ_BHSI 1 "register_operand" "w") + (match_operand:VDQ_BHSI 2 "register_operand" "w"))))] + "TARGET_SIMD" + "sabd\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>" + [(set_attr "type" "neon_abd<q>")] +) + +(define_insn "aba<mode>_3" + [(set (match_operand:VDQ_BHSI 0 "register_operand" "=w") + (plus:VDQ_BHSI (abs:VDQ_BHSI (minus:VDQ_BHSI + (match_operand:VDQ_BHSI 1 "register_operand" "w") + (match_operand:VDQ_BHSI 2 "register_operand" "w"))) + (match_operand:VDQ_BHSI 3 "register_operand" "0")))] + "TARGET_SIMD" + "saba\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>" + [(set_attr "type" "neon_arith_acc<q>")] +) + +(define_insn "fabd<mode>_3" + [(set 
(match_operand:VDQF 0 "register_operand" "=w") + (abs:VDQF (minus:VDQF + (match_operand:VDQF 1 "register_operand" "w") + (match_operand:VDQF 2 "register_operand" "w"))))] + "TARGET_SIMD" + "fabd\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>" + [(set_attr "type" "neon_fp_abd_<Vetype><q>")] +) + +(define_insn "*fabd_scalar<mode>3" + [(set (match_operand:GPF 0 "register_operand" "=w") + (abs:GPF (minus:GPF + (match_operand:GPF 1 "register_operand" "w") + (match_operand:GPF 2 "register_operand" "w"))))] + "TARGET_SIMD" + "fabd\t%<s>0, %<s>1, %<s>2" + [(set_attr "type" "neon_fp_abd_<Vetype><q>")] +) + +(define_insn "and<mode>3" + [(set (match_operand:VDQ 0 "register_operand" "=w") + (and:VDQ (match_operand:VDQ 1 "register_operand" "w") + (match_operand:VDQ 2 "register_operand" "w")))] + "TARGET_SIMD" + "and\t%0.<Vbtype>, %1.<Vbtype>, %2.<Vbtype>" + [(set_attr "type" "neon_logic<q>")] +) + +(define_insn "ior<mode>3" + [(set (match_operand:VDQ 0 "register_operand" "=w") + (ior:VDQ (match_operand:VDQ 1 "register_operand" "w") + (match_operand:VDQ 2 "register_operand" "w")))] + "TARGET_SIMD" + "orr\t%0.<Vbtype>, %1.<Vbtype>, %2.<Vbtype>" + [(set_attr "type" "neon_logic<q>")] +) + +(define_insn "xor<mode>3" + [(set (match_operand:VDQ 0 "register_operand" "=w") + (xor:VDQ (match_operand:VDQ 1 "register_operand" "w") + (match_operand:VDQ 2 "register_operand" "w")))] + "TARGET_SIMD" + "eor\t%0.<Vbtype>, %1.<Vbtype>, %2.<Vbtype>" + [(set_attr "type" "neon_logic<q>")] +) + +(define_insn "one_cmpl<mode>2" + [(set (match_operand:VDQ 0 "register_operand" "=w") + (not:VDQ (match_operand:VDQ 1 "register_operand" "w")))] + "TARGET_SIMD" + "not\t%0.<Vbtype>, %1.<Vbtype>" + [(set_attr "type" "neon_logic<q>")] +) + +(define_insn "aarch64_simd_vec_set<mode>" + [(set (match_operand:VQ_S 0 "register_operand" "=w,w") + (vec_merge:VQ_S + (vec_duplicate:VQ_S + (match_operand:<VEL> 1 "register_operand" "r,w")) + (match_operand:VQ_S 3 "register_operand" "0,0") + (match_operand:SI 2 "immediate_operand" "i,i")))] + "TARGET_SIMD" + { + int elt = ENDIAN_LANE_N (<MODE>mode, exact_log2 (INTVAL (operands[2]))); + operands[2] = GEN_INT ((HOST_WIDE_INT) 1 << elt); + switch (which_alternative) + { + case 0: + return "ins\\t%0.<Vetype>[%p2], %w1"; + case 1: + return "ins\\t%0.<Vetype>[%p2], %1.<Vetype>[0]"; + default: + gcc_unreachable (); + } + } + [(set_attr "type" "neon_from_gp<q>, neon_ins<q>")] +) + +(define_insn "aarch64_simd_lshr<mode>" + [(set (match_operand:VDQ 0 "register_operand" "=w") + (lshiftrt:VDQ (match_operand:VDQ 1 "register_operand" "w") + (match_operand:VDQ 2 "aarch64_simd_rshift_imm" "Dr")))] + "TARGET_SIMD" + "ushr\t%0.<Vtype>, %1.<Vtype>, %2" + [(set_attr "type" "neon_shift_imm<q>")] +) + +(define_insn "aarch64_simd_ashr<mode>" + [(set (match_operand:VDQ 0 "register_operand" "=w") + (ashiftrt:VDQ (match_operand:VDQ 1 "register_operand" "w") + (match_operand:VDQ 2 "aarch64_simd_rshift_imm" "Dr")))] + "TARGET_SIMD" + "sshr\t%0.<Vtype>, %1.<Vtype>, %2" + [(set_attr "type" "neon_shift_imm<q>")] +) + +(define_insn "aarch64_simd_imm_shl<mode>" + [(set (match_operand:VDQ 0 "register_operand" "=w") + (ashift:VDQ (match_operand:VDQ 1 "register_operand" "w") + (match_operand:VDQ 2 "aarch64_simd_lshift_imm" "Dl")))] + "TARGET_SIMD" + "shl\t%0.<Vtype>, %1.<Vtype>, %2" + [(set_attr "type" "neon_shift_imm<q>")] +) + +(define_insn "aarch64_simd_reg_sshl<mode>" + [(set (match_operand:VDQ 0 "register_operand" "=w") + (ashift:VDQ (match_operand:VDQ 1 "register_operand" "w") + (match_operand:VDQ 2 "register_operand" "w")))] + 
"TARGET_SIMD" + "sshl\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>" + [(set_attr "type" "neon_shift_reg<q>")] +) + +(define_insn "aarch64_simd_reg_shl<mode>_unsigned" + [(set (match_operand:VDQ 0 "register_operand" "=w") + (unspec:VDQ [(match_operand:VDQ 1 "register_operand" "w") + (match_operand:VDQ 2 "register_operand" "w")] + UNSPEC_ASHIFT_UNSIGNED))] + "TARGET_SIMD" + "ushl\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>" + [(set_attr "type" "neon_shift_reg<q>")] +) + +(define_insn "aarch64_simd_reg_shl<mode>_signed" + [(set (match_operand:VDQ 0 "register_operand" "=w") + (unspec:VDQ [(match_operand:VDQ 1 "register_operand" "w") + (match_operand:VDQ 2 "register_operand" "w")] + UNSPEC_ASHIFT_SIGNED))] + "TARGET_SIMD" + "sshl\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>" + [(set_attr "type" "neon_shift_reg<q>")] +) + +(define_expand "ashl<mode>3" + [(match_operand:VDQ 0 "register_operand" "") + (match_operand:VDQ 1 "register_operand" "") + (match_operand:SI 2 "general_operand" "")] + "TARGET_SIMD" +{ + int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT; + int shift_amount; + + if (CONST_INT_P (operands[2])) + { + shift_amount = INTVAL (operands[2]); + if (shift_amount >= 0 && shift_amount < bit_width) + { + rtx tmp = aarch64_simd_gen_const_vector_dup (<MODE>mode, + shift_amount); + emit_insn (gen_aarch64_simd_imm_shl<mode> (operands[0], + operands[1], + tmp)); + DONE; + } + else + { + operands[2] = force_reg (SImode, operands[2]); + } + } + else if (MEM_P (operands[2])) + { + operands[2] = force_reg (SImode, operands[2]); + } + + if (REG_P (operands[2])) + { + rtx tmp = gen_reg_rtx (<MODE>mode); + emit_insn (gen_aarch64_simd_dup<mode> (tmp, + convert_to_mode (<VEL>mode, + operands[2], + 0))); + emit_insn (gen_aarch64_simd_reg_sshl<mode> (operands[0], operands[1], + tmp)); + DONE; + } + else + FAIL; +} +) + +(define_expand "lshr<mode>3" + [(match_operand:VDQ 0 "register_operand" "") + (match_operand:VDQ 1 "register_operand" "") + (match_operand:SI 2 "general_operand" "")] + "TARGET_SIMD" +{ + int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT; + int shift_amount; + + if (CONST_INT_P (operands[2])) + { + shift_amount = INTVAL (operands[2]); + if (shift_amount > 0 && shift_amount <= bit_width) + { + rtx tmp = aarch64_simd_gen_const_vector_dup (<MODE>mode, + shift_amount); + emit_insn (gen_aarch64_simd_lshr<mode> (operands[0], + operands[1], + tmp)); + DONE; + } + else + operands[2] = force_reg (SImode, operands[2]); + } + else if (MEM_P (operands[2])) + { + operands[2] = force_reg (SImode, operands[2]); + } + + if (REG_P (operands[2])) + { + rtx tmp = gen_reg_rtx (SImode); + rtx tmp1 = gen_reg_rtx (<MODE>mode); + emit_insn (gen_negsi2 (tmp, operands[2])); + emit_insn (gen_aarch64_simd_dup<mode> (tmp1, + convert_to_mode (<VEL>mode, + tmp, 0))); + emit_insn (gen_aarch64_simd_reg_shl<mode>_unsigned (operands[0], + operands[1], + tmp1)); + DONE; + } + else + FAIL; +} +) + +(define_expand "ashr<mode>3" + [(match_operand:VDQ 0 "register_operand" "") + (match_operand:VDQ 1 "register_operand" "") + (match_operand:SI 2 "general_operand" "")] + "TARGET_SIMD" +{ + int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT; + int shift_amount; + + if (CONST_INT_P (operands[2])) + { + shift_amount = INTVAL (operands[2]); + if (shift_amount > 0 && shift_amount <= bit_width) + { + rtx tmp = aarch64_simd_gen_const_vector_dup (<MODE>mode, + shift_amount); + emit_insn (gen_aarch64_simd_ashr<mode> (operands[0], + operands[1], + tmp)); + DONE; + } + else + operands[2] = force_reg (SImode, operands[2]); + } + 
else if (MEM_P (operands[2])) + { + operands[2] = force_reg (SImode, operands[2]); + } + + if (REG_P (operands[2])) + { + rtx tmp = gen_reg_rtx (SImode); + rtx tmp1 = gen_reg_rtx (<MODE>mode); + emit_insn (gen_negsi2 (tmp, operands[2])); + emit_insn (gen_aarch64_simd_dup<mode> (tmp1, + convert_to_mode (<VEL>mode, + tmp, 0))); + emit_insn (gen_aarch64_simd_reg_shl<mode>_signed (operands[0], + operands[1], + tmp1)); + DONE; + } + else + FAIL; +} +) + +(define_expand "vashl<mode>3" + [(match_operand:VDQ 0 "register_operand" "") + (match_operand:VDQ 1 "register_operand" "") + (match_operand:VDQ 2 "register_operand" "")] + "TARGET_SIMD" +{ + emit_insn (gen_aarch64_simd_reg_sshl<mode> (operands[0], operands[1], + operands[2])); + DONE; +}) + +;; Using mode VQ_S as there is no V2DImode neg! +;; Negating individual lanes most certainly offsets the +;; gain from vectorization. +(define_expand "vashr<mode>3" + [(match_operand:VQ_S 0 "register_operand" "") + (match_operand:VQ_S 1 "register_operand" "") + (match_operand:VQ_S 2 "register_operand" "")] + "TARGET_SIMD" +{ + rtx neg = gen_reg_rtx (<MODE>mode); + emit (gen_neg<mode>2 (neg, operands[2])); + emit_insn (gen_aarch64_simd_reg_shl<mode>_signed (operands[0], operands[1], + neg)); + DONE; +}) + +;; DI vector shift +(define_expand "aarch64_ashr_simddi" + [(match_operand:DI 0 "register_operand" "=w") + (match_operand:DI 1 "register_operand" "w") + (match_operand:SI 2 "aarch64_shift_imm64_di" "")] + "TARGET_SIMD" + { + if (INTVAL (operands[2]) == 64) + emit_insn (gen_aarch64_sshr_simddi (operands[0], operands[1])); + else + emit_insn (gen_ashrdi3 (operands[0], operands[1], operands[2])); + DONE; + } +) + +;; SIMD shift by 64. This pattern is a special case as standard pattern does +;; not handle NEON shifts by 64. +(define_insn "aarch64_sshr_simddi" + [(set (match_operand:DI 0 "register_operand" "=w") + (unspec:DI + [(match_operand:DI 1 "register_operand" "w")] UNSPEC_SSHR64))] + "TARGET_SIMD" + "sshr\t%d0, %d1, 64" + [(set_attr "type" "neon_shift_imm")] +) + +(define_expand "vlshr<mode>3" + [(match_operand:VQ_S 0 "register_operand" "") + (match_operand:VQ_S 1 "register_operand" "") + (match_operand:VQ_S 2 "register_operand" "")] + "TARGET_SIMD" +{ + rtx neg = gen_reg_rtx (<MODE>mode); + emit (gen_neg<mode>2 (neg, operands[2])); + emit_insn (gen_aarch64_simd_reg_shl<mode>_unsigned (operands[0], operands[1], + neg)); + DONE; +}) + +(define_expand "aarch64_lshr_simddi" + [(match_operand:DI 0 "register_operand" "=w") + (match_operand:DI 1 "register_operand" "w") + (match_operand:SI 2 "aarch64_shift_imm64_di" "")] + "TARGET_SIMD" + { + if (INTVAL (operands[2]) == 64) + emit_insn (gen_aarch64_ushr_simddi (operands[0], operands[1])); + else + emit_insn (gen_lshrdi3 (operands[0], operands[1], operands[2])); + DONE; + } +) + +;; SIMD shift by 64. This pattern is a special case as standard pattern does +;; not handle NEON shifts by 64. 
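+;; For example, a minimal C sketch using the ACLE intrinsic vshr_n_u64
+;; (the wrapper function itself is only illustrative) is expected to reach
+;; this pattern through the aarch64_lshr_simddi expander above when the
+;; shift count is exactly 64:
+;;
+;;   #include <arm_neon.h>
+;;
+;;   uint64x1_t
+;;   shift_out_all_bits (uint64x1_t x)
+;;   {
+;;     /* A count of 64 cannot be represented as a normal lshiftrt, so it
+;;        is routed to the UNSPEC_USHR64 pattern below and should assemble
+;;        to a single "ushr d0, d0, 64".  */
+;;     return vshr_n_u64 (x, 64);
+;;   }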
+(define_insn "aarch64_ushr_simddi" + [(set (match_operand:DI 0 "register_operand" "=w") + (unspec:DI + [(match_operand:DI 1 "register_operand" "w")] UNSPEC_USHR64))] + "TARGET_SIMD" + "ushr\t%d0, %d1, 64" + [(set_attr "type" "neon_shift_imm")] +) + +(define_expand "vec_set<mode>" + [(match_operand:VQ_S 0 "register_operand") + (match_operand:<VEL> 1 "register_operand") + (match_operand:SI 2 "immediate_operand")] + "TARGET_SIMD" + { + HOST_WIDE_INT elem = (HOST_WIDE_INT) 1 << INTVAL (operands[2]); + emit_insn (gen_aarch64_simd_vec_set<mode> (operands[0], operands[1], + GEN_INT (elem), operands[0])); + DONE; + } +) + +(define_insn "aarch64_simd_vec_setv2di" + [(set (match_operand:V2DI 0 "register_operand" "=w,w") + (vec_merge:V2DI + (vec_duplicate:V2DI + (match_operand:DI 1 "register_operand" "r,w")) + (match_operand:V2DI 3 "register_operand" "0,0") + (match_operand:SI 2 "immediate_operand" "i,i")))] + "TARGET_SIMD" + { + int elt = ENDIAN_LANE_N (V2DImode, exact_log2 (INTVAL (operands[2]))); + operands[2] = GEN_INT ((HOST_WIDE_INT) 1 << elt); + switch (which_alternative) + { + case 0: + return "ins\\t%0.d[%p2], %1"; + case 1: + return "ins\\t%0.d[%p2], %1.d[0]"; + default: + gcc_unreachable (); + } + } + [(set_attr "type" "neon_from_gp, neon_ins_q")] +) + +(define_expand "vec_setv2di" + [(match_operand:V2DI 0 "register_operand") + (match_operand:DI 1 "register_operand") + (match_operand:SI 2 "immediate_operand")] + "TARGET_SIMD" + { + HOST_WIDE_INT elem = (HOST_WIDE_INT) 1 << INTVAL (operands[2]); + emit_insn (gen_aarch64_simd_vec_setv2di (operands[0], operands[1], + GEN_INT (elem), operands[0])); + DONE; + } +) + +(define_insn "aarch64_simd_vec_set<mode>" + [(set (match_operand:VDQF 0 "register_operand" "=w") + (vec_merge:VDQF + (vec_duplicate:VDQF + (match_operand:<VEL> 1 "register_operand" "w")) + (match_operand:VDQF 3 "register_operand" "0") + (match_operand:SI 2 "immediate_operand" "i")))] + "TARGET_SIMD" + { + int elt = ENDIAN_LANE_N (<MODE>mode, exact_log2 (INTVAL (operands[2]))); + + operands[2] = GEN_INT ((HOST_WIDE_INT)1 << elt); + return "ins\t%0.<Vetype>[%p2], %1.<Vetype>[0]"; + } + [(set_attr "type" "neon_ins<q>")] +) + +(define_expand "vec_set<mode>" + [(match_operand:VDQF 0 "register_operand" "+w") + (match_operand:<VEL> 1 "register_operand" "w") + (match_operand:SI 2 "immediate_operand" "")] + "TARGET_SIMD" + { + HOST_WIDE_INT elem = (HOST_WIDE_INT) 1 << INTVAL (operands[2]); + emit_insn (gen_aarch64_simd_vec_set<mode> (operands[0], operands[1], + GEN_INT (elem), operands[0])); + DONE; + } +) + + +(define_insn "aarch64_mla<mode>" + [(set (match_operand:VQ_S 0 "register_operand" "=w") + (plus:VQ_S (mult:VQ_S (match_operand:VQ_S 2 "register_operand" "w") + (match_operand:VQ_S 3 "register_operand" "w")) + (match_operand:VQ_S 1 "register_operand" "0")))] + "TARGET_SIMD" + "mla\t%0.<Vtype>, %2.<Vtype>, %3.<Vtype>" + [(set_attr "type" "neon_mla_<Vetype><q>")] +) + +(define_insn "*aarch64_mla_elt<mode>" + [(set (match_operand:VDQHS 0 "register_operand" "=w") + (plus:VDQHS + (mult:VDQHS + (vec_duplicate:VDQHS + (vec_select:<VEL> + (match_operand:VDQHS 1 "register_operand" "<h_con>") + (parallel [(match_operand:SI 2 "immediate_operand")]))) + (match_operand:VDQHS 3 "register_operand" "w")) + (match_operand:VDQHS 4 "register_operand" "0")))] + "TARGET_SIMD" + { + operands[2] = GEN_INT (ENDIAN_LANE_N (<MODE>mode, INTVAL (operands[2]))); + return "mla\t%0.<Vtype>, %3.<Vtype>, %1.<Vtype>[%2]"; + } + [(set_attr "type" "neon_mla_<Vetype>_scalar<q>")] +) + +(define_insn 
"*aarch64_mla_elt_<vswap_width_name><mode>" + [(set (match_operand:VDQHS 0 "register_operand" "=w") + (plus:VDQHS + (mult:VDQHS + (vec_duplicate:VDQHS + (vec_select:<VEL> + (match_operand:<VSWAP_WIDTH> 1 "register_operand" "<h_con>") + (parallel [(match_operand:SI 2 "immediate_operand")]))) + (match_operand:VDQHS 3 "register_operand" "w")) + (match_operand:VDQHS 4 "register_operand" "0")))] + "TARGET_SIMD" + { + operands[2] = GEN_INT (ENDIAN_LANE_N (<VSWAP_WIDTH>mode, + INTVAL (operands[2]))); + return "mla\t%0.<Vtype>, %3.<Vtype>, %1.<Vtype>[%2]"; + } + [(set_attr "type" "neon_mla_<Vetype>_scalar<q>")] +) + +(define_insn "aarch64_mls<mode>" + [(set (match_operand:VQ_S 0 "register_operand" "=w") + (minus:VQ_S (match_operand:VQ_S 1 "register_operand" "0") + (mult:VQ_S (match_operand:VQ_S 2 "register_operand" "w") + (match_operand:VQ_S 3 "register_operand" "w"))))] + "TARGET_SIMD" + "mls\t%0.<Vtype>, %2.<Vtype>, %3.<Vtype>" + [(set_attr "type" "neon_mla_<Vetype><q>")] +) + +(define_insn "*aarch64_mls_elt<mode>" + [(set (match_operand:VDQHS 0 "register_operand" "=w") + (minus:VDQHS + (match_operand:VDQHS 4 "register_operand" "0") + (mult:VDQHS + (vec_duplicate:VDQHS + (vec_select:<VEL> + (match_operand:VDQHS 1 "register_operand" "<h_con>") + (parallel [(match_operand:SI 2 "immediate_operand")]))) + (match_operand:VDQHS 3 "register_operand" "w"))))] + "TARGET_SIMD" + { + operands[2] = GEN_INT (ENDIAN_LANE_N (<MODE>mode, INTVAL (operands[2]))); + return "mls\t%0.<Vtype>, %3.<Vtype>, %1.<Vtype>[%2]"; + } + [(set_attr "type" "neon_mla_<Vetype>_scalar<q>")] +) + +(define_insn "*aarch64_mls_elt_<vswap_width_name><mode>" + [(set (match_operand:VDQHS 0 "register_operand" "=w") + (minus:VDQHS + (match_operand:VDQHS 4 "register_operand" "0") + (mult:VDQHS + (vec_duplicate:VDQHS + (vec_select:<VEL> + (match_operand:<VSWAP_WIDTH> 1 "register_operand" "<h_con>") + (parallel [(match_operand:SI 2 "immediate_operand")]))) + (match_operand:VDQHS 3 "register_operand" "w"))))] + "TARGET_SIMD" + { + operands[2] = GEN_INT (ENDIAN_LANE_N (<VSWAP_WIDTH>mode, + INTVAL (operands[2]))); + return "mls\t%0.<Vtype>, %3.<Vtype>, %1.<Vtype>[%2]"; + } + [(set_attr "type" "neon_mla_<Vetype>_scalar<q>")] +) + +;; Max/Min operations. +(define_insn "<su><maxmin><mode>3" + [(set (match_operand:VQ_S 0 "register_operand" "=w") + (MAXMIN:VQ_S (match_operand:VQ_S 1 "register_operand" "w") + (match_operand:VQ_S 2 "register_operand" "w")))] + "TARGET_SIMD" + "<su><maxmin>\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>" + [(set_attr "type" "neon_minmax<q>")] +) + +;; Move into low-half clearing high half to 0. + +(define_insn "move_lo_quad_<mode>" + [(set (match_operand:VQ 0 "register_operand" "=w,w,w") + (vec_concat:VQ + (match_operand:<VHALF> 1 "register_operand" "w,r,r") + (vec_duplicate:<VHALF> (const_int 0))))] + "TARGET_SIMD" + "@ + dup\\t%d0, %1.d[0] + fmov\\t%d0, %1 + dup\\t%d0, %1" + [(set_attr "type" "neon_dup<q>,fmov,neon_dup<q>") + (set_attr "simd" "yes,*,yes") + (set_attr "fp" "*,yes,*") + (set_attr "length" "4")] +) + +;; Move into high-half. 
+ +(define_insn "aarch64_simd_move_hi_quad_<mode>" + [(set (match_operand:VQ 0 "register_operand" "+w,w") + (vec_concat:VQ + (vec_select:<VHALF> + (match_dup 0) + (match_operand:VQ 2 "vect_par_cnst_lo_half" "")) + (match_operand:<VHALF> 1 "register_operand" "w,r")))] + "TARGET_SIMD" + "@ + ins\\t%0.d[1], %1.d[0] + ins\\t%0.d[1], %1" + [(set_attr "type" "neon_ins") + (set_attr "length" "4")] +) + +(define_expand "move_hi_quad_<mode>" + [(match_operand:VQ 0 "register_operand" "") + (match_operand:<VHALF> 1 "register_operand" "")] + "TARGET_SIMD" +{ + rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, false); + emit_insn (gen_aarch64_simd_move_hi_quad_<mode> (operands[0], + operands[1], p)); + DONE; +}) + +;; Narrowing operations. + +;; For doubles. +(define_insn "aarch64_simd_vec_pack_trunc_<mode>" + [(set (match_operand:<VNARROWQ> 0 "register_operand" "=w") + (truncate:<VNARROWQ> (match_operand:VQN 1 "register_operand" "w")))] + "TARGET_SIMD" + "xtn\\t%0.<Vntype>, %1.<Vtype>" + [(set_attr "type" "neon_shift_imm_narrow_q")] +) + +(define_expand "vec_pack_trunc_<mode>" + [(match_operand:<VNARROWD> 0 "register_operand" "") + (match_operand:VDN 1 "register_operand" "") + (match_operand:VDN 2 "register_operand" "")] + "TARGET_SIMD" +{ + rtx tempreg = gen_reg_rtx (<VDBL>mode); + int lo = BYTES_BIG_ENDIAN ? 2 : 1; + int hi = BYTES_BIG_ENDIAN ? 1 : 2; + + emit_insn (gen_move_lo_quad_<Vdbl> (tempreg, operands[lo])); + emit_insn (gen_move_hi_quad_<Vdbl> (tempreg, operands[hi])); + emit_insn (gen_aarch64_simd_vec_pack_trunc_<Vdbl> (operands[0], tempreg)); + DONE; +}) + +;; For quads. + +(define_insn "vec_pack_trunc_<mode>" + [(set (match_operand:<VNARROWQ2> 0 "register_operand" "+&w") + (vec_concat:<VNARROWQ2> + (truncate:<VNARROWQ> (match_operand:VQN 1 "register_operand" "w")) + (truncate:<VNARROWQ> (match_operand:VQN 2 "register_operand" "w"))))] + "TARGET_SIMD" + { + if (BYTES_BIG_ENDIAN) + return "xtn\\t%0.<Vntype>, %2.<Vtype>\;xtn2\\t%0.<V2ntype>, %1.<Vtype>"; + else + return "xtn\\t%0.<Vntype>, %1.<Vtype>\;xtn2\\t%0.<V2ntype>, %2.<Vtype>"; + } + [(set_attr "type" "multiple") + (set_attr "length" "8")] +) + +;; Widening operations. 
+ +(define_insn "aarch64_simd_vec_unpack<su>_lo_<mode>" + [(set (match_operand:<VWIDE> 0 "register_operand" "=w") + (ANY_EXTEND:<VWIDE> (vec_select:<VHALF> + (match_operand:VQW 1 "register_operand" "w") + (match_operand:VQW 2 "vect_par_cnst_lo_half" "") + )))] + "TARGET_SIMD" + "<su>shll %0.<Vwtype>, %1.<Vhalftype>, 0" + [(set_attr "type" "neon_shift_imm_long")] +) + +(define_insn "aarch64_simd_vec_unpack<su>_hi_<mode>" + [(set (match_operand:<VWIDE> 0 "register_operand" "=w") + (ANY_EXTEND:<VWIDE> (vec_select:<VHALF> + (match_operand:VQW 1 "register_operand" "w") + (match_operand:VQW 2 "vect_par_cnst_hi_half" "") + )))] + "TARGET_SIMD" + "<su>shll2 %0.<Vwtype>, %1.<Vtype>, 0" + [(set_attr "type" "neon_shift_imm_long")] +) + +(define_expand "vec_unpack<su>_hi_<mode>" + [(match_operand:<VWIDE> 0 "register_operand" "") + (ANY_EXTEND:<VWIDE> (match_operand:VQW 1 "register_operand"))] + "TARGET_SIMD" + { + rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true); + emit_insn (gen_aarch64_simd_vec_unpack<su>_hi_<mode> (operands[0], + operands[1], p)); + DONE; + } +) + +(define_expand "vec_unpack<su>_lo_<mode>" + [(match_operand:<VWIDE> 0 "register_operand" "") + (ANY_EXTEND:<VWIDE> (match_operand:VQW 1 "register_operand" ""))] + "TARGET_SIMD" + { + rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, false); + emit_insn (gen_aarch64_simd_vec_unpack<su>_lo_<mode> (operands[0], + operands[1], p)); + DONE; + } +) + +;; Widening arithmetic. + +(define_insn "*aarch64_<su>mlal_lo<mode>" + [(set (match_operand:<VWIDE> 0 "register_operand" "=w") + (plus:<VWIDE> + (mult:<VWIDE> + (ANY_EXTEND:<VWIDE> (vec_select:<VHALF> + (match_operand:VQW 2 "register_operand" "w") + (match_operand:VQW 3 "vect_par_cnst_lo_half" ""))) + (ANY_EXTEND:<VWIDE> (vec_select:<VHALF> + (match_operand:VQW 4 "register_operand" "w") + (match_dup 3)))) + (match_operand:<VWIDE> 1 "register_operand" "0")))] + "TARGET_SIMD" + "<su>mlal\t%0.<Vwtype>, %2.<Vhalftype>, %4.<Vhalftype>" + [(set_attr "type" "neon_mla_<Vetype>_long")] +) + +(define_insn "*aarch64_<su>mlal_hi<mode>" + [(set (match_operand:<VWIDE> 0 "register_operand" "=w") + (plus:<VWIDE> + (mult:<VWIDE> + (ANY_EXTEND:<VWIDE> (vec_select:<VHALF> + (match_operand:VQW 2 "register_operand" "w") + (match_operand:VQW 3 "vect_par_cnst_hi_half" ""))) + (ANY_EXTEND:<VWIDE> (vec_select:<VHALF> + (match_operand:VQW 4 "register_operand" "w") + (match_dup 3)))) + (match_operand:<VWIDE> 1 "register_operand" "0")))] + "TARGET_SIMD" + "<su>mlal2\t%0.<Vwtype>, %2.<Vtype>, %4.<Vtype>" + [(set_attr "type" "neon_mla_<Vetype>_long")] +) + +(define_insn "*aarch64_<su>mlsl_lo<mode>" + [(set (match_operand:<VWIDE> 0 "register_operand" "=w") + (minus:<VWIDE> + (match_operand:<VWIDE> 1 "register_operand" "0") + (mult:<VWIDE> + (ANY_EXTEND:<VWIDE> (vec_select:<VHALF> + (match_operand:VQW 2 "register_operand" "w") + (match_operand:VQW 3 "vect_par_cnst_lo_half" ""))) + (ANY_EXTEND:<VWIDE> (vec_select:<VHALF> + (match_operand:VQW 4 "register_operand" "w") + (match_dup 3))))))] + "TARGET_SIMD" + "<su>mlsl\t%0.<Vwtype>, %2.<Vhalftype>, %4.<Vhalftype>" + [(set_attr "type" "neon_mla_<Vetype>_long")] +) + +(define_insn "*aarch64_<su>mlsl_hi<mode>" + [(set (match_operand:<VWIDE> 0 "register_operand" "=w") + (minus:<VWIDE> + (match_operand:<VWIDE> 1 "register_operand" "0") + (mult:<VWIDE> + (ANY_EXTEND:<VWIDE> (vec_select:<VHALF> + (match_operand:VQW 2 "register_operand" "w") + (match_operand:VQW 3 "vect_par_cnst_hi_half" ""))) + (ANY_EXTEND:<VWIDE> (vec_select:<VHALF> + (match_operand:VQW 4 "register_operand" 
"w") + (match_dup 3))))))] + "TARGET_SIMD" + "<su>mlsl2\t%0.<Vwtype>, %2.<Vtype>, %4.<Vtype>" + [(set_attr "type" "neon_mla_<Vetype>_long")] +) + +(define_insn "*aarch64_<su>mlal<mode>" + [(set (match_operand:<VWIDE> 0 "register_operand" "=w") + (plus:<VWIDE> + (mult:<VWIDE> + (ANY_EXTEND:<VWIDE> + (match_operand:VDW 1 "register_operand" "w")) + (ANY_EXTEND:<VWIDE> + (match_operand:VDW 2 "register_operand" "w"))) + (match_operand:<VWIDE> 3 "register_operand" "0")))] + "TARGET_SIMD" + "<su>mlal\t%0.<Vwtype>, %1.<Vtype>, %2.<Vtype>" + [(set_attr "type" "neon_mla_<Vetype>_long")] +) + +(define_insn "*aarch64_<su>mlsl<mode>" + [(set (match_operand:<VWIDE> 0 "register_operand" "=w") + (minus:<VWIDE> + (match_operand:<VWIDE> 1 "register_operand" "0") + (mult:<VWIDE> + (ANY_EXTEND:<VWIDE> + (match_operand:VDW 2 "register_operand" "w")) + (ANY_EXTEND:<VWIDE> + (match_operand:VDW 3 "register_operand" "w")))))] + "TARGET_SIMD" + "<su>mlsl\t%0.<Vwtype>, %2.<Vtype>, %3.<Vtype>" + [(set_attr "type" "neon_mla_<Vetype>_long")] +) + +(define_insn "aarch64_simd_vec_<su>mult_lo_<mode>" + [(set (match_operand:<VWIDE> 0 "register_operand" "=w") + (mult:<VWIDE> (ANY_EXTEND:<VWIDE> (vec_select:<VHALF> + (match_operand:VQW 1 "register_operand" "w") + (match_operand:VQW 3 "vect_par_cnst_lo_half" ""))) + (ANY_EXTEND:<VWIDE> (vec_select:<VHALF> + (match_operand:VQW 2 "register_operand" "w") + (match_dup 3)))))] + "TARGET_SIMD" + "<su>mull\\t%0.<Vwtype>, %1.<Vhalftype>, %2.<Vhalftype>" + [(set_attr "type" "neon_mul_<Vetype>_long")] +) + +(define_expand "vec_widen_<su>mult_lo_<mode>" + [(match_operand:<VWIDE> 0 "register_operand" "") + (ANY_EXTEND:<VWIDE> (match_operand:VQW 1 "register_operand" "")) + (ANY_EXTEND:<VWIDE> (match_operand:VQW 2 "register_operand" ""))] + "TARGET_SIMD" + { + rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, false); + emit_insn (gen_aarch64_simd_vec_<su>mult_lo_<mode> (operands[0], + operands[1], + operands[2], p)); + DONE; + } +) + +(define_insn "aarch64_simd_vec_<su>mult_hi_<mode>" + [(set (match_operand:<VWIDE> 0 "register_operand" "=w") + (mult:<VWIDE> (ANY_EXTEND:<VWIDE> (vec_select:<VHALF> + (match_operand:VQW 1 "register_operand" "w") + (match_operand:VQW 3 "vect_par_cnst_hi_half" ""))) + (ANY_EXTEND:<VWIDE> (vec_select:<VHALF> + (match_operand:VQW 2 "register_operand" "w") + (match_dup 3)))))] + "TARGET_SIMD" + "<su>mull2\\t%0.<Vwtype>, %1.<Vtype>, %2.<Vtype>" + [(set_attr "type" "neon_mul_<Vetype>_long")] +) + +(define_expand "vec_widen_<su>mult_hi_<mode>" + [(match_operand:<VWIDE> 0 "register_operand" "") + (ANY_EXTEND:<VWIDE> (match_operand:VQW 1 "register_operand" "")) + (ANY_EXTEND:<VWIDE> (match_operand:VQW 2 "register_operand" ""))] + "TARGET_SIMD" + { + rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true); + emit_insn (gen_aarch64_simd_vec_<su>mult_hi_<mode> (operands[0], + operands[1], + operands[2], p)); + DONE; + + } +) + +;; FP vector operations. +;; AArch64 AdvSIMD supports single-precision (32-bit) and +;; double-precision (64-bit) floating-point data types and arithmetic as +;; defined by the IEEE 754-2008 standard. This makes them vectorizable +;; without the need for -ffast-math or -funsafe-math-optimizations. +;; +;; Floating-point operations can raise an exception. Vectorizing such +;; operations are safe because of reasons explained below. +;; +;; ARMv8 permits an extension to enable trapped floating-point +;; exception handling, however this is an optional feature. 
In the +;; event of a floating-point exception being raised by vectorised +;; code then: +;; 1. If trapped floating-point exceptions are available, then a trap +;; will be taken when any lane raises an enabled exception. A trap +;; handler may determine which lane raised the exception. +;; 2. Alternatively a sticky exception flag is set in the +;; floating-point status register (FPSR). Software may explicitly +;; test the exception flags, in which case the tests will either +;; prevent vectorisation, allowing precise identification of the +;; failing operation, or if tested outside of vectorisable regions +;; then the specific operation and lane are not of interest. + +;; FP arithmetic operations. + +(define_insn "add<mode>3" + [(set (match_operand:VDQF 0 "register_operand" "=w") + (plus:VDQF (match_operand:VDQF 1 "register_operand" "w") + (match_operand:VDQF 2 "register_operand" "w")))] + "TARGET_SIMD" + "fadd\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>" + [(set_attr "type" "neon_fp_addsub_<Vetype><q>")] +) + +(define_insn "sub<mode>3" + [(set (match_operand:VDQF 0 "register_operand" "=w") + (minus:VDQF (match_operand:VDQF 1 "register_operand" "w") + (match_operand:VDQF 2 "register_operand" "w")))] + "TARGET_SIMD" + "fsub\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>" + [(set_attr "type" "neon_fp_addsub_<Vetype><q>")] +) + +(define_insn "mul<mode>3" + [(set (match_operand:VDQF 0 "register_operand" "=w") + (mult:VDQF (match_operand:VDQF 1 "register_operand" "w") + (match_operand:VDQF 2 "register_operand" "w")))] + "TARGET_SIMD" + "fmul\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>" + [(set_attr "type" "neon_fp_mul_<Vetype><q>")] +) + +(define_insn "div<mode>3" + [(set (match_operand:VDQF 0 "register_operand" "=w") + (div:VDQF (match_operand:VDQF 1 "register_operand" "w") + (match_operand:VDQF 2 "register_operand" "w")))] + "TARGET_SIMD" + "fdiv\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>" + [(set_attr "type" "neon_fp_div_<Vetype><q>")] +) + +(define_insn "neg<mode>2" + [(set (match_operand:VDQF 0 "register_operand" "=w") + (neg:VDQF (match_operand:VDQF 1 "register_operand" "w")))] + "TARGET_SIMD" + "fneg\\t%0.<Vtype>, %1.<Vtype>" + [(set_attr "type" "neon_fp_neg_<Vetype><q>")] +) + +(define_insn "abs<mode>2" + [(set (match_operand:VDQF 0 "register_operand" "=w") + (abs:VDQF (match_operand:VDQF 1 "register_operand" "w")))] + "TARGET_SIMD" + "fabs\\t%0.<Vtype>, %1.<Vtype>" + [(set_attr "type" "neon_fp_abs_<Vetype><q>")] +) + +(define_insn "fma<mode>4" + [(set (match_operand:VDQF 0 "register_operand" "=w") + (fma:VDQF (match_operand:VDQF 1 "register_operand" "w") + (match_operand:VDQF 2 "register_operand" "w") + (match_operand:VDQF 3 "register_operand" "0")))] + "TARGET_SIMD" + "fmla\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>" + [(set_attr "type" "neon_fp_mla_<Vetype><q>")] +) + +(define_insn "*aarch64_fma4_elt<mode>" + [(set (match_operand:VDQF 0 "register_operand" "=w") + (fma:VDQF + (vec_duplicate:VDQF + (vec_select:<VEL> + (match_operand:VDQF 1 "register_operand" "<h_con>") + (parallel [(match_operand:SI 2 "immediate_operand")]))) + (match_operand:VDQF 3 "register_operand" "w") + (match_operand:VDQF 4 "register_operand" "0")))] + "TARGET_SIMD" + { + operands[2] = GEN_INT (ENDIAN_LANE_N (<MODE>mode, INTVAL (operands[2]))); + return "fmla\\t%0.<Vtype>, %3.<Vtype>, %1.<Vtype>[%2]"; + } + [(set_attr "type" "neon_fp_mla_<Vetype>_scalar<q>")] +) + +(define_insn "*aarch64_fma4_elt_<vswap_width_name><mode>" + [(set (match_operand:VDQSF 0 "register_operand" "=w") + (fma:VDQSF + (vec_duplicate:VDQSF + (vec_select:<VEL> + 
(match_operand:<VSWAP_WIDTH> 1 "register_operand" "<h_con>") + (parallel [(match_operand:SI 2 "immediate_operand")]))) + (match_operand:VDQSF 3 "register_operand" "w") + (match_operand:VDQSF 4 "register_operand" "0")))] + "TARGET_SIMD" + { + operands[2] = GEN_INT (ENDIAN_LANE_N (<VSWAP_WIDTH>mode, + INTVAL (operands[2]))); + return "fmla\\t%0.<Vtype>, %3.<Vtype>, %1.<Vtype>[%2]"; + } + [(set_attr "type" "neon_fp_mla_<Vetype>_scalar<q>")] +) + +(define_insn "*aarch64_fma4_elt_to_128df" + [(set (match_operand:V2DF 0 "register_operand" "=w") + (fma:V2DF + (vec_duplicate:V2DF + (match_operand:DF 1 "register_operand" "w")) + (match_operand:V2DF 2 "register_operand" "w") + (match_operand:V2DF 3 "register_operand" "0")))] + "TARGET_SIMD" + "fmla\\t%0.2d, %2.2d, %1.2d[0]" + [(set_attr "type" "neon_fp_mla_d_scalar_q")] +) + +(define_insn "*aarch64_fma4_elt_to_64v2df" + [(set (match_operand:DF 0 "register_operand" "=w") + (fma:DF + (vec_select:DF + (match_operand:V2DF 1 "register_operand" "w") + (parallel [(match_operand:SI 2 "immediate_operand")])) + (match_operand:DF 3 "register_operand" "w") + (match_operand:DF 4 "register_operand" "0")))] + "TARGET_SIMD" + { + operands[2] = GEN_INT (ENDIAN_LANE_N (V2DFmode, INTVAL (operands[2]))); + return "fmla\\t%0.2d, %3.2d, %1.2d[%2]"; + } + [(set_attr "type" "neon_fp_mla_d_scalar_q")] +) + +(define_insn "fnma<mode>4" + [(set (match_operand:VDQF 0 "register_operand" "=w") + (fma:VDQF + (match_operand:VDQF 1 "register_operand" "w") + (neg:VDQF + (match_operand:VDQF 2 "register_operand" "w")) + (match_operand:VDQF 3 "register_operand" "0")))] + "TARGET_SIMD" + "fmls\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>" + [(set_attr "type" "neon_fp_mla_<Vetype><q>")] +) + +(define_insn "*aarch64_fnma4_elt<mode>" + [(set (match_operand:VDQF 0 "register_operand" "=w") + (fma:VDQF + (neg:VDQF + (match_operand:VDQF 3 "register_operand" "w")) + (vec_duplicate:VDQF + (vec_select:<VEL> + (match_operand:VDQF 1 "register_operand" "<h_con>") + (parallel [(match_operand:SI 2 "immediate_operand")]))) + (match_operand:VDQF 4 "register_operand" "0")))] + "TARGET_SIMD" + { + operands[2] = GEN_INT (ENDIAN_LANE_N (<MODE>mode, INTVAL (operands[2]))); + return "fmls\\t%0.<Vtype>, %3.<Vtype>, %1.<Vtype>[%2]"; + } + [(set_attr "type" "neon_fp_mla_<Vetype>_scalar<q>")] +) + +(define_insn "*aarch64_fnma4_elt_<vswap_width_name><mode>" + [(set (match_operand:VDQSF 0 "register_operand" "=w") + (fma:VDQSF + (neg:VDQSF + (match_operand:VDQSF 3 "register_operand" "w")) + (vec_duplicate:VDQSF + (vec_select:<VEL> + (match_operand:<VSWAP_WIDTH> 1 "register_operand" "<h_con>") + (parallel [(match_operand:SI 2 "immediate_operand")]))) + (match_operand:VDQSF 4 "register_operand" "0")))] + "TARGET_SIMD" + { + operands[2] = GEN_INT (ENDIAN_LANE_N (<VSWAP_WIDTH>mode, + INTVAL (operands[2]))); + return "fmls\\t%0.<Vtype>, %3.<Vtype>, %1.<Vtype>[%2]"; + } + [(set_attr "type" "neon_fp_mla_<Vetype>_scalar<q>")] +) + +(define_insn "*aarch64_fnma4_elt_to_128df" + [(set (match_operand:V2DF 0 "register_operand" "=w") + (fma:V2DF + (neg:V2DF + (match_operand:V2DF 2 "register_operand" "w")) + (vec_duplicate:V2DF + (match_operand:DF 1 "register_operand" "w")) + (match_operand:V2DF 3 "register_operand" "0")))] + "TARGET_SIMD" + "fmls\\t%0.2d, %2.2d, %1.2d[0]" + [(set_attr "type" "neon_fp_mla_d_scalar_q")] +) + +(define_insn "*aarch64_fnma4_elt_to_64v2df" + [(set (match_operand:DF 0 "register_operand" "=w") + (fma:DF + (vec_select:DF + (match_operand:V2DF 1 "register_operand" "w") + (parallel [(match_operand:SI 2 
"immediate_operand")])) + (neg:DF + (match_operand:DF 3 "register_operand" "w")) + (match_operand:DF 4 "register_operand" "0")))] + "TARGET_SIMD" + { + operands[2] = GEN_INT (ENDIAN_LANE_N (V2DFmode, INTVAL (operands[2]))); + return "fmls\\t%0.2d, %3.2d, %1.2d[%2]"; + } + [(set_attr "type" "neon_fp_mla_d_scalar_q")] +) + +;; Vector versions of the floating-point frint patterns. +;; Expands to btrunc, ceil, floor, nearbyint, rint, round. +(define_insn "<frint_pattern><mode>2" + [(set (match_operand:VDQF 0 "register_operand" "=w") + (unspec:VDQF [(match_operand:VDQF 1 "register_operand" "w")] + FRINT))] + "TARGET_SIMD" + "frint<frint_suffix>\\t%0.<Vtype>, %1.<Vtype>" + [(set_attr "type" "neon_fp_round_<Vetype><q>")] +) + +;; Vector versions of the fcvt standard patterns. +;; Expands to lbtrunc, lround, lceil, lfloor +(define_insn "l<fcvt_pattern><su_optab><VDQF:mode><fcvt_target>2" + [(set (match_operand:<FCVT_TARGET> 0 "register_operand" "=w") + (FIXUORS:<FCVT_TARGET> (unspec:<FCVT_TARGET> + [(match_operand:VDQF 1 "register_operand" "w")] + FCVT)))] + "TARGET_SIMD" + "fcvt<frint_suffix><su>\\t%0.<Vtype>, %1.<Vtype>" + [(set_attr "type" "neon_fp_to_int_<Vetype><q>")] +) + +(define_expand "<optab><VDQF:mode><fcvt_target>2" + [(set (match_operand:<FCVT_TARGET> 0 "register_operand") + (FIXUORS:<FCVT_TARGET> (unspec:<FCVT_TARGET> + [(match_operand:VDQF 1 "register_operand")] + UNSPEC_FRINTZ)))] + "TARGET_SIMD" + {}) + +(define_expand "<fix_trunc_optab><VDQF:mode><fcvt_target>2" + [(set (match_operand:<FCVT_TARGET> 0 "register_operand") + (FIXUORS:<FCVT_TARGET> (unspec:<FCVT_TARGET> + [(match_operand:VDQF 1 "register_operand")] + UNSPEC_FRINTZ)))] + "TARGET_SIMD" + {}) + +(define_expand "ftrunc<VDQF:mode>2" + [(set (match_operand:VDQF 0 "register_operand") + (unspec:VDQF [(match_operand:VDQF 1 "register_operand")] + UNSPEC_FRINTZ))] + "TARGET_SIMD" + {}) + +(define_insn "<optab><fcvt_target><VDQF:mode>2" + [(set (match_operand:VDQF 0 "register_operand" "=w") + (FLOATUORS:VDQF + (match_operand:<FCVT_TARGET> 1 "register_operand" "w")))] + "TARGET_SIMD" + "<su_optab>cvtf\\t%0.<Vtype>, %1.<Vtype>" + [(set_attr "type" "neon_int_to_fp_<Vetype><q>")] +) + +;; Conversions between vectors of floats and doubles. +;; Contains a mix of patterns to match standard pattern names +;; and those for intrinsics. + +;; Float widening operations. + +(define_insn "vec_unpacks_lo_v4sf" + [(set (match_operand:V2DF 0 "register_operand" "=w") + (float_extend:V2DF + (vec_select:V2SF + (match_operand:V4SF 1 "register_operand" "w") + (parallel [(const_int 0) (const_int 1)]) + )))] + "TARGET_SIMD" + "fcvtl\\t%0.2d, %1.2s" + [(set_attr "type" "neon_fp_cvt_widen_s")] +) + +(define_insn "aarch64_float_extend_lo_v2df" + [(set (match_operand:V2DF 0 "register_operand" "=w") + (float_extend:V2DF + (match_operand:V2SF 1 "register_operand" "w")))] + "TARGET_SIMD" + "fcvtl\\t%0.2d, %1.2s" + [(set_attr "type" "neon_fp_cvt_widen_s")] +) + +(define_insn "vec_unpacks_hi_v4sf" + [(set (match_operand:V2DF 0 "register_operand" "=w") + (float_extend:V2DF + (vec_select:V2SF + (match_operand:V4SF 1 "register_operand" "w") + (parallel [(const_int 2) (const_int 3)]) + )))] + "TARGET_SIMD" + "fcvtl2\\t%0.2d, %1.4s" + [(set_attr "type" "neon_fp_cvt_widen_s")] +) + +;; Float narrowing operations. 
+ +(define_insn "aarch64_float_truncate_lo_v2sf" + [(set (match_operand:V2SF 0 "register_operand" "=w") + (float_truncate:V2SF + (match_operand:V2DF 1 "register_operand" "w")))] + "TARGET_SIMD" + "fcvtn\\t%0.2s, %1.2d" + [(set_attr "type" "neon_fp_cvt_narrow_d_q")] +) + +(define_insn "aarch64_float_truncate_hi_v4sf" + [(set (match_operand:V4SF 0 "register_operand" "=w") + (vec_concat:V4SF + (match_operand:V2SF 1 "register_operand" "0") + (float_truncate:V2SF + (match_operand:V2DF 2 "register_operand" "w"))))] + "TARGET_SIMD" + "fcvtn2\\t%0.4s, %2.2d" + [(set_attr "type" "neon_fp_cvt_narrow_d_q")] +) + +(define_expand "vec_pack_trunc_v2df" + [(set (match_operand:V4SF 0 "register_operand") + (vec_concat:V4SF + (float_truncate:V2SF + (match_operand:V2DF 1 "register_operand")) + (float_truncate:V2SF + (match_operand:V2DF 2 "register_operand")) + ))] + "TARGET_SIMD" + { + rtx tmp = gen_reg_rtx (V2SFmode); + int lo = BYTES_BIG_ENDIAN ? 2 : 1; + int hi = BYTES_BIG_ENDIAN ? 1 : 2; + + emit_insn (gen_aarch64_float_truncate_lo_v2sf (tmp, operands[lo])); + emit_insn (gen_aarch64_float_truncate_hi_v4sf (operands[0], + tmp, operands[hi])); + DONE; + } +) + +(define_expand "vec_pack_trunc_df" + [(set (match_operand:V2SF 0 "register_operand") + (vec_concat:V2SF + (float_truncate:SF + (match_operand:DF 1 "register_operand")) + (float_truncate:SF + (match_operand:DF 2 "register_operand")) + ))] + "TARGET_SIMD" + { + rtx tmp = gen_reg_rtx (V2SFmode); + int lo = BYTES_BIG_ENDIAN ? 2 : 1; + int hi = BYTES_BIG_ENDIAN ? 1 : 2; + + emit_insn (gen_move_lo_quad_v2df (tmp, operands[lo])); + emit_insn (gen_move_hi_quad_v2df (tmp, operands[hi])); + emit_insn (gen_aarch64_float_truncate_lo_v2sf (operands[0], tmp)); + DONE; + } +) + +(define_insn "aarch64_vmls<mode>" + [(set (match_operand:VDQF 0 "register_operand" "=w") + (minus:VDQF (match_operand:VDQF 1 "register_operand" "0") + (mult:VDQF (match_operand:VDQF 2 "register_operand" "w") + (match_operand:VDQF 3 "register_operand" "w"))))] + "TARGET_SIMD" + "fmls\\t%0.<Vtype>, %2.<Vtype>, %3.<Vtype>" + [(set_attr "type" "neon_fp_mla_<Vetype>_scalar<q>")] +) + +;; FP Max/Min +;; Max/Min are introduced by idiom recognition by GCC's mid-end. An +;; expression like: +;; a = (b < c) ? b : c; +;; is idiom-matched as MIN_EXPR<b,c> only if -ffinite-math-only is enabled +;; either explicitly or indirectly via -ffast-math. +;; +;; MIN_EXPR and MAX_EXPR eventually map to 'smin' and 'smax' in RTL. +;; The 'smax' and 'smin' RTL standard pattern names do not specify which +;; operand will be returned when both operands are zero (i.e. they may not +;; honour signed zeroes), or when either operand is NaN. Therefore GCC +;; only introduces MIN_EXPR/MAX_EXPR in fast math mode or when not honouring +;; NaNs. + +(define_insn "<su><maxmin><mode>3" + [(set (match_operand:VDQF 0 "register_operand" "=w") + (FMAXMIN:VDQF (match_operand:VDQF 1 "register_operand" "w") + (match_operand:VDQF 2 "register_operand" "w")))] + "TARGET_SIMD" + "f<maxmin>nm\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>" + [(set_attr "type" "neon_fp_minmax_<Vetype><q>")] +) + +(define_insn "<maxmin_uns><mode>3" + [(set (match_operand:VDQF 0 "register_operand" "=w") + (unspec:VDQF [(match_operand:VDQF 1 "register_operand" "w") + (match_operand:VDQF 2 "register_operand" "w")] + FMAXMIN_UNS))] + "TARGET_SIMD" + "<maxmin_uns_op>\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>" + [(set_attr "type" "neon_fp_minmax_<Vetype><q>")] +) + +;; 'across lanes' add. 
+ +(define_insn "reduc_<sur>plus_<mode>" + [(set (match_operand:VDQV 0 "register_operand" "=w") + (unspec:VDQV [(match_operand:VDQV 1 "register_operand" "w")] + SUADDV))] + "TARGET_SIMD" + "add<VDQV:vp>\\t%<Vetype>0, %1.<Vtype>" + [(set_attr "type" "neon_reduc_add<q>")] +) + +(define_insn "reduc_<sur>plus_v2si" + [(set (match_operand:V2SI 0 "register_operand" "=w") + (unspec:V2SI [(match_operand:V2SI 1 "register_operand" "w")] + SUADDV))] + "TARGET_SIMD" + "addp\\t%0.2s, %1.2s, %1.2s" + [(set_attr "type" "neon_reduc_add")] +) + +(define_insn "reduc_splus_<mode>" + [(set (match_operand:V2F 0 "register_operand" "=w") + (unspec:V2F [(match_operand:V2F 1 "register_operand" "w")] + UNSPEC_FADDV))] + "TARGET_SIMD" + "faddp\\t%<Vetype>0, %1.<Vtype>" + [(set_attr "type" "neon_fp_reduc_add_<Vetype><q>")] +) + +(define_insn "aarch64_addpv4sf" + [(set (match_operand:V4SF 0 "register_operand" "=w") + (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "w")] + UNSPEC_FADDV))] + "TARGET_SIMD" + "faddp\\t%0.4s, %1.4s, %1.4s" + [(set_attr "type" "neon_fp_reduc_add_s_q")] +) + +(define_expand "reduc_splus_v4sf" + [(set (match_operand:V4SF 0 "register_operand") + (unspec:V4SF [(match_operand:V4SF 1 "register_operand")] + UNSPEC_FADDV))] + "TARGET_SIMD" +{ + emit_insn (gen_aarch64_addpv4sf (operands[0], operands[1])); + emit_insn (gen_aarch64_addpv4sf (operands[0], operands[0])); + DONE; +}) + +(define_insn "clz<mode>2" + [(set (match_operand:VDQ_BHSI 0 "register_operand" "=w") + (clz:VDQ_BHSI (match_operand:VDQ_BHSI 1 "register_operand" "w")))] + "TARGET_SIMD" + "clz\\t%0.<Vtype>, %1.<Vtype>" + [(set_attr "type" "neon_cls<q>")] +) + +;; 'across lanes' max and min ops. + +(define_insn "reduc_<maxmin_uns>_<mode>" + [(set (match_operand:VDQV_S 0 "register_operand" "=w") + (unspec:VDQV_S [(match_operand:VDQV_S 1 "register_operand" "w")] + MAXMINV))] + "TARGET_SIMD" + "<maxmin_uns_op>v\\t%<Vetype>0, %1.<Vtype>" + [(set_attr "type" "neon_reduc_minmax<q>")] +) + +(define_insn "reduc_<maxmin_uns>_v2si" + [(set (match_operand:V2SI 0 "register_operand" "=w") + (unspec:V2SI [(match_operand:V2SI 1 "register_operand" "w")] + MAXMINV))] + "TARGET_SIMD" + "<maxmin_uns_op>p\\t%0.2s, %1.2s, %1.2s" + [(set_attr "type" "neon_reduc_minmax")] +) + +(define_insn "reduc_<maxmin_uns>_<mode>" + [(set (match_operand:V2F 0 "register_operand" "=w") + (unspec:V2F [(match_operand:V2F 1 "register_operand" "w")] + FMAXMINV))] + "TARGET_SIMD" + "<maxmin_uns_op>p\\t%<Vetype>0, %1.<Vtype>" + [(set_attr "type" "neon_fp_reduc_minmax_<Vetype><q>")] +) + +(define_insn "reduc_<maxmin_uns>_v4sf" + [(set (match_operand:V4SF 0 "register_operand" "=w") + (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "w")] + FMAXMINV))] + "TARGET_SIMD" + "<maxmin_uns_op>v\\t%s0, %1.4s" + [(set_attr "type" "neon_fp_reduc_minmax_s_q")] +) + +;; aarch64_simd_bsl may compile to any of bsl/bif/bit depending on register +;; allocation. +;; Operand 1 is the mask, operands 2 and 3 are the bitfields from which +;; to select. 
+;; +;; Thus our BSL is of the form: +;; op0 = bsl (mask, op2, op3) +;; We can use any of: +;; +;; if (op0 = mask) +;; bsl mask, op1, op2 +;; if (op0 = op1) (so 1-bits in mask choose bits from op2, else op0) +;; bit op0, op2, mask +;; if (op0 = op2) (so 0-bits in mask choose bits from op1, else op0) +;; bif op0, op1, mask + +(define_insn "aarch64_simd_bsl<mode>_internal" + [(set (match_operand:VALLDIF 0 "register_operand" "=w,w,w") + (ior:VALLDIF + (and:VALLDIF + (match_operand:<V_cmp_result> 1 "register_operand" " 0,w,w") + (match_operand:VALLDIF 2 "register_operand" " w,w,0")) + (and:VALLDIF + (not:<V_cmp_result> + (match_dup:<V_cmp_result> 1)) + (match_operand:VALLDIF 3 "register_operand" " w,0,w")) + ))] + "TARGET_SIMD" + "@ + bsl\\t%0.<Vbtype>, %2.<Vbtype>, %3.<Vbtype> + bit\\t%0.<Vbtype>, %2.<Vbtype>, %1.<Vbtype> + bif\\t%0.<Vbtype>, %3.<Vbtype>, %1.<Vbtype>" + [(set_attr "type" "neon_bsl<q>")] +) + +(define_expand "aarch64_simd_bsl<mode>" + [(match_operand:VALLDIF 0 "register_operand") + (match_operand:<V_cmp_result> 1 "register_operand") + (match_operand:VALLDIF 2 "register_operand") + (match_operand:VALLDIF 3 "register_operand")] + "TARGET_SIMD" +{ + /* We can't alias operands together if they have different modes. */ + operands[1] = gen_lowpart (<V_cmp_result>mode, operands[1]); + emit_insn (gen_aarch64_simd_bsl<mode>_internal (operands[0], operands[1], + operands[2], operands[3])); + DONE; +}) + +(define_expand "aarch64_vcond_internal<mode><mode>" + [(set (match_operand:VDQ 0 "register_operand") + (if_then_else:VDQ + (match_operator 3 "comparison_operator" + [(match_operand:VDQ 4 "register_operand") + (match_operand:VDQ 5 "nonmemory_operand")]) + (match_operand:VDQ 1 "nonmemory_operand") + (match_operand:VDQ 2 "nonmemory_operand")))] + "TARGET_SIMD" +{ + int inverse = 0, has_zero_imm_form = 0; + rtx op1 = operands[1]; + rtx op2 = operands[2]; + rtx mask = gen_reg_rtx (<MODE>mode); + + switch (GET_CODE (operands[3])) + { + case LE: + case LT: + case NE: + inverse = 1; + /* Fall through. */ + case GE: + case GT: + case EQ: + has_zero_imm_form = 1; + break; + case LEU: + case LTU: + inverse = 1; + break; + default: + break; + } + + if (!REG_P (operands[5]) + && (operands[5] != CONST0_RTX (<MODE>mode) || !has_zero_imm_form)) + operands[5] = force_reg (<MODE>mode, operands[5]); + + switch (GET_CODE (operands[3])) + { + case LT: + case GE: + emit_insn (gen_aarch64_cmge<mode> (mask, operands[4], operands[5])); + break; + + case LE: + case GT: + emit_insn (gen_aarch64_cmgt<mode> (mask, operands[4], operands[5])); + break; + + case LTU: + case GEU: + emit_insn (gen_aarch64_cmgeu<mode> (mask, operands[4], operands[5])); + break; + + case LEU: + case GTU: + emit_insn (gen_aarch64_cmgtu<mode> (mask, operands[4], operands[5])); + break; + + case NE: + case EQ: + emit_insn (gen_aarch64_cmeq<mode> (mask, operands[4], operands[5])); + break; + + default: + gcc_unreachable (); + } + + if (inverse) + { + op1 = operands[2]; + op2 = operands[1]; + } + + /* If we have (a = (b CMP c) ? -1 : 0); + Then we can simply move the generated mask. 
*/ + + if (op1 == CONSTM1_RTX (<V_cmp_result>mode) + && op2 == CONST0_RTX (<V_cmp_result>mode)) + emit_move_insn (operands[0], mask); + else + { + if (!REG_P (op1)) + op1 = force_reg (<MODE>mode, op1); + if (!REG_P (op2)) + op2 = force_reg (<MODE>mode, op2); + emit_insn (gen_aarch64_simd_bsl<mode> (operands[0], mask, + op1, op2)); + } + + DONE; +}) + +(define_expand "aarch64_vcond_internal<VDQF_COND:mode><VDQF:mode>" + [(set (match_operand:VDQF_COND 0 "register_operand") + (if_then_else:VDQF + (match_operator 3 "comparison_operator" + [(match_operand:VDQF 4 "register_operand") + (match_operand:VDQF 5 "nonmemory_operand")]) + (match_operand:VDQF_COND 1 "nonmemory_operand") + (match_operand:VDQF_COND 2 "nonmemory_operand")))] + "TARGET_SIMD" +{ + int inverse = 0; + int use_zero_form = 0; + int swap_bsl_operands = 0; + rtx op1 = operands[1]; + rtx op2 = operands[2]; + rtx mask = gen_reg_rtx (<VDQF_COND:V_cmp_result>mode); + rtx tmp = gen_reg_rtx (<VDQF_COND:V_cmp_result>mode); + + rtx (*base_comparison) (rtx, rtx, rtx); + rtx (*complimentary_comparison) (rtx, rtx, rtx); + + switch (GET_CODE (operands[3])) + { + case GE: + case GT: + case LE: + case LT: + case EQ: + if (operands[5] == CONST0_RTX (<MODE>mode)) + { + use_zero_form = 1; + break; + } + /* Fall through. */ + default: + if (!REG_P (operands[5])) + operands[5] = force_reg (<VDQF:MODE>mode, operands[5]); + } + + switch (GET_CODE (operands[3])) + { + case LT: + case UNLT: + inverse = 1; + /* Fall through. */ + case GE: + case UNGE: + case ORDERED: + case UNORDERED: + base_comparison = gen_aarch64_cmge<VDQF:mode>; + complimentary_comparison = gen_aarch64_cmgt<VDQF:mode>; + break; + case LE: + case UNLE: + inverse = 1; + /* Fall through. */ + case GT: + case UNGT: + base_comparison = gen_aarch64_cmgt<VDQF:mode>; + complimentary_comparison = gen_aarch64_cmge<VDQF:mode>; + break; + case EQ: + case NE: + case UNEQ: + base_comparison = gen_aarch64_cmeq<VDQF:mode>; + complimentary_comparison = gen_aarch64_cmeq<VDQF:mode>; + break; + default: + gcc_unreachable (); + } + + switch (GET_CODE (operands[3])) + { + case LT: + case LE: + case GT: + case GE: + case EQ: + /* The easy case. Here we emit one of FCMGE, FCMGT or FCMEQ. + As a LT b <=> b GE a && a LE b <=> b GT a. Our transformations are: + a GE b -> a GE b + a GT b -> a GT b + a LE b -> b GE a + a LT b -> b GT a + a EQ b -> a EQ b + Note that there also exist direct comparison against 0 forms, + so catch those as a special case. */ + if (use_zero_form) + { + inverse = 0; + switch (GET_CODE (operands[3])) + { + case LT: + base_comparison = gen_aarch64_cmlt<VDQF:mode>; + break; + case LE: + base_comparison = gen_aarch64_cmle<VDQF:mode>; + break; + default: + /* Do nothing, other zero form cases already have the correct + base_comparison. */ + break; + } + } + + if (!inverse) + emit_insn (base_comparison (mask, operands[4], operands[5])); + else + emit_insn (complimentary_comparison (mask, operands[5], operands[4])); + break; + case UNLT: + case UNLE: + case UNGT: + case UNGE: + case NE: + /* FCM returns false for lanes which are unordered, so if we use + the inverse of the comparison we actually want to emit, then + swap the operands to BSL, we will end up with the correct result. + Note that a NE NaN and NaN NE b are true for all a, b. 
+ + Our transformations are: + a GE b -> !(b GT a) + a GT b -> !(b GE a) + a LE b -> !(a GT b) + a LT b -> !(a GE b) + a NE b -> !(a EQ b) */ + + if (inverse) + emit_insn (base_comparison (mask, operands[4], operands[5])); + else + emit_insn (complimentary_comparison (mask, operands[5], operands[4])); + + swap_bsl_operands = 1; + break; + case UNEQ: + /* We check (a > b || b > a). combining these comparisons give us + true iff !(a != b && a ORDERED b), swapping the operands to BSL + will then give us (a == b || a UNORDERED b) as intended. */ + + emit_insn (gen_aarch64_cmgt<VDQF:mode> (mask, operands[4], operands[5])); + emit_insn (gen_aarch64_cmgt<VDQF:mode> (tmp, operands[5], operands[4])); + emit_insn (gen_ior<VDQF_COND:v_cmp_result>3 (mask, mask, tmp)); + swap_bsl_operands = 1; + break; + case UNORDERED: + /* Operands are ORDERED iff (a > b || b >= a). + Swapping the operands to BSL will give the UNORDERED case. */ + swap_bsl_operands = 1; + /* Fall through. */ + case ORDERED: + emit_insn (gen_aarch64_cmgt<VDQF:mode> (tmp, operands[4], operands[5])); + emit_insn (gen_aarch64_cmge<VDQF:mode> (mask, operands[5], operands[4])); + emit_insn (gen_ior<VDQF_COND:v_cmp_result>3 (mask, mask, tmp)); + break; + default: + gcc_unreachable (); + } + + if (swap_bsl_operands) + { + op1 = operands[2]; + op2 = operands[1]; + } + + /* If we have (a = (b CMP c) ? -1 : 0); + Then we can simply move the generated mask. */ + + if (op1 == CONSTM1_RTX (<VDQF_COND:V_cmp_result>mode) + && op2 == CONST0_RTX (<VDQF_COND:V_cmp_result>mode)) + emit_move_insn (operands[0], mask); + else + { + if (!REG_P (op1)) + op1 = force_reg (<VDQF_COND:MODE>mode, op1); + if (!REG_P (op2)) + op2 = force_reg (<VDQF_COND:MODE>mode, op2); + emit_insn (gen_aarch64_simd_bsl<VDQF_COND:mode> (operands[0], mask, + op1, op2)); + } + + DONE; +}) + +(define_expand "vcond<mode><mode>" + [(set (match_operand:VALL 0 "register_operand") + (if_then_else:VALL + (match_operator 3 "comparison_operator" + [(match_operand:VALL 4 "register_operand") + (match_operand:VALL 5 "nonmemory_operand")]) + (match_operand:VALL 1 "nonmemory_operand") + (match_operand:VALL 2 "nonmemory_operand")))] + "TARGET_SIMD" +{ + emit_insn (gen_aarch64_vcond_internal<mode><mode> (operands[0], operands[1], + operands[2], operands[3], + operands[4], operands[5])); + DONE; +}) + +(define_expand "vcond<v_cmp_result><mode>" + [(set (match_operand:<V_cmp_result> 0 "register_operand") + (if_then_else:<V_cmp_result> + (match_operator 3 "comparison_operator" + [(match_operand:VDQF 4 "register_operand") + (match_operand:VDQF 5 "nonmemory_operand")]) + (match_operand:<V_cmp_result> 1 "nonmemory_operand") + (match_operand:<V_cmp_result> 2 "nonmemory_operand")))] + "TARGET_SIMD" +{ + emit_insn (gen_aarch64_vcond_internal<v_cmp_result><mode> ( + operands[0], operands[1], + operands[2], operands[3], + operands[4], operands[5])); + DONE; +}) + +(define_expand "vcondu<mode><mode>" + [(set (match_operand:VDQ 0 "register_operand") + (if_then_else:VDQ + (match_operator 3 "comparison_operator" + [(match_operand:VDQ 4 "register_operand") + (match_operand:VDQ 5 "nonmemory_operand")]) + (match_operand:VDQ 1 "nonmemory_operand") + (match_operand:VDQ 2 "nonmemory_operand")))] + "TARGET_SIMD" +{ + emit_insn (gen_aarch64_vcond_internal<mode><mode> (operands[0], operands[1], + operands[2], operands[3], + operands[4], operands[5])); + DONE; +}) + +;; Patterns for AArch64 SIMD Intrinsics. 
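[Editorial note, not part of the patch: before the intrinsic patterns below, a recap of how the vcond expanders above are lowered, namely a lane-wise compare that produces an all-ones/all-zeros mask, followed by a bitwise select (aarch64_simd_bsl). A minimal plain-C per-lane model; names are illustrative only.]

#include <stdint.h>

/* Sketch: out[i] = (a[i] > b[i]) ? x[i] : y[i], lowered as cmgt + bsl.  */
void vcond_sketch (int32_t out[4], const int32_t a[4], const int32_t b[4],
                   const int32_t x[4], const int32_t y[4])
{
  for (int i = 0; i < 4; i++)
    {
      int32_t mask = (a[i] > b[i]) ? -1 : 0;     /* cmgt: all-ones or zero */
      out[i] = (mask & x[i]) | (~mask & y[i]);   /* bsl: bitwise select    */
    }
}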
+ +(define_expand "aarch64_create<mode>" + [(match_operand:VD_RE 0 "register_operand" "") + (match_operand:DI 1 "general_operand" "")] + "TARGET_SIMD" +{ + rtx src = gen_lowpart (<MODE>mode, operands[1]); + emit_move_insn (operands[0], src); + DONE; +}) + +;; Lane extraction with sign extension to general purpose register. +(define_insn "*aarch64_get_lane_extend<GPI:mode><VDQQH:mode>" + [(set (match_operand:GPI 0 "register_operand" "=r") + (sign_extend:GPI + (vec_select:<VEL> + (match_operand:VDQQH 1 "register_operand" "w") + (parallel [(match_operand:SI 2 "immediate_operand" "i")]))))] + "TARGET_SIMD" + { + operands[2] = GEN_INT (ENDIAN_LANE_N (<MODE>mode, INTVAL (operands[2]))); + return "smov\\t%<GPI:w>0, %1.<VDQQH:Vetype>[%2]"; + } + [(set_attr "type" "neon_to_gp<q>")] +) + +(define_insn "*aarch64_get_lane_zero_extendsi<mode>" + [(set (match_operand:SI 0 "register_operand" "=r") + (zero_extend:SI + (vec_select:<VEL> + (match_operand:VDQQH 1 "register_operand" "w") + (parallel [(match_operand:SI 2 "immediate_operand" "i")]))))] + "TARGET_SIMD" + { + operands[2] = GEN_INT (ENDIAN_LANE_N (<MODE>mode, INTVAL (operands[2]))); + return "umov\\t%w0, %1.<Vetype>[%2]"; + } + [(set_attr "type" "neon_to_gp<q>")] +) + +(define_expand "aarch64_be_checked_get_lane<mode>" + [(match_operand:<VEL> 0 "aarch64_simd_nonimmediate_operand") + (match_operand:VALL 1 "register_operand") + (match_operand:SI 2 "immediate_operand")] + "TARGET_SIMD" + { + operands[2] = GEN_INT (ENDIAN_LANE_N (<MODE>mode, INTVAL (operands[2]))); + emit_insn (gen_aarch64_get_lane<mode> (operands[0], + operands[1], + operands[2])); + DONE; + } +) + +;; Lane extraction of a value, neither sign nor zero extension +;; is guaranteed so upper bits should be considered undefined. +(define_insn "aarch64_get_lane<mode>" + [(set (match_operand:<VEL> 0 "aarch64_simd_nonimmediate_operand" "=r, w, Utv") + (vec_select:<VEL> + (match_operand:VALL 1 "register_operand" "w, w, w") + (parallel [(match_operand:SI 2 "immediate_operand" "i, i, i")])))] + "TARGET_SIMD" + { + operands[2] = GEN_INT (ENDIAN_LANE_N (<MODE>mode, INTVAL (operands[2]))); + switch (which_alternative) + { + case 0: + return "umov\\t%<vwcore>0, %1.<Vetype>[%2]"; + case 1: + return "dup\\t%<Vetype>0, %1.<Vetype>[%2]"; + case 2: + return "st1\\t{%1.<Vetype>}[%2], %0"; + default: + gcc_unreachable (); + } + } + [(set_attr "type" "neon_to_gp<q>, neon_dup<q>, neon_store1_one_lane<q>")] +) + +(define_expand "aarch64_get_lanedi" + [(match_operand:DI 0 "register_operand") + (match_operand:DI 1 "register_operand") + (match_operand:SI 2 "immediate_operand")] + "TARGET_SIMD" +{ + aarch64_simd_lane_bounds (operands[2], 0, 1); + emit_move_insn (operands[0], operands[1]); + DONE; +}) + +(define_expand "aarch64_reinterpretv8qi<mode>" + [(match_operand:V8QI 0 "register_operand" "") + (match_operand:VDC 1 "register_operand" "")] + "TARGET_SIMD" +{ + aarch64_simd_reinterpret (operands[0], operands[1]); + DONE; +}) + +(define_expand "aarch64_reinterpretv4hi<mode>" + [(match_operand:V4HI 0 "register_operand" "") + (match_operand:VDC 1 "register_operand" "")] + "TARGET_SIMD" +{ + aarch64_simd_reinterpret (operands[0], operands[1]); + DONE; +}) + +(define_expand "aarch64_reinterpretv2si<mode>" + [(match_operand:V2SI 0 "register_operand" "") + (match_operand:VDC 1 "register_operand" "")] + "TARGET_SIMD" +{ + aarch64_simd_reinterpret (operands[0], operands[1]); + DONE; +}) + +(define_expand "aarch64_reinterpretv2sf<mode>" + [(match_operand:V2SF 0 "register_operand" "") + (match_operand:VDC 1 
"register_operand" "")] + "TARGET_SIMD" +{ + aarch64_simd_reinterpret (operands[0], operands[1]); + DONE; +}) + +(define_expand "aarch64_reinterpretdi<mode>" + [(match_operand:DI 0 "register_operand" "") + (match_operand:VD_RE 1 "register_operand" "")] + "TARGET_SIMD" +{ + aarch64_simd_reinterpret (operands[0], operands[1]); + DONE; +}) + +(define_expand "aarch64_reinterpretv16qi<mode>" + [(match_operand:V16QI 0 "register_operand" "") + (match_operand:VQ 1 "register_operand" "")] + "TARGET_SIMD" +{ + aarch64_simd_reinterpret (operands[0], operands[1]); + DONE; +}) + +(define_expand "aarch64_reinterpretv8hi<mode>" + [(match_operand:V8HI 0 "register_operand" "") + (match_operand:VQ 1 "register_operand" "")] + "TARGET_SIMD" +{ + aarch64_simd_reinterpret (operands[0], operands[1]); + DONE; +}) + +(define_expand "aarch64_reinterpretv4si<mode>" + [(match_operand:V4SI 0 "register_operand" "") + (match_operand:VQ 1 "register_operand" "")] + "TARGET_SIMD" +{ + aarch64_simd_reinterpret (operands[0], operands[1]); + DONE; +}) + +(define_expand "aarch64_reinterpretv4sf<mode>" + [(match_operand:V4SF 0 "register_operand" "") + (match_operand:VQ 1 "register_operand" "")] + "TARGET_SIMD" +{ + aarch64_simd_reinterpret (operands[0], operands[1]); + DONE; +}) + +(define_expand "aarch64_reinterpretv2di<mode>" + [(match_operand:V2DI 0 "register_operand" "") + (match_operand:VQ 1 "register_operand" "")] + "TARGET_SIMD" +{ + aarch64_simd_reinterpret (operands[0], operands[1]); + DONE; +}) + +(define_expand "aarch64_reinterpretv2df<mode>" + [(match_operand:V2DF 0 "register_operand" "") + (match_operand:VQ 1 "register_operand" "")] + "TARGET_SIMD" +{ + aarch64_simd_reinterpret (operands[0], operands[1]); + DONE; +}) + +;; In this insn, operand 1 should be low, and operand 2 the high part of the +;; dest vector. + +(define_insn "*aarch64_combinez<mode>" + [(set (match_operand:<VDBL> 0 "register_operand" "=&w") + (vec_concat:<VDBL> + (match_operand:VDIC 1 "register_operand" "w") + (match_operand:VDIC 2 "aarch64_simd_imm_zero" "Dz")))] + "TARGET_SIMD" + "mov\\t%0.8b, %1.8b" + [(set_attr "type" "neon_move<q>")] +) + +(define_insn_and_split "aarch64_combine<mode>" + [(set (match_operand:<VDBL> 0 "register_operand" "=&w") + (vec_concat:<VDBL> (match_operand:VDC 1 "register_operand" "w") + (match_operand:VDC 2 "register_operand" "w")))] + "TARGET_SIMD" + "#" + "&& reload_completed" + [(const_int 0)] +{ + aarch64_split_simd_combine (operands[0], operands[1], operands[2]); + DONE; +} +[(set_attr "type" "multiple")] +) + +(define_expand "aarch64_simd_combine<mode>" + [(set (match_operand:<VDBL> 0 "register_operand" "=&w") + (vec_concat:<VDBL> (match_operand:VDC 1 "register_operand" "w") + (match_operand:VDC 2 "register_operand" "w")))] + "TARGET_SIMD" + { + emit_insn (gen_move_lo_quad_<Vdbl> (operands[0], operands[1])); + emit_insn (gen_move_hi_quad_<Vdbl> (operands[0], operands[2])); + DONE; + } +[(set_attr "type" "multiple")] +) + +;; <su><addsub>l<q>. 
+ +(define_insn "aarch64_<ANY_EXTEND:su><ADDSUB:optab>l<mode>_hi_internal" + [(set (match_operand:<VWIDE> 0 "register_operand" "=w") + (ADDSUB:<VWIDE> (ANY_EXTEND:<VWIDE> (vec_select:<VHALF> + (match_operand:VQW 1 "register_operand" "w") + (match_operand:VQW 3 "vect_par_cnst_hi_half" ""))) + (ANY_EXTEND:<VWIDE> (vec_select:<VHALF> + (match_operand:VQW 2 "register_operand" "w") + (match_dup 3)))))] + "TARGET_SIMD" + "<ANY_EXTEND:su><ADDSUB:optab>l2\t%0.<Vwtype>, %1.<Vtype>, %2.<Vtype>" + [(set_attr "type" "neon_<ADDSUB:optab>_long")] +) + +(define_insn "aarch64_<ANY_EXTEND:su><ADDSUB:optab>l<mode>_lo_internal" + [(set (match_operand:<VWIDE> 0 "register_operand" "=w") + (ADDSUB:<VWIDE> (ANY_EXTEND:<VWIDE> (vec_select:<VHALF> + (match_operand:VQW 1 "register_operand" "w") + (match_operand:VQW 3 "vect_par_cnst_lo_half" ""))) + (ANY_EXTEND:<VWIDE> (vec_select:<VHALF> + (match_operand:VQW 2 "register_operand" "w") + (match_dup 3)))))] + "TARGET_SIMD" + "<ANY_EXTEND:su><ADDSUB:optab>l\t%0.<Vwtype>, %1.<Vhalftype>, %2.<Vhalftype>" + [(set_attr "type" "neon_<ADDSUB:optab>_long")] +) + + +(define_expand "aarch64_saddl2<mode>" + [(match_operand:<VWIDE> 0 "register_operand" "=w") + (match_operand:VQW 1 "register_operand" "w") + (match_operand:VQW 2 "register_operand" "w")] + "TARGET_SIMD" +{ + rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true); + emit_insn (gen_aarch64_saddl<mode>_hi_internal (operands[0], operands[1], + operands[2], p)); + DONE; +}) + +(define_expand "aarch64_uaddl2<mode>" + [(match_operand:<VWIDE> 0 "register_operand" "=w") + (match_operand:VQW 1 "register_operand" "w") + (match_operand:VQW 2 "register_operand" "w")] + "TARGET_SIMD" +{ + rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true); + emit_insn (gen_aarch64_uaddl<mode>_hi_internal (operands[0], operands[1], + operands[2], p)); + DONE; +}) + +(define_expand "aarch64_ssubl2<mode>" + [(match_operand:<VWIDE> 0 "register_operand" "=w") + (match_operand:VQW 1 "register_operand" "w") + (match_operand:VQW 2 "register_operand" "w")] + "TARGET_SIMD" +{ + rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true); + emit_insn (gen_aarch64_ssubl<mode>_hi_internal (operands[0], operands[1], + operands[2], p)); + DONE; +}) + +(define_expand "aarch64_usubl2<mode>" + [(match_operand:<VWIDE> 0 "register_operand" "=w") + (match_operand:VQW 1 "register_operand" "w") + (match_operand:VQW 2 "register_operand" "w")] + "TARGET_SIMD" +{ + rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true); + emit_insn (gen_aarch64_usubl<mode>_hi_internal (operands[0], operands[1], + operands[2], p)); + DONE; +}) + +(define_insn "aarch64_<ANY_EXTEND:su><ADDSUB:optab>l<mode>" + [(set (match_operand:<VWIDE> 0 "register_operand" "=w") + (ADDSUB:<VWIDE> (ANY_EXTEND:<VWIDE> + (match_operand:VDW 1 "register_operand" "w")) + (ANY_EXTEND:<VWIDE> + (match_operand:VDW 2 "register_operand" "w"))))] + "TARGET_SIMD" + "<ANY_EXTEND:su><ADDSUB:optab>l %0.<Vwtype>, %1.<Vtype>, %2.<Vtype>" + [(set_attr "type" "neon_<ADDSUB:optab>_long")] +) + +;; <su><addsub>w<q>. 
+ +(define_insn "aarch64_<ANY_EXTEND:su><ADDSUB:optab>w<mode>" + [(set (match_operand:<VWIDE> 0 "register_operand" "=w") + (ADDSUB:<VWIDE> (match_operand:<VWIDE> 1 "register_operand" "w") + (ANY_EXTEND:<VWIDE> + (match_operand:VDW 2 "register_operand" "w"))))] + "TARGET_SIMD" + "<ANY_EXTEND:su><ADDSUB:optab>w\\t%0.<Vwtype>, %1.<Vwtype>, %2.<Vtype>" + [(set_attr "type" "neon_<ADDSUB:optab>_widen")] +) + +(define_insn "aarch64_<ANY_EXTEND:su><ADDSUB:optab>w2<mode>_internal" + [(set (match_operand:<VWIDE> 0 "register_operand" "=w") + (ADDSUB:<VWIDE> (match_operand:<VWIDE> 1 "register_operand" "w") + (ANY_EXTEND:<VWIDE> + (vec_select:<VHALF> + (match_operand:VQW 2 "register_operand" "w") + (match_operand:VQW 3 "vect_par_cnst_hi_half" "")))))] + "TARGET_SIMD" + "<ANY_EXTEND:su><ADDSUB:optab>w2\\t%0.<Vwtype>, %1.<Vwtype>, %2.<Vtype>" + [(set_attr "type" "neon_<ADDSUB:optab>_widen")] +) + +(define_expand "aarch64_saddw2<mode>" + [(match_operand:<VWIDE> 0 "register_operand" "=w") + (match_operand:<VWIDE> 1 "register_operand" "w") + (match_operand:VQW 2 "register_operand" "w")] + "TARGET_SIMD" +{ + rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true); + emit_insn (gen_aarch64_saddw2<mode>_internal (operands[0], operands[1], + operands[2], p)); + DONE; +}) + +(define_expand "aarch64_uaddw2<mode>" + [(match_operand:<VWIDE> 0 "register_operand" "=w") + (match_operand:<VWIDE> 1 "register_operand" "w") + (match_operand:VQW 2 "register_operand" "w")] + "TARGET_SIMD" +{ + rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true); + emit_insn (gen_aarch64_uaddw2<mode>_internal (operands[0], operands[1], + operands[2], p)); + DONE; +}) + + +(define_expand "aarch64_ssubw2<mode>" + [(match_operand:<VWIDE> 0 "register_operand" "=w") + (match_operand:<VWIDE> 1 "register_operand" "w") + (match_operand:VQW 2 "register_operand" "w")] + "TARGET_SIMD" +{ + rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true); + emit_insn (gen_aarch64_ssubw2<mode>_internal (operands[0], operands[1], + operands[2], p)); + DONE; +}) + +(define_expand "aarch64_usubw2<mode>" + [(match_operand:<VWIDE> 0 "register_operand" "=w") + (match_operand:<VWIDE> 1 "register_operand" "w") + (match_operand:VQW 2 "register_operand" "w")] + "TARGET_SIMD" +{ + rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true); + emit_insn (gen_aarch64_usubw2<mode>_internal (operands[0], operands[1], + operands[2], p)); + DONE; +}) + +;; <su><r>h<addsub>. + +(define_insn "aarch64_<sur>h<addsub><mode>" + [(set (match_operand:VQ_S 0 "register_operand" "=w") + (unspec:VQ_S [(match_operand:VQ_S 1 "register_operand" "w") + (match_operand:VQ_S 2 "register_operand" "w")] + HADDSUB))] + "TARGET_SIMD" + "<sur>h<addsub>\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>" + [(set_attr "type" "neon_<addsub>_halve<q>")] +) + +;; <r><addsub>hn<q>. 
+ +(define_insn "aarch64_<sur><addsub>hn<mode>" + [(set (match_operand:<VNARROWQ> 0 "register_operand" "=w") + (unspec:<VNARROWQ> [(match_operand:VQN 1 "register_operand" "w") + (match_operand:VQN 2 "register_operand" "w")] + ADDSUBHN))] + "TARGET_SIMD" + "<sur><addsub>hn\\t%0.<Vntype>, %1.<Vtype>, %2.<Vtype>" + [(set_attr "type" "neon_<addsub>_halve_narrow_q")] +) + +(define_insn "aarch64_<sur><addsub>hn2<mode>" + [(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w") + (unspec:<VNARROWQ2> [(match_operand:<VNARROWQ> 1 "register_operand" "0") + (match_operand:VQN 2 "register_operand" "w") + (match_operand:VQN 3 "register_operand" "w")] + ADDSUBHN2))] + "TARGET_SIMD" + "<sur><addsub>hn2\\t%0.<V2ntype>, %2.<Vtype>, %3.<Vtype>" + [(set_attr "type" "neon_<addsub>_halve_narrow_q")] +) + +;; pmul. + +(define_insn "aarch64_pmul<mode>" + [(set (match_operand:VB 0 "register_operand" "=w") + (unspec:VB [(match_operand:VB 1 "register_operand" "w") + (match_operand:VB 2 "register_operand" "w")] + UNSPEC_PMUL))] + "TARGET_SIMD" + "pmul\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>" + [(set_attr "type" "neon_mul_<Vetype><q>")] +) + +;; <su>q<addsub> + +(define_insn "aarch64_<su_optab><optab><mode>" + [(set (match_operand:VSDQ_I 0 "register_operand" "=w") + (BINQOPS:VSDQ_I (match_operand:VSDQ_I 1 "register_operand" "w") + (match_operand:VSDQ_I 2 "register_operand" "w")))] + "TARGET_SIMD" + "<su_optab><optab>\\t%<v>0<Vmtype>, %<v>1<Vmtype>, %<v>2<Vmtype>" + [(set_attr "type" "neon_<optab><q>")] +) + +;; suqadd and usqadd + +(define_insn "aarch64_<sur>qadd<mode>" + [(set (match_operand:VSDQ_I 0 "register_operand" "=w") + (unspec:VSDQ_I [(match_operand:VSDQ_I 1 "register_operand" "0") + (match_operand:VSDQ_I 2 "register_operand" "w")] + USSUQADD))] + "TARGET_SIMD" + "<sur>qadd\\t%<v>0<Vmtype>, %<v>2<Vmtype>" + [(set_attr "type" "neon_qadd<q>")] +) + +;; sqmovun + +(define_insn "aarch64_sqmovun<mode>" + [(set (match_operand:<VNARROWQ> 0 "register_operand" "=w") + (unspec:<VNARROWQ> [(match_operand:VSQN_HSDI 1 "register_operand" "w")] + UNSPEC_SQXTUN))] + "TARGET_SIMD" + "sqxtun\\t%<vn2>0<Vmntype>, %<v>1<Vmtype>" + [(set_attr "type" "neon_sat_shift_imm_narrow_q")] + ) + +;; sqmovn and uqmovn + +(define_insn "aarch64_<sur>qmovn<mode>" + [(set (match_operand:<VNARROWQ> 0 "register_operand" "=w") + (unspec:<VNARROWQ> [(match_operand:VSQN_HSDI 1 "register_operand" "w")] + SUQMOVN))] + "TARGET_SIMD" + "<sur>qxtn\\t%<vn2>0<Vmntype>, %<v>1<Vmtype>" + [(set_attr "type" "neon_sat_shift_imm_narrow_q")] + ) + +;; <su>q<absneg> + +(define_insn "aarch64_s<optab><mode>" + [(set (match_operand:VSDQ_I_BHSI 0 "register_operand" "=w") + (UNQOPS:VSDQ_I_BHSI + (match_operand:VSDQ_I_BHSI 1 "register_operand" "w")))] + "TARGET_SIMD" + "s<optab>\\t%<v>0<Vmtype>, %<v>1<Vmtype>" + [(set_attr "type" "neon_<optab><q>")] +) + +;; sq<r>dmulh. 
+ +(define_insn "aarch64_sq<r>dmulh<mode>" + [(set (match_operand:VSDQ_HSI 0 "register_operand" "=w") + (unspec:VSDQ_HSI + [(match_operand:VSDQ_HSI 1 "register_operand" "w") + (match_operand:VSDQ_HSI 2 "register_operand" "w")] + VQDMULH))] + "TARGET_SIMD" + "sq<r>dmulh\\t%<v>0<Vmtype>, %<v>1<Vmtype>, %<v>2<Vmtype>" + [(set_attr "type" "neon_sat_mul_<Vetype><q>")] +) + +;; sq<r>dmulh_lane + +(define_insn "aarch64_sq<r>dmulh_lane<mode>" + [(set (match_operand:VDQHS 0 "register_operand" "=w") + (unspec:VDQHS + [(match_operand:VDQHS 1 "register_operand" "w") + (vec_select:<VEL> + (match_operand:<VCOND> 2 "register_operand" "<vwx>") + (parallel [(match_operand:SI 3 "immediate_operand" "i")]))] + VQDMULH))] + "TARGET_SIMD" + "* + aarch64_simd_lane_bounds (operands[3], 0, GET_MODE_NUNITS (<VCOND>mode)); + operands[3] = GEN_INT (ENDIAN_LANE_N (<VCOND>mode, INTVAL (operands[3]))); + return \"sq<r>dmulh\\t%0.<Vtype>, %1.<Vtype>, %2.<Vetype>[%3]\";" + [(set_attr "type" "neon_sat_mul_<Vetype>_scalar<q>")] +) + +(define_insn "aarch64_sq<r>dmulh_laneq<mode>" + [(set (match_operand:VDQHS 0 "register_operand" "=w") + (unspec:VDQHS + [(match_operand:VDQHS 1 "register_operand" "w") + (vec_select:<VEL> + (match_operand:<VCONQ> 2 "register_operand" "<vwx>") + (parallel [(match_operand:SI 3 "immediate_operand" "i")]))] + VQDMULH))] + "TARGET_SIMD" + "* + aarch64_simd_lane_bounds (operands[3], 0, GET_MODE_NUNITS (<VCONQ>mode)); + operands[3] = GEN_INT (ENDIAN_LANE_N (<VCONQ>mode, INTVAL (operands[3]))); + return \"sq<r>dmulh\\t%0.<Vtype>, %1.<Vtype>, %2.<Vetype>[%3]\";" + [(set_attr "type" "neon_sat_mul_<Vetype>_scalar<q>")] +) + +(define_insn "aarch64_sq<r>dmulh_lane<mode>" + [(set (match_operand:SD_HSI 0 "register_operand" "=w") + (unspec:SD_HSI + [(match_operand:SD_HSI 1 "register_operand" "w") + (vec_select:<VEL> + (match_operand:<VCONQ> 2 "register_operand" "<vwx>") + (parallel [(match_operand:SI 3 "immediate_operand" "i")]))] + VQDMULH))] + "TARGET_SIMD" + "* + aarch64_simd_lane_bounds (operands[3], 0, GET_MODE_NUNITS (<VCONQ>mode)); + operands[3] = GEN_INT (ENDIAN_LANE_N (<VCONQ>mode, INTVAL (operands[3]))); + return \"sq<r>dmulh\\t%<v>0, %<v>1, %2.<v>[%3]\";" + [(set_attr "type" "neon_sat_mul_<Vetype>_scalar<q>")] +) + +;; vqdml[sa]l + +(define_insn "aarch64_sqdml<SBINQOPS:as>l<mode>" + [(set (match_operand:<VWIDE> 0 "register_operand" "=w") + (SBINQOPS:<VWIDE> + (match_operand:<VWIDE> 1 "register_operand" "0") + (ss_ashift:<VWIDE> + (mult:<VWIDE> + (sign_extend:<VWIDE> + (match_operand:VSD_HSI 2 "register_operand" "w")) + (sign_extend:<VWIDE> + (match_operand:VSD_HSI 3 "register_operand" "w"))) + (const_int 1))))] + "TARGET_SIMD" + "sqdml<SBINQOPS:as>l\\t%<vw2>0<Vmwtype>, %<v>2<Vmtype>, %<v>3<Vmtype>" + [(set_attr "type" "neon_sat_mla_<Vetype>_long")] +) + +;; vqdml[sa]l_lane + +(define_insn "aarch64_sqdml<SBINQOPS:as>l_lane<mode>_internal" + [(set (match_operand:<VWIDE> 0 "register_operand" "=w") + (SBINQOPS:<VWIDE> + (match_operand:<VWIDE> 1 "register_operand" "0") + (ss_ashift:<VWIDE> + (mult:<VWIDE> + (sign_extend:<VWIDE> + (match_operand:VD_HSI 2 "register_operand" "w")) + (sign_extend:<VWIDE> + (vec_duplicate:VD_HSI + (vec_select:<VEL> + (match_operand:<VCON> 3 "register_operand" "<vwx>") + (parallel [(match_operand:SI 4 "immediate_operand" "i")]))) + )) + (const_int 1))))] + "TARGET_SIMD" + { + operands[4] = GEN_INT (ENDIAN_LANE_N (<VCONQ>mode, INTVAL (operands[4]))); + return + "sqdml<SBINQOPS:as>l\\t%<vw2>0<Vmwtype>, %<v>2<Vmtype>, %3.<Vetype>[%4]"; + } + [(set_attr "type" 
"neon_sat_mla_<Vetype>_scalar_long")] +) + +(define_insn "aarch64_sqdml<SBINQOPS:as>l_lane<mode>_internal" + [(set (match_operand:<VWIDE> 0 "register_operand" "=w") + (SBINQOPS:<VWIDE> + (match_operand:<VWIDE> 1 "register_operand" "0") + (ss_ashift:<VWIDE> + (mult:<VWIDE> + (sign_extend:<VWIDE> + (match_operand:SD_HSI 2 "register_operand" "w")) + (sign_extend:<VWIDE> + (vec_select:<VEL> + (match_operand:<VCON> 3 "register_operand" "<vwx>") + (parallel [(match_operand:SI 4 "immediate_operand" "i")]))) + ) + (const_int 1))))] + "TARGET_SIMD" + { + operands[4] = GEN_INT (ENDIAN_LANE_N (<VCONQ>mode, INTVAL (operands[4]))); + return + "sqdml<SBINQOPS:as>l\\t%<vw2>0<Vmwtype>, %<v>2<Vmtype>, %3.<Vetype>[%4]"; + } + [(set_attr "type" "neon_sat_mla_<Vetype>_scalar_long")] +) + +(define_expand "aarch64_sqdmlal_lane<mode>" + [(match_operand:<VWIDE> 0 "register_operand" "=w") + (match_operand:<VWIDE> 1 "register_operand" "0") + (match_operand:VSD_HSI 2 "register_operand" "w") + (match_operand:<VCON> 3 "register_operand" "<vwx>") + (match_operand:SI 4 "immediate_operand" "i")] + "TARGET_SIMD" +{ + aarch64_simd_lane_bounds (operands[4], 0, GET_MODE_NUNITS (<VCON>mode) / 2); + emit_insn (gen_aarch64_sqdmlal_lane<mode>_internal (operands[0], operands[1], + operands[2], operands[3], + operands[4])); + DONE; +}) + +(define_expand "aarch64_sqdmlal_laneq<mode>" + [(match_operand:<VWIDE> 0 "register_operand" "=w") + (match_operand:<VWIDE> 1 "register_operand" "0") + (match_operand:VSD_HSI 2 "register_operand" "w") + (match_operand:<VCON> 3 "register_operand" "<vwx>") + (match_operand:SI 4 "immediate_operand" "i")] + "TARGET_SIMD" +{ + aarch64_simd_lane_bounds (operands[4], 0, GET_MODE_NUNITS (<VCON>mode)); + emit_insn (gen_aarch64_sqdmlal_lane<mode>_internal (operands[0], operands[1], + operands[2], operands[3], + operands[4])); + DONE; +}) + +(define_expand "aarch64_sqdmlsl_lane<mode>" + [(match_operand:<VWIDE> 0 "register_operand" "=w") + (match_operand:<VWIDE> 1 "register_operand" "0") + (match_operand:VSD_HSI 2 "register_operand" "w") + (match_operand:<VCON> 3 "register_operand" "<vwx>") + (match_operand:SI 4 "immediate_operand" "i")] + "TARGET_SIMD" +{ + aarch64_simd_lane_bounds (operands[4], 0, GET_MODE_NUNITS (<VCON>mode) / 2); + emit_insn (gen_aarch64_sqdmlsl_lane<mode>_internal (operands[0], operands[1], + operands[2], operands[3], + operands[4])); + DONE; +}) + +(define_expand "aarch64_sqdmlsl_laneq<mode>" + [(match_operand:<VWIDE> 0 "register_operand" "=w") + (match_operand:<VWIDE> 1 "register_operand" "0") + (match_operand:VSD_HSI 2 "register_operand" "w") + (match_operand:<VCON> 3 "register_operand" "<vwx>") + (match_operand:SI 4 "immediate_operand" "i")] + "TARGET_SIMD" +{ + aarch64_simd_lane_bounds (operands[4], 0, GET_MODE_NUNITS (<VCON>mode)); + emit_insn (gen_aarch64_sqdmlsl_lane<mode>_internal (operands[0], operands[1], + operands[2], operands[3], + operands[4])); + DONE; +}) + +;; vqdml[sa]l_n + +(define_insn "aarch64_sqdml<SBINQOPS:as>l_n<mode>" + [(set (match_operand:<VWIDE> 0 "register_operand" "=w") + (SBINQOPS:<VWIDE> + (match_operand:<VWIDE> 1 "register_operand" "0") + (ss_ashift:<VWIDE> + (mult:<VWIDE> + (sign_extend:<VWIDE> + (match_operand:VD_HSI 2 "register_operand" "w")) + (sign_extend:<VWIDE> + (vec_duplicate:VD_HSI + (match_operand:<VEL> 3 "register_operand" "<vwx>")))) + (const_int 1))))] + "TARGET_SIMD" + "sqdml<SBINQOPS:as>l\\t%<vw2>0<Vmwtype>, %<v>2<Vmtype>, %3.<Vetype>[0]" + [(set_attr "type" "neon_sat_mla_<Vetype>_scalar_long")] +) + +;; sqdml[as]l2 + +(define_insn 
"aarch64_sqdml<SBINQOPS:as>l2<mode>_internal" + [(set (match_operand:<VWIDE> 0 "register_operand" "=w") + (SBINQOPS:<VWIDE> + (match_operand:<VWIDE> 1 "register_operand" "0") + (ss_ashift:<VWIDE> + (mult:<VWIDE> + (sign_extend:<VWIDE> + (vec_select:<VHALF> + (match_operand:VQ_HSI 2 "register_operand" "w") + (match_operand:VQ_HSI 4 "vect_par_cnst_hi_half" ""))) + (sign_extend:<VWIDE> + (vec_select:<VHALF> + (match_operand:VQ_HSI 3 "register_operand" "w") + (match_dup 4)))) + (const_int 1))))] + "TARGET_SIMD" + "sqdml<SBINQOPS:as>l2\\t%<vw2>0<Vmwtype>, %<v>2<Vmtype>, %<v>3<Vmtype>" + [(set_attr "type" "neon_sat_mla_<Vetype>_scalar_long")] +) + +(define_expand "aarch64_sqdmlal2<mode>" + [(match_operand:<VWIDE> 0 "register_operand" "=w") + (match_operand:<VWIDE> 1 "register_operand" "w") + (match_operand:VQ_HSI 2 "register_operand" "w") + (match_operand:VQ_HSI 3 "register_operand" "w")] + "TARGET_SIMD" +{ + rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true); + emit_insn (gen_aarch64_sqdmlal2<mode>_internal (operands[0], operands[1], + operands[2], operands[3], p)); + DONE; +}) + +(define_expand "aarch64_sqdmlsl2<mode>" + [(match_operand:<VWIDE> 0 "register_operand" "=w") + (match_operand:<VWIDE> 1 "register_operand" "w") + (match_operand:VQ_HSI 2 "register_operand" "w") + (match_operand:VQ_HSI 3 "register_operand" "w")] + "TARGET_SIMD" +{ + rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true); + emit_insn (gen_aarch64_sqdmlsl2<mode>_internal (operands[0], operands[1], + operands[2], operands[3], p)); + DONE; +}) + +;; vqdml[sa]l2_lane + +(define_insn "aarch64_sqdml<SBINQOPS:as>l2_lane<mode>_internal" + [(set (match_operand:<VWIDE> 0 "register_operand" "=w") + (SBINQOPS:<VWIDE> + (match_operand:<VWIDE> 1 "register_operand" "0") + (ss_ashift:<VWIDE> + (mult:<VWIDE> + (sign_extend:<VWIDE> + (vec_select:<VHALF> + (match_operand:VQ_HSI 2 "register_operand" "w") + (match_operand:VQ_HSI 5 "vect_par_cnst_hi_half" ""))) + (sign_extend:<VWIDE> + (vec_duplicate:<VHALF> + (vec_select:<VEL> + (match_operand:<VCON> 3 "register_operand" "<vwx>") + (parallel [(match_operand:SI 4 "immediate_operand" "i")]) + )))) + (const_int 1))))] + "TARGET_SIMD" + { + operands[4] = GEN_INT (ENDIAN_LANE_N (<VCONQ>mode, INTVAL (operands[4]))); + return + "sqdml<SBINQOPS:as>l2\\t%<vw2>0<Vmwtype>, %<v>2<Vmtype>, %3.<Vetype>[%4]"; + } + [(set_attr "type" "neon_sat_mla_<Vetype>_scalar_long")] +) + +(define_expand "aarch64_sqdmlal2_lane<mode>" + [(match_operand:<VWIDE> 0 "register_operand" "=w") + (match_operand:<VWIDE> 1 "register_operand" "w") + (match_operand:VQ_HSI 2 "register_operand" "w") + (match_operand:<VCON> 3 "register_operand" "<vwx>") + (match_operand:SI 4 "immediate_operand" "i")] + "TARGET_SIMD" +{ + rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true); + aarch64_simd_lane_bounds (operands[4], 0, GET_MODE_NUNITS (<MODE>mode) / 2); + emit_insn (gen_aarch64_sqdmlal2_lane<mode>_internal (operands[0], operands[1], + operands[2], operands[3], + operands[4], p)); + DONE; +}) + +(define_expand "aarch64_sqdmlal2_laneq<mode>" + [(match_operand:<VWIDE> 0 "register_operand" "=w") + (match_operand:<VWIDE> 1 "register_operand" "w") + (match_operand:VQ_HSI 2 "register_operand" "w") + (match_operand:<VCON> 3 "register_operand" "<vwx>") + (match_operand:SI 4 "immediate_operand" "i")] + "TARGET_SIMD" +{ + rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true); + aarch64_simd_lane_bounds (operands[4], 0, GET_MODE_NUNITS (<MODE>mode)); + emit_insn (gen_aarch64_sqdmlal2_lane<mode>_internal (operands[0], 
operands[1], + operands[2], operands[3], + operands[4], p)); + DONE; +}) + +(define_expand "aarch64_sqdmlsl2_lane<mode>" + [(match_operand:<VWIDE> 0 "register_operand" "=w") + (match_operand:<VWIDE> 1 "register_operand" "w") + (match_operand:VQ_HSI 2 "register_operand" "w") + (match_operand:<VCON> 3 "register_operand" "<vwx>") + (match_operand:SI 4 "immediate_operand" "i")] + "TARGET_SIMD" +{ + rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true); + aarch64_simd_lane_bounds (operands[4], 0, GET_MODE_NUNITS (<MODE>mode) / 2); + emit_insn (gen_aarch64_sqdmlsl2_lane<mode>_internal (operands[0], operands[1], + operands[2], operands[3], + operands[4], p)); + DONE; +}) + +(define_expand "aarch64_sqdmlsl2_laneq<mode>" + [(match_operand:<VWIDE> 0 "register_operand" "=w") + (match_operand:<VWIDE> 1 "register_operand" "w") + (match_operand:VQ_HSI 2 "register_operand" "w") + (match_operand:<VCON> 3 "register_operand" "<vwx>") + (match_operand:SI 4 "immediate_operand" "i")] + "TARGET_SIMD" +{ + rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true); + aarch64_simd_lane_bounds (operands[4], 0, GET_MODE_NUNITS (<MODE>mode)); + emit_insn (gen_aarch64_sqdmlsl2_lane<mode>_internal (operands[0], operands[1], + operands[2], operands[3], + operands[4], p)); + DONE; +}) + +(define_insn "aarch64_sqdml<SBINQOPS:as>l2_n<mode>_internal" + [(set (match_operand:<VWIDE> 0 "register_operand" "=w") + (SBINQOPS:<VWIDE> + (match_operand:<VWIDE> 1 "register_operand" "0") + (ss_ashift:<VWIDE> + (mult:<VWIDE> + (sign_extend:<VWIDE> + (vec_select:<VHALF> + (match_operand:VQ_HSI 2 "register_operand" "w") + (match_operand:VQ_HSI 4 "vect_par_cnst_hi_half" ""))) + (sign_extend:<VWIDE> + (vec_duplicate:<VHALF> + (match_operand:<VEL> 3 "register_operand" "<vwx>")))) + (const_int 1))))] + "TARGET_SIMD" + "sqdml<SBINQOPS:as>l2\\t%<vw2>0<Vmwtype>, %<v>2<Vmtype>, %3.<Vetype>[0]" + [(set_attr "type" "neon_sat_mla_<Vetype>_scalar_long")] +) + +(define_expand "aarch64_sqdmlal2_n<mode>" + [(match_operand:<VWIDE> 0 "register_operand" "=w") + (match_operand:<VWIDE> 1 "register_operand" "w") + (match_operand:VQ_HSI 2 "register_operand" "w") + (match_operand:<VEL> 3 "register_operand" "w")] + "TARGET_SIMD" +{ + rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true); + emit_insn (gen_aarch64_sqdmlal2_n<mode>_internal (operands[0], operands[1], + operands[2], operands[3], + p)); + DONE; +}) + +(define_expand "aarch64_sqdmlsl2_n<mode>" + [(match_operand:<VWIDE> 0 "register_operand" "=w") + (match_operand:<VWIDE> 1 "register_operand" "w") + (match_operand:VQ_HSI 2 "register_operand" "w") + (match_operand:<VEL> 3 "register_operand" "w")] + "TARGET_SIMD" +{ + rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true); + emit_insn (gen_aarch64_sqdmlsl2_n<mode>_internal (operands[0], operands[1], + operands[2], operands[3], + p)); + DONE; +}) + +;; vqdmull + +(define_insn "aarch64_sqdmull<mode>" + [(set (match_operand:<VWIDE> 0 "register_operand" "=w") + (ss_ashift:<VWIDE> + (mult:<VWIDE> + (sign_extend:<VWIDE> + (match_operand:VSD_HSI 1 "register_operand" "w")) + (sign_extend:<VWIDE> + (match_operand:VSD_HSI 2 "register_operand" "w"))) + (const_int 1)))] + "TARGET_SIMD" + "sqdmull\\t%<vw2>0<Vmwtype>, %<v>1<Vmtype>, %<v>2<Vmtype>" + [(set_attr "type" "neon_sat_mul_<Vetype>_long")] +) + +;; vqdmull_lane + +(define_insn "aarch64_sqdmull_lane<mode>_internal" + [(set (match_operand:<VWIDE> 0 "register_operand" "=w") + (ss_ashift:<VWIDE> + (mult:<VWIDE> + (sign_extend:<VWIDE> + (match_operand:VD_HSI 1 "register_operand" "w")) + 
(sign_extend:<VWIDE> + (vec_duplicate:VD_HSI + (vec_select:<VEL> + (match_operand:<VCON> 2 "register_operand" "<vwx>") + (parallel [(match_operand:SI 3 "immediate_operand" "i")]))) + )) + (const_int 1)))] + "TARGET_SIMD" + { + operands[3] = GEN_INT (ENDIAN_LANE_N (<VCONQ>mode, INTVAL (operands[3]))); + return "sqdmull\\t%<vw2>0<Vmwtype>, %<v>1<Vmtype>, %2.<Vetype>[%3]"; + } + [(set_attr "type" "neon_sat_mul_<Vetype>_scalar_long")] +) + +(define_insn "aarch64_sqdmull_lane<mode>_internal" + [(set (match_operand:<VWIDE> 0 "register_operand" "=w") + (ss_ashift:<VWIDE> + (mult:<VWIDE> + (sign_extend:<VWIDE> + (match_operand:SD_HSI 1 "register_operand" "w")) + (sign_extend:<VWIDE> + (vec_select:<VEL> + (match_operand:<VCON> 2 "register_operand" "<vwx>") + (parallel [(match_operand:SI 3 "immediate_operand" "i")])) + )) + (const_int 1)))] + "TARGET_SIMD" + { + operands[3] = GEN_INT (ENDIAN_LANE_N (<VCONQ>mode, INTVAL (operands[3]))); + return "sqdmull\\t%<vw2>0<Vmwtype>, %<v>1<Vmtype>, %2.<Vetype>[%3]"; + } + [(set_attr "type" "neon_sat_mul_<Vetype>_scalar_long")] +) + +(define_expand "aarch64_sqdmull_lane<mode>" + [(match_operand:<VWIDE> 0 "register_operand" "=w") + (match_operand:VSD_HSI 1 "register_operand" "w") + (match_operand:<VCON> 2 "register_operand" "<vwx>") + (match_operand:SI 3 "immediate_operand" "i")] + "TARGET_SIMD" +{ + aarch64_simd_lane_bounds (operands[3], 0, GET_MODE_NUNITS (<VCON>mode) / 2); + emit_insn (gen_aarch64_sqdmull_lane<mode>_internal (operands[0], operands[1], + operands[2], operands[3])); + DONE; +}) + +(define_expand "aarch64_sqdmull_laneq<mode>" + [(match_operand:<VWIDE> 0 "register_operand" "=w") + (match_operand:VD_HSI 1 "register_operand" "w") + (match_operand:<VCON> 2 "register_operand" "<vwx>") + (match_operand:SI 3 "immediate_operand" "i")] + "TARGET_SIMD" +{ + aarch64_simd_lane_bounds (operands[3], 0, GET_MODE_NUNITS (<VCON>mode)); + emit_insn (gen_aarch64_sqdmull_lane<mode>_internal + (operands[0], operands[1], operands[2], operands[3])); + DONE; +}) + +;; vqdmull_n + +(define_insn "aarch64_sqdmull_n<mode>" + [(set (match_operand:<VWIDE> 0 "register_operand" "=w") + (ss_ashift:<VWIDE> + (mult:<VWIDE> + (sign_extend:<VWIDE> + (match_operand:VD_HSI 1 "register_operand" "w")) + (sign_extend:<VWIDE> + (vec_duplicate:VD_HSI + (match_operand:<VEL> 2 "register_operand" "<vwx>"))) + ) + (const_int 1)))] + "TARGET_SIMD" + "sqdmull\\t%<vw2>0<Vmwtype>, %<v>1<Vmtype>, %2.<Vetype>[0]" + [(set_attr "type" "neon_sat_mul_<Vetype>_scalar_long")] +) + +;; vqdmull2 + + + +(define_insn "aarch64_sqdmull2<mode>_internal" + [(set (match_operand:<VWIDE> 0 "register_operand" "=w") + (ss_ashift:<VWIDE> + (mult:<VWIDE> + (sign_extend:<VWIDE> + (vec_select:<VHALF> + (match_operand:VQ_HSI 1 "register_operand" "w") + (match_operand:VQ_HSI 3 "vect_par_cnst_hi_half" ""))) + (sign_extend:<VWIDE> + (vec_select:<VHALF> + (match_operand:VQ_HSI 2 "register_operand" "w") + (match_dup 3))) + ) + (const_int 1)))] + "TARGET_SIMD" + "sqdmull2\\t%<vw2>0<Vmwtype>, %<v>1<Vmtype>, %<v>2<Vmtype>" + [(set_attr "type" "neon_sat_mul_<Vetype>_scalar_long")] +) + +(define_expand "aarch64_sqdmull2<mode>" + [(match_operand:<VWIDE> 0 "register_operand" "=w") + (match_operand:VQ_HSI 1 "register_operand" "w") + (match_operand:<VCON> 2 "register_operand" "w")] + "TARGET_SIMD" +{ + rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true); + emit_insn (gen_aarch64_sqdmull2<mode>_internal (operands[0], operands[1], + operands[2], p)); + DONE; +}) + +;; vqdmull2_lane + +(define_insn 
"aarch64_sqdmull2_lane<mode>_internal" + [(set (match_operand:<VWIDE> 0 "register_operand" "=w") + (ss_ashift:<VWIDE> + (mult:<VWIDE> + (sign_extend:<VWIDE> + (vec_select:<VHALF> + (match_operand:VQ_HSI 1 "register_operand" "w") + (match_operand:VQ_HSI 4 "vect_par_cnst_hi_half" ""))) + (sign_extend:<VWIDE> + (vec_duplicate:<VHALF> + (vec_select:<VEL> + (match_operand:<VCON> 2 "register_operand" "<vwx>") + (parallel [(match_operand:SI 3 "immediate_operand" "i")]))) + )) + (const_int 1)))] + "TARGET_SIMD" + { + operands[3] = GEN_INT (ENDIAN_LANE_N (<VCONQ>mode, INTVAL (operands[3]))); + return "sqdmull2\\t%<vw2>0<Vmwtype>, %<v>1<Vmtype>, %2.<Vetype>[%3]"; + } + [(set_attr "type" "neon_sat_mul_<Vetype>_scalar_long")] +) + +(define_expand "aarch64_sqdmull2_lane<mode>" + [(match_operand:<VWIDE> 0 "register_operand" "=w") + (match_operand:VQ_HSI 1 "register_operand" "w") + (match_operand:<VCON> 2 "register_operand" "<vwx>") + (match_operand:SI 3 "immediate_operand" "i")] + "TARGET_SIMD" +{ + rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true); + aarch64_simd_lane_bounds (operands[3], 0, GET_MODE_NUNITS (<MODE>mode) / 2); + emit_insn (gen_aarch64_sqdmull2_lane<mode>_internal (operands[0], operands[1], + operands[2], operands[3], + p)); + DONE; +}) + +(define_expand "aarch64_sqdmull2_laneq<mode>" + [(match_operand:<VWIDE> 0 "register_operand" "=w") + (match_operand:VQ_HSI 1 "register_operand" "w") + (match_operand:<VCON> 2 "register_operand" "<vwx>") + (match_operand:SI 3 "immediate_operand" "i")] + "TARGET_SIMD" +{ + rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true); + aarch64_simd_lane_bounds (operands[3], 0, GET_MODE_NUNITS (<MODE>mode)); + emit_insn (gen_aarch64_sqdmull2_lane<mode>_internal (operands[0], operands[1], + operands[2], operands[3], + p)); + DONE; +}) + +;; vqdmull2_n + +(define_insn "aarch64_sqdmull2_n<mode>_internal" + [(set (match_operand:<VWIDE> 0 "register_operand" "=w") + (ss_ashift:<VWIDE> + (mult:<VWIDE> + (sign_extend:<VWIDE> + (vec_select:<VHALF> + (match_operand:VQ_HSI 1 "register_operand" "w") + (match_operand:VQ_HSI 3 "vect_par_cnst_hi_half" ""))) + (sign_extend:<VWIDE> + (vec_duplicate:<VHALF> + (match_operand:<VEL> 2 "register_operand" "<vwx>"))) + ) + (const_int 1)))] + "TARGET_SIMD" + "sqdmull2\\t%<vw2>0<Vmwtype>, %<v>1<Vmtype>, %2.<Vetype>[0]" + [(set_attr "type" "neon_sat_mul_<Vetype>_scalar_long")] +) + +(define_expand "aarch64_sqdmull2_n<mode>" + [(match_operand:<VWIDE> 0 "register_operand" "=w") + (match_operand:VQ_HSI 1 "register_operand" "w") + (match_operand:<VEL> 2 "register_operand" "w")] + "TARGET_SIMD" +{ + rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true); + emit_insn (gen_aarch64_sqdmull2_n<mode>_internal (operands[0], operands[1], + operands[2], p)); + DONE; +}) + +;; vshl + +(define_insn "aarch64_<sur>shl<mode>" + [(set (match_operand:VSDQ_I_DI 0 "register_operand" "=w") + (unspec:VSDQ_I_DI + [(match_operand:VSDQ_I_DI 1 "register_operand" "w") + (match_operand:VSDQ_I_DI 2 "register_operand" "w")] + VSHL))] + "TARGET_SIMD" + "<sur>shl\\t%<v>0<Vmtype>, %<v>1<Vmtype>, %<v>2<Vmtype>"; + [(set_attr "type" "neon_shift_reg<q>")] +) + + +;; vqshl + +(define_insn "aarch64_<sur>q<r>shl<mode>" + [(set (match_operand:VSDQ_I 0 "register_operand" "=w") + (unspec:VSDQ_I + [(match_operand:VSDQ_I 1 "register_operand" "w") + (match_operand:VSDQ_I 2 "register_operand" "w")] + VQSHL))] + "TARGET_SIMD" + "<sur>q<r>shl\\t%<v>0<Vmtype>, %<v>1<Vmtype>, %<v>2<Vmtype>"; + [(set_attr "type" "neon_sat_shift_reg<q>")] +) + +;; vshll_n + +(define_insn 
"aarch64_<sur>shll_n<mode>" + [(set (match_operand:<VWIDE> 0 "register_operand" "=w") + (unspec:<VWIDE> [(match_operand:VDW 1 "register_operand" "w") + (match_operand:SI 2 "immediate_operand" "i")] + VSHLL))] + "TARGET_SIMD" + "* + int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT; + aarch64_simd_const_bounds (operands[2], 0, bit_width + 1); + if (INTVAL (operands[2]) == bit_width) + { + return \"shll\\t%0.<Vwtype>, %1.<Vtype>, %2\"; + } + else { + return \"<sur>shll\\t%0.<Vwtype>, %1.<Vtype>, %2\"; + }" + [(set_attr "type" "neon_shift_imm_long")] +) + +;; vshll_high_n + +(define_insn "aarch64_<sur>shll2_n<mode>" + [(set (match_operand:<VWIDE> 0 "register_operand" "=w") + (unspec:<VWIDE> [(match_operand:VQW 1 "register_operand" "w") + (match_operand:SI 2 "immediate_operand" "i")] + VSHLL))] + "TARGET_SIMD" + "* + int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT; + aarch64_simd_const_bounds (operands[2], 0, bit_width + 1); + if (INTVAL (operands[2]) == bit_width) + { + return \"shll2\\t%0.<Vwtype>, %1.<Vtype>, %2\"; + } + else { + return \"<sur>shll2\\t%0.<Vwtype>, %1.<Vtype>, %2\"; + }" + [(set_attr "type" "neon_shift_imm_long")] +) + +;; vrshr_n + +(define_insn "aarch64_<sur>shr_n<mode>" + [(set (match_operand:VSDQ_I_DI 0 "register_operand" "=w") + (unspec:VSDQ_I_DI [(match_operand:VSDQ_I_DI 1 "register_operand" "w") + (match_operand:SI 2 "immediate_operand" "i")] + VRSHR_N))] + "TARGET_SIMD" + "* + int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT; + aarch64_simd_const_bounds (operands[2], 1, bit_width + 1); + return \"<sur>shr\\t%<v>0<Vmtype>, %<v>1<Vmtype>, %2\";" + [(set_attr "type" "neon_sat_shift_imm<q>")] +) + +;; v(r)sra_n + +(define_insn "aarch64_<sur>sra_n<mode>" + [(set (match_operand:VSDQ_I_DI 0 "register_operand" "=w") + (unspec:VSDQ_I_DI [(match_operand:VSDQ_I_DI 1 "register_operand" "0") + (match_operand:VSDQ_I_DI 2 "register_operand" "w") + (match_operand:SI 3 "immediate_operand" "i")] + VSRA))] + "TARGET_SIMD" + "* + int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT; + aarch64_simd_const_bounds (operands[3], 1, bit_width + 1); + return \"<sur>sra\\t%<v>0<Vmtype>, %<v>2<Vmtype>, %3\";" + [(set_attr "type" "neon_shift_acc<q>")] +) + +;; vs<lr>i_n + +(define_insn "aarch64_<sur>s<lr>i_n<mode>" + [(set (match_operand:VSDQ_I_DI 0 "register_operand" "=w") + (unspec:VSDQ_I_DI [(match_operand:VSDQ_I_DI 1 "register_operand" "0") + (match_operand:VSDQ_I_DI 2 "register_operand" "w") + (match_operand:SI 3 "immediate_operand" "i")] + VSLRI))] + "TARGET_SIMD" + "* + int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT; + aarch64_simd_const_bounds (operands[3], 1 - <VSLRI:offsetlr>, + bit_width - <VSLRI:offsetlr> + 1); + return \"s<lr>i\\t%<v>0<Vmtype>, %<v>2<Vmtype>, %3\";" + [(set_attr "type" "neon_shift_imm<q>")] +) + +;; vqshl(u) + +(define_insn "aarch64_<sur>qshl<u>_n<mode>" + [(set (match_operand:VSDQ_I 0 "register_operand" "=w") + (unspec:VSDQ_I [(match_operand:VSDQ_I 1 "register_operand" "w") + (match_operand:SI 2 "immediate_operand" "i")] + VQSHL_N))] + "TARGET_SIMD" + "* + int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT; + aarch64_simd_const_bounds (operands[2], 0, bit_width); + return \"<sur>qshl<u>\\t%<v>0<Vmtype>, %<v>1<Vmtype>, %2\";" + [(set_attr "type" "neon_sat_shift_imm<q>")] +) + + +;; vq(r)shr(u)n_n + +(define_insn "aarch64_<sur>q<r>shr<u>n_n<mode>" + [(set (match_operand:<VNARROWQ> 0 "register_operand" "=w") + (unspec:<VNARROWQ> [(match_operand:VSQN_HSDI 1 "register_operand" "w") + 
(match_operand:SI 2 "immediate_operand" "i")] + VQSHRN_N))] + "TARGET_SIMD" + "* + int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT; + aarch64_simd_const_bounds (operands[2], 1, bit_width + 1); + return \"<sur>q<r>shr<u>n\\t%<vn2>0<Vmntype>, %<v>1<Vmtype>, %2\";" + [(set_attr "type" "neon_sat_shift_imm_narrow_q")] +) + + +;; cm(eq|ge|gt|lt|le) +;; Note, we have constraints for Dz and Z as different expanders +;; have different ideas of what should be passed to this pattern. + +(define_insn "aarch64_cm<optab><mode>" + [(set (match_operand:<V_cmp_result> 0 "register_operand" "=w,w") + (neg:<V_cmp_result> + (COMPARISONS:<V_cmp_result> + (match_operand:VDQ 1 "register_operand" "w,w") + (match_operand:VDQ 2 "aarch64_simd_reg_or_zero" "w,ZDz") + )))] + "TARGET_SIMD" + "@ + cm<n_optab>\t%<v>0<Vmtype>, %<v><cmp_1><Vmtype>, %<v><cmp_2><Vmtype> + cm<optab>\t%<v>0<Vmtype>, %<v>1<Vmtype>, #0" + [(set_attr "type" "neon_compare<q>, neon_compare_zero<q>")] +) + +(define_insn_and_split "aarch64_cm<optab>di" + [(set (match_operand:DI 0 "register_operand" "=w,w,r") + (neg:DI + (COMPARISONS:DI + (match_operand:DI 1 "register_operand" "w,w,r") + (match_operand:DI 2 "aarch64_simd_reg_or_zero" "w,ZDz,r") + ))) + (clobber (reg:CC CC_REGNUM))] + "TARGET_SIMD" + "@ + cm<n_optab>\t%d0, %d<cmp_1>, %d<cmp_2> + cm<optab>\t%d0, %d1, #0 + #" + "reload_completed + /* We need to prevent the split from + happening in the 'w' constraint cases. */ + && GP_REGNUM_P (REGNO (operands[0])) + && GP_REGNUM_P (REGNO (operands[1]))" + [(const_int 0)] + { + enum machine_mode mode = SELECT_CC_MODE (<CMP>, operands[1], operands[2]); + rtx cc_reg = aarch64_gen_compare_reg (<CMP>, operands[1], operands[2]); + rtx comparison = gen_rtx_<CMP> (mode, operands[1], operands[2]); + emit_insn (gen_cstoredi_neg (operands[0], comparison, cc_reg)); + DONE; + } + [(set_attr "type" "neon_compare, neon_compare_zero, multiple")] +) + +;; cm(hs|hi) + +(define_insn "aarch64_cm<optab><mode>" + [(set (match_operand:<V_cmp_result> 0 "register_operand" "=w") + (neg:<V_cmp_result> + (UCOMPARISONS:<V_cmp_result> + (match_operand:VDQ 1 "register_operand" "w") + (match_operand:VDQ 2 "register_operand" "w") + )))] + "TARGET_SIMD" + "cm<n_optab>\t%<v>0<Vmtype>, %<v><cmp_1><Vmtype>, %<v><cmp_2><Vmtype>" + [(set_attr "type" "neon_compare<q>")] +) + +(define_insn_and_split "aarch64_cm<optab>di" + [(set (match_operand:DI 0 "register_operand" "=w,r") + (neg:DI + (UCOMPARISONS:DI + (match_operand:DI 1 "register_operand" "w,r") + (match_operand:DI 2 "aarch64_simd_reg_or_zero" "w,r") + ))) + (clobber (reg:CC CC_REGNUM))] + "TARGET_SIMD" + "@ + cm<n_optab>\t%d0, %d<cmp_1>, %d<cmp_2> + #" + "reload_completed + /* We need to prevent the split from + happening in the 'w' constraint cases. 
*/ + && GP_REGNUM_P (REGNO (operands[0])) + && GP_REGNUM_P (REGNO (operands[1]))" + [(const_int 0)] + { + enum machine_mode mode = CCmode; + rtx cc_reg = aarch64_gen_compare_reg (<CMP>, operands[1], operands[2]); + rtx comparison = gen_rtx_<CMP> (mode, operands[1], operands[2]); + emit_insn (gen_cstoredi_neg (operands[0], comparison, cc_reg)); + DONE; + } + [(set_attr "type" "neon_compare, neon_compare_zero")] +) + +;; cmtst + +(define_insn "aarch64_cmtst<mode>" + [(set (match_operand:<V_cmp_result> 0 "register_operand" "=w") + (neg:<V_cmp_result> + (ne:<V_cmp_result> + (and:VDQ + (match_operand:VDQ 1 "register_operand" "w") + (match_operand:VDQ 2 "register_operand" "w")) + (vec_duplicate:<V_cmp_result> (const_int 0)))))] + "TARGET_SIMD" + "cmtst\t%<v>0<Vmtype>, %<v>1<Vmtype>, %<v>2<Vmtype>" + [(set_attr "type" "neon_tst<q>")] +) + +(define_insn_and_split "aarch64_cmtstdi" + [(set (match_operand:DI 0 "register_operand" "=w,r") + (neg:DI + (ne:DI + (and:DI + (match_operand:DI 1 "register_operand" "w,r") + (match_operand:DI 2 "register_operand" "w,r")) + (const_int 0)))) + (clobber (reg:CC CC_REGNUM))] + "TARGET_SIMD" + "@ + cmtst\t%d0, %d1, %d2 + #" + "reload_completed + /* We need to prevent the split from + happening in the 'w' constraint cases. */ + && GP_REGNUM_P (REGNO (operands[0])) + && GP_REGNUM_P (REGNO (operands[1]))" + [(const_int 0)] + { + rtx and_tree = gen_rtx_AND (DImode, operands[1], operands[2]); + enum machine_mode mode = SELECT_CC_MODE (NE, and_tree, const0_rtx); + rtx cc_reg = aarch64_gen_compare_reg (NE, and_tree, const0_rtx); + rtx comparison = gen_rtx_NE (mode, and_tree, const0_rtx); + emit_insn (gen_cstoredi_neg (operands[0], comparison, cc_reg)); + DONE; + } + [(set_attr "type" "neon_tst")] +) + +;; fcm(eq|ge|gt|le|lt) + +(define_insn "aarch64_cm<optab><mode>" + [(set (match_operand:<V_cmp_result> 0 "register_operand" "=w,w") + (neg:<V_cmp_result> + (COMPARISONS:<V_cmp_result> + (match_operand:VALLF 1 "register_operand" "w,w") + (match_operand:VALLF 2 "aarch64_simd_reg_or_zero" "w,YDz") + )))] + "TARGET_SIMD" + "@ + fcm<n_optab>\t%<v>0<Vmtype>, %<v><cmp_1><Vmtype>, %<v><cmp_2><Vmtype> + fcm<optab>\t%<v>0<Vmtype>, %<v>1<Vmtype>, 0" + [(set_attr "type" "neon_fp_compare_<Vetype><q>")] +) + +;; fac(ge|gt) +;; Note we can also handle what would be fac(le|lt) by +;; generating fac(ge|gt). 
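A minimal user-level sketch of that operand swap, assuming the ACLE intrinsics from arm_neon.h (vcaleq_f32 / vcageq_f32): the "less" absolute comparison has no instruction of its own, so it is expected to come out as FACGE with the operands reversed, which is what the single *aarch64_fac pattern below matches.

#include <arm_neon.h>

/* |a| <= |b| : expected to assemble to "facge v0.4s, v1.4s, v0.4s",
   i.e. the ge form with the operands swapped.  */
uint32x4_t
abs_le (float32x4_t a, float32x4_t b)
{
  return vcaleq_f32 (a, b);
}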
+ +(define_insn "*aarch64_fac<optab><mode>" + [(set (match_operand:<V_cmp_result> 0 "register_operand" "=w") + (neg:<V_cmp_result> + (FAC_COMPARISONS:<V_cmp_result> + (abs:VALLF (match_operand:VALLF 1 "register_operand" "w")) + (abs:VALLF (match_operand:VALLF 2 "register_operand" "w")) + )))] + "TARGET_SIMD" + "fac<n_optab>\t%<v>0<Vmtype>, %<v><cmp_1><Vmtype>, %<v><cmp_2><Vmtype>" + [(set_attr "type" "neon_fp_compare_<Vetype><q>")] +) + +;; addp + +(define_insn "aarch64_addp<mode>" + [(set (match_operand:VD_BHSI 0 "register_operand" "=w") + (unspec:VD_BHSI + [(match_operand:VD_BHSI 1 "register_operand" "w") + (match_operand:VD_BHSI 2 "register_operand" "w")] + UNSPEC_ADDP))] + "TARGET_SIMD" + "addp\t%<v>0<Vmtype>, %<v>1<Vmtype>, %<v>2<Vmtype>" + [(set_attr "type" "neon_reduc_add<q>")] +) + +(define_insn "aarch64_addpdi" + [(set (match_operand:DI 0 "register_operand" "=w") + (unspec:DI + [(match_operand:V2DI 1 "register_operand" "w")] + UNSPEC_ADDP))] + "TARGET_SIMD" + "addp\t%d0, %1.2d" + [(set_attr "type" "neon_reduc_add")] +) + +;; sqrt + +(define_insn "sqrt<mode>2" + [(set (match_operand:VDQF 0 "register_operand" "=w") + (sqrt:VDQF (match_operand:VDQF 1 "register_operand" "w")))] + "TARGET_SIMD" + "fsqrt\\t%0.<Vtype>, %1.<Vtype>" + [(set_attr "type" "neon_fp_sqrt_<Vetype><q>")] +) + +;; Patterns for vector struct loads and stores. + +(define_insn "vec_load_lanesoi<mode>" + [(set (match_operand:OI 0 "register_operand" "=w") + (unspec:OI [(match_operand:OI 1 "aarch64_simd_struct_operand" "Utv") + (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)] + UNSPEC_LD2))] + "TARGET_SIMD" + "ld2\\t{%S0.<Vtype> - %T0.<Vtype>}, %1" + [(set_attr "type" "neon_load2_2reg<q>")] +) + +(define_insn "vec_store_lanesoi<mode>" + [(set (match_operand:OI 0 "aarch64_simd_struct_operand" "=Utv") + (unspec:OI [(match_operand:OI 1 "register_operand" "w") + (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)] + UNSPEC_ST2))] + "TARGET_SIMD" + "st2\\t{%S1.<Vtype> - %T1.<Vtype>}, %0" + [(set_attr "type" "neon_store2_2reg<q>")] +) + +(define_insn "vec_load_lanesci<mode>" + [(set (match_operand:CI 0 "register_operand" "=w") + (unspec:CI [(match_operand:CI 1 "aarch64_simd_struct_operand" "Utv") + (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)] + UNSPEC_LD3))] + "TARGET_SIMD" + "ld3\\t{%S0.<Vtype> - %U0.<Vtype>}, %1" + [(set_attr "type" "neon_load3_3reg<q>")] +) + +(define_insn "vec_store_lanesci<mode>" + [(set (match_operand:CI 0 "aarch64_simd_struct_operand" "=Utv") + (unspec:CI [(match_operand:CI 1 "register_operand" "w") + (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)] + UNSPEC_ST3))] + "TARGET_SIMD" + "st3\\t{%S1.<Vtype> - %U1.<Vtype>}, %0" + [(set_attr "type" "neon_store3_3reg<q>")] +) + +(define_insn "vec_load_lanesxi<mode>" + [(set (match_operand:XI 0 "register_operand" "=w") + (unspec:XI [(match_operand:XI 1 "aarch64_simd_struct_operand" "Utv") + (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)] + UNSPEC_LD4))] + "TARGET_SIMD" + "ld4\\t{%S0.<Vtype> - %V0.<Vtype>}, %1" + [(set_attr "type" "neon_load4_4reg<q>")] +) + +(define_insn "vec_store_lanesxi<mode>" + [(set (match_operand:XI 0 "aarch64_simd_struct_operand" "=Utv") + (unspec:XI [(match_operand:XI 1 "register_operand" "w") + (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)] + UNSPEC_ST4))] + "TARGET_SIMD" + "st4\\t{%S1.<Vtype> - %V1.<Vtype>}, %0" + [(set_attr "type" "neon_store4_4reg<q>")] +) + +;; Reload patterns for AdvSIMD register list operands. 
+ +(define_expand "mov<mode>" + [(set (match_operand:VSTRUCT 0 "aarch64_simd_nonimmediate_operand" "") + (match_operand:VSTRUCT 1 "aarch64_simd_general_operand" ""))] + "TARGET_SIMD" +{ + if (can_create_pseudo_p ()) + { + if (GET_CODE (operands[0]) != REG) + operands[1] = force_reg (<MODE>mode, operands[1]); + } +}) + +(define_insn "*aarch64_mov<mode>" + [(set (match_operand:VSTRUCT 0 "aarch64_simd_nonimmediate_operand" "=w,Utv,w") + (match_operand:VSTRUCT 1 "aarch64_simd_general_operand" " w,w,Utv"))] + "TARGET_SIMD + && (register_operand (operands[0], <MODE>mode) + || register_operand (operands[1], <MODE>mode))" + +{ + switch (which_alternative) + { + case 0: return "#"; + case 1: return "st1\\t{%S1.16b - %<Vendreg>1.16b}, %0"; + case 2: return "ld1\\t{%S0.16b - %<Vendreg>0.16b}, %1"; + default: gcc_unreachable (); + } +} + [(set_attr "type" "neon_move,neon_store<nregs>_<nregs>reg_q,\ + neon_load<nregs>_<nregs>reg_q") + (set (attr "length") (symbol_ref "aarch64_simd_attr_length_move (insn)"))] +) + +(define_insn "aarch64_be_ld1<mode>" + [(set (match_operand:VALLDI 0 "register_operand" "=w") + (unspec:VALLDI [(match_operand:VALLDI 1 "aarch64_simd_struct_operand" "Utv")] + UNSPEC_LD1))] + "TARGET_SIMD" + "ld1\\t{%0<Vmtype>}, %1" + [(set_attr "type" "neon_load1_1reg<q>")] +) + +(define_insn "aarch64_be_st1<mode>" + [(set (match_operand:VALLDI 0 "aarch64_simd_struct_operand" "=Utv") + (unspec:VALLDI [(match_operand:VALLDI 1 "register_operand" "w")] + UNSPEC_ST1))] + "TARGET_SIMD" + "st1\\t{%1<Vmtype>}, %0" + [(set_attr "type" "neon_store1_1reg<q>")] +) + +(define_split + [(set (match_operand:OI 0 "register_operand" "") + (match_operand:OI 1 "register_operand" ""))] + "TARGET_SIMD && reload_completed" + [(set (match_dup 0) (match_dup 1)) + (set (match_dup 2) (match_dup 3))] +{ + int rdest = REGNO (operands[0]); + int rsrc = REGNO (operands[1]); + rtx dest[2], src[2]; + + dest[0] = gen_rtx_REG (TFmode, rdest); + src[0] = gen_rtx_REG (TFmode, rsrc); + dest[1] = gen_rtx_REG (TFmode, rdest + 1); + src[1] = gen_rtx_REG (TFmode, rsrc + 1); + + aarch64_simd_disambiguate_copy (operands, dest, src, 2); +}) + +(define_split + [(set (match_operand:CI 0 "register_operand" "") + (match_operand:CI 1 "register_operand" ""))] + "TARGET_SIMD && reload_completed" + [(set (match_dup 0) (match_dup 1)) + (set (match_dup 2) (match_dup 3)) + (set (match_dup 4) (match_dup 5))] +{ + int rdest = REGNO (operands[0]); + int rsrc = REGNO (operands[1]); + rtx dest[3], src[3]; + + dest[0] = gen_rtx_REG (TFmode, rdest); + src[0] = gen_rtx_REG (TFmode, rsrc); + dest[1] = gen_rtx_REG (TFmode, rdest + 1); + src[1] = gen_rtx_REG (TFmode, rsrc + 1); + dest[2] = gen_rtx_REG (TFmode, rdest + 2); + src[2] = gen_rtx_REG (TFmode, rsrc + 2); + + aarch64_simd_disambiguate_copy (operands, dest, src, 3); +}) + +(define_split + [(set (match_operand:XI 0 "register_operand" "") + (match_operand:XI 1 "register_operand" ""))] + "TARGET_SIMD && reload_completed" + [(set (match_dup 0) (match_dup 1)) + (set (match_dup 2) (match_dup 3)) + (set (match_dup 4) (match_dup 5)) + (set (match_dup 6) (match_dup 7))] +{ + int rdest = REGNO (operands[0]); + int rsrc = REGNO (operands[1]); + rtx dest[4], src[4]; + + dest[0] = gen_rtx_REG (TFmode, rdest); + src[0] = gen_rtx_REG (TFmode, rsrc); + dest[1] = gen_rtx_REG (TFmode, rdest + 1); + src[1] = gen_rtx_REG (TFmode, rsrc + 1); + dest[2] = gen_rtx_REG (TFmode, rdest + 2); + src[2] = gen_rtx_REG (TFmode, rsrc + 2); + dest[3] = gen_rtx_REG (TFmode, rdest + 3); + src[3] = gen_rtx_REG (TFmode, rsrc + 3); + + 
aarch64_simd_disambiguate_copy (operands, dest, src, 4); +}) + +(define_insn "aarch64_ld2<mode>_dreg" + [(set (match_operand:OI 0 "register_operand" "=w") + (subreg:OI + (vec_concat:<VRL2> + (vec_concat:<VDBL> + (unspec:VD [(match_operand:TI 1 "aarch64_simd_struct_operand" "Utv")] + UNSPEC_LD2) + (vec_duplicate:VD (const_int 0))) + (vec_concat:<VDBL> + (unspec:VD [(match_dup 1)] + UNSPEC_LD2) + (vec_duplicate:VD (const_int 0)))) 0))] + "TARGET_SIMD" + "ld2\\t{%S0.<Vtype> - %T0.<Vtype>}, %1" + [(set_attr "type" "neon_load2_2reg<q>")] +) + +(define_insn "aarch64_ld2<mode>_dreg" + [(set (match_operand:OI 0 "register_operand" "=w") + (subreg:OI + (vec_concat:<VRL2> + (vec_concat:<VDBL> + (unspec:DX [(match_operand:TI 1 "aarch64_simd_struct_operand" "Utv")] + UNSPEC_LD2) + (const_int 0)) + (vec_concat:<VDBL> + (unspec:DX [(match_dup 1)] + UNSPEC_LD2) + (const_int 0))) 0))] + "TARGET_SIMD" + "ld1\\t{%S0.1d - %T0.1d}, %1" + [(set_attr "type" "neon_load1_2reg<q>")] +) + +(define_insn "aarch64_ld3<mode>_dreg" + [(set (match_operand:CI 0 "register_operand" "=w") + (subreg:CI + (vec_concat:<VRL3> + (vec_concat:<VRL2> + (vec_concat:<VDBL> + (unspec:VD [(match_operand:EI 1 "aarch64_simd_struct_operand" "Utv")] + UNSPEC_LD3) + (vec_duplicate:VD (const_int 0))) + (vec_concat:<VDBL> + (unspec:VD [(match_dup 1)] + UNSPEC_LD3) + (vec_duplicate:VD (const_int 0)))) + (vec_concat:<VDBL> + (unspec:VD [(match_dup 1)] + UNSPEC_LD3) + (vec_duplicate:VD (const_int 0)))) 0))] + "TARGET_SIMD" + "ld3\\t{%S0.<Vtype> - %U0.<Vtype>}, %1" + [(set_attr "type" "neon_load3_3reg<q>")] +) + +(define_insn "aarch64_ld3<mode>_dreg" + [(set (match_operand:CI 0 "register_operand" "=w") + (subreg:CI + (vec_concat:<VRL3> + (vec_concat:<VRL2> + (vec_concat:<VDBL> + (unspec:DX [(match_operand:EI 1 "aarch64_simd_struct_operand" "Utv")] + UNSPEC_LD3) + (const_int 0)) + (vec_concat:<VDBL> + (unspec:DX [(match_dup 1)] + UNSPEC_LD3) + (const_int 0))) + (vec_concat:<VDBL> + (unspec:DX [(match_dup 1)] + UNSPEC_LD3) + (const_int 0))) 0))] + "TARGET_SIMD" + "ld1\\t{%S0.1d - %U0.1d}, %1" + [(set_attr "type" "neon_load1_3reg<q>")] +) + +(define_insn "aarch64_ld4<mode>_dreg" + [(set (match_operand:XI 0 "register_operand" "=w") + (subreg:XI + (vec_concat:<VRL4> + (vec_concat:<VRL2> + (vec_concat:<VDBL> + (unspec:VD [(match_operand:OI 1 "aarch64_simd_struct_operand" "Utv")] + UNSPEC_LD4) + (vec_duplicate:VD (const_int 0))) + (vec_concat:<VDBL> + (unspec:VD [(match_dup 1)] + UNSPEC_LD4) + (vec_duplicate:VD (const_int 0)))) + (vec_concat:<VRL2> + (vec_concat:<VDBL> + (unspec:VD [(match_dup 1)] + UNSPEC_LD4) + (vec_duplicate:VD (const_int 0))) + (vec_concat:<VDBL> + (unspec:VD [(match_dup 1)] + UNSPEC_LD4) + (vec_duplicate:VD (const_int 0))))) 0))] + "TARGET_SIMD" + "ld4\\t{%S0.<Vtype> - %V0.<Vtype>}, %1" + [(set_attr "type" "neon_load4_4reg<q>")] +) + +(define_insn "aarch64_ld4<mode>_dreg" + [(set (match_operand:XI 0 "register_operand" "=w") + (subreg:XI + (vec_concat:<VRL4> + (vec_concat:<VRL2> + (vec_concat:<VDBL> + (unspec:DX [(match_operand:OI 1 "aarch64_simd_struct_operand" "Utv")] + UNSPEC_LD4) + (const_int 0)) + (vec_concat:<VDBL> + (unspec:DX [(match_dup 1)] + UNSPEC_LD4) + (const_int 0))) + (vec_concat:<VRL2> + (vec_concat:<VDBL> + (unspec:DX [(match_dup 1)] + UNSPEC_LD4) + (const_int 0)) + (vec_concat:<VDBL> + (unspec:DX [(match_dup 1)] + UNSPEC_LD4) + (const_int 0)))) 0))] + "TARGET_SIMD" + "ld1\\t{%S0.1d - %V0.1d}, %1" + [(set_attr "type" "neon_load1_4reg<q>")] +) + +(define_expand "aarch64_ld<VSTRUCT:nregs><VDC:mode>" + 
[(match_operand:VSTRUCT 0 "register_operand" "=w") + (match_operand:DI 1 "register_operand" "r") + (unspec:VDC [(const_int 0)] UNSPEC_VSTRUCTDUMMY)] + "TARGET_SIMD" +{ + enum machine_mode mode = <VSTRUCT:VSTRUCT_DREG>mode; + rtx mem = gen_rtx_MEM (mode, operands[1]); + + emit_insn (gen_aarch64_ld<VSTRUCT:nregs><VDC:mode>_dreg (operands[0], mem)); + DONE; +}) + +(define_expand "aarch64_ld1<VALL:mode>" + [(match_operand:VALL 0 "register_operand") + (match_operand:DI 1 "register_operand")] + "TARGET_SIMD" +{ + enum machine_mode mode = <VALL:MODE>mode; + rtx mem = gen_rtx_MEM (mode, operands[1]); + + if (BYTES_BIG_ENDIAN) + emit_insn (gen_aarch64_be_ld1<VALL:mode> (operands[0], mem)); + else + emit_move_insn (operands[0], mem); + DONE; +}) + +(define_expand "aarch64_ld<VSTRUCT:nregs><VQ:mode>" + [(match_operand:VSTRUCT 0 "register_operand" "=w") + (match_operand:DI 1 "register_operand" "r") + (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)] + "TARGET_SIMD" +{ + enum machine_mode mode = <VSTRUCT:MODE>mode; + rtx mem = gen_rtx_MEM (mode, operands[1]); + + emit_insn (gen_vec_load_lanes<VSTRUCT:mode><VQ:mode> (operands[0], mem)); + DONE; +}) + +;; Expanders for builtins to extract vector registers from large +;; opaque integer modes. + +;; D-register list. + +(define_expand "aarch64_get_dreg<VSTRUCT:mode><VDC:mode>" + [(match_operand:VDC 0 "register_operand" "=w") + (match_operand:VSTRUCT 1 "register_operand" "w") + (match_operand:SI 2 "immediate_operand" "i")] + "TARGET_SIMD" +{ + int part = INTVAL (operands[2]); + rtx temp = gen_reg_rtx (<VDC:VDBL>mode); + int offset = part * 16; + + emit_move_insn (temp, gen_rtx_SUBREG (<VDC:VDBL>mode, operands[1], offset)); + emit_move_insn (operands[0], gen_lowpart (<VDC:MODE>mode, temp)); + DONE; +}) + +;; Q-register list. + +(define_expand "aarch64_get_qreg<VSTRUCT:mode><VQ:mode>" + [(match_operand:VQ 0 "register_operand" "=w") + (match_operand:VSTRUCT 1 "register_operand" "w") + (match_operand:SI 2 "immediate_operand" "i")] + "TARGET_SIMD" +{ + int part = INTVAL (operands[2]); + int offset = part * 16; + + emit_move_insn (operands[0], + gen_rtx_SUBREG (<VQ:MODE>mode, operands[1], offset)); + DONE; +}) + +;; Permuted-store expanders for neon intrinsics. + +;; Permute instructions + +;; vec_perm support + +(define_expand "vec_perm_const<mode>" + [(match_operand:VALL 0 "register_operand") + (match_operand:VALL 1 "register_operand") + (match_operand:VALL 2 "register_operand") + (match_operand:<V_cmp_result> 3)] + "TARGET_SIMD" +{ + if (aarch64_expand_vec_perm_const (operands[0], operands[1], + operands[2], operands[3])) + DONE; + else + FAIL; +}) + +(define_expand "vec_perm<mode>" + [(match_operand:VB 0 "register_operand") + (match_operand:VB 1 "register_operand") + (match_operand:VB 2 "register_operand") + (match_operand:VB 3 "register_operand")] + "TARGET_SIMD && !BYTES_BIG_ENDIAN" +{ + aarch64_expand_vec_perm (operands[0], operands[1], + operands[2], operands[3]); + DONE; +}) + +(define_insn "aarch64_tbl1<mode>" + [(set (match_operand:VB 0 "register_operand" "=w") + (unspec:VB [(match_operand:V16QI 1 "register_operand" "w") + (match_operand:VB 2 "register_operand" "w")] + UNSPEC_TBL))] + "TARGET_SIMD" + "tbl\\t%0.<Vtype>, {%1.16b}, %2.<Vtype>" + [(set_attr "type" "neon_tbl1<q>")] +) + +;; Two source registers. 
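As a rough illustration of where the permute expanders and the TBL patterns (single-register form above, two-source form just below) come from, assuming GCC's generic __builtin_shuffle and the arm_neon.h vector types: a runtime byte selector cannot be matched by vec_perm_const, so it should go down the vec_perm / TBL route.

#include <arm_neon.h>

/* Two-input byte shuffle with a runtime selector; expected to expand
   through vec_perm<mode> and end up as a TBL over a register pair.  */
uint8x16_t
pick_bytes (uint8x16_t a, uint8x16_t b, uint8x16_t sel)
{
  return __builtin_shuffle (a, b, sel);
}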
+ +(define_insn "aarch64_tbl2v16qi" + [(set (match_operand:V16QI 0 "register_operand" "=w") + (unspec:V16QI [(match_operand:OI 1 "register_operand" "w") + (match_operand:V16QI 2 "register_operand" "w")] + UNSPEC_TBL))] + "TARGET_SIMD" + "tbl\\t%0.16b, {%S1.16b - %T1.16b}, %2.16b" + [(set_attr "type" "neon_tbl2_q")] +) + +(define_insn_and_split "aarch64_combinev16qi" + [(set (match_operand:OI 0 "register_operand" "=w") + (unspec:OI [(match_operand:V16QI 1 "register_operand" "w") + (match_operand:V16QI 2 "register_operand" "w")] + UNSPEC_CONCAT))] + "TARGET_SIMD" + "#" + "&& reload_completed" + [(const_int 0)] +{ + aarch64_split_combinev16qi (operands); + DONE; +} +[(set_attr "type" "multiple")] +) + +(define_insn "aarch64_<PERMUTE:perm_insn><PERMUTE:perm_hilo><mode>" + [(set (match_operand:VALL 0 "register_operand" "=w") + (unspec:VALL [(match_operand:VALL 1 "register_operand" "w") + (match_operand:VALL 2 "register_operand" "w")] + PERMUTE))] + "TARGET_SIMD" + "<PERMUTE:perm_insn><PERMUTE:perm_hilo>\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>" + [(set_attr "type" "neon_permute<q>")] +) + +(define_insn "aarch64_st2<mode>_dreg" + [(set (match_operand:TI 0 "aarch64_simd_struct_operand" "=Utv") + (unspec:TI [(match_operand:OI 1 "register_operand" "w") + (unspec:VD [(const_int 0)] UNSPEC_VSTRUCTDUMMY)] + UNSPEC_ST2))] + "TARGET_SIMD" + "st2\\t{%S1.<Vtype> - %T1.<Vtype>}, %0" + [(set_attr "type" "neon_store2_2reg")] +) + +(define_insn "aarch64_st2<mode>_dreg" + [(set (match_operand:TI 0 "aarch64_simd_struct_operand" "=Utv") + (unspec:TI [(match_operand:OI 1 "register_operand" "w") + (unspec:DX [(const_int 0)] UNSPEC_VSTRUCTDUMMY)] + UNSPEC_ST2))] + "TARGET_SIMD" + "st1\\t{%S1.1d - %T1.1d}, %0" + [(set_attr "type" "neon_store1_2reg")] +) + +(define_insn "aarch64_st3<mode>_dreg" + [(set (match_operand:EI 0 "aarch64_simd_struct_operand" "=Utv") + (unspec:EI [(match_operand:CI 1 "register_operand" "w") + (unspec:VD [(const_int 0)] UNSPEC_VSTRUCTDUMMY)] + UNSPEC_ST3))] + "TARGET_SIMD" + "st3\\t{%S1.<Vtype> - %U1.<Vtype>}, %0" + [(set_attr "type" "neon_store3_3reg")] +) + +(define_insn "aarch64_st3<mode>_dreg" + [(set (match_operand:EI 0 "aarch64_simd_struct_operand" "=Utv") + (unspec:EI [(match_operand:CI 1 "register_operand" "w") + (unspec:DX [(const_int 0)] UNSPEC_VSTRUCTDUMMY)] + UNSPEC_ST3))] + "TARGET_SIMD" + "st1\\t{%S1.1d - %U1.1d}, %0" + [(set_attr "type" "neon_store1_3reg")] +) + +(define_insn "aarch64_st4<mode>_dreg" + [(set (match_operand:OI 0 "aarch64_simd_struct_operand" "=Utv") + (unspec:OI [(match_operand:XI 1 "register_operand" "w") + (unspec:VD [(const_int 0)] UNSPEC_VSTRUCTDUMMY)] + UNSPEC_ST4))] + "TARGET_SIMD" + "st4\\t{%S1.<Vtype> - %V1.<Vtype>}, %0" + [(set_attr "type" "neon_store4_4reg")] +) + +(define_insn "aarch64_st4<mode>_dreg" + [(set (match_operand:OI 0 "aarch64_simd_struct_operand" "=Utv") + (unspec:OI [(match_operand:XI 1 "register_operand" "w") + (unspec:DX [(const_int 0)] UNSPEC_VSTRUCTDUMMY)] + UNSPEC_ST4))] + "TARGET_SIMD" + "st1\\t{%S1.1d - %V1.1d}, %0" + [(set_attr "type" "neon_store1_4reg")] +) + +(define_expand "aarch64_st<VSTRUCT:nregs><VDC:mode>" + [(match_operand:DI 0 "register_operand" "r") + (match_operand:VSTRUCT 1 "register_operand" "w") + (unspec:VDC [(const_int 0)] UNSPEC_VSTRUCTDUMMY)] + "TARGET_SIMD" +{ + enum machine_mode mode = <VSTRUCT:VSTRUCT_DREG>mode; + rtx mem = gen_rtx_MEM (mode, operands[0]); + + emit_insn (gen_aarch64_st<VSTRUCT:nregs><VDC:mode>_dreg (mem, operands[1])); + DONE; +}) + +(define_expand "aarch64_st<VSTRUCT:nregs><VQ:mode>" + 
[(match_operand:DI 0 "register_operand" "r") + (match_operand:VSTRUCT 1 "register_operand" "w") + (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)] + "TARGET_SIMD" +{ + enum machine_mode mode = <VSTRUCT:MODE>mode; + rtx mem = gen_rtx_MEM (mode, operands[0]); + + emit_insn (gen_vec_store_lanes<VSTRUCT:mode><VQ:mode> (mem, operands[1])); + DONE; +}) + +(define_expand "aarch64_st1<VALL:mode>" + [(match_operand:DI 0 "register_operand") + (match_operand:VALL 1 "register_operand")] + "TARGET_SIMD" +{ + enum machine_mode mode = <VALL:MODE>mode; + rtx mem = gen_rtx_MEM (mode, operands[0]); + + if (BYTES_BIG_ENDIAN) + emit_insn (gen_aarch64_be_st1<VALL:mode> (mem, operands[1])); + else + emit_move_insn (mem, operands[1]); + DONE; +}) + +;; Expander for builtins to insert vector registers into large +;; opaque integer modes. + +;; Q-register list. We don't need a D-reg inserter as we zero +;; extend them in arm_neon.h and insert the resulting Q-regs. + +(define_expand "aarch64_set_qreg<VSTRUCT:mode><VQ:mode>" + [(match_operand:VSTRUCT 0 "register_operand" "+w") + (match_operand:VSTRUCT 1 "register_operand" "0") + (match_operand:VQ 2 "register_operand" "w") + (match_operand:SI 3 "immediate_operand" "i")] + "TARGET_SIMD" +{ + int part = INTVAL (operands[3]); + int offset = part * 16; + + emit_move_insn (operands[0], operands[1]); + emit_move_insn (gen_rtx_SUBREG (<VQ:MODE>mode, operands[0], offset), + operands[2]); + DONE; +}) + +;; Standard pattern name vec_init<mode>. + +(define_expand "vec_init<mode>" + [(match_operand:VALL 0 "register_operand" "") + (match_operand 1 "" "")] + "TARGET_SIMD" +{ + aarch64_expand_vector_init (operands[0], operands[1]); + DONE; +}) + +(define_insn "*aarch64_simd_ld1r<mode>" + [(set (match_operand:VALLDI 0 "register_operand" "=w") + (vec_duplicate:VALLDI + (match_operand:<VEL> 1 "aarch64_simd_struct_operand" "Utv")))] + "TARGET_SIMD" + "ld1r\\t{%0.<Vtype>}, %1" + [(set_attr "type" "neon_load1_all_lanes")] +) + +(define_insn "aarch64_frecpe<mode>" + [(set (match_operand:VDQF 0 "register_operand" "=w") + (unspec:VDQF [(match_operand:VDQF 1 "register_operand" "w")] + UNSPEC_FRECPE))] + "TARGET_SIMD" + "frecpe\\t%0.<Vtype>, %1.<Vtype>" + [(set_attr "type" "neon_fp_recpe_<Vetype><q>")] +) + +(define_insn "aarch64_frecp<FRECP:frecp_suffix><mode>" + [(set (match_operand:GPF 0 "register_operand" "=w") + (unspec:GPF [(match_operand:GPF 1 "register_operand" "w")] + FRECP))] + "TARGET_SIMD" + "frecp<FRECP:frecp_suffix>\\t%<s>0, %<s>1" + [(set_attr "type" "neon_fp_recp<FRECP:frecp_suffix>_<GPF:Vetype><GPF:q>")] +) + +(define_insn "aarch64_frecps<mode>" + [(set (match_operand:VALLF 0 "register_operand" "=w") + (unspec:VALLF [(match_operand:VALLF 1 "register_operand" "w") + (match_operand:VALLF 2 "register_operand" "w")] + UNSPEC_FRECPS))] + "TARGET_SIMD" + "frecps\\t%<v>0<Vmtype>, %<v>1<Vmtype>, %<v>2<Vmtype>" + [(set_attr "type" "neon_fp_recps_<Vetype><q>")] +) + +;; Standard pattern name vec_extract<mode>. 
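One more hedged sketch before the vec_extract expander that follows: the frecpe/frecps patterns above are normally used together for Newton-Raphson reciprocal refinement, written here with the arm_neon.h intrinsics (vrecpeq_f32, vrecpsq_f32). Each FRECPS step computes 2 - d*r, so two refinement steps give roughly single-precision accuracy.

#include <arm_neon.h>

/* Approximate num/den: FRECPE estimate refined twice with FRECPS + FMUL.  */
float32x4_t
approx_div (float32x4_t num, float32x4_t den)
{
  float32x4_t r = vrecpeq_f32 (den);          /* frecpe          */
  r = vmulq_f32 (r, vrecpsq_f32 (den, r));    /* frecps + fmul   */
  r = vmulq_f32 (r, vrecpsq_f32 (den, r));    /* second NR step  */
  return vmulq_f32 (num, r);
}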
+ +(define_expand "vec_extract<mode>" + [(match_operand:<VEL> 0 "aarch64_simd_nonimmediate_operand" "") + (match_operand:VALL 1 "register_operand" "") + (match_operand:SI 2 "immediate_operand" "")] + "TARGET_SIMD" +{ + emit_insn + (gen_aarch64_get_lane<mode> (operands[0], operands[1], operands[2])); + DONE; +}) + +;; aes + +(define_insn "aarch64_crypto_aes<aes_op>v16qi" + [(set (match_operand:V16QI 0 "register_operand" "=w") + (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "0") + (match_operand:V16QI 2 "register_operand" "w")] + CRYPTO_AES))] + "TARGET_SIMD && TARGET_CRYPTO" + "aes<aes_op>\\t%0.16b, %2.16b" + [(set_attr "type" "crypto_aes")] +) + +(define_insn "aarch64_crypto_aes<aesmc_op>v16qi" + [(set (match_operand:V16QI 0 "register_operand" "=w") + (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "w")] + CRYPTO_AESMC))] + "TARGET_SIMD && TARGET_CRYPTO" + "aes<aesmc_op>\\t%0.16b, %1.16b" + [(set_attr "type" "crypto_aes")] +) + +;; sha1 + +(define_insn "aarch64_crypto_sha1hsi" + [(set (match_operand:SI 0 "register_operand" "=w") + (unspec:SI [(match_operand:SI 1 + "register_operand" "w")] + UNSPEC_SHA1H))] + "TARGET_SIMD && TARGET_CRYPTO" + "sha1h\\t%s0, %s1" + [(set_attr "type" "crypto_sha1_fast")] +) + +(define_insn "aarch64_crypto_sha1su1v4si" + [(set (match_operand:V4SI 0 "register_operand" "=w") + (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "0") + (match_operand:V4SI 2 "register_operand" "w")] + UNSPEC_SHA1SU1))] + "TARGET_SIMD && TARGET_CRYPTO" + "sha1su1\\t%0.4s, %2.4s" + [(set_attr "type" "crypto_sha1_fast")] +) + +(define_insn "aarch64_crypto_sha1<sha1_op>v4si" + [(set (match_operand:V4SI 0 "register_operand" "=w") + (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "0") + (match_operand:SI 2 "register_operand" "w") + (match_operand:V4SI 3 "register_operand" "w")] + CRYPTO_SHA1))] + "TARGET_SIMD && TARGET_CRYPTO" + "sha1<sha1_op>\\t%q0, %s2, %3.4s" + [(set_attr "type" "crypto_sha1_slow")] +) + +(define_insn "aarch64_crypto_sha1su0v4si" + [(set (match_operand:V4SI 0 "register_operand" "=w") + (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "0") + (match_operand:V4SI 2 "register_operand" "w") + (match_operand:V4SI 3 "register_operand" "w")] + UNSPEC_SHA1SU0))] + "TARGET_SIMD && TARGET_CRYPTO" + "sha1su0\\t%0.4s, %2.4s, %3.4s" + [(set_attr "type" "crypto_sha1_xor")] +) + +;; sha256 + +(define_insn "aarch64_crypto_sha256h<sha256_op>v4si" + [(set (match_operand:V4SI 0 "register_operand" "=w") + (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "0") + (match_operand:V4SI 2 "register_operand" "w") + (match_operand:V4SI 3 "register_operand" "w")] + CRYPTO_SHA256))] + "TARGET_SIMD && TARGET_CRYPTO" + "sha256h<sha256_op>\\t%q0, %q2, %3.4s" + [(set_attr "type" "crypto_sha256_slow")] +) + +(define_insn "aarch64_crypto_sha256su0v4si" + [(set (match_operand:V4SI 0 "register_operand" "=w") + (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "0") + (match_operand:V4SI 2 "register_operand" "w")] + UNSPEC_SHA256SU0))] + "TARGET_SIMD &&TARGET_CRYPTO" + "sha256su0\\t%0.4s, %2.4s" + [(set_attr "type" "crypto_sha256_fast")] +) + +(define_insn "aarch64_crypto_sha256su1v4si" + [(set (match_operand:V4SI 0 "register_operand" "=w") + (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "0") + (match_operand:V4SI 2 "register_operand" "w") + (match_operand:V4SI 3 "register_operand" "w")] + UNSPEC_SHA256SU1))] + "TARGET_SIMD &&TARGET_CRYPTO" + "sha256su1\\t%0.4s, %2.4s, %3.4s" + [(set_attr "type" "crypto_sha256_slow")] +) + +;; pmull + +(define_insn 
"aarch64_crypto_pmulldi" + [(set (match_operand:TI 0 "register_operand" "=w") + (unspec:TI [(match_operand:DI 1 "register_operand" "w") + (match_operand:DI 2 "register_operand" "w")] + UNSPEC_PMULL))] + "TARGET_SIMD && TARGET_CRYPTO" + "pmull\\t%0.1q, %1.1d, %2.1d" + [(set_attr "type" "neon_mul_d_long")] +) + +(define_insn "aarch64_crypto_pmullv2di" + [(set (match_operand:TI 0 "register_operand" "=w") + (unspec:TI [(match_operand:V2DI 1 "register_operand" "w") + (match_operand:V2DI 2 "register_operand" "w")] + UNSPEC_PMULL2))] + "TARGET_SIMD && TARGET_CRYPTO" + "pmull2\\t%0.1q, %1.2d, %2.2d" + [(set_attr "type" "neon_mul_d_long")] +) diff --git a/gcc-4.9/gcc/config/aarch64/aarch64-tune.md b/gcc-4.9/gcc/config/aarch64/aarch64-tune.md new file mode 100644 index 000000000..b7e40e0b5 --- /dev/null +++ b/gcc-4.9/gcc/config/aarch64/aarch64-tune.md @@ -0,0 +1,5 @@ +;; -*- buffer-read-only: t -*- +;; Generated automatically by gentune.sh from aarch64-cores.def +(define_attr "tune" + "cortexa53,cortexa15,cortexa57cortexa53" + (const (symbol_ref "((enum attr_tune) aarch64_tune)"))) diff --git a/gcc-4.9/gcc/config/aarch64/aarch64.c b/gcc-4.9/gcc/config/aarch64/aarch64.c new file mode 100644 index 000000000..ebd58c007 --- /dev/null +++ b/gcc-4.9/gcc/config/aarch64/aarch64.c @@ -0,0 +1,8544 @@ +/* Machine description for AArch64 architecture. + Copyright (C) 2009-2014 Free Software Foundation, Inc. + Contributed by ARM Ltd. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING3. If not see + <http://www.gnu.org/licenses/>. */ + +#include "config.h" +#include "system.h" +#include "coretypes.h" +#include "tm.h" +#include "insn-codes.h" +#include "rtl.h" +#include "insn-attr.h" +#include "tree.h" +#include "stringpool.h" +#include "stor-layout.h" +#include "calls.h" +#include "varasm.h" +#include "regs.h" +#include "df.h" +#include "hard-reg-set.h" +#include "output.h" +#include "expr.h" +#include "reload.h" +#include "toplev.h" +#include "target.h" +#include "target-def.h" +#include "targhooks.h" +#include "ggc.h" +#include "function.h" +#include "tm_p.h" +#include "recog.h" +#include "langhooks.h" +#include "diagnostic-core.h" +#include "pointer-set.h" +#include "hash-table.h" +#include "vec.h" +#include "basic-block.h" +#include "tree-ssa-alias.h" +#include "internal-fn.h" +#include "gimple-fold.h" +#include "tree-eh.h" +#include "gimple-expr.h" +#include "is-a.h" +#include "gimple.h" +#include "gimplify.h" +#include "optabs.h" +#include "dwarf2.h" +#include "cfgloop.h" +#include "tree-vectorizer.h" +#include "config/arm/aarch-cost-tables.h" + +/* Defined for convenience. */ +#define POINTER_BYTES (POINTER_SIZE / BITS_PER_UNIT) + +/* Classifies an address. + + ADDRESS_REG_IMM + A simple base register plus immediate offset. + + ADDRESS_REG_WB + A base register indexed by immediate offset with writeback. + + ADDRESS_REG_REG + A base register indexed by (optionally scaled) register. 
+ + ADDRESS_REG_UXTW + A base register indexed by (optionally scaled) zero-extended register. + + ADDRESS_REG_SXTW + A base register indexed by (optionally scaled) sign-extended register. + + ADDRESS_LO_SUM + A LO_SUM rtx with a base register and "LO12" symbol relocation. + + ADDRESS_SYMBOLIC: + A constant symbolic address, in pc-relative literal pool. */ + +enum aarch64_address_type { + ADDRESS_REG_IMM, + ADDRESS_REG_WB, + ADDRESS_REG_REG, + ADDRESS_REG_UXTW, + ADDRESS_REG_SXTW, + ADDRESS_LO_SUM, + ADDRESS_SYMBOLIC +}; + +struct aarch64_address_info { + enum aarch64_address_type type; + rtx base; + rtx offset; + int shift; + enum aarch64_symbol_type symbol_type; +}; + +struct simd_immediate_info +{ + rtx value; + int shift; + int element_width; + bool mvn; + bool msl; +}; + +/* The current code model. */ +enum aarch64_code_model aarch64_cmodel; + +#ifdef HAVE_AS_TLS +#undef TARGET_HAVE_TLS +#define TARGET_HAVE_TLS 1 +#endif + +static bool aarch64_lra_p (void); +static bool aarch64_composite_type_p (const_tree, enum machine_mode); +static bool aarch64_vfp_is_call_or_return_candidate (enum machine_mode, + const_tree, + enum machine_mode *, int *, + bool *); +static void aarch64_elf_asm_constructor (rtx, int) ATTRIBUTE_UNUSED; +static void aarch64_elf_asm_destructor (rtx, int) ATTRIBUTE_UNUSED; +static void aarch64_override_options_after_change (void); +static bool aarch64_vector_mode_supported_p (enum machine_mode); +static unsigned bit_count (unsigned HOST_WIDE_INT); +static bool aarch64_const_vec_all_same_int_p (rtx, + HOST_WIDE_INT, HOST_WIDE_INT); + +static bool aarch64_vectorize_vec_perm_const_ok (enum machine_mode vmode, + const unsigned char *sel); + +/* The processor for which instructions should be scheduled. */ +enum aarch64_processor aarch64_tune = cortexa53; + +/* The current tuning set. */ +const struct tune_params *aarch64_tune_params; + +/* Mask to specify which instructions we are allowed to generate. */ +unsigned long aarch64_isa_flags = 0; + +/* Mask to specify which instruction scheduling options should be used. */ +unsigned long aarch64_tune_flags = 0; + +/* Tuning parameters. */ + +#if HAVE_DESIGNATED_INITIALIZERS +#define NAMED_PARAM(NAME, VAL) .NAME = (VAL) +#else +#define NAMED_PARAM(NAME, VAL) (VAL) +#endif + +#if HAVE_DESIGNATED_INITIALIZERS && GCC_VERSION >= 2007 +__extension__ +#endif + +#if HAVE_DESIGNATED_INITIALIZERS && GCC_VERSION >= 2007 +__extension__ +#endif +static const struct cpu_addrcost_table generic_addrcost_table = +{ + NAMED_PARAM (pre_modify, 0), + NAMED_PARAM (post_modify, 0), + NAMED_PARAM (register_offset, 0), + NAMED_PARAM (register_extend, 0), + NAMED_PARAM (imm_offset, 0) +}; + +#if HAVE_DESIGNATED_INITIALIZERS && GCC_VERSION >= 2007 +__extension__ +#endif +static const struct cpu_regmove_cost generic_regmove_cost = +{ + NAMED_PARAM (GP2GP, 1), + NAMED_PARAM (GP2FP, 2), + NAMED_PARAM (FP2GP, 2), + /* We currently do not provide direct support for TFmode Q->Q move. + Therefore we need to raise the cost above 2 in order to have + reload handle the situation. */ + NAMED_PARAM (FP2FP, 4) +}; + +/* Generic costs for vector insn classes. 
*/ +#if HAVE_DESIGNATED_INITIALIZERS && GCC_VERSION >= 2007 +__extension__ +#endif +static const struct cpu_vector_cost generic_vector_cost = +{ + NAMED_PARAM (scalar_stmt_cost, 1), + NAMED_PARAM (scalar_load_cost, 1), + NAMED_PARAM (scalar_store_cost, 1), + NAMED_PARAM (vec_stmt_cost, 1), + NAMED_PARAM (vec_to_scalar_cost, 1), + NAMED_PARAM (scalar_to_vec_cost, 1), + NAMED_PARAM (vec_align_load_cost, 1), + NAMED_PARAM (vec_unalign_load_cost, 1), + NAMED_PARAM (vec_unalign_store_cost, 1), + NAMED_PARAM (vec_store_cost, 1), + NAMED_PARAM (cond_taken_branch_cost, 3), + NAMED_PARAM (cond_not_taken_branch_cost, 1) +}; + +#if HAVE_DESIGNATED_INITIALIZERS && GCC_VERSION >= 2007 +__extension__ +#endif +static const struct tune_params generic_tunings = +{ + &cortexa57_extra_costs, + &generic_addrcost_table, + &generic_regmove_cost, + &generic_vector_cost, + NAMED_PARAM (memmov_cost, 4), + NAMED_PARAM (issue_rate, 2) +}; + +static const struct tune_params cortexa53_tunings = +{ + &cortexa53_extra_costs, + &generic_addrcost_table, + &generic_regmove_cost, + &generic_vector_cost, + NAMED_PARAM (memmov_cost, 4), + NAMED_PARAM (issue_rate, 2) +}; + +static const struct tune_params cortexa57_tunings = +{ + &cortexa57_extra_costs, + &generic_addrcost_table, + &generic_regmove_cost, + &generic_vector_cost, + NAMED_PARAM (memmov_cost, 4), + NAMED_PARAM (issue_rate, 3) +}; + +/* A processor implementing AArch64. */ +struct processor +{ + const char *const name; + enum aarch64_processor core; + const char *arch; + const unsigned long flags; + const struct tune_params *const tune; +}; + +/* Processor cores implementing AArch64. */ +static const struct processor all_cores[] = +{ +#define AARCH64_CORE(NAME, X, IDENT, ARCH, FLAGS, COSTS) \ + {NAME, IDENT, #ARCH, FLAGS | AARCH64_FL_FOR_ARCH##ARCH, &COSTS##_tunings}, +#include "aarch64-cores.def" +#undef AARCH64_CORE + {"generic", cortexa53, "8", AARCH64_FL_FPSIMD | AARCH64_FL_FOR_ARCH8, &generic_tunings}, + {NULL, aarch64_none, NULL, 0, NULL} +}; + +/* Architectures implementing AArch64. */ +static const struct processor all_architectures[] = +{ +#define AARCH64_ARCH(NAME, CORE, ARCH, FLAGS) \ + {NAME, CORE, #ARCH, FLAGS, NULL}, +#include "aarch64-arches.def" +#undef AARCH64_ARCH + {NULL, aarch64_none, NULL, 0, NULL} +}; + +/* Target specification. These are populated as commandline arguments + are processed, or NULL if not specified. */ +static const struct processor *selected_arch; +static const struct processor *selected_cpu; +static const struct processor *selected_tune; + +#define AARCH64_CPU_DEFAULT_FLAGS ((selected_cpu) ? selected_cpu->flags : 0) + +/* An ISA extension in the co-processor and main instruction set space. */ +struct aarch64_option_extension +{ + const char *const name; + const unsigned long flags_on; + const unsigned long flags_off; +}; + +/* ISA extensions in AArch64. */ +static const struct aarch64_option_extension all_extensions[] = +{ +#define AARCH64_OPT_EXTENSION(NAME, FLAGS_ON, FLAGS_OFF) \ + {NAME, FLAGS_ON, FLAGS_OFF}, +#include "aarch64-option-extensions.def" +#undef AARCH64_OPT_EXTENSION + {NULL, 0, 0} +}; + +/* Used to track the size of an address when generating a pre/post + increment address. */ +static enum machine_mode aarch64_memory_reference_mode; + +/* Used to force GTY into this file. */ +static GTY(()) int gty_dummy; + +/* A table of valid AArch64 "bitmask immediate" values for + logical instructions. 
*/ + +#define AARCH64_NUM_BITMASKS 5334 +static unsigned HOST_WIDE_INT aarch64_bitmasks[AARCH64_NUM_BITMASKS]; + +/* Did we set flag_omit_frame_pointer just so + aarch64_frame_pointer_required would be called? */ +static bool faked_omit_frame_pointer; + +typedef enum aarch64_cond_code +{ + AARCH64_EQ = 0, AARCH64_NE, AARCH64_CS, AARCH64_CC, AARCH64_MI, AARCH64_PL, + AARCH64_VS, AARCH64_VC, AARCH64_HI, AARCH64_LS, AARCH64_GE, AARCH64_LT, + AARCH64_GT, AARCH64_LE, AARCH64_AL, AARCH64_NV +} +aarch64_cc; + +#define AARCH64_INVERSE_CONDITION_CODE(X) ((aarch64_cc) (((int) X) ^ 1)) + +/* The condition codes of the processor, and the inverse function. */ +static const char * const aarch64_condition_codes[] = +{ + "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc", + "hi", "ls", "ge", "lt", "gt", "le", "al", "nv" +}; + +/* Provide a mapping from gcc register numbers to dwarf register numbers. */ +unsigned +aarch64_dbx_register_number (unsigned regno) +{ + if (GP_REGNUM_P (regno)) + return AARCH64_DWARF_R0 + regno - R0_REGNUM; + else if (regno == SP_REGNUM) + return AARCH64_DWARF_SP; + else if (FP_REGNUM_P (regno)) + return AARCH64_DWARF_V0 + regno - V0_REGNUM; + + /* Return values >= DWARF_FRAME_REGISTERS indicate that there is no + equivalent DWARF register. */ + return DWARF_FRAME_REGISTERS; +} + +/* Return TRUE if MODE is any of the large INT modes. */ +static bool +aarch64_vect_struct_mode_p (enum machine_mode mode) +{ + return mode == OImode || mode == CImode || mode == XImode; +} + +/* Return TRUE if MODE is any of the vector modes. */ +static bool +aarch64_vector_mode_p (enum machine_mode mode) +{ + return aarch64_vector_mode_supported_p (mode) + || aarch64_vect_struct_mode_p (mode); +} + +/* Implement target hook TARGET_ARRAY_MODE_SUPPORTED_P. */ +static bool +aarch64_array_mode_supported_p (enum machine_mode mode, + unsigned HOST_WIDE_INT nelems) +{ + if (TARGET_SIMD + && AARCH64_VALID_SIMD_QREG_MODE (mode) + && (nelems >= 2 && nelems <= 4)) + return true; + + return false; +} + +/* Implement HARD_REGNO_NREGS. */ + +int +aarch64_hard_regno_nregs (unsigned regno, enum machine_mode mode) +{ + switch (aarch64_regno_regclass (regno)) + { + case FP_REGS: + case FP_LO_REGS: + return (GET_MODE_SIZE (mode) + UNITS_PER_VREG - 1) / UNITS_PER_VREG; + default: + return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD; + } + gcc_unreachable (); +} + +/* Implement HARD_REGNO_MODE_OK. */ + +int +aarch64_hard_regno_mode_ok (unsigned regno, enum machine_mode mode) +{ + if (GET_MODE_CLASS (mode) == MODE_CC) + return regno == CC_REGNUM; + + if (regno == SP_REGNUM) + /* The purpose of comparing with ptr_mode is to support the + global register variable associated with the stack pointer + register via the syntax of asm ("wsp") in ILP32. */ + return mode == Pmode || mode == ptr_mode; + + if (regno == FRAME_POINTER_REGNUM || regno == ARG_POINTER_REGNUM) + return mode == Pmode; + + if (GP_REGNUM_P (regno) && ! aarch64_vect_struct_mode_p (mode)) + return 1; + + if (FP_REGNUM_P (regno)) + { + if (aarch64_vect_struct_mode_p (mode)) + return + (regno + aarch64_hard_regno_nregs (regno, mode) - 1) <= V31_REGNUM; + else + return 1; + } + + return 0; +} + +/* Return true if calls to DECL should be treated as + long-calls (ie called via a register). */ +static bool +aarch64_decl_is_long_call_p (const_tree decl ATTRIBUTE_UNUSED) +{ + return false; +} + +/* Return true if calls to symbol-ref SYM should be treated as + long-calls (ie called via a register). 
*/ +bool +aarch64_is_long_call_p (rtx sym) +{ + return aarch64_decl_is_long_call_p (SYMBOL_REF_DECL (sym)); +} + +/* Return true if the offsets to a zero/sign-extract operation + represent an expression that matches an extend operation. The + operands represent the paramters from + + (extract (mult (reg) (mult_imm)) (extract_imm) (const_int 0)). */ +bool +aarch64_is_extend_from_extract (enum machine_mode mode, rtx mult_imm, + rtx extract_imm) +{ + HOST_WIDE_INT mult_val, extract_val; + + if (! CONST_INT_P (mult_imm) || ! CONST_INT_P (extract_imm)) + return false; + + mult_val = INTVAL (mult_imm); + extract_val = INTVAL (extract_imm); + + if (extract_val > 8 + && extract_val < GET_MODE_BITSIZE (mode) + && exact_log2 (extract_val & ~7) > 0 + && (extract_val & 7) <= 4 + && mult_val == (1 << (extract_val & 7))) + return true; + + return false; +} + +/* Emit an insn that's a simple single-set. Both the operands must be + known to be valid. */ +inline static rtx +emit_set_insn (rtx x, rtx y) +{ + return emit_insn (gen_rtx_SET (VOIDmode, x, y)); +} + +/* X and Y are two things to compare using CODE. Emit the compare insn and + return the rtx for register 0 in the proper mode. */ +rtx +aarch64_gen_compare_reg (RTX_CODE code, rtx x, rtx y) +{ + enum machine_mode mode = SELECT_CC_MODE (code, x, y); + rtx cc_reg = gen_rtx_REG (mode, CC_REGNUM); + + emit_set_insn (cc_reg, gen_rtx_COMPARE (mode, x, y)); + return cc_reg; +} + +/* Build the SYMBOL_REF for __tls_get_addr. */ + +static GTY(()) rtx tls_get_addr_libfunc; + +rtx +aarch64_tls_get_addr (void) +{ + if (!tls_get_addr_libfunc) + tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr"); + return tls_get_addr_libfunc; +} + +/* Return the TLS model to use for ADDR. */ + +static enum tls_model +tls_symbolic_operand_type (rtx addr) +{ + enum tls_model tls_kind = TLS_MODEL_NONE; + rtx sym, addend; + + if (GET_CODE (addr) == CONST) + { + split_const (addr, &sym, &addend); + if (GET_CODE (sym) == SYMBOL_REF) + tls_kind = SYMBOL_REF_TLS_MODEL (sym); + } + else if (GET_CODE (addr) == SYMBOL_REF) + tls_kind = SYMBOL_REF_TLS_MODEL (addr); + + return tls_kind; +} + +/* We'll allow lo_sum's in addresses in our legitimate addresses + so that combine would take care of combining addresses where + necessary, but for generation purposes, we'll generate the address + as : + RTL Absolute + tmp = hi (symbol_ref); adrp x1, foo + dest = lo_sum (tmp, symbol_ref); add dest, x1, :lo_12:foo + nop + + PIC TLS + adrp x1, :got:foo adrp tmp, :tlsgd:foo + ldr x1, [:got_lo12:foo] add dest, tmp, :tlsgd_lo12:foo + bl __tls_get_addr + nop + + Load TLS symbol, depending on TLS mechanism and TLS access model. + + Global Dynamic - Traditional TLS: + adrp tmp, :tlsgd:imm + add dest, tmp, #:tlsgd_lo12:imm + bl __tls_get_addr + + Global Dynamic - TLS Descriptors: + adrp dest, :tlsdesc:imm + ldr tmp, [dest, #:tlsdesc_lo12:imm] + add dest, dest, #:tlsdesc_lo12:imm + blr tmp + mrs tp, tpidr_el0 + add dest, dest, tp + + Initial Exec: + mrs tp, tpidr_el0 + adrp tmp, :gottprel:imm + ldr dest, [tmp, #:gottprel_lo12:imm] + add dest, dest, tp + + Local Exec: + mrs tp, tpidr_el0 + add t0, tp, #:tprel_hi12:imm + add t0, #:tprel_lo12_nc:imm +*/ + +static void +aarch64_load_symref_appropriately (rtx dest, rtx imm, + enum aarch64_symbol_type type) +{ + switch (type) + { + case SYMBOL_SMALL_ABSOLUTE: + { + /* In ILP32, the mode of dest can be either SImode or DImode. 
*/ + rtx tmp_reg = dest; + enum machine_mode mode = GET_MODE (dest); + + gcc_assert (mode == Pmode || mode == ptr_mode); + + if (can_create_pseudo_p ()) + tmp_reg = gen_reg_rtx (mode); + + emit_move_insn (tmp_reg, gen_rtx_HIGH (mode, imm)); + emit_insn (gen_add_losym (dest, tmp_reg, imm)); + return; + } + + case SYMBOL_TINY_ABSOLUTE: + emit_insn (gen_rtx_SET (Pmode, dest, imm)); + return; + + case SYMBOL_SMALL_GOT: + { + /* In ILP32, the mode of dest can be either SImode or DImode, + while the got entry is always of SImode size. The mode of + dest depends on how dest is used: if dest is assigned to a + pointer (e.g. in the memory), it has SImode; it may have + DImode if dest is dereferenced to access the memeory. + This is why we have to handle three different ldr_got_small + patterns here (two patterns for ILP32). */ + rtx tmp_reg = dest; + enum machine_mode mode = GET_MODE (dest); + + if (can_create_pseudo_p ()) + tmp_reg = gen_reg_rtx (mode); + + emit_move_insn (tmp_reg, gen_rtx_HIGH (mode, imm)); + if (mode == ptr_mode) + { + if (mode == DImode) + emit_insn (gen_ldr_got_small_di (dest, tmp_reg, imm)); + else + emit_insn (gen_ldr_got_small_si (dest, tmp_reg, imm)); + } + else + { + gcc_assert (mode == Pmode); + emit_insn (gen_ldr_got_small_sidi (dest, tmp_reg, imm)); + } + + return; + } + + case SYMBOL_SMALL_TLSGD: + { + rtx insns; + rtx result = gen_rtx_REG (Pmode, R0_REGNUM); + + start_sequence (); + emit_call_insn (gen_tlsgd_small (result, imm)); + insns = get_insns (); + end_sequence (); + + RTL_CONST_CALL_P (insns) = 1; + emit_libcall_block (insns, dest, result, imm); + return; + } + + case SYMBOL_SMALL_TLSDESC: + { + rtx x0 = gen_rtx_REG (Pmode, R0_REGNUM); + rtx tp; + + emit_insn (gen_tlsdesc_small (imm)); + tp = aarch64_load_tp (NULL); + emit_insn (gen_rtx_SET (Pmode, dest, gen_rtx_PLUS (Pmode, tp, x0))); + set_unique_reg_note (get_last_insn (), REG_EQUIV, imm); + return; + } + + case SYMBOL_SMALL_GOTTPREL: + { + rtx tmp_reg = gen_reg_rtx (Pmode); + rtx tp = aarch64_load_tp (NULL); + emit_insn (gen_tlsie_small (tmp_reg, imm)); + emit_insn (gen_rtx_SET (Pmode, dest, gen_rtx_PLUS (Pmode, tp, tmp_reg))); + set_unique_reg_note (get_last_insn (), REG_EQUIV, imm); + return; + } + + case SYMBOL_SMALL_TPREL: + { + rtx tp = aarch64_load_tp (NULL); + emit_insn (gen_tlsle_small (dest, tp, imm)); + set_unique_reg_note (get_last_insn (), REG_EQUIV, imm); + return; + } + + case SYMBOL_TINY_GOT: + emit_insn (gen_ldr_got_tiny (dest, imm)); + return; + + default: + gcc_unreachable (); + } +} + +/* Emit a move from SRC to DEST. Assume that the move expanders can + handle all moves if !can_create_pseudo_p (). The distinction is + important because, unlike emit_move_insn, the move expanders know + how to force Pmode objects into the constant pool even when the + constant pool address is not itself legitimate. */ +static rtx +aarch64_emit_move (rtx dest, rtx src) +{ + return (can_create_pseudo_p () + ? emit_move_insn (dest, src) + : emit_move_insn_1 (dest, src)); +} + +/* Split a 128-bit move operation into two 64-bit move operations, + taking care to handle partial overlap of register to register + copies. Special cases are needed when moving between GP regs and + FP regs. SRC can be a register, constant or memory; DST a register + or memory. If either operand is memory it must not have any side + effects. 
*/ +void +aarch64_split_128bit_move (rtx dst, rtx src) +{ + rtx dst_lo, dst_hi; + rtx src_lo, src_hi; + + enum machine_mode mode = GET_MODE (dst); + + gcc_assert (mode == TImode || mode == TFmode); + gcc_assert (!(side_effects_p (src) || side_effects_p (dst))); + gcc_assert (mode == GET_MODE (src) || GET_MODE (src) == VOIDmode); + + if (REG_P (dst) && REG_P (src)) + { + int src_regno = REGNO (src); + int dst_regno = REGNO (dst); + + /* Handle FP <-> GP regs. */ + if (FP_REGNUM_P (dst_regno) && GP_REGNUM_P (src_regno)) + { + src_lo = gen_lowpart (word_mode, src); + src_hi = gen_highpart (word_mode, src); + + if (mode == TImode) + { + emit_insn (gen_aarch64_movtilow_di (dst, src_lo)); + emit_insn (gen_aarch64_movtihigh_di (dst, src_hi)); + } + else + { + emit_insn (gen_aarch64_movtflow_di (dst, src_lo)); + emit_insn (gen_aarch64_movtfhigh_di (dst, src_hi)); + } + return; + } + else if (GP_REGNUM_P (dst_regno) && FP_REGNUM_P (src_regno)) + { + dst_lo = gen_lowpart (word_mode, dst); + dst_hi = gen_highpart (word_mode, dst); + + if (mode == TImode) + { + emit_insn (gen_aarch64_movdi_tilow (dst_lo, src)); + emit_insn (gen_aarch64_movdi_tihigh (dst_hi, src)); + } + else + { + emit_insn (gen_aarch64_movdi_tflow (dst_lo, src)); + emit_insn (gen_aarch64_movdi_tfhigh (dst_hi, src)); + } + return; + } + } + + dst_lo = gen_lowpart (word_mode, dst); + dst_hi = gen_highpart (word_mode, dst); + src_lo = gen_lowpart (word_mode, src); + src_hi = gen_highpart_mode (word_mode, mode, src); + + /* At most one pairing may overlap. */ + if (reg_overlap_mentioned_p (dst_lo, src_hi)) + { + aarch64_emit_move (dst_hi, src_hi); + aarch64_emit_move (dst_lo, src_lo); + } + else + { + aarch64_emit_move (dst_lo, src_lo); + aarch64_emit_move (dst_hi, src_hi); + } +} + +bool +aarch64_split_128bit_move_p (rtx dst, rtx src) +{ + return (! REG_P (src) + || ! (FP_REGNUM_P (REGNO (dst)) && FP_REGNUM_P (REGNO (src)))); +} + +/* Split a complex SIMD combine. */ + +void +aarch64_split_simd_combine (rtx dst, rtx src1, rtx src2) +{ + enum machine_mode src_mode = GET_MODE (src1); + enum machine_mode dst_mode = GET_MODE (dst); + + gcc_assert (VECTOR_MODE_P (dst_mode)); + + if (REG_P (dst) && REG_P (src1) && REG_P (src2)) + { + rtx (*gen) (rtx, rtx, rtx); + + switch (src_mode) + { + case V8QImode: + gen = gen_aarch64_simd_combinev8qi; + break; + case V4HImode: + gen = gen_aarch64_simd_combinev4hi; + break; + case V2SImode: + gen = gen_aarch64_simd_combinev2si; + break; + case V2SFmode: + gen = gen_aarch64_simd_combinev2sf; + break; + case DImode: + gen = gen_aarch64_simd_combinedi; + break; + case DFmode: + gen = gen_aarch64_simd_combinedf; + break; + default: + gcc_unreachable (); + } + + emit_insn (gen (dst, src1, src2)); + return; + } +} + +/* Split a complex SIMD move. 
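Once the FP<->GP special cases in aarch64_split_128bit_move above are out of the way, the move is just two word-sized copies whose order matters only when the low half of the destination is the high half of the source. A small sketch of that ordering rule, modelling register halves as array slots rather than RTL (the helper name is illustrative):

#include <stdio.h>

/* Copy the two 64-bit halves of SRC into DST, where DST and SRC index
   adjacent "register" pairs in REGS.  Mirrors the ordering logic at the
   end of aarch64_split_128bit_move: if the destination's low half is the
   source's high half, move the high halves first.  */
static void
move_128 (unsigned long long *regs, int dst, int src)
{
  int dst_lo = dst, dst_hi = dst + 1;
  int src_lo = src, src_hi = src + 1;

  if (dst_lo == src_hi)          /* at most one pairing may overlap */
    {
      regs[dst_hi] = regs[src_hi];
      regs[dst_lo] = regs[src_lo];
    }
  else
    {
      regs[dst_lo] = regs[src_lo];
      regs[dst_hi] = regs[src_hi];
    }
}

int
main (void)
{
  unsigned long long regs[4] = { 1, 2, 3, 4 };
  move_128 (regs, 1, 0);                       /* overlapping: r1:r2 <- r0:r1 */
  printf ("%llu %llu\n", regs[1], regs[2]);    /* prints "1 2" */
  return 0;
}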
*/ + +void +aarch64_split_simd_move (rtx dst, rtx src) +{ + enum machine_mode src_mode = GET_MODE (src); + enum machine_mode dst_mode = GET_MODE (dst); + + gcc_assert (VECTOR_MODE_P (dst_mode)); + + if (REG_P (dst) && REG_P (src)) + { + rtx (*gen) (rtx, rtx); + + gcc_assert (VECTOR_MODE_P (src_mode)); + + switch (src_mode) + { + case V16QImode: + gen = gen_aarch64_split_simd_movv16qi; + break; + case V8HImode: + gen = gen_aarch64_split_simd_movv8hi; + break; + case V4SImode: + gen = gen_aarch64_split_simd_movv4si; + break; + case V2DImode: + gen = gen_aarch64_split_simd_movv2di; + break; + case V4SFmode: + gen = gen_aarch64_split_simd_movv4sf; + break; + case V2DFmode: + gen = gen_aarch64_split_simd_movv2df; + break; + default: + gcc_unreachable (); + } + + emit_insn (gen (dst, src)); + return; + } +} + +static rtx +aarch64_force_temporary (enum machine_mode mode, rtx x, rtx value) +{ + if (can_create_pseudo_p ()) + return force_reg (mode, value); + else + { + x = aarch64_emit_move (x, value); + return x; + } +} + + +static rtx +aarch64_add_offset (enum machine_mode mode, rtx temp, rtx reg, HOST_WIDE_INT offset) +{ + if (!aarch64_plus_immediate (GEN_INT (offset), mode)) + { + rtx high; + /* Load the full offset into a register. This + might be improvable in the future. */ + high = GEN_INT (offset); + offset = 0; + high = aarch64_force_temporary (mode, temp, high); + reg = aarch64_force_temporary (mode, temp, + gen_rtx_PLUS (mode, high, reg)); + } + return plus_constant (mode, reg, offset); +} + +void +aarch64_expand_mov_immediate (rtx dest, rtx imm) +{ + enum machine_mode mode = GET_MODE (dest); + unsigned HOST_WIDE_INT mask; + int i; + bool first; + unsigned HOST_WIDE_INT val; + bool subtargets; + rtx subtarget; + int one_match, zero_match; + + gcc_assert (mode == SImode || mode == DImode); + + /* Check on what type of symbol it is. */ + if (GET_CODE (imm) == SYMBOL_REF + || GET_CODE (imm) == LABEL_REF + || GET_CODE (imm) == CONST) + { + rtx mem, base, offset; + enum aarch64_symbol_type sty; + + /* If we have (const (plus symbol offset)), separate out the offset + before we start classifying the symbol. 
*/ + split_const (imm, &base, &offset); + + sty = aarch64_classify_symbol (base, SYMBOL_CONTEXT_ADR); + switch (sty) + { + case SYMBOL_FORCE_TO_MEM: + if (offset != const0_rtx + && targetm.cannot_force_const_mem (mode, imm)) + { + gcc_assert (can_create_pseudo_p ()); + base = aarch64_force_temporary (mode, dest, base); + base = aarch64_add_offset (mode, NULL, base, INTVAL (offset)); + aarch64_emit_move (dest, base); + return; + } + mem = force_const_mem (ptr_mode, imm); + gcc_assert (mem); + if (mode != ptr_mode) + mem = gen_rtx_ZERO_EXTEND (mode, mem); + emit_insn (gen_rtx_SET (VOIDmode, dest, mem)); + return; + + case SYMBOL_SMALL_TLSGD: + case SYMBOL_SMALL_TLSDESC: + case SYMBOL_SMALL_GOTTPREL: + case SYMBOL_SMALL_GOT: + case SYMBOL_TINY_GOT: + if (offset != const0_rtx) + { + gcc_assert(can_create_pseudo_p ()); + base = aarch64_force_temporary (mode, dest, base); + base = aarch64_add_offset (mode, NULL, base, INTVAL (offset)); + aarch64_emit_move (dest, base); + return; + } + /* FALLTHRU */ + + case SYMBOL_SMALL_TPREL: + case SYMBOL_SMALL_ABSOLUTE: + case SYMBOL_TINY_ABSOLUTE: + aarch64_load_symref_appropriately (dest, imm, sty); + return; + + default: + gcc_unreachable (); + } + } + + if (CONST_INT_P (imm) && aarch64_move_imm (INTVAL (imm), mode)) + { + emit_insn (gen_rtx_SET (VOIDmode, dest, imm)); + return; + } + + if (!CONST_INT_P (imm)) + { + if (GET_CODE (imm) == HIGH) + emit_insn (gen_rtx_SET (VOIDmode, dest, imm)); + else + { + rtx mem = force_const_mem (mode, imm); + gcc_assert (mem); + emit_insn (gen_rtx_SET (VOIDmode, dest, mem)); + } + + return; + } + + if (mode == SImode) + { + /* We know we can't do this in 1 insn, and we must be able to do it + in two; so don't mess around looking for sequences that don't buy + us anything. */ + emit_insn (gen_rtx_SET (VOIDmode, dest, GEN_INT (INTVAL (imm) & 0xffff))); + emit_insn (gen_insv_immsi (dest, GEN_INT (16), + GEN_INT ((INTVAL (imm) >> 16) & 0xffff))); + return; + } + + /* Remaining cases are all for DImode. */ + + val = INTVAL (imm); + subtargets = optimize && can_create_pseudo_p (); + + one_match = 0; + zero_match = 0; + mask = 0xffff; + + for (i = 0; i < 64; i += 16, mask <<= 16) + { + if ((val & mask) == 0) + zero_match++; + else if ((val & mask) == mask) + one_match++; + } + + if (one_match == 2) + { + mask = 0xffff; + for (i = 0; i < 64; i += 16, mask <<= 16) + { + if ((val & mask) != mask) + { + emit_insn (gen_rtx_SET (VOIDmode, dest, GEN_INT (val | mask))); + emit_insn (gen_insv_immdi (dest, GEN_INT (i), + GEN_INT ((val >> i) & 0xffff))); + return; + } + } + gcc_unreachable (); + } + + if (zero_match == 2) + goto simple_sequence; + + mask = 0x0ffff0000UL; + for (i = 16; i < 64; i += 16, mask <<= 16) + { + HOST_WIDE_INT comp = mask & ~(mask - 1); + + if (aarch64_uimm12_shift (val - (val & mask))) + { + subtarget = subtargets ? gen_reg_rtx (DImode) : dest; + + emit_insn (gen_rtx_SET (VOIDmode, subtarget, GEN_INT (val & mask))); + emit_insn (gen_adddi3 (dest, subtarget, + GEN_INT (val - (val & mask)))); + return; + } + else if (aarch64_uimm12_shift (-(val - ((val + comp) & mask)))) + { + subtarget = subtargets ? gen_reg_rtx (DImode) : dest; + + emit_insn (gen_rtx_SET (VOIDmode, subtarget, + GEN_INT ((val + comp) & mask))); + emit_insn (gen_adddi3 (dest, subtarget, + GEN_INT (val - ((val + comp) & mask)))); + return; + } + else if (aarch64_uimm12_shift (val - ((val - comp) | ~mask))) + { + subtarget = subtargets ? 
gen_reg_rtx (DImode) : dest; + + emit_insn (gen_rtx_SET (VOIDmode, subtarget, + GEN_INT ((val - comp) | ~mask))); + emit_insn (gen_adddi3 (dest, subtarget, + GEN_INT (val - ((val - comp) | ~mask)))); + return; + } + else if (aarch64_uimm12_shift (-(val - (val | ~mask)))) + { + subtarget = subtargets ? gen_reg_rtx (DImode) : dest; + + emit_insn (gen_rtx_SET (VOIDmode, subtarget, + GEN_INT (val | ~mask))); + emit_insn (gen_adddi3 (dest, subtarget, + GEN_INT (val - (val | ~mask)))); + return; + } + } + + /* See if we can do it by arithmetically combining two + immediates. */ + for (i = 0; i < AARCH64_NUM_BITMASKS; i++) + { + int j; + mask = 0xffff; + + if (aarch64_uimm12_shift (val - aarch64_bitmasks[i]) + || aarch64_uimm12_shift (-val + aarch64_bitmasks[i])) + { + subtarget = subtargets ? gen_reg_rtx (DImode) : dest; + emit_insn (gen_rtx_SET (VOIDmode, subtarget, + GEN_INT (aarch64_bitmasks[i]))); + emit_insn (gen_adddi3 (dest, subtarget, + GEN_INT (val - aarch64_bitmasks[i]))); + return; + } + + for (j = 0; j < 64; j += 16, mask <<= 16) + { + if ((aarch64_bitmasks[i] & ~mask) == (val & ~mask)) + { + emit_insn (gen_rtx_SET (VOIDmode, dest, + GEN_INT (aarch64_bitmasks[i]))); + emit_insn (gen_insv_immdi (dest, GEN_INT (j), + GEN_INT ((val >> j) & 0xffff))); + return; + } + } + } + + /* See if we can do it by logically combining two immediates. */ + for (i = 0; i < AARCH64_NUM_BITMASKS; i++) + { + if ((aarch64_bitmasks[i] & val) == aarch64_bitmasks[i]) + { + int j; + + for (j = i + 1; j < AARCH64_NUM_BITMASKS; j++) + if (val == (aarch64_bitmasks[i] | aarch64_bitmasks[j])) + { + subtarget = subtargets ? gen_reg_rtx (mode) : dest; + emit_insn (gen_rtx_SET (VOIDmode, subtarget, + GEN_INT (aarch64_bitmasks[i]))); + emit_insn (gen_iordi3 (dest, subtarget, + GEN_INT (aarch64_bitmasks[j]))); + return; + } + } + else if ((val & aarch64_bitmasks[i]) == val) + { + int j; + + for (j = i + 1; j < AARCH64_NUM_BITMASKS; j++) + if (val == (aarch64_bitmasks[j] & aarch64_bitmasks[i])) + { + + subtarget = subtargets ? gen_reg_rtx (mode) : dest; + emit_insn (gen_rtx_SET (VOIDmode, subtarget, + GEN_INT (aarch64_bitmasks[j]))); + emit_insn (gen_anddi3 (dest, subtarget, + GEN_INT (aarch64_bitmasks[i]))); + return; + } + } + } + + simple_sequence: + first = true; + mask = 0xffff; + for (i = 0; i < 64; i += 16, mask <<= 16) + { + if ((val & mask) != 0) + { + if (first) + { + emit_insn (gen_rtx_SET (VOIDmode, dest, + GEN_INT (val & mask))); + first = false; + } + else + emit_insn (gen_insv_immdi (dest, GEN_INT (i), + GEN_INT ((val >> i) & 0xffff))); + } + } +} + +static bool +aarch64_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED) +{ + /* Indirect calls are not currently supported. */ + if (decl == NULL) + return false; + + /* Cannot tail-call to long-calls, since these are outside of the + range of a branch instruction (we could handle this if we added + support for indirect tail-calls. */ + if (aarch64_decl_is_long_call_p (decl)) + return false; + + return true; +} + +/* Implement TARGET_PASS_BY_REFERENCE. */ + +static bool +aarch64_pass_by_reference (cumulative_args_t pcum ATTRIBUTE_UNUSED, + enum machine_mode mode, + const_tree type, + bool named ATTRIBUTE_UNUSED) +{ + HOST_WIDE_INT size; + enum machine_mode dummymode; + int nregs; + + /* GET_MODE_SIZE (BLKmode) is useless since it is 0. */ + size = (mode == BLKmode && type) + ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode); + + /* Aggregates are passed by reference based on their size. 
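Back in the DImode branch of aarch64_expand_mov_immediate above, the first decision is a census of the four 16-bit chunks of the constant: two all-ones chunks select a MOVN-style start, two all-zero chunks fall through to the plain MOVZ/MOVK sequence. A host-side sketch of that census, with a made-up constant:

#include <stdio.h>

int
main (void)
{
  unsigned long long val = 0xffff00001234ffffULL;
  unsigned long long mask = 0xffffULL;
  int one_match = 0, zero_match = 0;

  /* Count 16-bit chunks that are all ones or all zeros, as the DImode
     path of aarch64_expand_mov_immediate does.  */
  for (int i = 0; i < 64; i += 16, mask <<= 16)
    {
      if ((val & mask) == 0)
        zero_match++;
      else if ((val & mask) == mask)
        one_match++;
    }

  printf ("one_match=%d zero_match=%d\n", one_match, zero_match);
  /* one_match == 2 here, so the code first materialises val | 0xffff0000
     (a MOVN-form value: all ones except the zero chunk) and then patches
     the 0x1234 chunk with a single MOVK.  */
  return 0;
}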
*/ + if (type && AGGREGATE_TYPE_P (type)) + { + size = int_size_in_bytes (type); + } + + /* Variable sized arguments are always returned by reference. */ + if (size < 0) + return true; + + /* Can this be a candidate to be passed in fp/simd register(s)? */ + if (aarch64_vfp_is_call_or_return_candidate (mode, type, + &dummymode, &nregs, + NULL)) + return false; + + /* Arguments which are variable sized or larger than 2 registers are + passed by reference unless they are a homogenous floating point + aggregate. */ + return size > 2 * UNITS_PER_WORD; +} + +/* Return TRUE if VALTYPE is padded to its least significant bits. */ +static bool +aarch64_return_in_msb (const_tree valtype) +{ + enum machine_mode dummy_mode; + int dummy_int; + + /* Never happens in little-endian mode. */ + if (!BYTES_BIG_ENDIAN) + return false; + + /* Only composite types smaller than or equal to 16 bytes can + be potentially returned in registers. */ + if (!aarch64_composite_type_p (valtype, TYPE_MODE (valtype)) + || int_size_in_bytes (valtype) <= 0 + || int_size_in_bytes (valtype) > 16) + return false; + + /* But not a composite that is an HFA (Homogeneous Floating-point Aggregate) + or an HVA (Homogeneous Short-Vector Aggregate); such a special composite + is always passed/returned in the least significant bits of fp/simd + register(s). */ + if (aarch64_vfp_is_call_or_return_candidate (TYPE_MODE (valtype), valtype, + &dummy_mode, &dummy_int, NULL)) + return false; + + return true; +} + +/* Implement TARGET_FUNCTION_VALUE. + Define how to find the value returned by a function. */ + +static rtx +aarch64_function_value (const_tree type, const_tree func, + bool outgoing ATTRIBUTE_UNUSED) +{ + enum machine_mode mode; + int unsignedp; + int count; + enum machine_mode ag_mode; + + mode = TYPE_MODE (type); + if (INTEGRAL_TYPE_P (type)) + mode = promote_function_mode (type, mode, &unsignedp, func, 1); + + if (aarch64_return_in_msb (type)) + { + HOST_WIDE_INT size = int_size_in_bytes (type); + + if (size % UNITS_PER_WORD != 0) + { + size += UNITS_PER_WORD - size % UNITS_PER_WORD; + mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0); + } + } + + if (aarch64_vfp_is_call_or_return_candidate (mode, type, + &ag_mode, &count, NULL)) + { + if (!aarch64_composite_type_p (type, mode)) + { + gcc_assert (count == 1 && mode == ag_mode); + return gen_rtx_REG (mode, V0_REGNUM); + } + else + { + int i; + rtx par; + + par = gen_rtx_PARALLEL (mode, rtvec_alloc (count)); + for (i = 0; i < count; i++) + { + rtx tmp = gen_rtx_REG (ag_mode, V0_REGNUM + i); + tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp, + GEN_INT (i * GET_MODE_SIZE (ag_mode))); + XVECEXP (par, 0, i) = tmp; + } + return par; + } + } + else + return gen_rtx_REG (mode, R0_REGNUM); +} + +/* Implements TARGET_FUNCTION_VALUE_REGNO_P. + Return true if REGNO is the number of a hard register in which the values + of called function may come back. */ + +static bool +aarch64_function_value_regno_p (const unsigned int regno) +{ + /* Maximum of 16 bytes can be returned in the general registers. Examples + of 16-byte return values are: 128-bit integers and 16-byte small + structures (excluding homogeneous floating-point aggregates). */ + if (regno == R0_REGNUM || regno == R1_REGNUM) + return true; + + /* Up to four fp/simd registers can return a function value, e.g. a + homogeneous floating-point aggregate having four members. */ + if (regno >= V0_REGNUM && regno < V0_REGNUM + HA_MAX_NUM_FLDS) + return !TARGET_GENERAL_REGS_ONLY; + + return false; +} + +/* Implement TARGET_RETURN_IN_MEMORY. 
+ + If the type T of the result of a function is such that + void func (T arg) + would require that arg be passed as a value in a register (or set of + registers) according to the parameter passing rules, then the result + is returned in the same registers as would be used for such an + argument. */ + +static bool +aarch64_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED) +{ + HOST_WIDE_INT size; + enum machine_mode ag_mode; + int count; + + if (!AGGREGATE_TYPE_P (type) + && TREE_CODE (type) != COMPLEX_TYPE + && TREE_CODE (type) != VECTOR_TYPE) + /* Simple scalar types always returned in registers. */ + return false; + + if (aarch64_vfp_is_call_or_return_candidate (TYPE_MODE (type), + type, + &ag_mode, + &count, + NULL)) + return false; + + /* Types larger than 2 registers returned in memory. */ + size = int_size_in_bytes (type); + return (size < 0 || size > 2 * UNITS_PER_WORD); +} + +static bool +aarch64_vfp_is_call_candidate (cumulative_args_t pcum_v, enum machine_mode mode, + const_tree type, int *nregs) +{ + CUMULATIVE_ARGS *pcum = get_cumulative_args (pcum_v); + return aarch64_vfp_is_call_or_return_candidate (mode, + type, + &pcum->aapcs_vfp_rmode, + nregs, + NULL); +} + +/* Given MODE and TYPE of a function argument, return the alignment in + bits. The idea is to suppress any stronger alignment requested by + the user and opt for the natural alignment (specified in AAPCS64 \S 4.1). + This is a helper function for local use only. */ + +static unsigned int +aarch64_function_arg_alignment (enum machine_mode mode, const_tree type) +{ + unsigned int alignment; + + if (type) + { + if (!integer_zerop (TYPE_SIZE (type))) + { + if (TYPE_MODE (type) == mode) + alignment = TYPE_ALIGN (type); + else + alignment = GET_MODE_ALIGNMENT (mode); + } + else + alignment = 0; + } + else + alignment = GET_MODE_ALIGNMENT (mode); + + return alignment; +} + +/* Layout a function argument according to the AAPCS64 rules. The rule + numbers refer to the rule numbers in the AAPCS64. */ + +static void +aarch64_layout_arg (cumulative_args_t pcum_v, enum machine_mode mode, + const_tree type, + bool named ATTRIBUTE_UNUSED) +{ + CUMULATIVE_ARGS *pcum = get_cumulative_args (pcum_v); + int ncrn, nvrn, nregs; + bool allocate_ncrn, allocate_nvrn; + + /* We need to do this once per argument. */ + if (pcum->aapcs_arg_processed) + return; + + pcum->aapcs_arg_processed = true; + + allocate_ncrn = (type) ? !(FLOAT_TYPE_P (type)) : !FLOAT_MODE_P (mode); + allocate_nvrn = aarch64_vfp_is_call_candidate (pcum_v, + mode, + type, + &nregs); + + /* allocate_ncrn may be false-positive, but allocate_nvrn is quite reliable. + The following code thus handles passing by SIMD/FP registers first. */ + + nvrn = pcum->aapcs_nvrn; + + /* C1 - C5 for floating point, homogenous floating point aggregates (HFA) + and homogenous short-vector aggregates (HVA). */ + if (allocate_nvrn) + { + if (nvrn + nregs <= NUM_FP_ARG_REGS) + { + pcum->aapcs_nextnvrn = nvrn + nregs; + if (!aarch64_composite_type_p (type, mode)) + { + gcc_assert (nregs == 1); + pcum->aapcs_reg = gen_rtx_REG (mode, V0_REGNUM + nvrn); + } + else + { + rtx par; + int i; + par = gen_rtx_PARALLEL (mode, rtvec_alloc (nregs)); + for (i = 0; i < nregs; i++) + { + rtx tmp = gen_rtx_REG (pcum->aapcs_vfp_rmode, + V0_REGNUM + nvrn + i); + tmp = gen_rtx_EXPR_LIST + (VOIDmode, tmp, + GEN_INT (i * GET_MODE_SIZE (pcum->aapcs_vfp_rmode))); + XVECEXP (par, 0, i) = tmp; + } + pcum->aapcs_reg = par; + } + return; + } + else + { + /* C.3 NSRN is set to 8. 
*/ + pcum->aapcs_nextnvrn = NUM_FP_ARG_REGS; + goto on_stack; + } + } + + ncrn = pcum->aapcs_ncrn; + nregs = ((type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode)) + + UNITS_PER_WORD - 1) / UNITS_PER_WORD; + + + /* C6 - C9. though the sign and zero extension semantics are + handled elsewhere. This is the case where the argument fits + entirely general registers. */ + if (allocate_ncrn && (ncrn + nregs <= NUM_ARG_REGS)) + { + unsigned int alignment = aarch64_function_arg_alignment (mode, type); + + gcc_assert (nregs == 0 || nregs == 1 || nregs == 2); + + /* C.8 if the argument has an alignment of 16 then the NGRN is + rounded up to the next even number. */ + if (nregs == 2 && alignment == 16 * BITS_PER_UNIT && ncrn % 2) + { + ++ncrn; + gcc_assert (ncrn + nregs <= NUM_ARG_REGS); + } + /* NREGS can be 0 when e.g. an empty structure is to be passed. + A reg is still generated for it, but the caller should be smart + enough not to use it. */ + if (nregs == 0 || nregs == 1 || GET_MODE_CLASS (mode) == MODE_INT) + { + pcum->aapcs_reg = gen_rtx_REG (mode, R0_REGNUM + ncrn); + } + else + { + rtx par; + int i; + + par = gen_rtx_PARALLEL (mode, rtvec_alloc (nregs)); + for (i = 0; i < nregs; i++) + { + rtx tmp = gen_rtx_REG (word_mode, R0_REGNUM + ncrn + i); + tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp, + GEN_INT (i * UNITS_PER_WORD)); + XVECEXP (par, 0, i) = tmp; + } + pcum->aapcs_reg = par; + } + + pcum->aapcs_nextncrn = ncrn + nregs; + return; + } + + /* C.11 */ + pcum->aapcs_nextncrn = NUM_ARG_REGS; + + /* The argument is passed on stack; record the needed number of words for + this argument (we can re-use NREGS) and align the total size if + necessary. */ +on_stack: + pcum->aapcs_stack_words = nregs; + if (aarch64_function_arg_alignment (mode, type) == 16 * BITS_PER_UNIT) + pcum->aapcs_stack_size = AARCH64_ROUND_UP (pcum->aapcs_stack_size, + 16 / UNITS_PER_WORD) + 1; + return; +} + +/* Implement TARGET_FUNCTION_ARG. 
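The register assignment performed by aarch64_layout_arg above can be simulated with a few counters. A loose model, assuming UNITS_PER_WORD is 8, NUM_ARG_REGS and NUM_FP_ARG_REGS are both 8, and ignoring the stack-size rounding and variadic handling (helper name and sample arguments are illustrative only):

#include <stdio.h>

#define NUM_ARG_REGS     8   /* x0-x7 */
#define NUM_FP_ARG_REGS  8   /* v0-v7 */

/* SIZE is the argument size in bytes, FP_NREGS is non-zero for
   float/HFA/HVA candidates (number of members), ALIGN16 marks
   16-byte-aligned integer arguments.  */
static void
layout_arg (const char *name, int size, int fp_nregs, int align16,
            int *ncrn, int *nvrn, int *stack_words)
{
  int nregs;

  if (fp_nregs)
    {
      if (*nvrn + fp_nregs <= NUM_FP_ARG_REGS)
        {
          printf ("%s -> v%d..v%d\n", name, *nvrn, *nvrn + fp_nregs - 1);
          *nvrn += fp_nregs;
          return;
        }
      *nvrn = NUM_FP_ARG_REGS;          /* C.3: NSRN is set to 8 */
      nregs = fp_nregs;
      goto on_stack;
    }

  nregs = (size + 7) / 8;
  if (align16 && (*ncrn & 1))           /* C.8: round NGRN up to even */
    ++*ncrn;
  if (*ncrn + nregs <= NUM_ARG_REGS)
    {
      printf ("%s -> x%d..x%d\n", name, *ncrn, *ncrn + nregs - 1);
      *ncrn += nregs;
      return;
    }
  *ncrn = NUM_ARG_REGS;                 /* C.11 */

on_stack:
  printf ("%s -> stack (%d words)\n", name, nregs);
  *stack_words += nregs;
}

int
main (void)
{
  int ncrn = 0, nvrn = 0, stack = 0;

  layout_arg ("long",              8, 0, 0, &ncrn, &nvrn, &stack);  /* x0      */
  layout_arg ("struct{float[3]}", 12, 3, 0, &ncrn, &nvrn, &stack);  /* v0..v2  */
  layout_arg ("__int128",         16, 0, 1, &ncrn, &nvrn, &stack);  /* x2..x3  */
  printf ("stack words: %d\n", stack);
  return 0;
}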
*/ + +static rtx +aarch64_function_arg (cumulative_args_t pcum_v, enum machine_mode mode, + const_tree type, bool named) +{ + CUMULATIVE_ARGS *pcum = get_cumulative_args (pcum_v); + gcc_assert (pcum->pcs_variant == ARM_PCS_AAPCS64); + + if (mode == VOIDmode) + return NULL_RTX; + + aarch64_layout_arg (pcum_v, mode, type, named); + return pcum->aapcs_reg; +} + +void +aarch64_init_cumulative_args (CUMULATIVE_ARGS *pcum, + const_tree fntype ATTRIBUTE_UNUSED, + rtx libname ATTRIBUTE_UNUSED, + const_tree fndecl ATTRIBUTE_UNUSED, + unsigned n_named ATTRIBUTE_UNUSED) +{ + pcum->aapcs_ncrn = 0; + pcum->aapcs_nvrn = 0; + pcum->aapcs_nextncrn = 0; + pcum->aapcs_nextnvrn = 0; + pcum->pcs_variant = ARM_PCS_AAPCS64; + pcum->aapcs_reg = NULL_RTX; + pcum->aapcs_arg_processed = false; + pcum->aapcs_stack_words = 0; + pcum->aapcs_stack_size = 0; + + return; +} + +static void +aarch64_function_arg_advance (cumulative_args_t pcum_v, + enum machine_mode mode, + const_tree type, + bool named) +{ + CUMULATIVE_ARGS *pcum = get_cumulative_args (pcum_v); + if (pcum->pcs_variant == ARM_PCS_AAPCS64) + { + aarch64_layout_arg (pcum_v, mode, type, named); + gcc_assert ((pcum->aapcs_reg != NULL_RTX) + != (pcum->aapcs_stack_words != 0)); + pcum->aapcs_arg_processed = false; + pcum->aapcs_ncrn = pcum->aapcs_nextncrn; + pcum->aapcs_nvrn = pcum->aapcs_nextnvrn; + pcum->aapcs_stack_size += pcum->aapcs_stack_words; + pcum->aapcs_stack_words = 0; + pcum->aapcs_reg = NULL_RTX; + } +} + +bool +aarch64_function_arg_regno_p (unsigned regno) +{ + return ((GP_REGNUM_P (regno) && regno < R0_REGNUM + NUM_ARG_REGS) + || (FP_REGNUM_P (regno) && regno < V0_REGNUM + NUM_FP_ARG_REGS)); +} + +/* Implement FUNCTION_ARG_BOUNDARY. Every parameter gets at least + PARM_BOUNDARY bits of alignment, but will be given anything up + to STACK_BOUNDARY bits if the type requires it. This makes sure + that both before and after the layout of each argument, the Next + Stacked Argument Address (NSAA) will have a minimum alignment of + 8 bytes. */ + +static unsigned int +aarch64_function_arg_boundary (enum machine_mode mode, const_tree type) +{ + unsigned int alignment = aarch64_function_arg_alignment (mode, type); + + if (alignment < PARM_BOUNDARY) + alignment = PARM_BOUNDARY; + if (alignment > STACK_BOUNDARY) + alignment = STACK_BOUNDARY; + return alignment; +} + +/* For use by FUNCTION_ARG_PADDING (MODE, TYPE). + + Return true if an argument passed on the stack should be padded upwards, + i.e. if the least-significant byte of the stack slot has useful data. + + Small aggregate types are placed in the lowest memory address. + + The related parameter passing rules are B.4, C.3, C.5 and C.14. */ + +bool +aarch64_pad_arg_upward (enum machine_mode mode, const_tree type) +{ + /* On little-endian targets, the least significant byte of every stack + argument is passed at the lowest byte address of the stack slot. */ + if (!BYTES_BIG_ENDIAN) + return true; + + /* Otherwise, integral, floating-point and pointer types are padded downward: + the least significant byte of a stack argument is passed at the highest + byte address of the stack slot. */ + if (type + ? (INTEGRAL_TYPE_P (type) || SCALAR_FLOAT_TYPE_P (type) + || POINTER_TYPE_P (type)) + : (SCALAR_INT_MODE_P (mode) || SCALAR_FLOAT_MODE_P (mode))) + return false; + + /* Everything else padded upward, i.e. data in first byte of stack slot. */ + return true; +} + +/* Similarly, for use by BLOCK_REG_PADDING (MODE, TYPE, FIRST). 
+ + It specifies padding for the last (may also be the only) + element of a block move between registers and memory. If + assuming the block is in the memory, padding upward means that + the last element is padded after its highest significant byte, + while in downward padding, the last element is padded at the + its least significant byte side. + + Small aggregates and small complex types are always padded + upwards. + + We don't need to worry about homogeneous floating-point or + short-vector aggregates; their move is not affected by the + padding direction determined here. Regardless of endianness, + each element of such an aggregate is put in the least + significant bits of a fp/simd register. + + Return !BYTES_BIG_ENDIAN if the least significant byte of the + register has useful data, and return the opposite if the most + significant byte does. */ + +bool +aarch64_pad_reg_upward (enum machine_mode mode, const_tree type, + bool first ATTRIBUTE_UNUSED) +{ + + /* Small composite types are always padded upward. */ + if (BYTES_BIG_ENDIAN && aarch64_composite_type_p (type, mode)) + { + HOST_WIDE_INT size = (type ? int_size_in_bytes (type) + : GET_MODE_SIZE (mode)); + if (size < 2 * UNITS_PER_WORD) + return true; + } + + /* Otherwise, use the default padding. */ + return !BYTES_BIG_ENDIAN; +} + +static enum machine_mode +aarch64_libgcc_cmp_return_mode (void) +{ + return SImode; +} + +static bool +aarch64_frame_pointer_required (void) +{ + /* If the function contains dynamic stack allocations, we need to + use the frame pointer to access the static parts of the frame. */ + if (cfun->calls_alloca) + return true; + + /* We may have turned flag_omit_frame_pointer on in order to have this + function called; if we did, we also set the 'faked_omit_frame_pointer' flag + and we'll check it here. + If we really did set flag_omit_frame_pointer normally, then we return false + (no frame pointer required) in all cases. */ + + if (flag_omit_frame_pointer && !faked_omit_frame_pointer) + return false; + else if (flag_omit_leaf_frame_pointer) + return !crtl->is_leaf || df_regs_ever_live_p (LR_REGNUM); + return true; +} + +/* Mark the registers that need to be saved by the callee and calculate + the size of the callee-saved registers area and frame record (both FP + and LR may be omitted). */ +static void +aarch64_layout_frame (void) +{ + HOST_WIDE_INT offset = 0; + int regno; + + if (reload_completed && cfun->machine->frame.laid_out) + return; + + cfun->machine->frame.fp_lr_offset = 0; + + /* First mark all the registers that really need to be saved... */ + for (regno = R0_REGNUM; regno <= R30_REGNUM; regno++) + cfun->machine->frame.reg_offset[regno] = -1; + + for (regno = V0_REGNUM; regno <= V31_REGNUM; regno++) + cfun->machine->frame.reg_offset[regno] = -1; + + /* ... that includes the eh data registers (if needed)... */ + if (crtl->calls_eh_return) + for (regno = 0; EH_RETURN_DATA_REGNO (regno) != INVALID_REGNUM; regno++) + cfun->machine->frame.reg_offset[EH_RETURN_DATA_REGNO (regno)] = 0; + + /* ... and any callee saved register that dataflow says is live. 
*/ + for (regno = R0_REGNUM; regno <= R30_REGNUM; regno++) + if (df_regs_ever_live_p (regno) + && !call_used_regs[regno]) + cfun->machine->frame.reg_offset[regno] = 0; + + for (regno = V0_REGNUM; regno <= V31_REGNUM; regno++) + if (df_regs_ever_live_p (regno) + && !call_used_regs[regno]) + cfun->machine->frame.reg_offset[regno] = 0; + + if (frame_pointer_needed) + { + cfun->machine->frame.reg_offset[R30_REGNUM] = 0; + cfun->machine->frame.reg_offset[R29_REGNUM] = 0; + cfun->machine->frame.hardfp_offset = 2 * UNITS_PER_WORD; + } + + /* Now assign stack slots for them. */ + for (regno = R0_REGNUM; regno <= R28_REGNUM; regno++) + if (cfun->machine->frame.reg_offset[regno] != -1) + { + cfun->machine->frame.reg_offset[regno] = offset; + offset += UNITS_PER_WORD; + } + + for (regno = V0_REGNUM; regno <= V31_REGNUM; regno++) + if (cfun->machine->frame.reg_offset[regno] != -1) + { + cfun->machine->frame.reg_offset[regno] = offset; + offset += UNITS_PER_WORD; + } + + if (frame_pointer_needed) + { + cfun->machine->frame.reg_offset[R29_REGNUM] = offset; + offset += UNITS_PER_WORD; + cfun->machine->frame.fp_lr_offset = UNITS_PER_WORD; + } + + if (cfun->machine->frame.reg_offset[R30_REGNUM] != -1) + { + cfun->machine->frame.reg_offset[R30_REGNUM] = offset; + offset += UNITS_PER_WORD; + cfun->machine->frame.fp_lr_offset += UNITS_PER_WORD; + } + + cfun->machine->frame.padding0 = + (AARCH64_ROUND_UP (offset, STACK_BOUNDARY / BITS_PER_UNIT) - offset); + offset = AARCH64_ROUND_UP (offset, STACK_BOUNDARY / BITS_PER_UNIT); + + cfun->machine->frame.saved_regs_size = offset; + cfun->machine->frame.laid_out = true; +} + +/* Make the last instruction frame-related and note that it performs + the operation described by FRAME_PATTERN. */ + +static void +aarch64_set_frame_expr (rtx frame_pattern) +{ + rtx insn; + + insn = get_last_insn (); + RTX_FRAME_RELATED_P (insn) = 1; + RTX_FRAME_RELATED_P (frame_pattern) = 1; + REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR, + frame_pattern, + REG_NOTES (insn)); +} + +static bool +aarch64_register_saved_on_entry (int regno) +{ + return cfun->machine->frame.reg_offset[regno] != -1; +} + + +static void +aarch64_save_or_restore_fprs (int start_offset, int increment, + bool restore, rtx base_rtx) + +{ + unsigned regno; + unsigned regno2; + rtx insn; + rtx (*gen_mem_ref)(enum machine_mode, rtx) + = (frame_pointer_needed)? gen_frame_mem : gen_rtx_MEM; + + + for (regno = V0_REGNUM; regno <= V31_REGNUM; regno++) + { + if (aarch64_register_saved_on_entry (regno)) + { + rtx mem; + mem = gen_mem_ref (DFmode, + plus_constant (Pmode, + base_rtx, + start_offset)); + + for (regno2 = regno + 1; + regno2 <= V31_REGNUM + && !aarch64_register_saved_on_entry (regno2); + regno2++) + { + /* Empty loop. */ + } + if (regno2 <= V31_REGNUM && + aarch64_register_saved_on_entry (regno2)) + { + rtx mem2; + /* Next highest register to be saved. 
*/ + mem2 = gen_mem_ref (DFmode, + plus_constant + (Pmode, + base_rtx, + start_offset + increment)); + if (restore == false) + { + insn = emit_insn + ( gen_store_pairdf (mem, gen_rtx_REG (DFmode, regno), + mem2, gen_rtx_REG (DFmode, regno2))); + + } + else + { + insn = emit_insn + ( gen_load_pairdf (gen_rtx_REG (DFmode, regno), mem, + gen_rtx_REG (DFmode, regno2), mem2)); + + add_reg_note (insn, REG_CFA_RESTORE, + gen_rtx_REG (DFmode, regno)); + add_reg_note (insn, REG_CFA_RESTORE, + gen_rtx_REG (DFmode, regno2)); + } + + /* The first part of a frame-related parallel insn + is always assumed to be relevant to the frame + calculations; subsequent parts, are only + frame-related if explicitly marked. */ + RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1; + regno = regno2; + start_offset += increment * 2; + } + else + { + if (restore == false) + insn = emit_move_insn (mem, gen_rtx_REG (DFmode, regno)); + else + { + insn = emit_move_insn (gen_rtx_REG (DFmode, regno), mem); + add_reg_note (insn, REG_CFA_RESTORE, + gen_rtx_REG (DImode, regno)); + } + start_offset += increment; + } + RTX_FRAME_RELATED_P (insn) = 1; + } + } + +} + + +/* offset from the stack pointer of where the saves and + restore's have to happen. */ +static void +aarch64_save_or_restore_callee_save_registers (HOST_WIDE_INT offset, + bool restore) +{ + rtx insn; + rtx base_rtx = stack_pointer_rtx; + HOST_WIDE_INT start_offset = offset; + HOST_WIDE_INT increment = UNITS_PER_WORD; + rtx (*gen_mem_ref)(enum machine_mode, rtx) = (frame_pointer_needed)? gen_frame_mem : gen_rtx_MEM; + unsigned limit = (frame_pointer_needed)? R28_REGNUM: R30_REGNUM; + unsigned regno; + unsigned regno2; + + for (regno = R0_REGNUM; regno <= limit; regno++) + { + if (aarch64_register_saved_on_entry (regno)) + { + rtx mem; + mem = gen_mem_ref (Pmode, + plus_constant (Pmode, + base_rtx, + start_offset)); + + for (regno2 = regno + 1; + regno2 <= limit + && !aarch64_register_saved_on_entry (regno2); + regno2++) + { + /* Empty loop. */ + } + if (regno2 <= limit && + aarch64_register_saved_on_entry (regno2)) + { + rtx mem2; + /* Next highest register to be saved. */ + mem2 = gen_mem_ref (Pmode, + plus_constant + (Pmode, + base_rtx, + start_offset + increment)); + if (restore == false) + { + insn = emit_insn + ( gen_store_pairdi (mem, gen_rtx_REG (DImode, regno), + mem2, gen_rtx_REG (DImode, regno2))); + + } + else + { + insn = emit_insn + ( gen_load_pairdi (gen_rtx_REG (DImode, regno), mem, + gen_rtx_REG (DImode, regno2), mem2)); + + add_reg_note (insn, REG_CFA_RESTORE, gen_rtx_REG (DImode, regno)); + add_reg_note (insn, REG_CFA_RESTORE, gen_rtx_REG (DImode, regno2)); + } + + /* The first part of a frame-related parallel insn + is always assumed to be relevant to the frame + calculations; subsequent parts, are only + frame-related if explicitly marked. 
*/ + RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, + 1)) = 1; + regno = regno2; + start_offset += increment * 2; + } + else + { + if (restore == false) + insn = emit_move_insn (mem, gen_rtx_REG (DImode, regno)); + else + { + insn = emit_move_insn (gen_rtx_REG (DImode, regno), mem); + add_reg_note (insn, REG_CFA_RESTORE, gen_rtx_REG (DImode, regno)); + } + start_offset += increment; + } + RTX_FRAME_RELATED_P (insn) = 1; + } + } + + aarch64_save_or_restore_fprs (start_offset, increment, restore, base_rtx); + +} + +/* AArch64 stack frames generated by this compiler look like: + + +-------------------------------+ + | | + | incoming stack arguments | + | | + +-------------------------------+ <-- arg_pointer_rtx + | | + | callee-allocated save area | + | for register varargs | + | | + +-------------------------------+ <-- frame_pointer_rtx + | | + | local variables | + | | + +-------------------------------+ + | padding0 | \ + +-------------------------------+ | + | | | + | | | + | callee-saved registers | | frame.saved_regs_size + | | | + +-------------------------------+ | + | LR' | | + +-------------------------------+ | + | FP' | / + P +-------------------------------+ <-- hard_frame_pointer_rtx + | dynamic allocation | + +-------------------------------+ + | | + | outgoing stack arguments | + | | + +-------------------------------+ <-- stack_pointer_rtx + + Dynamic stack allocations such as alloca insert data at point P. + They decrease stack_pointer_rtx but leave frame_pointer_rtx and + hard_frame_pointer_rtx unchanged. */ + +/* Generate the prologue instructions for entry into a function. + Establish the stack frame by decreasing the stack pointer with a + properly calculated size and, if necessary, create a frame record + filled with the values of LR and previous frame pointer. The + current FP is also set up if it is in use. */ + +void +aarch64_expand_prologue (void) +{ + /* sub sp, sp, #<frame_size> + stp {fp, lr}, [sp, #<frame_size> - 16] + add fp, sp, #<frame_size> - hardfp_offset + stp {cs_reg}, [fp, #-16] etc. + + sub sp, sp, <final_adjustment_if_any> + */ + HOST_WIDE_INT original_frame_size; /* local variables + vararg save */ + HOST_WIDE_INT frame_size, offset; + HOST_WIDE_INT fp_offset; /* FP offset from SP */ + rtx insn; + + aarch64_layout_frame (); + original_frame_size = get_frame_size () + cfun->machine->saved_varargs_size; + gcc_assert ((!cfun->machine->saved_varargs_size || cfun->stdarg) + && (cfun->stdarg || !cfun->machine->saved_varargs_size)); + frame_size = (original_frame_size + cfun->machine->frame.saved_regs_size + + crtl->outgoing_args_size); + offset = frame_size = AARCH64_ROUND_UP (frame_size, + STACK_BOUNDARY / BITS_PER_UNIT); + + if (flag_stack_usage_info) + current_function_static_stack_size = frame_size; + + fp_offset = (offset + - original_frame_size + - cfun->machine->frame.saved_regs_size); + + /* Store pairs and load pairs have a range only -512 to 504. */ + if (offset >= 512) + { + /* When the frame has a large size, an initial decrease is done on + the stack pointer to jump over the callee-allocated save area for + register varargs, the local variable area and/or the callee-saved + register area. This will allow the pre-index write-back + store pair instructions to be used for setting up the stack frame + efficiently. 
*/ + offset = original_frame_size + cfun->machine->frame.saved_regs_size; + if (offset >= 512) + offset = cfun->machine->frame.saved_regs_size; + + frame_size -= (offset + crtl->outgoing_args_size); + fp_offset = 0; + + if (frame_size >= 0x1000000) + { + rtx op0 = gen_rtx_REG (Pmode, IP0_REGNUM); + emit_move_insn (op0, GEN_INT (-frame_size)); + emit_insn (gen_add2_insn (stack_pointer_rtx, op0)); + aarch64_set_frame_expr (gen_rtx_SET + (Pmode, stack_pointer_rtx, + plus_constant (Pmode, + stack_pointer_rtx, + -frame_size))); + } + else if (frame_size > 0) + { + if ((frame_size & 0xfff) != frame_size) + { + insn = emit_insn (gen_add2_insn + (stack_pointer_rtx, + GEN_INT (-(frame_size + & ~(HOST_WIDE_INT)0xfff)))); + RTX_FRAME_RELATED_P (insn) = 1; + } + if ((frame_size & 0xfff) != 0) + { + insn = emit_insn (gen_add2_insn + (stack_pointer_rtx, + GEN_INT (-(frame_size + & (HOST_WIDE_INT)0xfff)))); + RTX_FRAME_RELATED_P (insn) = 1; + } + } + } + else + frame_size = -1; + + if (offset > 0) + { + /* Save the frame pointer and lr if the frame pointer is needed + first. Make the frame pointer point to the location of the + old frame pointer on the stack. */ + if (frame_pointer_needed) + { + rtx mem_fp, mem_lr; + + if (fp_offset) + { + insn = emit_insn (gen_add2_insn (stack_pointer_rtx, + GEN_INT (-offset))); + RTX_FRAME_RELATED_P (insn) = 1; + aarch64_set_frame_expr (gen_rtx_SET + (Pmode, stack_pointer_rtx, + gen_rtx_MINUS (Pmode, + stack_pointer_rtx, + GEN_INT (offset)))); + mem_fp = gen_frame_mem (DImode, + plus_constant (Pmode, + stack_pointer_rtx, + fp_offset)); + mem_lr = gen_frame_mem (DImode, + plus_constant (Pmode, + stack_pointer_rtx, + fp_offset + + UNITS_PER_WORD)); + insn = emit_insn (gen_store_pairdi (mem_fp, + hard_frame_pointer_rtx, + mem_lr, + gen_rtx_REG (DImode, + LR_REGNUM))); + } + else + { + insn = emit_insn (gen_storewb_pairdi_di + (stack_pointer_rtx, stack_pointer_rtx, + hard_frame_pointer_rtx, + gen_rtx_REG (DImode, LR_REGNUM), + GEN_INT (-offset), + GEN_INT (GET_MODE_SIZE (DImode) - offset))); + RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 2)) = 1; + } + + /* The first part of a frame-related parallel insn is always + assumed to be relevant to the frame calculations; + subsequent parts, are only frame-related if explicitly + marked. */ + RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1; + RTX_FRAME_RELATED_P (insn) = 1; + + /* Set up frame pointer to point to the location of the + previous frame pointer on the stack. */ + insn = emit_insn (gen_add3_insn (hard_frame_pointer_rtx, + stack_pointer_rtx, + GEN_INT (fp_offset))); + aarch64_set_frame_expr (gen_rtx_SET + (Pmode, hard_frame_pointer_rtx, + plus_constant (Pmode, + stack_pointer_rtx, + fp_offset))); + RTX_FRAME_RELATED_P (insn) = 1; + insn = emit_insn (gen_stack_tie (stack_pointer_rtx, + hard_frame_pointer_rtx)); + } + else + { + insn = emit_insn (gen_add2_insn (stack_pointer_rtx, + GEN_INT (-offset))); + RTX_FRAME_RELATED_P (insn) = 1; + } + + aarch64_save_or_restore_callee_save_registers + (fp_offset + cfun->machine->frame.hardfp_offset, 0); + } + + /* when offset >= 512, + sub sp, sp, #<outgoing_args_size> */ + if (frame_size > -1) + { + if (crtl->outgoing_args_size > 0) + { + insn = emit_insn (gen_add2_insn + (stack_pointer_rtx, + GEN_INT (- crtl->outgoing_args_size))); + RTX_FRAME_RELATED_P (insn) = 1; + } + } +} + +/* Generate the epilogue instructions for returning from a function. 
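The size bookkeeping at the top of aarch64_expand_prologue above reduces to two formulas. A worked example with made-up sizes (40 bytes of locals, 48 bytes of callee saves, 16 bytes of outgoing arguments), assuming a frame pointer is being set up:

#include <stdio.h>

#define ROUND_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

int
main (void)
{
  long locals = 40, saved_regs = 48, outgoing = 16;   /* hypothetical sizes */

  long frame_size = ROUND_UP (locals + saved_regs + outgoing, 16);
  long fp_offset  = frame_size - locals - saved_regs;

  printf ("frame_size = %ld, fp_offset = %ld\n", frame_size, fp_offset);

  if (frame_size < 512)
    /* Small frame: one "sub sp, sp, #frame_size", FP/LR stored at
       [sp, #fp_offset], then "add x29, sp, #fp_offset".  */
    printf ("single adjustment, stp x29, x30, [sp, #%ld]\n", fp_offset);
  else
    /* Large frame: the register-save area is carved out first so the
       +/-512 range of the store-pair instructions is respected, and the
       outgoing-argument area is allocated by a separate subtraction.  */
    printf ("split adjustment\n");
  return 0;
}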
*/ +void +aarch64_expand_epilogue (bool for_sibcall) +{ + HOST_WIDE_INT original_frame_size, frame_size, offset; + HOST_WIDE_INT fp_offset; + rtx insn; + rtx cfa_reg; + + aarch64_layout_frame (); + original_frame_size = get_frame_size () + cfun->machine->saved_varargs_size; + frame_size = (original_frame_size + cfun->machine->frame.saved_regs_size + + crtl->outgoing_args_size); + offset = frame_size = AARCH64_ROUND_UP (frame_size, + STACK_BOUNDARY / BITS_PER_UNIT); + + fp_offset = (offset + - original_frame_size + - cfun->machine->frame.saved_regs_size); + + cfa_reg = frame_pointer_needed ? hard_frame_pointer_rtx : stack_pointer_rtx; + + /* Store pairs and load pairs have a range only -512 to 504. */ + if (offset >= 512) + { + offset = original_frame_size + cfun->machine->frame.saved_regs_size; + if (offset >= 512) + offset = cfun->machine->frame.saved_regs_size; + + frame_size -= (offset + crtl->outgoing_args_size); + fp_offset = 0; + if (!frame_pointer_needed && crtl->outgoing_args_size > 0) + { + insn = emit_insn (gen_add2_insn + (stack_pointer_rtx, + GEN_INT (crtl->outgoing_args_size))); + RTX_FRAME_RELATED_P (insn) = 1; + } + } + else + frame_size = -1; + + /* If there were outgoing arguments or we've done dynamic stack + allocation, then restore the stack pointer from the frame + pointer. This is at most one insn and more efficient than using + GCC's internal mechanism. */ + if (frame_pointer_needed + && (crtl->outgoing_args_size || cfun->calls_alloca)) + { + insn = emit_insn (gen_add3_insn (stack_pointer_rtx, + hard_frame_pointer_rtx, + GEN_INT (- fp_offset))); + RTX_FRAME_RELATED_P (insn) = 1; + /* As SP is set to (FP - fp_offset), according to the rules in + dwarf2cfi.c:dwarf2out_frame_debug_expr, CFA should be calculated + from the value of SP from now on. */ + cfa_reg = stack_pointer_rtx; + } + + aarch64_save_or_restore_callee_save_registers + (fp_offset + cfun->machine->frame.hardfp_offset, 1); + + /* Restore the frame pointer and lr if the frame pointer is needed. */ + if (offset > 0) + { + if (frame_pointer_needed) + { + rtx mem_fp, mem_lr; + + if (fp_offset) + { + mem_fp = gen_frame_mem (DImode, + plus_constant (Pmode, + stack_pointer_rtx, + fp_offset)); + mem_lr = gen_frame_mem (DImode, + plus_constant (Pmode, + stack_pointer_rtx, + fp_offset + + UNITS_PER_WORD)); + insn = emit_insn (gen_load_pairdi (hard_frame_pointer_rtx, + mem_fp, + gen_rtx_REG (DImode, + LR_REGNUM), + mem_lr)); + } + else + { + insn = emit_insn (gen_loadwb_pairdi_di + (stack_pointer_rtx, + stack_pointer_rtx, + hard_frame_pointer_rtx, + gen_rtx_REG (DImode, LR_REGNUM), + GEN_INT (offset), + GEN_INT (GET_MODE_SIZE (DImode) + offset))); + RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 2)) = 1; + add_reg_note (insn, REG_CFA_ADJUST_CFA, + (gen_rtx_SET (Pmode, stack_pointer_rtx, + plus_constant (Pmode, cfa_reg, + offset)))); + } + + /* The first part of a frame-related parallel insn + is always assumed to be relevant to the frame + calculations; subsequent parts, are only + frame-related if explicitly marked. 
*/ + RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1; + RTX_FRAME_RELATED_P (insn) = 1; + add_reg_note (insn, REG_CFA_RESTORE, hard_frame_pointer_rtx); + add_reg_note (insn, REG_CFA_RESTORE, + gen_rtx_REG (DImode, LR_REGNUM)); + + if (fp_offset) + { + insn = emit_insn (gen_add2_insn (stack_pointer_rtx, + GEN_INT (offset))); + RTX_FRAME_RELATED_P (insn) = 1; + } + } + else + { + insn = emit_insn (gen_add2_insn (stack_pointer_rtx, + GEN_INT (offset))); + RTX_FRAME_RELATED_P (insn) = 1; + } + } + + /* Stack adjustment for exception handler. */ + if (crtl->calls_eh_return) + { + /* We need to unwind the stack by the offset computed by + EH_RETURN_STACKADJ_RTX. However, at this point the CFA is + based on SP. Ideally we would update the SP and define the + CFA along the lines of: + + SP = SP + EH_RETURN_STACKADJ_RTX + (regnote CFA = SP - EH_RETURN_STACKADJ_RTX) + + However the dwarf emitter only understands a constant + register offset. + + The solution chosen here is to use the otherwise unused IP0 + as a temporary register to hold the current SP value. The + CFA is described using IP0 then SP is modified. */ + + rtx ip0 = gen_rtx_REG (DImode, IP0_REGNUM); + + insn = emit_move_insn (ip0, stack_pointer_rtx); + add_reg_note (insn, REG_CFA_DEF_CFA, ip0); + RTX_FRAME_RELATED_P (insn) = 1; + + emit_insn (gen_add2_insn (stack_pointer_rtx, EH_RETURN_STACKADJ_RTX)); + + /* Ensure the assignment to IP0 does not get optimized away. */ + emit_use (ip0); + } + + if (frame_size > -1) + { + if (frame_size >= 0x1000000) + { + rtx op0 = gen_rtx_REG (Pmode, IP0_REGNUM); + emit_move_insn (op0, GEN_INT (frame_size)); + emit_insn (gen_add2_insn (stack_pointer_rtx, op0)); + aarch64_set_frame_expr (gen_rtx_SET + (Pmode, stack_pointer_rtx, + plus_constant (Pmode, + stack_pointer_rtx, + frame_size))); + } + else if (frame_size > 0) + { + if ((frame_size & 0xfff) != 0) + { + insn = emit_insn (gen_add2_insn + (stack_pointer_rtx, + GEN_INT ((frame_size + & (HOST_WIDE_INT) 0xfff)))); + RTX_FRAME_RELATED_P (insn) = 1; + } + if ((frame_size & 0xfff) != frame_size) + { + insn = emit_insn (gen_add2_insn + (stack_pointer_rtx, + GEN_INT ((frame_size + & ~ (HOST_WIDE_INT) 0xfff)))); + RTX_FRAME_RELATED_P (insn) = 1; + } + } + + aarch64_set_frame_expr (gen_rtx_SET (Pmode, stack_pointer_rtx, + plus_constant (Pmode, + stack_pointer_rtx, + offset))); + } + + emit_use (gen_rtx_REG (DImode, LR_REGNUM)); + if (!for_sibcall) + emit_jump_insn (ret_rtx); +} + +/* Return the place to copy the exception unwinding return address to. + This will probably be a stack slot, but could (in theory be the + return register). */ +rtx +aarch64_final_eh_return_addr (void) +{ + HOST_WIDE_INT original_frame_size, frame_size, offset, fp_offset; + aarch64_layout_frame (); + original_frame_size = get_frame_size () + cfun->machine->saved_varargs_size; + frame_size = (original_frame_size + cfun->machine->frame.saved_regs_size + + crtl->outgoing_args_size); + offset = frame_size = AARCH64_ROUND_UP (frame_size, + STACK_BOUNDARY / BITS_PER_UNIT); + fp_offset = offset + - original_frame_size + - cfun->machine->frame.saved_regs_size; + + if (cfun->machine->frame.reg_offset[LR_REGNUM] < 0) + return gen_rtx_REG (DImode, LR_REGNUM); + + /* DSE and CSELIB do not detect an alias between sp+k1 and fp+k2. This can + result in a store to save LR introduced by builtin_eh_return () being + incorrectly deleted because the alias is not detected. + So in the calculation of the address to copy the exception unwinding + return address to, we note 2 cases. 
+ If FP is needed and the fp_offset is 0, it means that SP = FP and hence + we return a SP-relative location since all the addresses are SP-relative + in this case. This prevents the store from being optimized away. + If the fp_offset is not 0, then the addresses will be FP-relative and + therefore we return a FP-relative location. */ + + if (frame_pointer_needed) + { + if (fp_offset) + return gen_frame_mem (DImode, + plus_constant (Pmode, hard_frame_pointer_rtx, UNITS_PER_WORD)); + else + return gen_frame_mem (DImode, + plus_constant (Pmode, stack_pointer_rtx, UNITS_PER_WORD)); + } + + /* If FP is not needed, we calculate the location of LR, which would be + at the top of the saved registers block. */ + + return gen_frame_mem (DImode, + plus_constant (Pmode, + stack_pointer_rtx, + fp_offset + + cfun->machine->frame.saved_regs_size + - 2 * UNITS_PER_WORD)); +} + +/* Output code to build up a constant in a register. */ +static void +aarch64_build_constant (int regnum, HOST_WIDE_INT val) +{ + if (aarch64_bitmask_imm (val, DImode)) + emit_move_insn (gen_rtx_REG (Pmode, regnum), GEN_INT (val)); + else + { + int i; + int ncount = 0; + int zcount = 0; + HOST_WIDE_INT valp = val >> 16; + HOST_WIDE_INT valm; + HOST_WIDE_INT tval; + + for (i = 16; i < 64; i += 16) + { + valm = (valp & 0xffff); + + if (valm != 0) + ++ zcount; + + if (valm != 0xffff) + ++ ncount; + + valp >>= 16; + } + + /* zcount contains the number of additional MOVK instructions + required if the constant is built up with an initial MOVZ instruction, + while ncount is the number of MOVK instructions required if starting + with a MOVN instruction. Choose the sequence that yields the fewest + number of instructions, preferring MOVZ instructions when they are both + the same. */ + if (ncount < zcount) + { + emit_move_insn (gen_rtx_REG (Pmode, regnum), + GEN_INT (val | ~(HOST_WIDE_INT) 0xffff)); + tval = 0xffff; + } + else + { + emit_move_insn (gen_rtx_REG (Pmode, regnum), + GEN_INT (val & 0xffff)); + tval = 0; + } + + val >>= 16; + + for (i = 16; i < 64; i += 16) + { + if ((val & 0xffff) != tval) + emit_insn (gen_insv_immdi (gen_rtx_REG (Pmode, regnum), + GEN_INT (i), GEN_INT (val & 0xffff))); + val >>= 16; + } + } +} + +static void +aarch64_add_constant (int regnum, int scratchreg, HOST_WIDE_INT delta) +{ + HOST_WIDE_INT mdelta = delta; + rtx this_rtx = gen_rtx_REG (Pmode, regnum); + rtx scratch_rtx = gen_rtx_REG (Pmode, scratchreg); + + if (mdelta < 0) + mdelta = -mdelta; + + if (mdelta >= 4096 * 4096) + { + aarch64_build_constant (scratchreg, delta); + emit_insn (gen_add3_insn (this_rtx, this_rtx, scratch_rtx)); + } + else if (mdelta > 0) + { + if (mdelta >= 4096) + { + emit_insn (gen_rtx_SET (Pmode, scratch_rtx, GEN_INT (mdelta / 4096))); + rtx shift = gen_rtx_ASHIFT (Pmode, scratch_rtx, GEN_INT (12)); + if (delta < 0) + emit_insn (gen_rtx_SET (Pmode, this_rtx, + gen_rtx_MINUS (Pmode, this_rtx, shift))); + else + emit_insn (gen_rtx_SET (Pmode, this_rtx, + gen_rtx_PLUS (Pmode, this_rtx, shift))); + } + if (mdelta % 4096 != 0) + { + scratch_rtx = GEN_INT ((delta < 0 ? -1 : 1) * (mdelta % 4096)); + emit_insn (gen_rtx_SET (Pmode, this_rtx, + gen_rtx_PLUS (Pmode, this_rtx, scratch_rtx))); + } + } +} + +/* Output code to add DELTA to the first argument, and then jump + to FUNCTION. Used for C++ multiple inheritance. */ +static void +aarch64_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED, + HOST_WIDE_INT delta, + HOST_WIDE_INT vcall_offset, + tree function) +{ + /* The this pointer is always in x0. 
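The MOVZ-versus-MOVN choice in aarch64_build_constant above depends only on how many of the upper three 16-bit chunks are non-zero or non-ones. A host-side sketch of that choice for one sample value:

#include <stdio.h>

int
main (void)
{
  /* Decide, as aarch64_build_constant does, whether to start the
     synthesis from MOVZ (all zeros) or MOVN (all ones).  */
  unsigned long long val = 0xffffffffffff1234ULL;
  int zcount = 0, ncount = 0;

  for (int i = 16; i < 64; i += 16)
    {
      unsigned long long chunk = (val >> i) & 0xffff;
      if (chunk != 0)
        ++zcount;          /* MOVK needed after an initial MOVZ */
      if (chunk != 0xffff)
        ++ncount;          /* MOVK needed after an initial MOVN */
    }

  printf ("zcount=%d ncount=%d -> start with %s\n",
          zcount, ncount, ncount < zcount ? "MOVN" : "MOVZ");
  /* For this value the three upper chunks are all ones, so ncount is 0
     and the constant is a single MOVN with no trailing MOVKs.  */
  return 0;
}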
Note that this differs from + Arm where the this pointer maybe bumped to r1 if r0 is required + to return a pointer to an aggregate. On AArch64 a result value + pointer will be in x8. */ + int this_regno = R0_REGNUM; + rtx this_rtx, temp0, temp1, addr, insn, funexp; + + reload_completed = 1; + emit_note (NOTE_INSN_PROLOGUE_END); + + if (vcall_offset == 0) + aarch64_add_constant (this_regno, IP1_REGNUM, delta); + else + { + gcc_assert ((vcall_offset & (POINTER_BYTES - 1)) == 0); + + this_rtx = gen_rtx_REG (Pmode, this_regno); + temp0 = gen_rtx_REG (Pmode, IP0_REGNUM); + temp1 = gen_rtx_REG (Pmode, IP1_REGNUM); + + addr = this_rtx; + if (delta != 0) + { + if (delta >= -256 && delta < 256) + addr = gen_rtx_PRE_MODIFY (Pmode, this_rtx, + plus_constant (Pmode, this_rtx, delta)); + else + aarch64_add_constant (this_regno, IP1_REGNUM, delta); + } + + if (Pmode == ptr_mode) + aarch64_emit_move (temp0, gen_rtx_MEM (ptr_mode, addr)); + else + aarch64_emit_move (temp0, + gen_rtx_ZERO_EXTEND (Pmode, + gen_rtx_MEM (ptr_mode, addr))); + + if (vcall_offset >= -256 && vcall_offset < 4096 * POINTER_BYTES) + addr = plus_constant (Pmode, temp0, vcall_offset); + else + { + aarch64_build_constant (IP1_REGNUM, vcall_offset); + addr = gen_rtx_PLUS (Pmode, temp0, temp1); + } + + if (Pmode == ptr_mode) + aarch64_emit_move (temp1, gen_rtx_MEM (ptr_mode,addr)); + else + aarch64_emit_move (temp1, + gen_rtx_SIGN_EXTEND (Pmode, + gen_rtx_MEM (ptr_mode, addr))); + + emit_insn (gen_add2_insn (this_rtx, temp1)); + } + + /* Generate a tail call to the target function. */ + if (!TREE_USED (function)) + { + assemble_external (function); + TREE_USED (function) = 1; + } + funexp = XEXP (DECL_RTL (function), 0); + funexp = gen_rtx_MEM (FUNCTION_MODE, funexp); + insn = emit_call_insn (gen_sibcall (funexp, const0_rtx, NULL_RTX)); + SIBLING_CALL_P (insn) = 1; + + insn = get_insns (); + shorten_branches (insn); + final_start_function (insn, file, 1); + final (insn, file, 1); + final_end_function (); + + /* Stop pretending to be a post-reload pass. */ + reload_completed = 0; +} + +static int +aarch64_tls_operand_p_1 (rtx *x, void *data ATTRIBUTE_UNUSED) +{ + if (GET_CODE (*x) == SYMBOL_REF) + return SYMBOL_REF_TLS_MODEL (*x) != 0; + + /* Don't recurse into UNSPEC_TLS looking for TLS symbols; these are + TLS offsets, not real symbol references. 
*/ + if (GET_CODE (*x) == UNSPEC + && XINT (*x, 1) == UNSPEC_TLS) + return -1; + + return 0; +} + +static bool +aarch64_tls_referenced_p (rtx x) +{ + if (!TARGET_HAVE_TLS) + return false; + + return for_each_rtx (&x, aarch64_tls_operand_p_1, NULL); +} + + +static int +aarch64_bitmasks_cmp (const void *i1, const void *i2) +{ + const unsigned HOST_WIDE_INT *imm1 = (const unsigned HOST_WIDE_INT *) i1; + const unsigned HOST_WIDE_INT *imm2 = (const unsigned HOST_WIDE_INT *) i2; + + if (*imm1 < *imm2) + return -1; + if (*imm1 > *imm2) + return +1; + return 0; +} + + +static void +aarch64_build_bitmask_table (void) +{ + unsigned HOST_WIDE_INT mask, imm; + unsigned int log_e, e, s, r; + unsigned int nimms = 0; + + for (log_e = 1; log_e <= 6; log_e++) + { + e = 1 << log_e; + if (e == 64) + mask = ~(HOST_WIDE_INT) 0; + else + mask = ((HOST_WIDE_INT) 1 << e) - 1; + for (s = 1; s < e; s++) + { + for (r = 0; r < e; r++) + { + /* set s consecutive bits to 1 (s < 64) */ + imm = ((unsigned HOST_WIDE_INT)1 << s) - 1; + /* rotate right by r */ + if (r != 0) + imm = ((imm >> r) | (imm << (e - r))) & mask; + /* replicate the constant depending on SIMD size */ + switch (log_e) { + case 1: imm |= (imm << 2); + case 2: imm |= (imm << 4); + case 3: imm |= (imm << 8); + case 4: imm |= (imm << 16); + case 5: imm |= (imm << 32); + case 6: + break; + default: + gcc_unreachable (); + } + gcc_assert (nimms < AARCH64_NUM_BITMASKS); + aarch64_bitmasks[nimms++] = imm; + } + } + } + + gcc_assert (nimms == AARCH64_NUM_BITMASKS); + qsort (aarch64_bitmasks, nimms, sizeof (aarch64_bitmasks[0]), + aarch64_bitmasks_cmp); +} + + +/* Return true if val can be encoded as a 12-bit unsigned immediate with + a left shift of 0 or 12 bits. */ +bool +aarch64_uimm12_shift (HOST_WIDE_INT val) +{ + return ((val & (((HOST_WIDE_INT) 0xfff) << 0)) == val + || (val & (((HOST_WIDE_INT) 0xfff) << 12)) == val + ); +} + + +/* Return true if val is an immediate that can be loaded into a + register by a MOVZ instruction. */ +static bool +aarch64_movw_imm (HOST_WIDE_INT val, enum machine_mode mode) +{ + if (GET_MODE_SIZE (mode) > 4) + { + if ((val & (((HOST_WIDE_INT) 0xffff) << 32)) == val + || (val & (((HOST_WIDE_INT) 0xffff) << 48)) == val) + return 1; + } + else + { + /* Ignore sign extension. */ + val &= (HOST_WIDE_INT) 0xffffffff; + } + return ((val & (((HOST_WIDE_INT) 0xffff) << 0)) == val + || (val & (((HOST_WIDE_INT) 0xffff) << 16)) == val); +} + + +/* Return true if val is a valid bitmask immediate. */ +bool +aarch64_bitmask_imm (HOST_WIDE_INT val, enum machine_mode mode) +{ + if (GET_MODE_SIZE (mode) < 8) + { + /* Replicate bit pattern. */ + val &= (HOST_WIDE_INT) 0xffffffff; + val |= val << 32; + } + return bsearch (&val, aarch64_bitmasks, AARCH64_NUM_BITMASKS, + sizeof (aarch64_bitmasks[0]), aarch64_bitmasks_cmp) != NULL; +} + + +/* Return true if val is an immediate that can be loaded into a + register in a single instruction. 
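aarch64_build_bitmask_table above enumerates every (element size, run length, rotation) triple allowed by the logical-immediate encoding. A standalone sketch of the same enumeration, which also shows where the total required by the final gcc_assert comes from: the sum of e * (e - 1) over e = 2, 4, ..., 64, i.e. 5334 entries for AARCH64_NUM_BITMASKS:

#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  unsigned count = 0;

  for (unsigned log_e = 1; log_e <= 6; log_e++)
    {
      unsigned e = 1u << log_e;
      uint64_t mask = (e == 64) ? ~(uint64_t) 0 : (((uint64_t) 1 << e) - 1);

      for (unsigned s = 1; s < e; s++)
        for (unsigned r = 0; r < e; r++)
          {
            uint64_t imm = ((uint64_t) 1 << s) - 1;         /* s low bits set */
            if (r != 0)
              imm = ((imm >> r) | (imm << (e - r))) & mask; /* rotate right by r */
            for (unsigned w = e; w < 64; w *= 2)            /* replicate to 64 bits */
              imm |= imm << w;

            if (imm == 0 || imm == ~(uint64_t) 0)
              return 1;   /* 0 and ~0 are never valid logical immediates */
            count++;
          }
    }

  printf ("%u logical-immediate encodings\n", count);       /* 5334 */
  return 0;
}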
*/ +bool +aarch64_move_imm (HOST_WIDE_INT val, enum machine_mode mode) +{ + if (aarch64_movw_imm (val, mode) || aarch64_movw_imm (~val, mode)) + return 1; + return aarch64_bitmask_imm (val, mode); +} + +static bool +aarch64_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x) +{ + rtx base, offset; + + if (GET_CODE (x) == HIGH) + return true; + + split_const (x, &base, &offset); + if (GET_CODE (base) == SYMBOL_REF || GET_CODE (base) == LABEL_REF) + { + if (aarch64_classify_symbol (base, SYMBOL_CONTEXT_ADR) + != SYMBOL_FORCE_TO_MEM) + return true; + else + /* Avoid generating a 64-bit relocation in ILP32; leave + to aarch64_expand_mov_immediate to handle it properly. */ + return mode != ptr_mode; + } + + return aarch64_tls_referenced_p (x); +} + +/* Return true if register REGNO is a valid index register. + STRICT_P is true if REG_OK_STRICT is in effect. */ + +bool +aarch64_regno_ok_for_index_p (int regno, bool strict_p) +{ + if (!HARD_REGISTER_NUM_P (regno)) + { + if (!strict_p) + return true; + + if (!reg_renumber) + return false; + + regno = reg_renumber[regno]; + } + return GP_REGNUM_P (regno); +} + +/* Return true if register REGNO is a valid base register for mode MODE. + STRICT_P is true if REG_OK_STRICT is in effect. */ + +bool +aarch64_regno_ok_for_base_p (int regno, bool strict_p) +{ + if (!HARD_REGISTER_NUM_P (regno)) + { + if (!strict_p) + return true; + + if (!reg_renumber) + return false; + + regno = reg_renumber[regno]; + } + + /* The fake registers will be eliminated to either the stack or + hard frame pointer, both of which are usually valid base registers. + Reload deals with the cases where the eliminated form isn't valid. */ + return (GP_REGNUM_P (regno) + || regno == SP_REGNUM + || regno == FRAME_POINTER_REGNUM + || regno == ARG_POINTER_REGNUM); +} + +/* Return true if X is a valid base register for mode MODE. + STRICT_P is true if REG_OK_STRICT is in effect. */ + +static bool +aarch64_base_register_rtx_p (rtx x, bool strict_p) +{ + if (!strict_p && GET_CODE (x) == SUBREG) + x = SUBREG_REG (x); + + return (REG_P (x) && aarch64_regno_ok_for_base_p (REGNO (x), strict_p)); +} + +/* Return true if address offset is a valid index. If it is, fill in INFO + appropriately. STRICT_P is true if REG_OK_STRICT is in effect. */ + +static bool +aarch64_classify_index (struct aarch64_address_info *info, rtx x, + enum machine_mode mode, bool strict_p) +{ + enum aarch64_address_type type; + rtx index; + int shift; + + /* (reg:P) */ + if ((REG_P (x) || GET_CODE (x) == SUBREG) + && GET_MODE (x) == Pmode) + { + type = ADDRESS_REG_REG; + index = x; + shift = 0; + } + /* (sign_extend:DI (reg:SI)) */ + else if ((GET_CODE (x) == SIGN_EXTEND + || GET_CODE (x) == ZERO_EXTEND) + && GET_MODE (x) == DImode + && GET_MODE (XEXP (x, 0)) == SImode) + { + type = (GET_CODE (x) == SIGN_EXTEND) + ? ADDRESS_REG_SXTW : ADDRESS_REG_UXTW; + index = XEXP (x, 0); + shift = 0; + } + /* (mult:DI (sign_extend:DI (reg:SI)) (const_int scale)) */ + else if (GET_CODE (x) == MULT + && (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND + || GET_CODE (XEXP (x, 0)) == ZERO_EXTEND) + && GET_MODE (XEXP (x, 0)) == DImode + && GET_MODE (XEXP (XEXP (x, 0), 0)) == SImode + && CONST_INT_P (XEXP (x, 1))) + { + type = (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND) + ? 
ADDRESS_REG_SXTW : ADDRESS_REG_UXTW; + index = XEXP (XEXP (x, 0), 0); + shift = exact_log2 (INTVAL (XEXP (x, 1))); + } + /* (ashift:DI (sign_extend:DI (reg:SI)) (const_int shift)) */ + else if (GET_CODE (x) == ASHIFT + && (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND + || GET_CODE (XEXP (x, 0)) == ZERO_EXTEND) + && GET_MODE (XEXP (x, 0)) == DImode + && GET_MODE (XEXP (XEXP (x, 0), 0)) == SImode + && CONST_INT_P (XEXP (x, 1))) + { + type = (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND) + ? ADDRESS_REG_SXTW : ADDRESS_REG_UXTW; + index = XEXP (XEXP (x, 0), 0); + shift = INTVAL (XEXP (x, 1)); + } + /* (sign_extract:DI (mult:DI (reg:DI) (const_int scale)) 32+shift 0) */ + else if ((GET_CODE (x) == SIGN_EXTRACT + || GET_CODE (x) == ZERO_EXTRACT) + && GET_MODE (x) == DImode + && GET_CODE (XEXP (x, 0)) == MULT + && GET_MODE (XEXP (XEXP (x, 0), 0)) == DImode + && CONST_INT_P (XEXP (XEXP (x, 0), 1))) + { + type = (GET_CODE (x) == SIGN_EXTRACT) + ? ADDRESS_REG_SXTW : ADDRESS_REG_UXTW; + index = XEXP (XEXP (x, 0), 0); + shift = exact_log2 (INTVAL (XEXP (XEXP (x, 0), 1))); + if (INTVAL (XEXP (x, 1)) != 32 + shift + || INTVAL (XEXP (x, 2)) != 0) + shift = -1; + } + /* (and:DI (mult:DI (reg:DI) (const_int scale)) + (const_int 0xffffffff<<shift)) */ + else if (GET_CODE (x) == AND + && GET_MODE (x) == DImode + && GET_CODE (XEXP (x, 0)) == MULT + && GET_MODE (XEXP (XEXP (x, 0), 0)) == DImode + && CONST_INT_P (XEXP (XEXP (x, 0), 1)) + && CONST_INT_P (XEXP (x, 1))) + { + type = ADDRESS_REG_UXTW; + index = XEXP (XEXP (x, 0), 0); + shift = exact_log2 (INTVAL (XEXP (XEXP (x, 0), 1))); + if (INTVAL (XEXP (x, 1)) != (HOST_WIDE_INT)0xffffffff << shift) + shift = -1; + } + /* (sign_extract:DI (ashift:DI (reg:DI) (const_int shift)) 32+shift 0) */ + else if ((GET_CODE (x) == SIGN_EXTRACT + || GET_CODE (x) == ZERO_EXTRACT) + && GET_MODE (x) == DImode + && GET_CODE (XEXP (x, 0)) == ASHIFT + && GET_MODE (XEXP (XEXP (x, 0), 0)) == DImode + && CONST_INT_P (XEXP (XEXP (x, 0), 1))) + { + type = (GET_CODE (x) == SIGN_EXTRACT) + ? 
ADDRESS_REG_SXTW : ADDRESS_REG_UXTW; + index = XEXP (XEXP (x, 0), 0); + shift = INTVAL (XEXP (XEXP (x, 0), 1)); + if (INTVAL (XEXP (x, 1)) != 32 + shift + || INTVAL (XEXP (x, 2)) != 0) + shift = -1; + } + /* (and:DI (ashift:DI (reg:DI) (const_int shift)) + (const_int 0xffffffff<<shift)) */ + else if (GET_CODE (x) == AND + && GET_MODE (x) == DImode + && GET_CODE (XEXP (x, 0)) == ASHIFT + && GET_MODE (XEXP (XEXP (x, 0), 0)) == DImode + && CONST_INT_P (XEXP (XEXP (x, 0), 1)) + && CONST_INT_P (XEXP (x, 1))) + { + type = ADDRESS_REG_UXTW; + index = XEXP (XEXP (x, 0), 0); + shift = INTVAL (XEXP (XEXP (x, 0), 1)); + if (INTVAL (XEXP (x, 1)) != (HOST_WIDE_INT)0xffffffff << shift) + shift = -1; + } + /* (mult:P (reg:P) (const_int scale)) */ + else if (GET_CODE (x) == MULT + && GET_MODE (x) == Pmode + && GET_MODE (XEXP (x, 0)) == Pmode + && CONST_INT_P (XEXP (x, 1))) + { + type = ADDRESS_REG_REG; + index = XEXP (x, 0); + shift = exact_log2 (INTVAL (XEXP (x, 1))); + } + /* (ashift:P (reg:P) (const_int shift)) */ + else if (GET_CODE (x) == ASHIFT + && GET_MODE (x) == Pmode + && GET_MODE (XEXP (x, 0)) == Pmode + && CONST_INT_P (XEXP (x, 1))) + { + type = ADDRESS_REG_REG; + index = XEXP (x, 0); + shift = INTVAL (XEXP (x, 1)); + } + else + return false; + + if (GET_CODE (index) == SUBREG) + index = SUBREG_REG (index); + + if ((shift == 0 || + (shift > 0 && shift <= 3 + && (1 << shift) == GET_MODE_SIZE (mode))) + && REG_P (index) + && aarch64_regno_ok_for_index_p (REGNO (index), strict_p)) + { + info->type = type; + info->offset = index; + info->shift = shift; + return true; + } + + return false; +} + +static inline bool +offset_7bit_signed_scaled_p (enum machine_mode mode, HOST_WIDE_INT offset) +{ + return (offset >= -64 * GET_MODE_SIZE (mode) + && offset < 64 * GET_MODE_SIZE (mode) + && offset % GET_MODE_SIZE (mode) == 0); +} + +static inline bool +offset_9bit_signed_unscaled_p (enum machine_mode mode ATTRIBUTE_UNUSED, + HOST_WIDE_INT offset) +{ + return offset >= -256 && offset < 256; +} + +static inline bool +offset_12bit_unsigned_scaled_p (enum machine_mode mode, HOST_WIDE_INT offset) +{ + return (offset >= 0 + && offset < 4096 * GET_MODE_SIZE (mode) + && offset % GET_MODE_SIZE (mode) == 0); +} + +/* Return true if X is a valid address for machine mode MODE. If it is, + fill in INFO appropriately. STRICT_P is true if REG_OK_STRICT is in + effect. OUTER_CODE is PARALLEL for a load/store pair. */ + +static bool +aarch64_classify_address (struct aarch64_address_info *info, + rtx x, enum machine_mode mode, + RTX_CODE outer_code, bool strict_p) +{ + enum rtx_code code = GET_CODE (x); + rtx op0, op1; + bool allow_reg_index_p = + outer_code != PARALLEL && GET_MODE_SIZE(mode) != 16; + + /* Don't support anything other than POST_INC or REG addressing for + AdvSIMD. */ + if (aarch64_vector_mode_p (mode) + && (code != POST_INC && code != REG)) + return false; + + switch (code) + { + case REG: + case SUBREG: + info->type = ADDRESS_REG_IMM; + info->base = x; + info->offset = const0_rtx; + return aarch64_base_register_rtx_p (x, strict_p); + + case PLUS: + op0 = XEXP (x, 0); + op1 = XEXP (x, 1); + if (GET_MODE_SIZE (mode) != 0 + && CONST_INT_P (op1) + && aarch64_base_register_rtx_p (op0, strict_p)) + { + HOST_WIDE_INT offset = INTVAL (op1); + + info->type = ADDRESS_REG_IMM; + info->base = op0; + info->offset = op1; + + /* TImode and TFmode values are allowed in both pairs of X + registers and individual Q registers. 
The available + address modes are: + X,X: 7-bit signed scaled offset + Q: 9-bit signed offset + We conservatively require an offset representable in either mode. + */ + if (mode == TImode || mode == TFmode) + return (offset_7bit_signed_scaled_p (mode, offset) + && offset_9bit_signed_unscaled_p (mode, offset)); + + if (outer_code == PARALLEL) + return ((GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8) + && offset_7bit_signed_scaled_p (mode, offset)); + else + return (offset_9bit_signed_unscaled_p (mode, offset) + || offset_12bit_unsigned_scaled_p (mode, offset)); + } + + if (allow_reg_index_p) + { + /* Look for base + (scaled/extended) index register. */ + if (aarch64_base_register_rtx_p (op0, strict_p) + && aarch64_classify_index (info, op1, mode, strict_p)) + { + info->base = op0; + return true; + } + if (aarch64_base_register_rtx_p (op1, strict_p) + && aarch64_classify_index (info, op0, mode, strict_p)) + { + info->base = op1; + return true; + } + } + + return false; + + case POST_INC: + case POST_DEC: + case PRE_INC: + case PRE_DEC: + info->type = ADDRESS_REG_WB; + info->base = XEXP (x, 0); + info->offset = NULL_RTX; + return aarch64_base_register_rtx_p (info->base, strict_p); + + case POST_MODIFY: + case PRE_MODIFY: + info->type = ADDRESS_REG_WB; + info->base = XEXP (x, 0); + if (GET_CODE (XEXP (x, 1)) == PLUS + && CONST_INT_P (XEXP (XEXP (x, 1), 1)) + && rtx_equal_p (XEXP (XEXP (x, 1), 0), info->base) + && aarch64_base_register_rtx_p (info->base, strict_p)) + { + HOST_WIDE_INT offset; + info->offset = XEXP (XEXP (x, 1), 1); + offset = INTVAL (info->offset); + + /* TImode and TFmode values are allowed in both pairs of X + registers and individual Q registers. The available + address modes are: + X,X: 7-bit signed scaled offset + Q: 9-bit signed offset + We conservatively require an offset representable in either mode. + */ + if (mode == TImode || mode == TFmode) + return (offset_7bit_signed_scaled_p (mode, offset) + && offset_9bit_signed_unscaled_p (mode, offset)); + + if (outer_code == PARALLEL) + return ((GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8) + && offset_7bit_signed_scaled_p (mode, offset)); + else + return offset_9bit_signed_unscaled_p (mode, offset); + } + return false; + + case CONST: + case SYMBOL_REF: + case LABEL_REF: + /* load literal: pc-relative constant pool entry. Only supported + for SI mode or larger. */ + info->type = ADDRESS_SYMBOLIC; + if (outer_code != PARALLEL && GET_MODE_SIZE (mode) >= 4) + { + rtx sym, addend; + + split_const (x, &sym, &addend); + return (GET_CODE (sym) == LABEL_REF + || (GET_CODE (sym) == SYMBOL_REF + && CONSTANT_POOL_ADDRESS_P (sym))); + } + return false; + + case LO_SUM: + info->type = ADDRESS_LO_SUM; + info->base = XEXP (x, 0); + info->offset = XEXP (x, 1); + if (allow_reg_index_p + && aarch64_base_register_rtx_p (info->base, strict_p)) + { + rtx sym, offs; + split_const (info->offset, &sym, &offs); + if (GET_CODE (sym) == SYMBOL_REF + && (aarch64_classify_symbol (sym, SYMBOL_CONTEXT_MEM) + == SYMBOL_SMALL_ABSOLUTE)) + { + /* The symbol and offset must be aligned to the access size. 
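+   A LO_SUM address is used with a :lo12: relocation, and the scaled
+   12-bit offset field of LDR/STR needs the low bits of the final
+   address to be a multiple of the access size, so both the addend and
+   the symbol's own alignment are checked below.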
*/ + unsigned int align; + unsigned int ref_size; + + if (CONSTANT_POOL_ADDRESS_P (sym)) + align = GET_MODE_ALIGNMENT (get_pool_mode (sym)); + else if (TREE_CONSTANT_POOL_ADDRESS_P (sym)) + { + tree exp = SYMBOL_REF_DECL (sym); + align = TYPE_ALIGN (TREE_TYPE (exp)); + align = CONSTANT_ALIGNMENT (exp, align); + } + else if (SYMBOL_REF_DECL (sym)) + align = DECL_ALIGN (SYMBOL_REF_DECL (sym)); + else + align = BITS_PER_UNIT; + + ref_size = GET_MODE_SIZE (mode); + if (ref_size == 0) + ref_size = GET_MODE_SIZE (DImode); + + return ((INTVAL (offs) & (ref_size - 1)) == 0 + && ((align / BITS_PER_UNIT) & (ref_size - 1)) == 0); + } + } + return false; + + default: + return false; + } +} + +bool +aarch64_symbolic_address_p (rtx x) +{ + rtx offset; + + split_const (x, &x, &offset); + return GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF; +} + +/* Classify the base of symbolic expression X, given that X appears in + context CONTEXT. */ + +enum aarch64_symbol_type +aarch64_classify_symbolic_expression (rtx x, + enum aarch64_symbol_context context) +{ + rtx offset; + + split_const (x, &x, &offset); + return aarch64_classify_symbol (x, context); +} + + +/* Return TRUE if X is a legitimate address for accessing memory in + mode MODE. */ +static bool +aarch64_legitimate_address_hook_p (enum machine_mode mode, rtx x, bool strict_p) +{ + struct aarch64_address_info addr; + + return aarch64_classify_address (&addr, x, mode, MEM, strict_p); +} + +/* Return TRUE if X is a legitimate address for accessing memory in + mode MODE. OUTER_CODE will be PARALLEL if this is a load/store + pair operation. */ +bool +aarch64_legitimate_address_p (enum machine_mode mode, rtx x, + RTX_CODE outer_code, bool strict_p) +{ + struct aarch64_address_info addr; + + return aarch64_classify_address (&addr, x, mode, outer_code, strict_p); +} + +/* Return TRUE if rtx X is immediate constant 0.0 */ +bool +aarch64_float_const_zero_rtx_p (rtx x) +{ + REAL_VALUE_TYPE r; + + if (GET_MODE (x) == VOIDmode) + return false; + + REAL_VALUE_FROM_CONST_DOUBLE (r, x); + if (REAL_VALUE_MINUS_ZERO (r)) + return !HONOR_SIGNED_ZEROS (GET_MODE (x)); + return REAL_VALUES_EQUAL (r, dconst0); +} + +/* Return the fixed registers used for condition codes. */ + +static bool +aarch64_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2) +{ + *p1 = CC_REGNUM; + *p2 = INVALID_REGNUM; + return true; +} + +enum machine_mode +aarch64_select_cc_mode (RTX_CODE code, rtx x, rtx y) +{ + /* All floating point compares return CCFP if it is an equality + comparison, and CCFPE otherwise. */ + if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT) + { + switch (code) + { + case EQ: + case NE: + case UNORDERED: + case ORDERED: + case UNLT: + case UNLE: + case UNGT: + case UNGE: + case UNEQ: + case LTGT: + return CCFPmode; + + case LT: + case LE: + case GT: + case GE: + return CCFPEmode; + + default: + gcc_unreachable (); + } + } + + if ((GET_MODE (x) == SImode || GET_MODE (x) == DImode) + && y == const0_rtx + && (code == EQ || code == NE || code == LT || code == GE) + && (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS || GET_CODE (x) == AND + || GET_CODE (x) == NEG)) + return CC_NZmode; + + /* A compare with a shifted operand. Because of canonicalization, + the comparison will have to be swapped when we emit the assembly + code. 
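+   For example (compare (ashift x 2) y) has to be output as
+   cmp y, x, lsl #2, because only the second operand of CMP may carry
+   a shift, and CC_SWPmode records that the condition must be swapped
+   as well (GE is printed as LE, GT as LT, and so on).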
*/ + if ((GET_MODE (x) == SImode || GET_MODE (x) == DImode) + && (GET_CODE (y) == REG || GET_CODE (y) == SUBREG) + && (GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT + || GET_CODE (x) == LSHIFTRT + || GET_CODE (x) == ZERO_EXTEND || GET_CODE (x) == SIGN_EXTEND)) + return CC_SWPmode; + + /* Similarly for a negated operand, but we can only do this for + equalities. */ + if ((GET_MODE (x) == SImode || GET_MODE (x) == DImode) + && (GET_CODE (y) == REG || GET_CODE (y) == SUBREG) + && (code == EQ || code == NE) + && GET_CODE (x) == NEG) + return CC_Zmode; + + /* A compare of a mode narrower than SI mode against zero can be done + by extending the value in the comparison. */ + if ((GET_MODE (x) == QImode || GET_MODE (x) == HImode) + && y == const0_rtx) + /* Only use sign-extension if we really need it. */ + return ((code == GT || code == GE || code == LE || code == LT) + ? CC_SESWPmode : CC_ZESWPmode); + + /* For everything else, return CCmode. */ + return CCmode; +} + +static unsigned +aarch64_get_condition_code (rtx x) +{ + enum machine_mode mode = GET_MODE (XEXP (x, 0)); + enum rtx_code comp_code = GET_CODE (x); + + if (GET_MODE_CLASS (mode) != MODE_CC) + mode = SELECT_CC_MODE (comp_code, XEXP (x, 0), XEXP (x, 1)); + + switch (mode) + { + case CCFPmode: + case CCFPEmode: + switch (comp_code) + { + case GE: return AARCH64_GE; + case GT: return AARCH64_GT; + case LE: return AARCH64_LS; + case LT: return AARCH64_MI; + case NE: return AARCH64_NE; + case EQ: return AARCH64_EQ; + case ORDERED: return AARCH64_VC; + case UNORDERED: return AARCH64_VS; + case UNLT: return AARCH64_LT; + case UNLE: return AARCH64_LE; + case UNGT: return AARCH64_HI; + case UNGE: return AARCH64_PL; + default: gcc_unreachable (); + } + break; + + case CCmode: + switch (comp_code) + { + case NE: return AARCH64_NE; + case EQ: return AARCH64_EQ; + case GE: return AARCH64_GE; + case GT: return AARCH64_GT; + case LE: return AARCH64_LE; + case LT: return AARCH64_LT; + case GEU: return AARCH64_CS; + case GTU: return AARCH64_HI; + case LEU: return AARCH64_LS; + case LTU: return AARCH64_CC; + default: gcc_unreachable (); + } + break; + + case CC_SWPmode: + case CC_ZESWPmode: + case CC_SESWPmode: + switch (comp_code) + { + case NE: return AARCH64_NE; + case EQ: return AARCH64_EQ; + case GE: return AARCH64_LE; + case GT: return AARCH64_LT; + case LE: return AARCH64_GE; + case LT: return AARCH64_GT; + case GEU: return AARCH64_LS; + case GTU: return AARCH64_CC; + case LEU: return AARCH64_CS; + case LTU: return AARCH64_HI; + default: gcc_unreachable (); + } + break; + + case CC_NZmode: + switch (comp_code) + { + case NE: return AARCH64_NE; + case EQ: return AARCH64_EQ; + case GE: return AARCH64_PL; + case LT: return AARCH64_MI; + default: gcc_unreachable (); + } + break; + + case CC_Zmode: + switch (comp_code) + { + case NE: return AARCH64_NE; + case EQ: return AARCH64_EQ; + default: gcc_unreachable (); + } + break; + + default: + gcc_unreachable (); + break; + } +} + +static unsigned +bit_count (unsigned HOST_WIDE_INT value) +{ + unsigned count = 0; + + while (value) + { + count++; + value &= value - 1; + } + + return count; +} + +void +aarch64_print_operand (FILE *f, rtx x, char code) +{ + switch (code) + { + /* An integer or symbol address without a preceding # sign. 
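+   For example %c0 prints 42 for (const_int 42) and the bare symbol
+   name, plus any addend, for a symbolic address.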
*/ + case 'c': + switch (GET_CODE (x)) + { + case CONST_INT: + fprintf (f, HOST_WIDE_INT_PRINT_DEC, INTVAL (x)); + break; + + case SYMBOL_REF: + output_addr_const (f, x); + break; + + case CONST: + if (GET_CODE (XEXP (x, 0)) == PLUS + && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF) + { + output_addr_const (f, x); + break; + } + /* Fall through. */ + + default: + output_operand_lossage ("Unsupported operand for code '%c'", code); + } + break; + + case 'e': + /* Print the sign/zero-extend size as a character 8->b, 16->h, 32->w. */ + { + int n; + + if (GET_CODE (x) != CONST_INT + || (n = exact_log2 (INTVAL (x) & ~7)) <= 0) + { + output_operand_lossage ("invalid operand for '%%%c'", code); + return; + } + + switch (n) + { + case 3: + fputc ('b', f); + break; + case 4: + fputc ('h', f); + break; + case 5: + fputc ('w', f); + break; + default: + output_operand_lossage ("invalid operand for '%%%c'", code); + return; + } + } + break; + + case 'p': + { + int n; + + /* Print N such that 2^N == X. */ + if (GET_CODE (x) != CONST_INT || (n = exact_log2 (INTVAL (x))) < 0) + { + output_operand_lossage ("invalid operand for '%%%c'", code); + return; + } + + asm_fprintf (f, "%d", n); + } + break; + + case 'P': + /* Print the number of non-zero bits in X (a const_int). */ + if (GET_CODE (x) != CONST_INT) + { + output_operand_lossage ("invalid operand for '%%%c'", code); + return; + } + + asm_fprintf (f, "%u", bit_count (INTVAL (x))); + break; + + case 'H': + /* Print the higher numbered register of a pair (TImode) of regs. */ + if (GET_CODE (x) != REG || !GP_REGNUM_P (REGNO (x) + 1)) + { + output_operand_lossage ("invalid operand for '%%%c'", code); + return; + } + + asm_fprintf (f, "%s", reg_names [REGNO (x) + 1]); + break; + + case 'm': + /* Print a condition (eq, ne, etc). */ + + /* CONST_TRUE_RTX means always -- that's the default. */ + if (x == const_true_rtx) + return; + + if (!COMPARISON_P (x)) + { + output_operand_lossage ("invalid operand for '%%%c'", code); + return; + } + + fputs (aarch64_condition_codes[aarch64_get_condition_code (x)], f); + break; + + case 'M': + /* Print the inverse of a condition (eq <-> ne, etc). */ + + /* CONST_TRUE_RTX means never -- that's the default. */ + if (x == const_true_rtx) + { + fputs ("nv", f); + return; + } + + if (!COMPARISON_P (x)) + { + output_operand_lossage ("invalid operand for '%%%c'", code); + return; + } + + fputs (aarch64_condition_codes[AARCH64_INVERSE_CONDITION_CODE + (aarch64_get_condition_code (x))], f); + break; + + case 'b': + case 'h': + case 's': + case 'd': + case 'q': + /* Print a scalar FP/SIMD register name. */ + if (!REG_P (x) || !FP_REGNUM_P (REGNO (x))) + { + output_operand_lossage ("incompatible floating point / vector register operand for '%%%c'", code); + return; + } + asm_fprintf (f, "%c%d", code, REGNO (x) - V0_REGNUM); + break; + + case 'S': + case 'T': + case 'U': + case 'V': + /* Print the first FP/SIMD register name in a list. */ + if (!REG_P (x) || !FP_REGNUM_P (REGNO (x))) + { + output_operand_lossage ("incompatible floating point / vector register operand for '%%%c'", code); + return; + } + asm_fprintf (f, "v%d", REGNO (x) - V0_REGNUM + (code - 'S')); + break; + + case 'X': + /* Print bottom 16 bits of integer constant in hex. */ + if (GET_CODE (x) != CONST_INT) + { + output_operand_lossage ("invalid operand for '%%%c'", code); + return; + } + asm_fprintf (f, "0x%wx", UINTVAL (x) & 0xffff); + break; + + case 'w': + case 'x': + /* Print a general register name or the zero register (32-bit or + 64-bit). 
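+   %w0 gives the w-form name (w3, wzr, wsp) and %x0 the x-form
+   (x3, xzr, sp); integer or floating-point zero operands are printed
+   as the zero register.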
*/ + if (x == const0_rtx + || (CONST_DOUBLE_P (x) && aarch64_float_const_zero_rtx_p (x))) + { + asm_fprintf (f, "%czr", code); + break; + } + + if (REG_P (x) && GP_REGNUM_P (REGNO (x))) + { + asm_fprintf (f, "%c%d", code, REGNO (x) - R0_REGNUM); + break; + } + + if (REG_P (x) && REGNO (x) == SP_REGNUM) + { + asm_fprintf (f, "%ssp", code == 'w' ? "w" : ""); + break; + } + + /* Fall through */ + + case 0: + /* Print a normal operand, if it's a general register, then we + assume DImode. */ + if (x == NULL) + { + output_operand_lossage ("missing operand"); + return; + } + + switch (GET_CODE (x)) + { + case REG: + asm_fprintf (f, "%s", reg_names [REGNO (x)]); + break; + + case MEM: + aarch64_memory_reference_mode = GET_MODE (x); + output_address (XEXP (x, 0)); + break; + + case LABEL_REF: + case SYMBOL_REF: + output_addr_const (asm_out_file, x); + break; + + case CONST_INT: + asm_fprintf (f, "%wd", INTVAL (x)); + break; + + case CONST_VECTOR: + if (GET_MODE_CLASS (GET_MODE (x)) == MODE_VECTOR_INT) + { + gcc_assert (aarch64_const_vec_all_same_int_p (x, + HOST_WIDE_INT_MIN, + HOST_WIDE_INT_MAX)); + asm_fprintf (f, "%wd", INTVAL (CONST_VECTOR_ELT (x, 0))); + } + else if (aarch64_simd_imm_zero_p (x, GET_MODE (x))) + { + fputc ('0', f); + } + else + gcc_unreachable (); + break; + + case CONST_DOUBLE: + /* CONST_DOUBLE can represent a double-width integer. + In this case, the mode of x is VOIDmode. */ + if (GET_MODE (x) == VOIDmode) + ; /* Do Nothing. */ + else if (aarch64_float_const_zero_rtx_p (x)) + { + fputc ('0', f); + break; + } + else if (aarch64_float_const_representable_p (x)) + { +#define buf_size 20 + char float_buf[buf_size] = {'\0'}; + REAL_VALUE_TYPE r; + REAL_VALUE_FROM_CONST_DOUBLE (r, x); + real_to_decimal_for_mode (float_buf, &r, + buf_size, buf_size, + 1, GET_MODE (x)); + asm_fprintf (asm_out_file, "%s", float_buf); + break; +#undef buf_size + } + output_operand_lossage ("invalid constant"); + return; + default: + output_operand_lossage ("invalid operand"); + return; + } + break; + + case 'A': + if (GET_CODE (x) == HIGH) + x = XEXP (x, 0); + + switch (aarch64_classify_symbolic_expression (x, SYMBOL_CONTEXT_ADR)) + { + case SYMBOL_SMALL_GOT: + asm_fprintf (asm_out_file, ":got:"); + break; + + case SYMBOL_SMALL_TLSGD: + asm_fprintf (asm_out_file, ":tlsgd:"); + break; + + case SYMBOL_SMALL_TLSDESC: + asm_fprintf (asm_out_file, ":tlsdesc:"); + break; + + case SYMBOL_SMALL_GOTTPREL: + asm_fprintf (asm_out_file, ":gottprel:"); + break; + + case SYMBOL_SMALL_TPREL: + asm_fprintf (asm_out_file, ":tprel:"); + break; + + case SYMBOL_TINY_GOT: + gcc_unreachable (); + break; + + default: + break; + } + output_addr_const (asm_out_file, x); + break; + + case 'L': + switch (aarch64_classify_symbolic_expression (x, SYMBOL_CONTEXT_ADR)) + { + case SYMBOL_SMALL_GOT: + asm_fprintf (asm_out_file, ":lo12:"); + break; + + case SYMBOL_SMALL_TLSGD: + asm_fprintf (asm_out_file, ":tlsgd_lo12:"); + break; + + case SYMBOL_SMALL_TLSDESC: + asm_fprintf (asm_out_file, ":tlsdesc_lo12:"); + break; + + case SYMBOL_SMALL_GOTTPREL: + asm_fprintf (asm_out_file, ":gottprel_lo12:"); + break; + + case SYMBOL_SMALL_TPREL: + asm_fprintf (asm_out_file, ":tprel_lo12_nc:"); + break; + + case SYMBOL_TINY_GOT: + asm_fprintf (asm_out_file, ":got:"); + break; + + default: + break; + } + output_addr_const (asm_out_file, x); + break; + + case 'G': + + switch (aarch64_classify_symbolic_expression (x, SYMBOL_CONTEXT_ADR)) + { + case SYMBOL_SMALL_TPREL: + asm_fprintf (asm_out_file, ":tprel_hi12:"); + break; + default: + break; + } + 
output_addr_const (asm_out_file, x); + break; + + default: + output_operand_lossage ("invalid operand prefix '%%%c'", code); + return; + } +} + +void +aarch64_print_operand_address (FILE *f, rtx x) +{ + struct aarch64_address_info addr; + + if (aarch64_classify_address (&addr, x, aarch64_memory_reference_mode, + MEM, true)) + switch (addr.type) + { + case ADDRESS_REG_IMM: + if (addr.offset == const0_rtx) + asm_fprintf (f, "[%s]", reg_names [REGNO (addr.base)]); + else + asm_fprintf (f, "[%s,%wd]", reg_names [REGNO (addr.base)], + INTVAL (addr.offset)); + return; + + case ADDRESS_REG_REG: + if (addr.shift == 0) + asm_fprintf (f, "[%s,%s]", reg_names [REGNO (addr.base)], + reg_names [REGNO (addr.offset)]); + else + asm_fprintf (f, "[%s,%s,lsl %u]", reg_names [REGNO (addr.base)], + reg_names [REGNO (addr.offset)], addr.shift); + return; + + case ADDRESS_REG_UXTW: + if (addr.shift == 0) + asm_fprintf (f, "[%s,w%d,uxtw]", reg_names [REGNO (addr.base)], + REGNO (addr.offset) - R0_REGNUM); + else + asm_fprintf (f, "[%s,w%d,uxtw %u]", reg_names [REGNO (addr.base)], + REGNO (addr.offset) - R0_REGNUM, addr.shift); + return; + + case ADDRESS_REG_SXTW: + if (addr.shift == 0) + asm_fprintf (f, "[%s,w%d,sxtw]", reg_names [REGNO (addr.base)], + REGNO (addr.offset) - R0_REGNUM); + else + asm_fprintf (f, "[%s,w%d,sxtw %u]", reg_names [REGNO (addr.base)], + REGNO (addr.offset) - R0_REGNUM, addr.shift); + return; + + case ADDRESS_REG_WB: + switch (GET_CODE (x)) + { + case PRE_INC: + asm_fprintf (f, "[%s,%d]!", reg_names [REGNO (addr.base)], + GET_MODE_SIZE (aarch64_memory_reference_mode)); + return; + case POST_INC: + asm_fprintf (f, "[%s],%d", reg_names [REGNO (addr.base)], + GET_MODE_SIZE (aarch64_memory_reference_mode)); + return; + case PRE_DEC: + asm_fprintf (f, "[%s,-%d]!", reg_names [REGNO (addr.base)], + GET_MODE_SIZE (aarch64_memory_reference_mode)); + return; + case POST_DEC: + asm_fprintf (f, "[%s],-%d", reg_names [REGNO (addr.base)], + GET_MODE_SIZE (aarch64_memory_reference_mode)); + return; + case PRE_MODIFY: + asm_fprintf (f, "[%s,%wd]!", reg_names [REGNO (addr.base)], + INTVAL (addr.offset)); + return; + case POST_MODIFY: + asm_fprintf (f, "[%s],%wd", reg_names [REGNO (addr.base)], + INTVAL (addr.offset)); + return; + default: + break; + } + break; + + case ADDRESS_LO_SUM: + asm_fprintf (f, "[%s,#:lo12:", reg_names [REGNO (addr.base)]); + output_addr_const (f, addr.offset); + asm_fprintf (f, "]"); + return; + + case ADDRESS_SYMBOLIC: + break; + } + + output_addr_const (f, x); +} + +bool +aarch64_label_mentioned_p (rtx x) +{ + const char *fmt; + int i; + + if (GET_CODE (x) == LABEL_REF) + return true; + + /* UNSPEC_TLS entries for a symbol include a LABEL_REF for the + referencing instruction, but they are constant offsets, not + symbols. */ + if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLS) + return false; + + fmt = GET_RTX_FORMAT (GET_CODE (x)); + for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--) + { + if (fmt[i] == 'E') + { + int j; + + for (j = XVECLEN (x, i) - 1; j >= 0; j--) + if (aarch64_label_mentioned_p (XVECEXP (x, i, j))) + return 1; + } + else if (fmt[i] == 'e' && aarch64_label_mentioned_p (XEXP (x, i))) + return 1; + } + + return 0; +} + +/* Implement REGNO_REG_CLASS. 
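+   General registers map to CORE_REGS, the stack pointer to STACK_REG,
+   the soft frame and argument pointers to POINTER_REGS, and the vector
+   registers to FP_LO_REGS or FP_REGS; anything else, including the
+   condition code register, yields NO_REGS.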
*/ + +enum reg_class +aarch64_regno_regclass (unsigned regno) +{ + if (GP_REGNUM_P (regno)) + return CORE_REGS; + + if (regno == SP_REGNUM) + return STACK_REG; + + if (regno == FRAME_POINTER_REGNUM + || regno == ARG_POINTER_REGNUM) + return POINTER_REGS; + + if (FP_REGNUM_P (regno)) + return FP_LO_REGNUM_P (regno) ? FP_LO_REGS : FP_REGS; + + return NO_REGS; +} + +/* Try a machine-dependent way of reloading an illegitimate address + operand. If we find one, push the reload and return the new rtx. */ + +rtx +aarch64_legitimize_reload_address (rtx *x_p, + enum machine_mode mode, + int opnum, int type, + int ind_levels ATTRIBUTE_UNUSED) +{ + rtx x = *x_p; + + /* Do not allow mem (plus (reg, const)) if vector mode. */ + if (aarch64_vector_mode_p (mode) + && GET_CODE (x) == PLUS + && REG_P (XEXP (x, 0)) + && CONST_INT_P (XEXP (x, 1))) + { + rtx orig_rtx = x; + x = copy_rtx (x); + push_reload (orig_rtx, NULL_RTX, x_p, NULL, + BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0, + opnum, (enum reload_type) type); + return x; + } + + /* We must recognize output that we have already generated ourselves. */ + if (GET_CODE (x) == PLUS + && GET_CODE (XEXP (x, 0)) == PLUS + && REG_P (XEXP (XEXP (x, 0), 0)) + && CONST_INT_P (XEXP (XEXP (x, 0), 1)) + && CONST_INT_P (XEXP (x, 1))) + { + push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL, + BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0, + opnum, (enum reload_type) type); + return x; + } + + /* We wish to handle large displacements off a base register by splitting + the addend across an add and the mem insn. This can cut the number of + extra insns needed from 3 to 1. It is only useful for load/store of a + single register with 12 bit offset field. */ + if (GET_CODE (x) == PLUS + && REG_P (XEXP (x, 0)) + && CONST_INT_P (XEXP (x, 1)) + && HARD_REGISTER_P (XEXP (x, 0)) + && mode != TImode + && mode != TFmode + && aarch64_regno_ok_for_base_p (REGNO (XEXP (x, 0)), true)) + { + HOST_WIDE_INT val = INTVAL (XEXP (x, 1)); + HOST_WIDE_INT low = val & 0xfff; + HOST_WIDE_INT high = val - low; + HOST_WIDE_INT offs; + rtx cst; + enum machine_mode xmode = GET_MODE (x); + + /* In ILP32, xmode can be either DImode or SImode. */ + gcc_assert (xmode == DImode || xmode == SImode); + + /* Reload non-zero BLKmode offsets. This is because we cannot ascertain + BLKmode alignment. */ + if (GET_MODE_SIZE (mode) == 0) + return NULL_RTX; + + offs = low % GET_MODE_SIZE (mode); + + /* Align misaligned offset by adjusting high part to compensate. */ + if (offs != 0) + { + if (aarch64_uimm12_shift (high + offs)) + { + /* Align down. */ + low = low - offs; + high = high + offs; + } + else + { + /* Align up. */ + offs = GET_MODE_SIZE (mode) - offs; + low = low + offs; + high = high + (low & 0x1000) - offs; + low &= 0xfff; + } + } + + /* Check for overflow. */ + if (high + low != val) + return NULL_RTX; + + cst = GEN_INT (high); + if (!aarch64_uimm12_shift (high)) + cst = force_const_mem (xmode, cst); + + /* Reload high part into base reg, leaving the low part + in the mem instruction. + Note that replacing this gen_rtx_PLUS with plus_constant is + wrong in this case because we rely on the + (plus (plus reg c1) c2) structure being preserved so that + XEXP (*p, 0) in push_reload below uses the correct term. 
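+   As an example, a word access at offset 0x2008 is split into
+   high = 0x2000, which is reloaded into the base register with a
+   single ADD (it satisfies aarch64_uimm12_shift), and low = 0x8,
+   which stays in the scaled 12-bit offset field of the memory access.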
*/ + x = gen_rtx_PLUS (xmode, + gen_rtx_PLUS (xmode, XEXP (x, 0), cst), + GEN_INT (low)); + + push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL, + BASE_REG_CLASS, xmode, VOIDmode, 0, 0, + opnum, (enum reload_type) type); + return x; + } + + return NULL_RTX; +} + + +static reg_class_t +aarch64_secondary_reload (bool in_p ATTRIBUTE_UNUSED, rtx x, + reg_class_t rclass, + enum machine_mode mode, + secondary_reload_info *sri) +{ + /* Without the TARGET_SIMD instructions we cannot move a Q register + to a Q register directly. We need a scratch. */ + if (REG_P (x) && (mode == TFmode || mode == TImode) && mode == GET_MODE (x) + && FP_REGNUM_P (REGNO (x)) && !TARGET_SIMD + && reg_class_subset_p (rclass, FP_REGS)) + { + if (mode == TFmode) + sri->icode = CODE_FOR_aarch64_reload_movtf; + else if (mode == TImode) + sri->icode = CODE_FOR_aarch64_reload_movti; + return NO_REGS; + } + + /* A TFmode or TImode memory access should be handled via an FP_REGS + because AArch64 has richer addressing modes for LDR/STR instructions + than LDP/STP instructions. */ + if (!TARGET_GENERAL_REGS_ONLY && rclass == CORE_REGS + && GET_MODE_SIZE (mode) == 16 && MEM_P (x)) + return FP_REGS; + + if (rclass == FP_REGS && (mode == TImode || mode == TFmode) && CONSTANT_P(x)) + return CORE_REGS; + + return NO_REGS; +} + +static bool +aarch64_can_eliminate (const int from, const int to) +{ + /* If we need a frame pointer, we must eliminate FRAME_POINTER_REGNUM into + HARD_FRAME_POINTER_REGNUM and not into STACK_POINTER_REGNUM. */ + + if (frame_pointer_needed) + { + if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM) + return true; + if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM) + return false; + if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM + && !cfun->calls_alloca) + return true; + if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM) + return true; + return false; + } + else + { + /* If we decided that we didn't need a leaf frame pointer but then used + LR in the function, then we'll want a frame pointer after all, so + prevent this elimination to ensure a frame pointer is used. + + NOTE: the original value of flag_omit_frame_pointer gets trashed + IFF flag_omit_leaf_frame_pointer is true, so we check the value + of faked_omit_frame_pointer here (which is true when we always + wish to keep non-leaf frame pointers but only wish to keep leaf frame + pointers when LR is clobbered). 
*/ + if (to == STACK_POINTER_REGNUM + && df_regs_ever_live_p (LR_REGNUM) + && faked_omit_frame_pointer) + return false; + } + + return true; +} + +HOST_WIDE_INT +aarch64_initial_elimination_offset (unsigned from, unsigned to) +{ + HOST_WIDE_INT frame_size; + HOST_WIDE_INT offset; + + aarch64_layout_frame (); + frame_size = (get_frame_size () + cfun->machine->frame.saved_regs_size + + crtl->outgoing_args_size + + cfun->machine->saved_varargs_size); + + frame_size = AARCH64_ROUND_UP (frame_size, STACK_BOUNDARY / BITS_PER_UNIT); + offset = frame_size; + + if (to == HARD_FRAME_POINTER_REGNUM) + { + if (from == ARG_POINTER_REGNUM) + return offset - crtl->outgoing_args_size; + + if (from == FRAME_POINTER_REGNUM) + return cfun->machine->frame.saved_regs_size + get_frame_size (); + } + + if (to == STACK_POINTER_REGNUM) + { + if (from == FRAME_POINTER_REGNUM) + { + HOST_WIDE_INT elim = crtl->outgoing_args_size + + cfun->machine->frame.saved_regs_size + + get_frame_size () + - cfun->machine->frame.fp_lr_offset; + elim = AARCH64_ROUND_UP (elim, STACK_BOUNDARY / BITS_PER_UNIT); + return elim; + } + } + + return offset; +} + + +/* Implement RETURN_ADDR_RTX. We do not support moving back to a + previous frame. */ + +rtx +aarch64_return_addr (int count, rtx frame ATTRIBUTE_UNUSED) +{ + if (count != 0) + return const0_rtx; + return get_hard_reg_initial_val (Pmode, LR_REGNUM); +} + + +static void +aarch64_asm_trampoline_template (FILE *f) +{ + if (TARGET_ILP32) + { + asm_fprintf (f, "\tldr\tw%d, .+16\n", IP1_REGNUM - R0_REGNUM); + asm_fprintf (f, "\tldr\tw%d, .+16\n", STATIC_CHAIN_REGNUM - R0_REGNUM); + } + else + { + asm_fprintf (f, "\tldr\t%s, .+16\n", reg_names [IP1_REGNUM]); + asm_fprintf (f, "\tldr\t%s, .+20\n", reg_names [STATIC_CHAIN_REGNUM]); + } + asm_fprintf (f, "\tbr\t%s\n", reg_names [IP1_REGNUM]); + assemble_aligned_integer (4, const0_rtx); + assemble_aligned_integer (POINTER_BYTES, const0_rtx); + assemble_aligned_integer (POINTER_BYTES, const0_rtx); +} + +static void +aarch64_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value) +{ + rtx fnaddr, mem, a_tramp; + const int tramp_code_sz = 16; + + /* Don't need to copy the trailing D-words, we fill those in below. */ + emit_block_move (m_tramp, assemble_trampoline_template (), + GEN_INT (tramp_code_sz), BLOCK_OP_NORMAL); + mem = adjust_address (m_tramp, ptr_mode, tramp_code_sz); + fnaddr = XEXP (DECL_RTL (fndecl), 0); + if (GET_MODE (fnaddr) != ptr_mode) + fnaddr = convert_memory_address (ptr_mode, fnaddr); + emit_move_insn (mem, fnaddr); + + mem = adjust_address (m_tramp, ptr_mode, tramp_code_sz + POINTER_BYTES); + emit_move_insn (mem, chain_value); + + /* XXX We should really define a "clear_cache" pattern and use + gen_clear_cache(). */ + a_tramp = XEXP (m_tramp, 0); + emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__clear_cache"), + LCT_NORMAL, VOIDmode, 2, a_tramp, ptr_mode, + plus_constant (ptr_mode, a_tramp, TRAMPOLINE_SIZE), + ptr_mode); +} + +static unsigned char +aarch64_class_max_nregs (reg_class_t regclass, enum machine_mode mode) +{ + switch (regclass) + { + case CORE_REGS: + case POINTER_REGS: + case GENERAL_REGS: + case ALL_REGS: + case FP_REGS: + case FP_LO_REGS: + return + aarch64_vector_mode_p (mode) ? 
(GET_MODE_SIZE (mode) + 15) / 16 : + (GET_MODE_SIZE (mode) + 7) / 8; + case STACK_REG: + return 1; + + case NO_REGS: + return 0; + + default: + break; + } + gcc_unreachable (); +} + +static reg_class_t +aarch64_preferred_reload_class (rtx x, reg_class_t regclass) +{ + if (regclass == POINTER_REGS) + return GENERAL_REGS; + + if (regclass == STACK_REG) + { + if (REG_P(x) + && reg_class_subset_p (REGNO_REG_CLASS (REGNO (x)), POINTER_REGS)) + return regclass; + + return NO_REGS; + } + + /* If it's an integer immediate that MOVI can't handle, then + FP_REGS is not an option, so we return NO_REGS instead. */ + if (CONST_INT_P (x) && reg_class_subset_p (regclass, FP_REGS) + && !aarch64_simd_imm_scalar_p (x, GET_MODE (x))) + return NO_REGS; + + /* Register eliminiation can result in a request for + SP+constant->FP_REGS. We cannot support such operations which + use SP as source and an FP_REG as destination, so reject out + right now. */ + if (! reg_class_subset_p (regclass, GENERAL_REGS) && GET_CODE (x) == PLUS) + { + rtx lhs = XEXP (x, 0); + + /* Look through a possible SUBREG introduced by ILP32. */ + if (GET_CODE (lhs) == SUBREG) + lhs = SUBREG_REG (lhs); + + gcc_assert (REG_P (lhs)); + gcc_assert (reg_class_subset_p (REGNO_REG_CLASS (REGNO (lhs)), + POINTER_REGS)); + return NO_REGS; + } + + return regclass; +} + +void +aarch64_asm_output_labelref (FILE* f, const char *name) +{ + asm_fprintf (f, "%U%s", name); +} + +static void +aarch64_elf_asm_constructor (rtx symbol, int priority) +{ + if (priority == DEFAULT_INIT_PRIORITY) + default_ctor_section_asm_out_constructor (symbol, priority); + else + { + section *s; + char buf[18]; + snprintf (buf, sizeof (buf), ".init_array.%.5u", priority); + s = get_section (buf, SECTION_WRITE, NULL); + switch_to_section (s); + assemble_align (POINTER_SIZE); + assemble_aligned_integer (POINTER_BYTES, symbol); + } +} + +static void +aarch64_elf_asm_destructor (rtx symbol, int priority) +{ + if (priority == DEFAULT_INIT_PRIORITY) + default_dtor_section_asm_out_destructor (symbol, priority); + else + { + section *s; + char buf[18]; + snprintf (buf, sizeof (buf), ".fini_array.%.5u", priority); + s = get_section (buf, SECTION_WRITE, NULL); + switch_to_section (s); + assemble_align (POINTER_SIZE); + assemble_aligned_integer (POINTER_BYTES, symbol); + } +} + +const char* +aarch64_output_casesi (rtx *operands) +{ + char buf[100]; + char label[100]; + rtx diff_vec = PATTERN (NEXT_INSN (operands[2])); + int index; + static const char *const patterns[4][2] = + { + { + "ldrb\t%w3, [%0,%w1,uxtw]", + "add\t%3, %4, %w3, sxtb #2" + }, + { + "ldrh\t%w3, [%0,%w1,uxtw #1]", + "add\t%3, %4, %w3, sxth #2" + }, + { + "ldr\t%w3, [%0,%w1,uxtw #2]", + "add\t%3, %4, %w3, sxtw #2" + }, + /* We assume that DImode is only generated when not optimizing and + that we don't really need 64-bit address offsets. That would + imply an object file with 8GB of code in a single function! */ + { + "ldr\t%w3, [%0,%w1,uxtw #2]", + "add\t%3, %4, %w3, sxtw #2" + } + }; + + gcc_assert (GET_CODE (diff_vec) == ADDR_DIFF_VEC); + + index = exact_log2 (GET_MODE_SIZE (GET_MODE (diff_vec))); + + gcc_assert (index >= 0 && index <= 3); + + /* Need to implement table size reduction, by chaning the code below. 
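+   The sequence emitted below loads the table entry (byte, half or
+   word, scaled by the element size), computes the target with
+   add %3, %4, %w3, sxt[bhw] #2 relative to the table base obtained
+   with adr, and finishes with br %3.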
*/ + output_asm_insn (patterns[index][0], operands); + ASM_GENERATE_INTERNAL_LABEL (label, "Lrtx", CODE_LABEL_NUMBER (operands[2])); + snprintf (buf, sizeof (buf), + "adr\t%%4, %s", targetm.strip_name_encoding (label)); + output_asm_insn (buf, operands); + output_asm_insn (patterns[index][1], operands); + output_asm_insn ("br\t%3", operands); + assemble_label (asm_out_file, label); + return ""; +} + + +/* Return size in bits of an arithmetic operand which is shifted/scaled and + masked such that it is suitable for a UXTB, UXTH, or UXTW extend + operator. */ + +int +aarch64_uxt_size (int shift, HOST_WIDE_INT mask) +{ + if (shift >= 0 && shift <= 3) + { + int size; + for (size = 8; size <= 32; size *= 2) + { + HOST_WIDE_INT bits = ((HOST_WIDE_INT)1U << size) - 1; + if (mask == bits << shift) + return size; + } + } + return 0; +} + +static bool +aarch64_use_blocks_for_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED, + const_rtx x ATTRIBUTE_UNUSED) +{ + /* We can't use blocks for constants when we're using a per-function + constant pool. */ + return false; +} + +static section * +aarch64_select_rtx_section (enum machine_mode mode ATTRIBUTE_UNUSED, + rtx x ATTRIBUTE_UNUSED, + unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED) +{ + /* Force all constant pool entries into the current function section. */ + return function_section (current_function_decl); +} + + +/* Costs. */ + +/* Helper function for rtx cost calculation. Strip a shift expression + from X. Returns the inner operand if successful, or the original + expression on failure. */ +static rtx +aarch64_strip_shift (rtx x) +{ + rtx op = x; + + if ((GET_CODE (op) == ASHIFT + || GET_CODE (op) == ASHIFTRT + || GET_CODE (op) == LSHIFTRT) + && CONST_INT_P (XEXP (op, 1))) + return XEXP (op, 0); + + if (GET_CODE (op) == MULT + && CONST_INT_P (XEXP (op, 1)) + && ((unsigned) exact_log2 (INTVAL (XEXP (op, 1)))) < 64) + return XEXP (op, 0); + + return x; +} + +/* Helper function for rtx cost calculation. Strip a shift or extend + expression from X. Returns the inner operand if successful, or the + original expression on failure. We deal with a number of possible + canonicalization variations here. */ +static rtx +aarch64_strip_shift_or_extend (rtx x) +{ + rtx op = x; + + /* Zero and sign extraction of a widened value. */ + if ((GET_CODE (op) == ZERO_EXTRACT || GET_CODE (op) == SIGN_EXTRACT) + && XEXP (op, 2) == const0_rtx + && aarch64_is_extend_from_extract (GET_MODE (op), XEXP (XEXP (op, 0), 1), + XEXP (op, 1))) + return XEXP (XEXP (op, 0), 0); + + /* It can also be represented (for zero-extend) as an AND with an + immediate. */ + if (GET_CODE (op) == AND + && GET_CODE (XEXP (op, 0)) == MULT + && CONST_INT_P (XEXP (XEXP (op, 0), 1)) + && CONST_INT_P (XEXP (op, 1)) + && aarch64_uxt_size (exact_log2 (INTVAL (XEXP (XEXP (op, 0), 1))), + INTVAL (XEXP (op, 1))) != 0) + return XEXP (XEXP (op, 0), 0); + + /* Now handle extended register, as this may also have an optional + left shift by 1..4. */ + if (GET_CODE (op) == ASHIFT + && CONST_INT_P (XEXP (op, 1)) + && ((unsigned HOST_WIDE_INT) INTVAL (XEXP (op, 1))) <= 4) + op = XEXP (op, 0); + + if (GET_CODE (op) == ZERO_EXTEND + || GET_CODE (op) == SIGN_EXTEND) + op = XEXP (op, 0); + + if (op != x) + return op; + + return aarch64_strip_shift (x); +} + +/* Calculate the cost of calculating X, storing it in *COST. Result + is true if the total cost of the operation has now been calculated. 
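+   Returning false asks the generic rtx_cost machinery to recurse into
+   the operands and add their costs; returning true means any operand
+   costs that still matter have already been accumulated here.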
*/ +static bool +aarch64_rtx_costs (rtx x, int code, int outer ATTRIBUTE_UNUSED, + int param ATTRIBUTE_UNUSED, int *cost, bool speed) +{ + rtx op0, op1; + const struct cpu_cost_table *extra_cost + = aarch64_tune_params->insn_extra_cost; + + switch (code) + { + case SET: + op0 = SET_DEST (x); + op1 = SET_SRC (x); + + switch (GET_CODE (op0)) + { + case MEM: + if (speed) + *cost += extra_cost->ldst.store; + + if (op1 != const0_rtx) + *cost += rtx_cost (op1, SET, 1, speed); + return true; + + case SUBREG: + if (! REG_P (SUBREG_REG (op0))) + *cost += rtx_cost (SUBREG_REG (op0), SET, 0, speed); + /* Fall through. */ + case REG: + /* Cost is just the cost of the RHS of the set. */ + *cost += rtx_cost (op1, SET, 1, true); + return true; + + case ZERO_EXTRACT: /* Bit-field insertion. */ + case SIGN_EXTRACT: + /* Strip any redundant widening of the RHS to meet the width of + the target. */ + if (GET_CODE (op1) == SUBREG) + op1 = SUBREG_REG (op1); + if ((GET_CODE (op1) == ZERO_EXTEND + || GET_CODE (op1) == SIGN_EXTEND) + && GET_CODE (XEXP (op0, 1)) == CONST_INT + && (GET_MODE_BITSIZE (GET_MODE (XEXP (op1, 0))) + >= INTVAL (XEXP (op0, 1)))) + op1 = XEXP (op1, 0); + *cost += rtx_cost (op1, SET, 1, speed); + return true; + + default: + break; + } + return false; + + case MEM: + if (speed) + *cost += extra_cost->ldst.load; + + return true; + + case NEG: + op0 = CONST0_RTX (GET_MODE (x)); + op1 = XEXP (x, 0); + goto cost_minus; + + case COMPARE: + op0 = XEXP (x, 0); + op1 = XEXP (x, 1); + + if (op1 == const0_rtx + && GET_CODE (op0) == AND) + { + x = op0; + goto cost_logic; + } + + /* Comparisons can work if the order is swapped. + Canonicalization puts the more complex operation first, but + we want it in op1. */ + if (! (REG_P (op0) + || (GET_CODE (op0) == SUBREG && REG_P (SUBREG_REG (op0))))) + { + op0 = XEXP (x, 1); + op1 = XEXP (x, 0); + } + goto cost_minus; + + case MINUS: + op0 = XEXP (x, 0); + op1 = XEXP (x, 1); + + cost_minus: + if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT + || (GET_MODE_CLASS (GET_MODE (x)) == MODE_CC + && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)) + { + if (op0 != const0_rtx) + *cost += rtx_cost (op0, MINUS, 0, speed); + + if (CONST_INT_P (op1)) + { + if (!aarch64_uimm12_shift (INTVAL (op1))) + *cost += rtx_cost (op1, MINUS, 1, speed); + } + else + { + op1 = aarch64_strip_shift_or_extend (op1); + *cost += rtx_cost (op1, MINUS, 1, speed); + } + return true; + } + + return false; + + case PLUS: + op0 = XEXP (x, 0); + op1 = XEXP (x, 1); + + if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT) + { + if (CONST_INT_P (op1) && aarch64_uimm12_shift (INTVAL (op1))) + { + *cost += rtx_cost (op0, PLUS, 0, speed); + } + else + { + rtx new_op0 = aarch64_strip_shift_or_extend (op0); + + if (new_op0 == op0 + && GET_CODE (op0) == MULT) + { + if ((GET_CODE (XEXP (op0, 0)) == ZERO_EXTEND + && GET_CODE (XEXP (op0, 1)) == ZERO_EXTEND) + || (GET_CODE (XEXP (op0, 0)) == SIGN_EXTEND + && GET_CODE (XEXP (op0, 1)) == SIGN_EXTEND)) + { + *cost += (rtx_cost (XEXP (XEXP (op0, 0), 0), MULT, 0, + speed) + + rtx_cost (XEXP (XEXP (op0, 1), 0), MULT, 1, + speed) + + rtx_cost (op1, PLUS, 1, speed)); + if (speed) + *cost += + extra_cost->mult[GET_MODE (x) == DImode].extend_add; + return true; + } + + *cost += (rtx_cost (XEXP (op0, 0), MULT, 0, speed) + + rtx_cost (XEXP (op0, 1), MULT, 1, speed) + + rtx_cost (op1, PLUS, 1, speed)); + + if (speed) + *cost += extra_cost->mult[GET_MODE (x) == DImode].add; + + return true; + } + + *cost += (rtx_cost (new_op0, PLUS, 0, speed) + + rtx_cost (op1, PLUS, 1, speed)); + } + 
return true; + } + + return false; + + case IOR: + case XOR: + case AND: + cost_logic: + op0 = XEXP (x, 0); + op1 = XEXP (x, 1); + + if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT) + { + if (CONST_INT_P (op1) + && aarch64_bitmask_imm (INTVAL (op1), GET_MODE (x))) + { + *cost += rtx_cost (op0, AND, 0, speed); + } + else + { + if (GET_CODE (op0) == NOT) + op0 = XEXP (op0, 0); + op0 = aarch64_strip_shift (op0); + *cost += (rtx_cost (op0, AND, 0, speed) + + rtx_cost (op1, AND, 1, speed)); + } + return true; + } + return false; + + case ZERO_EXTEND: + if ((GET_MODE (x) == DImode + && GET_MODE (XEXP (x, 0)) == SImode) + || GET_CODE (XEXP (x, 0)) == MEM) + { + *cost += rtx_cost (XEXP (x, 0), ZERO_EXTEND, 0, speed); + return true; + } + return false; + + case SIGN_EXTEND: + if (GET_CODE (XEXP (x, 0)) == MEM) + { + *cost += rtx_cost (XEXP (x, 0), SIGN_EXTEND, 0, speed); + return true; + } + return false; + + case ROTATE: + if (!CONST_INT_P (XEXP (x, 1))) + *cost += COSTS_N_INSNS (2); + /* Fall through. */ + case ROTATERT: + case LSHIFTRT: + case ASHIFT: + case ASHIFTRT: + + /* Shifting by a register often takes an extra cycle. */ + if (speed && !CONST_INT_P (XEXP (x, 1))) + *cost += extra_cost->alu.arith_shift_reg; + + *cost += rtx_cost (XEXP (x, 0), ASHIFT, 0, speed); + return true; + + case HIGH: + if (!CONSTANT_P (XEXP (x, 0))) + *cost += rtx_cost (XEXP (x, 0), HIGH, 0, speed); + return true; + + case LO_SUM: + if (!CONSTANT_P (XEXP (x, 1))) + *cost += rtx_cost (XEXP (x, 1), LO_SUM, 1, speed); + *cost += rtx_cost (XEXP (x, 0), LO_SUM, 0, speed); + return true; + + case ZERO_EXTRACT: + case SIGN_EXTRACT: + *cost += rtx_cost (XEXP (x, 0), ZERO_EXTRACT, 0, speed); + return true; + + case MULT: + op0 = XEXP (x, 0); + op1 = XEXP (x, 1); + + *cost = COSTS_N_INSNS (1); + if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT) + { + if (CONST_INT_P (op1) + && exact_log2 (INTVAL (op1)) > 0) + { + *cost += rtx_cost (op0, ASHIFT, 0, speed); + return true; + } + + if ((GET_CODE (op0) == ZERO_EXTEND + && GET_CODE (op1) == ZERO_EXTEND) + || (GET_CODE (op0) == SIGN_EXTEND + && GET_CODE (op1) == SIGN_EXTEND)) + { + *cost += (rtx_cost (XEXP (op0, 0), MULT, 0, speed) + + rtx_cost (XEXP (op1, 0), MULT, 1, speed)); + if (speed) + *cost += extra_cost->mult[GET_MODE (x) == DImode].extend; + return true; + } + + if (speed) + *cost += extra_cost->mult[GET_MODE (x) == DImode].simple; + } + else if (speed) + { + if (GET_MODE (x) == DFmode) + *cost += extra_cost->fp[1].mult; + else if (GET_MODE (x) == SFmode) + *cost += extra_cost->fp[0].mult; + } + + return false; /* All arguments need to be in registers. */ + + case MOD: + case UMOD: + *cost = COSTS_N_INSNS (2); + if (speed) + { + if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT) + *cost += (extra_cost->mult[GET_MODE (x) == DImode].add + + extra_cost->mult[GET_MODE (x) == DImode].idiv); + else if (GET_MODE (x) == DFmode) + *cost += (extra_cost->fp[1].mult + + extra_cost->fp[1].div); + else if (GET_MODE (x) == SFmode) + *cost += (extra_cost->fp[0].mult + + extra_cost->fp[0].div); + } + return false; /* All arguments need to be in registers. */ + + case DIV: + case UDIV: + *cost = COSTS_N_INSNS (1); + if (speed) + { + if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT) + *cost += extra_cost->mult[GET_MODE (x) == DImode].idiv; + else if (GET_MODE (x) == DFmode) + *cost += extra_cost->fp[1].div; + else if (GET_MODE (x) == SFmode) + *cost += extra_cost->fp[0].div; + } + return false; /* All arguments need to be in registers. 
*/ + + default: + break; + } + return false; +} + +static int +aarch64_address_cost (rtx x ATTRIBUTE_UNUSED, + enum machine_mode mode ATTRIBUTE_UNUSED, + addr_space_t as ATTRIBUTE_UNUSED, bool speed ATTRIBUTE_UNUSED) +{ + enum rtx_code c = GET_CODE (x); + const struct cpu_addrcost_table *addr_cost = aarch64_tune_params->addr_cost; + + if (c == PRE_INC || c == PRE_DEC || c == PRE_MODIFY) + return addr_cost->pre_modify; + + if (c == POST_INC || c == POST_DEC || c == POST_MODIFY) + return addr_cost->post_modify; + + if (c == PLUS) + { + if (GET_CODE (XEXP (x, 1)) == CONST_INT) + return addr_cost->imm_offset; + else if (GET_CODE (XEXP (x, 0)) == MULT + || GET_CODE (XEXP (x, 0)) == ZERO_EXTEND + || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND) + return addr_cost->register_extend; + + return addr_cost->register_offset; + } + else if (c == MEM || c == LABEL_REF || c == SYMBOL_REF) + return addr_cost->imm_offset; + + return 0; +} + +static int +aarch64_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED, + reg_class_t from, reg_class_t to) +{ + const struct cpu_regmove_cost *regmove_cost + = aarch64_tune_params->regmove_cost; + + /* Moving between GPR and stack cost is the same as GP2GP. */ + if ((from == GENERAL_REGS && to == STACK_REG) + || (to == GENERAL_REGS && from == STACK_REG)) + return regmove_cost->GP2GP; + + /* To/From the stack register, we move via the gprs. */ + if (to == STACK_REG || from == STACK_REG) + return aarch64_register_move_cost (mode, from, GENERAL_REGS) + + aarch64_register_move_cost (mode, GENERAL_REGS, to); + + if (from == GENERAL_REGS && to == GENERAL_REGS) + return regmove_cost->GP2GP; + else if (from == GENERAL_REGS) + return regmove_cost->GP2FP; + else if (to == GENERAL_REGS) + return regmove_cost->FP2GP; + + /* When AdvSIMD instructions are disabled it is not possible to move + a 128-bit value directly between Q registers. This is handled in + secondary reload. A general register is used as a scratch to move + the upper DI value and the lower DI value is moved directly, + hence the cost is the sum of three moves. */ + + if (! TARGET_SIMD && GET_MODE_SIZE (from) == 128 && GET_MODE_SIZE (to) == 128) + return regmove_cost->GP2FP + regmove_cost->FP2GP + regmove_cost->FP2FP; + + return regmove_cost->FP2FP; +} + +static int +aarch64_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED, + reg_class_t rclass ATTRIBUTE_UNUSED, + bool in ATTRIBUTE_UNUSED) +{ + return aarch64_tune_params->memmov_cost; +} + +/* Return the number of instructions that can be issued per cycle. */ +static int +aarch64_sched_issue_rate (void) +{ + return aarch64_tune_params->issue_rate; +} + +/* Vectorizer cost model target hooks. */ + +/* Implement targetm.vectorize.builtin_vectorization_cost. 
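+   Every entry except vec_construct comes straight from the vec_costs
+   table of the selected tuning; vec_construct is approximated as
+   TYPE_VECTOR_SUBPARTS / 2 + 1 statements.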
*/ +static int +aarch64_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost, + tree vectype, + int misalign ATTRIBUTE_UNUSED) +{ + unsigned elements; + + switch (type_of_cost) + { + case scalar_stmt: + return aarch64_tune_params->vec_costs->scalar_stmt_cost; + + case scalar_load: + return aarch64_tune_params->vec_costs->scalar_load_cost; + + case scalar_store: + return aarch64_tune_params->vec_costs->scalar_store_cost; + + case vector_stmt: + return aarch64_tune_params->vec_costs->vec_stmt_cost; + + case vector_load: + return aarch64_tune_params->vec_costs->vec_align_load_cost; + + case vector_store: + return aarch64_tune_params->vec_costs->vec_store_cost; + + case vec_to_scalar: + return aarch64_tune_params->vec_costs->vec_to_scalar_cost; + + case scalar_to_vec: + return aarch64_tune_params->vec_costs->scalar_to_vec_cost; + + case unaligned_load: + return aarch64_tune_params->vec_costs->vec_unalign_load_cost; + + case unaligned_store: + return aarch64_tune_params->vec_costs->vec_unalign_store_cost; + + case cond_branch_taken: + return aarch64_tune_params->vec_costs->cond_taken_branch_cost; + + case cond_branch_not_taken: + return aarch64_tune_params->vec_costs->cond_not_taken_branch_cost; + + case vec_perm: + case vec_promote_demote: + return aarch64_tune_params->vec_costs->vec_stmt_cost; + + case vec_construct: + elements = TYPE_VECTOR_SUBPARTS (vectype); + return elements / 2 + 1; + + default: + gcc_unreachable (); + } +} + +/* Implement targetm.vectorize.add_stmt_cost. */ +static unsigned +aarch64_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind, + struct _stmt_vec_info *stmt_info, int misalign, + enum vect_cost_model_location where) +{ + unsigned *cost = (unsigned *) data; + unsigned retval = 0; + + if (flag_vect_cost_model) + { + tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE; + int stmt_cost = + aarch64_builtin_vectorization_cost (kind, vectype, misalign); + + /* Statements in an inner loop relative to the loop being + vectorized are weighted more heavily. The value here is + a function (linear for now) of the loop nest level. */ + if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info)) + { + loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info); + struct loop *loop = LOOP_VINFO_LOOP (loop_info); + unsigned nest_level = loop_depth (loop); + + count *= nest_level; + } + + retval = (unsigned) (count * stmt_cost); + cost[where] += retval; + } + + return retval; +} + +static void initialize_aarch64_code_model (void); + +/* Parse the architecture extension string. */ + +static void +aarch64_parse_extension (char *str) +{ + /* The extension string is parsed left to right. */ + const struct aarch64_option_extension *opt = NULL; + + /* Flag to say whether we are adding or removing an extension. */ + int adding_ext = -1; + + while (str != NULL && *str != 0) + { + char *ext; + size_t len; + + str++; + ext = strchr (str, '+'); + + if (ext != NULL) + len = ext - str; + else + len = strlen (str); + + if (len >= 2 && strncmp (str, "no", 2) == 0) + { + adding_ext = 0; + len -= 2; + str += 2; + } + else if (len > 0) + adding_ext = 1; + + if (len == 0) + { + error ("missing feature modifier after %qs", "+no"); + return; + } + + /* Scan over the extensions table trying to find an exact match. */ + for (opt = all_extensions; opt->name != NULL; opt++) + { + if (strlen (opt->name) == len && strncmp (opt->name, str, len) == 0) + { + /* Add or remove the extension. 
*/ + if (adding_ext) + aarch64_isa_flags |= opt->flags_on; + else + aarch64_isa_flags &= ~(opt->flags_off); + break; + } + } + + if (opt->name == NULL) + { + /* Extension not found in list. */ + error ("unknown feature modifier %qs", str); + return; + } + + str = ext; + }; + + return; +} + +/* Parse the ARCH string. */ + +static void +aarch64_parse_arch (void) +{ + char *ext; + const struct processor *arch; + char *str = (char *) alloca (strlen (aarch64_arch_string) + 1); + size_t len; + + strcpy (str, aarch64_arch_string); + + ext = strchr (str, '+'); + + if (ext != NULL) + len = ext - str; + else + len = strlen (str); + + if (len == 0) + { + error ("missing arch name in -march=%qs", str); + return; + } + + /* Loop through the list of supported ARCHs to find a match. */ + for (arch = all_architectures; arch->name != NULL; arch++) + { + if (strlen (arch->name) == len && strncmp (arch->name, str, len) == 0) + { + selected_arch = arch; + aarch64_isa_flags = selected_arch->flags; + + if (!selected_cpu) + selected_cpu = &all_cores[selected_arch->core]; + + if (ext != NULL) + { + /* ARCH string contains at least one extension. */ + aarch64_parse_extension (ext); + } + + if (strcmp (selected_arch->arch, selected_cpu->arch)) + { + warning (0, "switch -mcpu=%s conflicts with -march=%s switch", + selected_cpu->name, selected_arch->name); + } + + return; + } + } + + /* ARCH name not found in list. */ + error ("unknown value %qs for -march", str); + return; +} + +/* Parse the CPU string. */ + +static void +aarch64_parse_cpu (void) +{ + char *ext; + const struct processor *cpu; + char *str = (char *) alloca (strlen (aarch64_cpu_string) + 1); + size_t len; + + strcpy (str, aarch64_cpu_string); + + ext = strchr (str, '+'); + + if (ext != NULL) + len = ext - str; + else + len = strlen (str); + + if (len == 0) + { + error ("missing cpu name in -mcpu=%qs", str); + return; + } + + /* Loop through the list of supported CPUs to find a match. */ + for (cpu = all_cores; cpu->name != NULL; cpu++) + { + if (strlen (cpu->name) == len && strncmp (cpu->name, str, len) == 0) + { + selected_cpu = cpu; + selected_tune = cpu; + aarch64_isa_flags = selected_cpu->flags; + + if (ext != NULL) + { + /* CPU string contains at least one extension. */ + aarch64_parse_extension (ext); + } + + return; + } + } + + /* CPU name not found in list. */ + error ("unknown value %qs for -mcpu", str); + return; +} + +/* Parse the TUNE string. */ + +static void +aarch64_parse_tune (void) +{ + const struct processor *cpu; + char *str = (char *) alloca (strlen (aarch64_tune_string) + 1); + strcpy (str, aarch64_tune_string); + + /* Loop through the list of supported CPUs to find a match. */ + for (cpu = all_cores; cpu->name != NULL; cpu++) + { + if (strcmp (cpu->name, str) == 0) + { + selected_tune = cpu; + return; + } + } + + /* CPU name not found in list. */ + error ("unknown value %qs for -mtune", str); + return; +} + + +/* Implement TARGET_OPTION_OVERRIDE. */ + +static void +aarch64_override_options (void) +{ + /* -mcpu=CPU is shorthand for -march=ARCH_FOR_CPU, -mtune=CPU. + If either of -march or -mtune is given, they override their + respective component of -mcpu. + + So, first parse AARCH64_CPU_STRING, then the others, be careful + with -march as, if -mcpu is not present on the command line, march + must set a sensible default CPU. 
*/ + if (aarch64_cpu_string) + { + aarch64_parse_cpu (); + } + + if (aarch64_arch_string) + { + aarch64_parse_arch (); + } + + if (aarch64_tune_string) + { + aarch64_parse_tune (); + } + +#ifndef HAVE_AS_MABI_OPTION + /* The compiler may have been configured with 2.23.* binutils, which does + not have support for ILP32. */ + if (TARGET_ILP32) + error ("Assembler does not support -mabi=ilp32"); +#endif + + initialize_aarch64_code_model (); + + aarch64_build_bitmask_table (); + + /* This target defaults to strict volatile bitfields. */ + if (flag_strict_volatile_bitfields < 0 && abi_version_at_least (2)) + flag_strict_volatile_bitfields = 1; + + /* If the user did not specify a processor, choose the default + one for them. This will be the CPU set during configuration using + --with-cpu, otherwise it is "generic". */ + if (!selected_cpu) + { + selected_cpu = &all_cores[TARGET_CPU_DEFAULT & 0x3f]; + aarch64_isa_flags = TARGET_CPU_DEFAULT >> 6; + } + + gcc_assert (selected_cpu); + + /* The selected cpu may be an architecture, so lookup tuning by core ID. */ + if (!selected_tune) + selected_tune = &all_cores[selected_cpu->core]; + + aarch64_tune_flags = selected_tune->flags; + aarch64_tune = selected_tune->core; + aarch64_tune_params = selected_tune->tune; + + aarch64_override_options_after_change (); +} + +/* Implement targetm.override_options_after_change. */ + +static void +aarch64_override_options_after_change (void) +{ + faked_omit_frame_pointer = false; + + /* To omit leaf frame pointers, we need to turn flag_omit_frame_pointer on so + that aarch64_frame_pointer_required will be called. We need to remember + whether flag_omit_frame_pointer was turned on normally or just faked. */ + + if (flag_omit_leaf_frame_pointer && !flag_omit_frame_pointer) + { + flag_omit_frame_pointer = true; + faked_omit_frame_pointer = true; + } +} + +static struct machine_function * +aarch64_init_machine_status (void) +{ + struct machine_function *machine; + machine = ggc_alloc_cleared_machine_function (); + return machine; +} + +void +aarch64_init_expanders (void) +{ + init_machine_status = aarch64_init_machine_status; +} + +/* A checking mechanism for the implementation of the various code models. */ +static void +initialize_aarch64_code_model (void) +{ + if (flag_pic) + { + switch (aarch64_cmodel_var) + { + case AARCH64_CMODEL_TINY: + aarch64_cmodel = AARCH64_CMODEL_TINY_PIC; + break; + case AARCH64_CMODEL_SMALL: + aarch64_cmodel = AARCH64_CMODEL_SMALL_PIC; + break; + case AARCH64_CMODEL_LARGE: + sorry ("code model %qs with -f%s", "large", + flag_pic > 1 ? "PIC" : "pic"); + default: + gcc_unreachable (); + } + } + else + aarch64_cmodel = aarch64_cmodel_var; +} + +/* Return true if SYMBOL_REF X binds locally. */ + +static bool +aarch64_symbol_binds_local_p (const_rtx x) +{ + return (SYMBOL_REF_DECL (x) + ? targetm.binds_local_p (SYMBOL_REF_DECL (x)) + : SYMBOL_REF_LOCAL_P (x)); +} + +/* Return true if SYMBOL_REF X is thread local */ +static bool +aarch64_tls_symbol_p (rtx x) +{ + if (! TARGET_HAVE_TLS) + return false; + + if (GET_CODE (x) != SYMBOL_REF) + return false; + + return SYMBOL_REF_TLS_MODEL (x) != 0; +} + +/* Classify a TLS symbol into one of the TLS kinds. */ +enum aarch64_symbol_type +aarch64_classify_tls_symbol (rtx x) +{ + enum tls_model tls_kind = tls_symbolic_operand_type (x); + + switch (tls_kind) + { + case TLS_MODEL_GLOBAL_DYNAMIC: + case TLS_MODEL_LOCAL_DYNAMIC: + return TARGET_TLS_DESC ? 
SYMBOL_SMALL_TLSDESC : SYMBOL_SMALL_TLSGD; + + case TLS_MODEL_INITIAL_EXEC: + return SYMBOL_SMALL_GOTTPREL; + + case TLS_MODEL_LOCAL_EXEC: + return SYMBOL_SMALL_TPREL; + + case TLS_MODEL_EMULATED: + case TLS_MODEL_NONE: + return SYMBOL_FORCE_TO_MEM; + + default: + gcc_unreachable (); + } +} + +/* Return the method that should be used to access SYMBOL_REF or + LABEL_REF X in context CONTEXT. */ + +enum aarch64_symbol_type +aarch64_classify_symbol (rtx x, + enum aarch64_symbol_context context ATTRIBUTE_UNUSED) +{ + if (GET_CODE (x) == LABEL_REF) + { + switch (aarch64_cmodel) + { + case AARCH64_CMODEL_LARGE: + return SYMBOL_FORCE_TO_MEM; + + case AARCH64_CMODEL_TINY_PIC: + case AARCH64_CMODEL_TINY: + return SYMBOL_TINY_ABSOLUTE; + + case AARCH64_CMODEL_SMALL_PIC: + case AARCH64_CMODEL_SMALL: + return SYMBOL_SMALL_ABSOLUTE; + + default: + gcc_unreachable (); + } + } + + if (GET_CODE (x) == SYMBOL_REF) + { + if (aarch64_cmodel == AARCH64_CMODEL_LARGE) + return SYMBOL_FORCE_TO_MEM; + + if (aarch64_tls_symbol_p (x)) + return aarch64_classify_tls_symbol (x); + + switch (aarch64_cmodel) + { + case AARCH64_CMODEL_TINY: + if (SYMBOL_REF_WEAK (x)) + return SYMBOL_FORCE_TO_MEM; + return SYMBOL_TINY_ABSOLUTE; + + case AARCH64_CMODEL_SMALL: + if (SYMBOL_REF_WEAK (x)) + return SYMBOL_FORCE_TO_MEM; + return SYMBOL_SMALL_ABSOLUTE; + + case AARCH64_CMODEL_TINY_PIC: + if (!aarch64_symbol_binds_local_p (x)) + return SYMBOL_TINY_GOT; + return SYMBOL_TINY_ABSOLUTE; + + case AARCH64_CMODEL_SMALL_PIC: + if (!aarch64_symbol_binds_local_p (x)) + return SYMBOL_SMALL_GOT; + return SYMBOL_SMALL_ABSOLUTE; + + default: + gcc_unreachable (); + } + } + + /* By default push everything into the constant pool. */ + return SYMBOL_FORCE_TO_MEM; +} + +bool +aarch64_constant_address_p (rtx x) +{ + return (CONSTANT_P (x) && memory_address_p (DImode, x)); +} + +bool +aarch64_legitimate_pic_operand_p (rtx x) +{ + if (GET_CODE (x) == SYMBOL_REF + || (GET_CODE (x) == CONST + && GET_CODE (XEXP (x, 0)) == PLUS + && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF)) + return false; + + return true; +} + +/* Return true if X holds either a quarter-precision or + floating-point +0.0 constant. */ +static bool +aarch64_valid_floating_const (enum machine_mode mode, rtx x) +{ + if (!CONST_DOUBLE_P (x)) + return false; + + /* TODO: We could handle moving 0.0 to a TFmode register, + but first we would like to refactor the movtf_aarch64 + to be more amicable to split moves properly and + correctly gate on TARGET_SIMD. For now - reject all + constants which are not to SFmode or DFmode registers. */ + if (!(mode == SFmode || mode == DFmode)) + return false; + + if (aarch64_float_const_zero_rtx_p (x)) + return true; + return aarch64_float_const_representable_p (x); +} + +static bool +aarch64_legitimate_constant_p (enum machine_mode mode, rtx x) +{ + /* Do not allow vector struct mode constants. We could support + 0 and -1 easily, but they need support in aarch64-simd.md. */ + if (TARGET_SIMD && aarch64_vect_struct_mode_p (mode)) + return false; + + /* This could probably go away because + we now decompose CONST_INTs according to expand_mov_immediate. 
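For reference, the TLS kinds distinguished by aarch64_classify_tls_symbol above correspond to ordinary source-level choices. The snippet below is a rough user-level illustration (variable names invented); the comments name the usual lowering for each model rather than the exact instructions this particular compiler revision emits.

#include <stdio.h>

__thread int t_gd;              /* Preemptible under -fPIC: global-dynamic,
                                   normally lowered through a TLS descriptor
                                   (SYMBOL_SMALL_TLSDESC with
                                   -mtls-dialect=desc).  */

static __thread int t_ld;       /* Module-local under -fPIC: local-dynamic,
                                   also reachable through a descriptor.  */

__thread int t_ie __attribute__ ((tls_model ("initial-exec")));
                                /* GOT-held offset from the thread pointer
                                   (SYMBOL_SMALL_GOTTPREL).  */

__thread int t_le __attribute__ ((tls_model ("local-exec")));
                                /* Immediate offset from the thread pointer
                                   (SYMBOL_SMALL_TPREL).  */

int
main (void)
{
  t_gd = 1; t_ld = 2; t_ie = 3; t_le = 4;
  printf ("%d\n", t_gd + t_ld + t_ie + t_le);
  return 0;
}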
*/ + if ((GET_CODE (x) == CONST_VECTOR + && aarch64_simd_valid_immediate (x, mode, false, NULL)) + || CONST_INT_P (x) || aarch64_valid_floating_const (mode, x)) + return !targetm.cannot_force_const_mem (mode, x); + + if (GET_CODE (x) == HIGH + && aarch64_valid_symref (XEXP (x, 0), GET_MODE (XEXP (x, 0)))) + return true; + + return aarch64_constant_address_p (x); +} + +rtx +aarch64_load_tp (rtx target) +{ + if (!target + || GET_MODE (target) != Pmode + || !register_operand (target, Pmode)) + target = gen_reg_rtx (Pmode); + + /* Can return in any reg. */ + emit_insn (gen_aarch64_load_tp_hard (target)); + return target; +} + +/* On AAPCS systems, this is the "struct __va_list". */ +static GTY(()) tree va_list_type; + +/* Implement TARGET_BUILD_BUILTIN_VA_LIST. + Return the type to use as __builtin_va_list. + + AAPCS64 \S 7.1.4 requires that va_list be a typedef for a type defined as: + + struct __va_list + { + void *__stack; + void *__gr_top; + void *__vr_top; + int __gr_offs; + int __vr_offs; + }; */ + +static tree +aarch64_build_builtin_va_list (void) +{ + tree va_list_name; + tree f_stack, f_grtop, f_vrtop, f_groff, f_vroff; + + /* Create the type. */ + va_list_type = lang_hooks.types.make_type (RECORD_TYPE); + /* Give it the required name. */ + va_list_name = build_decl (BUILTINS_LOCATION, + TYPE_DECL, + get_identifier ("__va_list"), + va_list_type); + DECL_ARTIFICIAL (va_list_name) = 1; + TYPE_NAME (va_list_type) = va_list_name; + TYPE_STUB_DECL (va_list_type) = va_list_name; + + /* Create the fields. */ + f_stack = build_decl (BUILTINS_LOCATION, + FIELD_DECL, get_identifier ("__stack"), + ptr_type_node); + f_grtop = build_decl (BUILTINS_LOCATION, + FIELD_DECL, get_identifier ("__gr_top"), + ptr_type_node); + f_vrtop = build_decl (BUILTINS_LOCATION, + FIELD_DECL, get_identifier ("__vr_top"), + ptr_type_node); + f_groff = build_decl (BUILTINS_LOCATION, + FIELD_DECL, get_identifier ("__gr_offs"), + integer_type_node); + f_vroff = build_decl (BUILTINS_LOCATION, + FIELD_DECL, get_identifier ("__vr_offs"), + integer_type_node); + + DECL_ARTIFICIAL (f_stack) = 1; + DECL_ARTIFICIAL (f_grtop) = 1; + DECL_ARTIFICIAL (f_vrtop) = 1; + DECL_ARTIFICIAL (f_groff) = 1; + DECL_ARTIFICIAL (f_vroff) = 1; + + DECL_FIELD_CONTEXT (f_stack) = va_list_type; + DECL_FIELD_CONTEXT (f_grtop) = va_list_type; + DECL_FIELD_CONTEXT (f_vrtop) = va_list_type; + DECL_FIELD_CONTEXT (f_groff) = va_list_type; + DECL_FIELD_CONTEXT (f_vroff) = va_list_type; + + TYPE_FIELDS (va_list_type) = f_stack; + DECL_CHAIN (f_stack) = f_grtop; + DECL_CHAIN (f_grtop) = f_vrtop; + DECL_CHAIN (f_vrtop) = f_groff; + DECL_CHAIN (f_groff) = f_vroff; + + /* Compute its layout. */ + layout_type (va_list_type); + + return va_list_type; +} + +/* Implement TARGET_EXPAND_BUILTIN_VA_START. 
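The five fields spelled out in the comment above fix the size and field offsets of va_list on an LP64 AArch64 target. The check below restates that layout in user-level C; the mirror struct is written for illustration (field names shortened) and is not the compiler-internal type.

#include <assert.h>
#include <stdarg.h>
#include <stddef.h>

/* User-level mirror of the AAPCS64 __va_list record built above.  */
struct mirror_va_list
{
  void *stack;    /* __stack    */
  void *gr_top;   /* __gr_top   */
  void *vr_top;   /* __vr_top   */
  int gr_offs;    /* __gr_offs  */
  int vr_offs;    /* __vr_offs  */
};

int
main (void)
{
  /* Three 8-byte pointers plus two 4-byte offsets: 32 bytes in total.  */
  assert (sizeof (struct mirror_va_list) == 32);
  assert (offsetof (struct mirror_va_list, gr_offs) == 24);
  /* The builtin va_list occupies the same 32 bytes on this target.  */
  assert (sizeof (va_list) == sizeof (struct mirror_va_list));
  return 0;
}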
*/ +static void +aarch64_expand_builtin_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED) +{ + const CUMULATIVE_ARGS *cum; + tree f_stack, f_grtop, f_vrtop, f_groff, f_vroff; + tree stack, grtop, vrtop, groff, vroff; + tree t; + int gr_save_area_size; + int vr_save_area_size; + int vr_offset; + + cum = &crtl->args.info; + gr_save_area_size + = (NUM_ARG_REGS - cum->aapcs_ncrn) * UNITS_PER_WORD; + vr_save_area_size + = (NUM_FP_ARG_REGS - cum->aapcs_nvrn) * UNITS_PER_VREG; + + if (TARGET_GENERAL_REGS_ONLY) + { + if (cum->aapcs_nvrn > 0) + sorry ("%qs and floating point or vector arguments", + "-mgeneral-regs-only"); + vr_save_area_size = 0; + } + + f_stack = TYPE_FIELDS (va_list_type_node); + f_grtop = DECL_CHAIN (f_stack); + f_vrtop = DECL_CHAIN (f_grtop); + f_groff = DECL_CHAIN (f_vrtop); + f_vroff = DECL_CHAIN (f_groff); + + stack = build3 (COMPONENT_REF, TREE_TYPE (f_stack), valist, f_stack, + NULL_TREE); + grtop = build3 (COMPONENT_REF, TREE_TYPE (f_grtop), valist, f_grtop, + NULL_TREE); + vrtop = build3 (COMPONENT_REF, TREE_TYPE (f_vrtop), valist, f_vrtop, + NULL_TREE); + groff = build3 (COMPONENT_REF, TREE_TYPE (f_groff), valist, f_groff, + NULL_TREE); + vroff = build3 (COMPONENT_REF, TREE_TYPE (f_vroff), valist, f_vroff, + NULL_TREE); + + /* Emit code to initialize STACK, which points to the next varargs stack + argument. CUM->AAPCS_STACK_SIZE gives the number of stack words used + by named arguments. STACK is 8-byte aligned. */ + t = make_tree (TREE_TYPE (stack), virtual_incoming_args_rtx); + if (cum->aapcs_stack_size > 0) + t = fold_build_pointer_plus_hwi (t, cum->aapcs_stack_size * UNITS_PER_WORD); + t = build2 (MODIFY_EXPR, TREE_TYPE (stack), stack, t); + expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL); + + /* Emit code to initialize GRTOP, the top of the GR save area. + virtual_incoming_args_rtx should have been 16 byte aligned. */ + t = make_tree (TREE_TYPE (grtop), virtual_incoming_args_rtx); + t = build2 (MODIFY_EXPR, TREE_TYPE (grtop), grtop, t); + expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL); + + /* Emit code to initialize VRTOP, the top of the VR save area. + This address is gr_save_area_bytes below GRTOP, rounded + down to the next 16-byte boundary. */ + t = make_tree (TREE_TYPE (vrtop), virtual_incoming_args_rtx); + vr_offset = AARCH64_ROUND_UP (gr_save_area_size, + STACK_BOUNDARY / BITS_PER_UNIT); + + if (vr_offset) + t = fold_build_pointer_plus_hwi (t, -vr_offset); + t = build2 (MODIFY_EXPR, TREE_TYPE (vrtop), vrtop, t); + expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL); + + /* Emit code to initialize GROFF, the offset from GRTOP of the + next GPR argument. */ + t = build2 (MODIFY_EXPR, TREE_TYPE (groff), groff, + build_int_cst (TREE_TYPE (groff), -gr_save_area_size)); + expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL); + + /* Likewise emit code to initialize VROFF, the offset from FTOP + of the next VR argument. */ + t = build2 (MODIFY_EXPR, TREE_TYPE (vroff), vroff, + build_int_cst (TREE_TYPE (vroff), -vr_save_area_size)); + expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL); +} + +/* Implement TARGET_GIMPLIFY_VA_ARG_EXPR. */ + +static tree +aarch64_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p, + gimple_seq *post_p ATTRIBUTE_UNUSED) +{ + tree addr; + bool indirect_p; + bool is_ha; /* is HFA or HVA. */ + bool dw_align; /* double-word align. 
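To make the bookkeeping in aarch64_expand_builtin_va_start above concrete, suppose the named arguments of a variadic function consume one general register and one vector register (the function in the comment below is invented, and the register-count constants are restated locally for the sketch). Seven of the eight argument GPRs and seven of the eight VRs are dumped to the save areas, and the offsets start negative and count up towards zero as va_arg consumes registers.

#include <stdio.h>

/* For a hypothetical "int f (int a, double d, ...)": one GPR (w0) and one
   VR (d0) hold named arguments; the remaining ones may hold anonymous
   arguments and are saved by the varargs prologue.  */
enum { NUM_ARG_REGS = 8, NUM_FP_ARG_REGS = 8,
       UNITS_PER_WORD = 8, UNITS_PER_VREG = 16 };

int
main (void)
{
  int ncrn = 1, nvrn = 1;   /* registers used by named arguments  */
  int gr_save = (NUM_ARG_REGS - ncrn) * UNITS_PER_WORD;      /* 56 bytes   */
  int vr_save = (NUM_FP_ARG_REGS - nvrn) * UNITS_PER_VREG;   /* 112 bytes  */

  printf ("__gr_offs starts at %d, __vr_offs at %d\n", -gr_save, -vr_save);
  return 0;
}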
*/ + enum machine_mode ag_mode = VOIDmode; + int nregs; + enum machine_mode mode; + + tree f_stack, f_grtop, f_vrtop, f_groff, f_vroff; + tree stack, f_top, f_off, off, arg, roundup, on_stack; + HOST_WIDE_INT size, rsize, adjust, align; + tree t, u, cond1, cond2; + + indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, false); + if (indirect_p) + type = build_pointer_type (type); + + mode = TYPE_MODE (type); + + f_stack = TYPE_FIELDS (va_list_type_node); + f_grtop = DECL_CHAIN (f_stack); + f_vrtop = DECL_CHAIN (f_grtop); + f_groff = DECL_CHAIN (f_vrtop); + f_vroff = DECL_CHAIN (f_groff); + + stack = build3 (COMPONENT_REF, TREE_TYPE (f_stack), unshare_expr (valist), + f_stack, NULL_TREE); + size = int_size_in_bytes (type); + align = aarch64_function_arg_alignment (mode, type) / BITS_PER_UNIT; + + dw_align = false; + adjust = 0; + if (aarch64_vfp_is_call_or_return_candidate (mode, + type, + &ag_mode, + &nregs, + &is_ha)) + { + /* TYPE passed in fp/simd registers. */ + if (TARGET_GENERAL_REGS_ONLY) + sorry ("%qs and floating point or vector arguments", + "-mgeneral-regs-only"); + + f_top = build3 (COMPONENT_REF, TREE_TYPE (f_vrtop), + unshare_expr (valist), f_vrtop, NULL_TREE); + f_off = build3 (COMPONENT_REF, TREE_TYPE (f_vroff), + unshare_expr (valist), f_vroff, NULL_TREE); + + rsize = nregs * UNITS_PER_VREG; + + if (is_ha) + { + if (BYTES_BIG_ENDIAN && GET_MODE_SIZE (ag_mode) < UNITS_PER_VREG) + adjust = UNITS_PER_VREG - GET_MODE_SIZE (ag_mode); + } + else if (BLOCK_REG_PADDING (mode, type, 1) == downward + && size < UNITS_PER_VREG) + { + adjust = UNITS_PER_VREG - size; + } + } + else + { + /* TYPE passed in general registers. */ + f_top = build3 (COMPONENT_REF, TREE_TYPE (f_grtop), + unshare_expr (valist), f_grtop, NULL_TREE); + f_off = build3 (COMPONENT_REF, TREE_TYPE (f_groff), + unshare_expr (valist), f_groff, NULL_TREE); + rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD; + nregs = rsize / UNITS_PER_WORD; + + if (align > 8) + dw_align = true; + + if (BLOCK_REG_PADDING (mode, type, 1) == downward + && size < UNITS_PER_WORD) + { + adjust = UNITS_PER_WORD - size; + } + } + + /* Get a local temporary for the field value. */ + off = get_initialized_tmp_var (f_off, pre_p, NULL); + + /* Emit code to branch if off >= 0. */ + t = build2 (GE_EXPR, boolean_type_node, off, + build_int_cst (TREE_TYPE (off), 0)); + cond1 = build3 (COND_EXPR, ptr_type_node, t, NULL_TREE, NULL_TREE); + + if (dw_align) + { + /* Emit: offs = (offs + 15) & -16. */ + t = build2 (PLUS_EXPR, TREE_TYPE (off), off, + build_int_cst (TREE_TYPE (off), 15)); + t = build2 (BIT_AND_EXPR, TREE_TYPE (off), t, + build_int_cst (TREE_TYPE (off), -16)); + roundup = build2 (MODIFY_EXPR, TREE_TYPE (off), off, t); + } + else + roundup = NULL; + + /* Update ap.__[g|v]r_offs */ + t = build2 (PLUS_EXPR, TREE_TYPE (off), off, + build_int_cst (TREE_TYPE (off), rsize)); + t = build2 (MODIFY_EXPR, TREE_TYPE (f_off), unshare_expr (f_off), t); + + /* String up. */ + if (roundup) + t = build2 (COMPOUND_EXPR, TREE_TYPE (t), roundup, t); + + /* [cond2] if (ap.__[g|v]r_offs > 0) */ + u = build2 (GT_EXPR, boolean_type_node, unshare_expr (f_off), + build_int_cst (TREE_TYPE (f_off), 0)); + cond2 = build3 (COND_EXPR, ptr_type_node, u, NULL_TREE, NULL_TREE); + + /* String up: make sure the assignment happens before the use. */ + t = build2 (COMPOUND_EXPR, TREE_TYPE (cond2), t, cond2); + COND_EXPR_ELSE (cond1) = t; + + /* Prepare the trees handling the argument that is passed on the stack; + the top level node will store in ON_STACK. 
*/ + arg = get_initialized_tmp_var (stack, pre_p, NULL); + if (align > 8) + { + /* if (alignof(type) > 8) (arg = arg + 15) & -16; */ + t = fold_convert (intDI_type_node, arg); + t = build2 (PLUS_EXPR, TREE_TYPE (t), t, + build_int_cst (TREE_TYPE (t), 15)); + t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t, + build_int_cst (TREE_TYPE (t), -16)); + t = fold_convert (TREE_TYPE (arg), t); + roundup = build2 (MODIFY_EXPR, TREE_TYPE (arg), arg, t); + } + else + roundup = NULL; + /* Advance ap.__stack */ + t = fold_convert (intDI_type_node, arg); + t = build2 (PLUS_EXPR, TREE_TYPE (t), t, + build_int_cst (TREE_TYPE (t), size + 7)); + t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t, + build_int_cst (TREE_TYPE (t), -8)); + t = fold_convert (TREE_TYPE (arg), t); + t = build2 (MODIFY_EXPR, TREE_TYPE (stack), unshare_expr (stack), t); + /* String up roundup and advance. */ + if (roundup) + t = build2 (COMPOUND_EXPR, TREE_TYPE (t), roundup, t); + /* String up with arg */ + on_stack = build2 (COMPOUND_EXPR, TREE_TYPE (arg), t, arg); + /* Big-endianness related address adjustment. */ + if (BLOCK_REG_PADDING (mode, type, 1) == downward + && size < UNITS_PER_WORD) + { + t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (arg), arg, + size_int (UNITS_PER_WORD - size)); + on_stack = build2 (COMPOUND_EXPR, TREE_TYPE (arg), on_stack, t); + } + + COND_EXPR_THEN (cond1) = unshare_expr (on_stack); + COND_EXPR_THEN (cond2) = unshare_expr (on_stack); + + /* Adjustment to OFFSET in the case of BIG_ENDIAN. */ + t = off; + if (adjust) + t = build2 (PREINCREMENT_EXPR, TREE_TYPE (off), off, + build_int_cst (TREE_TYPE (off), adjust)); + + t = fold_convert (sizetype, t); + t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (f_top), f_top, t); + + if (is_ha) + { + /* type ha; // treat as "struct {ftype field[n];}" + ... [computing offs] + for (i = 0; i <nregs; ++i, offs += 16) + ha.field[i] = *((ftype *)(ap.__vr_top + offs)); + return ha; */ + int i; + tree tmp_ha, field_t, field_ptr_t; + + /* Declare a local variable. */ + tmp_ha = create_tmp_var_raw (type, "ha"); + gimple_add_tmp_var (tmp_ha); + + /* Establish the base type. */ + switch (ag_mode) + { + case SFmode: + field_t = float_type_node; + field_ptr_t = float_ptr_type_node; + break; + case DFmode: + field_t = double_type_node; + field_ptr_t = double_ptr_type_node; + break; + case TFmode: + field_t = long_double_type_node; + field_ptr_t = long_double_ptr_type_node; + break; +/* The half precision and quad precision are not fully supported yet. Enable + the following code after the support is complete. Need to find the correct + type node for __fp16 *. 
*/ +#if 0 + case HFmode: + field_t = float_type_node; + field_ptr_t = float_ptr_type_node; + break; +#endif + case V2SImode: + case V4SImode: + { + tree innertype = make_signed_type (GET_MODE_PRECISION (SImode)); + field_t = build_vector_type_for_mode (innertype, ag_mode); + field_ptr_t = build_pointer_type (field_t); + } + break; + default: + gcc_assert (0); + } + + /* *(field_ptr_t)&ha = *((field_ptr_t)vr_saved_area */ + tmp_ha = build1 (ADDR_EXPR, field_ptr_t, tmp_ha); + addr = t; + t = fold_convert (field_ptr_t, addr); + t = build2 (MODIFY_EXPR, field_t, + build1 (INDIRECT_REF, field_t, tmp_ha), + build1 (INDIRECT_REF, field_t, t)); + + /* ha.field[i] = *((field_ptr_t)vr_saved_area + i) */ + for (i = 1; i < nregs; ++i) + { + addr = fold_build_pointer_plus_hwi (addr, UNITS_PER_VREG); + u = fold_convert (field_ptr_t, addr); + u = build2 (MODIFY_EXPR, field_t, + build2 (MEM_REF, field_t, tmp_ha, + build_int_cst (field_ptr_t, + (i * + int_size_in_bytes (field_t)))), + build1 (INDIRECT_REF, field_t, u)); + t = build2 (COMPOUND_EXPR, TREE_TYPE (t), t, u); + } + + u = fold_convert (TREE_TYPE (f_top), tmp_ha); + t = build2 (COMPOUND_EXPR, TREE_TYPE (f_top), t, u); + } + + COND_EXPR_ELSE (cond2) = t; + addr = fold_convert (build_pointer_type (type), cond1); + addr = build_va_arg_indirect_ref (addr); + + if (indirect_p) + addr = build_va_arg_indirect_ref (addr); + + return addr; +} + +/* Implement TARGET_SETUP_INCOMING_VARARGS. */ + +static void +aarch64_setup_incoming_varargs (cumulative_args_t cum_v, enum machine_mode mode, + tree type, int *pretend_size ATTRIBUTE_UNUSED, + int no_rtl) +{ + CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v); + CUMULATIVE_ARGS local_cum; + int gr_saved, vr_saved; + + /* The caller has advanced CUM up to, but not beyond, the last named + argument. Advance a local copy of CUM past the last "real" named + argument, to find out how many registers are left over. */ + local_cum = *cum; + aarch64_function_arg_advance (pack_cumulative_args(&local_cum), mode, type, true); + + /* Found out how many registers we need to save. */ + gr_saved = NUM_ARG_REGS - local_cum.aapcs_ncrn; + vr_saved = NUM_FP_ARG_REGS - local_cum.aapcs_nvrn; + + if (TARGET_GENERAL_REGS_ONLY) + { + if (local_cum.aapcs_nvrn > 0) + sorry ("%qs and floating point or vector arguments", + "-mgeneral-regs-only"); + vr_saved = 0; + } + + if (!no_rtl) + { + if (gr_saved > 0) + { + rtx ptr, mem; + + /* virtual_incoming_args_rtx should have been 16-byte aligned. */ + ptr = plus_constant (Pmode, virtual_incoming_args_rtx, + - gr_saved * UNITS_PER_WORD); + mem = gen_frame_mem (BLKmode, ptr); + set_mem_alias_set (mem, get_varargs_alias_set ()); + + move_block_from_reg (local_cum.aapcs_ncrn + R0_REGNUM, + mem, gr_saved); + } + if (vr_saved > 0) + { + /* We can't use move_block_from_reg, because it will use + the wrong mode, storing D regs only. */ + enum machine_mode mode = TImode; + int off, i; + + /* Set OFF to the offset from virtual_incoming_args_rtx of + the first vector register. The VR save area lies below + the GR one, and is aligned to 16 bytes. 
*/ + off = -AARCH64_ROUND_UP (gr_saved * UNITS_PER_WORD, + STACK_BOUNDARY / BITS_PER_UNIT); + off -= vr_saved * UNITS_PER_VREG; + + for (i = local_cum.aapcs_nvrn; i < NUM_FP_ARG_REGS; ++i) + { + rtx ptr, mem; + + ptr = plus_constant (Pmode, virtual_incoming_args_rtx, off); + mem = gen_frame_mem (mode, ptr); + set_mem_alias_set (mem, get_varargs_alias_set ()); + aarch64_emit_move (mem, gen_rtx_REG (mode, V0_REGNUM + i)); + off += UNITS_PER_VREG; + } + } + } + + /* We don't save the size into *PRETEND_SIZE because we want to avoid + any complication of having crtl->args.pretend_args_size changed. */ + cfun->machine->saved_varargs_size + = (AARCH64_ROUND_UP (gr_saved * UNITS_PER_WORD, + STACK_BOUNDARY / BITS_PER_UNIT) + + vr_saved * UNITS_PER_VREG); +} + +static void +aarch64_conditional_register_usage (void) +{ + int i; + if (!TARGET_FLOAT) + { + for (i = V0_REGNUM; i <= V31_REGNUM; i++) + { + fixed_regs[i] = 1; + call_used_regs[i] = 1; + } + } +} + +/* Walk down the type tree of TYPE counting consecutive base elements. + If *MODEP is VOIDmode, then set it to the first valid floating point + type. If a non-floating point type is found, or if a floating point + type that doesn't match a non-VOIDmode *MODEP is found, then return -1, + otherwise return the count in the sub-tree. */ +static int +aapcs_vfp_sub_candidate (const_tree type, enum machine_mode *modep) +{ + enum machine_mode mode; + HOST_WIDE_INT size; + + switch (TREE_CODE (type)) + { + case REAL_TYPE: + mode = TYPE_MODE (type); + if (mode != DFmode && mode != SFmode && mode != TFmode) + return -1; + + if (*modep == VOIDmode) + *modep = mode; + + if (*modep == mode) + return 1; + + break; + + case COMPLEX_TYPE: + mode = TYPE_MODE (TREE_TYPE (type)); + if (mode != DFmode && mode != SFmode && mode != TFmode) + return -1; + + if (*modep == VOIDmode) + *modep = mode; + + if (*modep == mode) + return 2; + + break; + + case VECTOR_TYPE: + /* Use V2SImode and V4SImode as representatives of all 64-bit + and 128-bit vector types. */ + size = int_size_in_bytes (type); + switch (size) + { + case 8: + mode = V2SImode; + break; + case 16: + mode = V4SImode; + break; + default: + return -1; + } + + if (*modep == VOIDmode) + *modep = mode; + + /* Vector modes are considered to be opaque: two vectors are + equivalent for the purposes of being homogeneous aggregates + if they are the same size. */ + if (*modep == mode) + return 1; + + break; + + case ARRAY_TYPE: + { + int count; + tree index = TYPE_DOMAIN (type); + + /* Can't handle incomplete types. */ + if (!COMPLETE_TYPE_P (type)) + return -1; + + count = aapcs_vfp_sub_candidate (TREE_TYPE (type), modep); + if (count == -1 + || !index + || !TYPE_MAX_VALUE (index) + || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index)) + || !TYPE_MIN_VALUE (index) + || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index)) + || count < 0) + return -1; + + count *= (1 + tree_to_uhwi (TYPE_MAX_VALUE (index)) + - tree_to_uhwi (TYPE_MIN_VALUE (index))); + + /* There must be no padding. */ + if (!tree_fits_uhwi_p (TYPE_SIZE (type)) + || ((HOST_WIDE_INT) tree_to_uhwi (TYPE_SIZE (type)) + != count * GET_MODE_BITSIZE (*modep))) + return -1; + + return count; + } + + case RECORD_TYPE: + { + int count = 0; + int sub_count; + tree field; + + /* Can't handle incomplete types. 
*/ + if (!COMPLETE_TYPE_P (type)) + return -1; + + for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field)) + { + if (TREE_CODE (field) != FIELD_DECL) + continue; + + sub_count = aapcs_vfp_sub_candidate (TREE_TYPE (field), modep); + if (sub_count < 0) + return -1; + count += sub_count; + } + + /* There must be no padding. */ + if (!tree_fits_uhwi_p (TYPE_SIZE (type)) + || ((HOST_WIDE_INT) tree_to_uhwi (TYPE_SIZE (type)) + != count * GET_MODE_BITSIZE (*modep))) + return -1; + + return count; + } + + case UNION_TYPE: + case QUAL_UNION_TYPE: + { + /* These aren't very interesting except in a degenerate case. */ + int count = 0; + int sub_count; + tree field; + + /* Can't handle incomplete types. */ + if (!COMPLETE_TYPE_P (type)) + return -1; + + for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field)) + { + if (TREE_CODE (field) != FIELD_DECL) + continue; + + sub_count = aapcs_vfp_sub_candidate (TREE_TYPE (field), modep); + if (sub_count < 0) + return -1; + count = count > sub_count ? count : sub_count; + } + + /* There must be no padding. */ + if (!tree_fits_uhwi_p (TYPE_SIZE (type)) + || ((HOST_WIDE_INT) tree_to_uhwi (TYPE_SIZE (type)) + != count * GET_MODE_BITSIZE (*modep))) + return -1; + + return count; + } + + default: + break; + } + + return -1; +} + +/* Return true if we use LRA instead of reload pass. */ +static bool +aarch64_lra_p (void) +{ + return aarch64_lra_flag; +} + +/* Return TRUE if the type, as described by TYPE and MODE, is a composite + type as described in AAPCS64 \S 4.3. This includes aggregate, union and + array types. The C99 floating-point complex types are also considered + as composite types, according to AAPCS64 \S 7.1.1. The complex integer + types, which are GCC extensions and out of the scope of AAPCS64, are + treated as composite types here as well. + + Note that MODE itself is not sufficient in determining whether a type + is such a composite type or not. This is because + stor-layout.c:compute_record_mode may have already changed the MODE + (BLKmode) of a RECORD_TYPE TYPE to some other mode. For example, a + structure with only one field may have its MODE set to the mode of the + field. Also an integer mode whose size matches the size of the + RECORD_TYPE type may be used to substitute the original mode + (i.e. BLKmode) in certain circumstances. In other words, MODE cannot be + solely relied on. */ + +static bool +aarch64_composite_type_p (const_tree type, + enum machine_mode mode) +{ + if (type && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == COMPLEX_TYPE)) + return true; + + if (mode == BLKmode + || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT + || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT) + return true; + + return false; +} + +/* Return TRUE if the type, as described by TYPE and MODE, is a short vector + type as described in AAPCS64 \S 4.1.2. + + See the comment above aarch64_composite_type_p for the notes on MODE. */ + +static bool +aarch64_short_vector_p (const_tree type, + enum machine_mode mode) +{ + HOST_WIDE_INT size = -1; + + if (type && TREE_CODE (type) == VECTOR_TYPE) + size = int_size_in_bytes (type); + else if (!aarch64_composite_type_p (type, mode) + && (GET_MODE_CLASS (mode) == MODE_VECTOR_INT + || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)) + size = GET_MODE_SIZE (mode); + + return (size == 8 || size == 16) ? true : false; +} + +/* Return TRUE if an argument, whose type is described by TYPE and MODE, + shall be passed or returned in simd/fp register(s) (providing these + parameter passing registers are available). 
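A short illustration of what aapcs_vfp_sub_candidate above accepts, together with the element-count limit of four (HA_MAX_NUM_FLDS) applied just below: every leaf element must have the same floating-point or short-vector mode, and at most four of them may appear. The example types are written for this note only.

/* Homogeneous floating-point aggregate (HFA) examples.  */

struct hfa2    { double x, y; };        /* 2 x DFmode: HFA, passed in d0-d1. */
struct hfa4    { float v[4]; };         /* 4 x SFmode: HFA, passed in s0-s3. */
struct too_big { float v[5]; };         /* 5 elements: over the limit, not an
                                           HFA, passed by other means.       */
struct mixed   { float f; double d; };  /* Mixed element modes: not an HFA.  */

/* Reference the types so the translation unit is self-contained.  */
unsigned
total_size (void)
{
  return (unsigned) (sizeof (struct hfa2) + sizeof (struct hfa4)
                     + sizeof (struct too_big) + sizeof (struct mixed));
}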
+ + Upon successful return, *COUNT returns the number of needed registers, + *BASE_MODE returns the mode of the individual register and when IS_HAF + is not NULL, *IS_HA indicates whether or not the argument is a homogeneous + floating-point aggregate or a homogeneous short-vector aggregate. */ + +static bool +aarch64_vfp_is_call_or_return_candidate (enum machine_mode mode, + const_tree type, + enum machine_mode *base_mode, + int *count, + bool *is_ha) +{ + enum machine_mode new_mode = VOIDmode; + bool composite_p = aarch64_composite_type_p (type, mode); + + if (is_ha != NULL) *is_ha = false; + + if ((!composite_p && GET_MODE_CLASS (mode) == MODE_FLOAT) + || aarch64_short_vector_p (type, mode)) + { + *count = 1; + new_mode = mode; + } + else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT) + { + if (is_ha != NULL) *is_ha = true; + *count = 2; + new_mode = GET_MODE_INNER (mode); + } + else if (type && composite_p) + { + int ag_count = aapcs_vfp_sub_candidate (type, &new_mode); + + if (ag_count > 0 && ag_count <= HA_MAX_NUM_FLDS) + { + if (is_ha != NULL) *is_ha = true; + *count = ag_count; + } + else + return false; + } + else + return false; + + *base_mode = new_mode; + return true; +} + +/* Implement TARGET_STRUCT_VALUE_RTX. */ + +static rtx +aarch64_struct_value_rtx (tree fndecl ATTRIBUTE_UNUSED, + int incoming ATTRIBUTE_UNUSED) +{ + return gen_rtx_REG (Pmode, AARCH64_STRUCT_VALUE_REGNUM); +} + +/* Implements target hook vector_mode_supported_p. */ +static bool +aarch64_vector_mode_supported_p (enum machine_mode mode) +{ + if (TARGET_SIMD + && (mode == V4SImode || mode == V8HImode + || mode == V16QImode || mode == V2DImode + || mode == V2SImode || mode == V4HImode + || mode == V8QImode || mode == V2SFmode + || mode == V4SFmode || mode == V2DFmode)) + return true; + + return false; +} + +/* Return appropriate SIMD container + for MODE within a vector of WIDTH bits. */ +static enum machine_mode +aarch64_simd_container_mode (enum machine_mode mode, unsigned width) +{ + gcc_assert (width == 64 || width == 128); + if (TARGET_SIMD) + { + if (width == 128) + switch (mode) + { + case DFmode: + return V2DFmode; + case SFmode: + return V4SFmode; + case SImode: + return V4SImode; + case HImode: + return V8HImode; + case QImode: + return V16QImode; + case DImode: + return V2DImode; + default: + break; + } + else + switch (mode) + { + case SFmode: + return V2SFmode; + case SImode: + return V2SImode; + case HImode: + return V4HImode; + case QImode: + return V8QImode; + default: + break; + } + } + return word_mode; +} + +/* Return 128-bit container as the preferred SIMD mode for MODE. */ +static enum machine_mode +aarch64_preferred_simd_mode (enum machine_mode mode) +{ + return aarch64_simd_container_mode (mode, 128); +} + +/* Return the bitmask of possible vector sizes for the vectorizer + to iterate over. */ +static unsigned int +aarch64_autovectorize_vector_sizes (void) +{ + return (16 | 8); +} + +/* A table to help perform AArch64-specific name mangling for AdvSIMD + vector types in order to conform to the AAPCS64 (see "Procedure + Call Standard for the ARM 64-bit Architecture", Appendix A). To + qualify for emission with the mangled names defined in that document, + a vector type must not only be of the correct mode but also be + composed of AdvSIMD vector element types (e.g. + _builtin_aarch64_simd_qi); these types are registered by + aarch64_init_simd_builtins (). In other words, vector types defined + in other ways e.g. via vector_size attribute will get default + mangled names. 
*/ +typedef struct +{ + enum machine_mode mode; + const char *element_type_name; + const char *mangled_name; +} aarch64_simd_mangle_map_entry; + +static aarch64_simd_mangle_map_entry aarch64_simd_mangle_map[] = { + /* 64-bit containerized types. */ + { V8QImode, "__builtin_aarch64_simd_qi", "10__Int8x8_t" }, + { V8QImode, "__builtin_aarch64_simd_uqi", "11__Uint8x8_t" }, + { V4HImode, "__builtin_aarch64_simd_hi", "11__Int16x4_t" }, + { V4HImode, "__builtin_aarch64_simd_uhi", "12__Uint16x4_t" }, + { V2SImode, "__builtin_aarch64_simd_si", "11__Int32x2_t" }, + { V2SImode, "__builtin_aarch64_simd_usi", "12__Uint32x2_t" }, + { V2SFmode, "__builtin_aarch64_simd_sf", "13__Float32x2_t" }, + { V8QImode, "__builtin_aarch64_simd_poly8", "11__Poly8x8_t" }, + { V4HImode, "__builtin_aarch64_simd_poly16", "12__Poly16x4_t" }, + /* 128-bit containerized types. */ + { V16QImode, "__builtin_aarch64_simd_qi", "11__Int8x16_t" }, + { V16QImode, "__builtin_aarch64_simd_uqi", "12__Uint8x16_t" }, + { V8HImode, "__builtin_aarch64_simd_hi", "11__Int16x8_t" }, + { V8HImode, "__builtin_aarch64_simd_uhi", "12__Uint16x8_t" }, + { V4SImode, "__builtin_aarch64_simd_si", "11__Int32x4_t" }, + { V4SImode, "__builtin_aarch64_simd_usi", "12__Uint32x4_t" }, + { V2DImode, "__builtin_aarch64_simd_di", "11__Int64x2_t" }, + { V2DImode, "__builtin_aarch64_simd_udi", "12__Uint64x2_t" }, + { V4SFmode, "__builtin_aarch64_simd_sf", "13__Float32x4_t" }, + { V2DFmode, "__builtin_aarch64_simd_df", "13__Float64x2_t" }, + { V16QImode, "__builtin_aarch64_simd_poly8", "12__Poly8x16_t" }, + { V8HImode, "__builtin_aarch64_simd_poly16", "12__Poly16x8_t" }, + { V2DImode, "__builtin_aarch64_simd_poly64", "12__Poly64x2_t" }, + { VOIDmode, NULL, NULL } +}; + +/* Implement TARGET_MANGLE_TYPE. */ + +static const char * +aarch64_mangle_type (const_tree type) +{ + /* The AArch64 ABI documents say that "__va_list" has to be + managled as if it is in the "std" namespace. */ + if (lang_hooks.types_compatible_p (CONST_CAST_TREE (type), va_list_type)) + return "St9__va_list"; + + /* Check the mode of the vector type, and the name of the vector + element type, against the table. */ + if (TREE_CODE (type) == VECTOR_TYPE) + { + aarch64_simd_mangle_map_entry *pos = aarch64_simd_mangle_map; + + while (pos->mode != VOIDmode) + { + tree elt_type = TREE_TYPE (type); + + if (pos->mode == TYPE_MODE (type) + && TREE_CODE (TYPE_NAME (elt_type)) == TYPE_DECL + && !strcmp (IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (elt_type))), + pos->element_type_name)) + return pos->mangled_name; + + pos++; + } + } + + /* Use the default mangling. */ + return NULL; +} + +/* Return the equivalent letter for size. */ +static char +sizetochar (int size) +{ + switch (size) + { + case 64: return 'd'; + case 32: return 's'; + case 16: return 'h'; + case 8 : return 'b'; + default: gcc_unreachable (); + } +} + +/* Return true iff x is a uniform vector of floating-point + constants, and the constant can be represented in + quarter-precision form. Note, as aarch64_float_const_representable + rejects both +0.0 and -0.0, we will also reject +0.0 and -0.0. 
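To see the mangling table above in action: when the translation unit below is compiled as C++ for AArch64, the AdvSIMD parameter type picks up the "11__Int32x4_t" entry and the (hypothetical) function f is emitted as _Z1f11__Int32x4_t, while a type built only with the vector_size attribute keeps the default mangling. Compiled as C, no mangling applies.

#include <arm_neon.h>

/* Hypothetical function used only to show the mangled name.  */
void
f (int32x4_t v)
{
  (void) v;
}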
*/ +static bool +aarch64_vect_float_const_representable_p (rtx x) +{ + int i = 0; + REAL_VALUE_TYPE r0, ri; + rtx x0, xi; + + if (GET_MODE_CLASS (GET_MODE (x)) != MODE_VECTOR_FLOAT) + return false; + + x0 = CONST_VECTOR_ELT (x, 0); + if (!CONST_DOUBLE_P (x0)) + return false; + + REAL_VALUE_FROM_CONST_DOUBLE (r0, x0); + + for (i = 1; i < CONST_VECTOR_NUNITS (x); i++) + { + xi = CONST_VECTOR_ELT (x, i); + if (!CONST_DOUBLE_P (xi)) + return false; + + REAL_VALUE_FROM_CONST_DOUBLE (ri, xi); + if (!REAL_VALUES_EQUAL (r0, ri)) + return false; + } + + return aarch64_float_const_representable_p (x0); +} + +/* Return true for valid and false for invalid. */ +bool +aarch64_simd_valid_immediate (rtx op, enum machine_mode mode, bool inverse, + struct simd_immediate_info *info) +{ +#define CHECK(STRIDE, ELSIZE, CLASS, TEST, SHIFT, NEG) \ + matches = 1; \ + for (i = 0; i < idx; i += (STRIDE)) \ + if (!(TEST)) \ + matches = 0; \ + if (matches) \ + { \ + immtype = (CLASS); \ + elsize = (ELSIZE); \ + eshift = (SHIFT); \ + emvn = (NEG); \ + break; \ + } + + unsigned int i, elsize = 0, idx = 0, n_elts = CONST_VECTOR_NUNITS (op); + unsigned int innersize = GET_MODE_SIZE (GET_MODE_INNER (mode)); + unsigned char bytes[16]; + int immtype = -1, matches; + unsigned int invmask = inverse ? 0xff : 0; + int eshift, emvn; + + if (GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT) + { + if (! (aarch64_simd_imm_zero_p (op, mode) + || aarch64_vect_float_const_representable_p (op))) + return false; + + if (info) + { + info->value = CONST_VECTOR_ELT (op, 0); + info->element_width = GET_MODE_BITSIZE (GET_MODE (info->value)); + info->mvn = false; + info->shift = 0; + } + + return true; + } + + /* Splat vector constant out into a byte vector. */ + for (i = 0; i < n_elts; i++) + { + rtx el = CONST_VECTOR_ELT (op, i); + unsigned HOST_WIDE_INT elpart; + unsigned int part, parts; + + if (GET_CODE (el) == CONST_INT) + { + elpart = INTVAL (el); + parts = 1; + } + else if (GET_CODE (el) == CONST_DOUBLE) + { + elpart = CONST_DOUBLE_LOW (el); + parts = 2; + } + else + gcc_unreachable (); + + for (part = 0; part < parts; part++) + { + unsigned int byte; + for (byte = 0; byte < innersize; byte++) + { + bytes[idx++] = (elpart & 0xff) ^ invmask; + elpart >>= BITS_PER_UNIT; + } + if (GET_CODE (el) == CONST_DOUBLE) + elpart = CONST_DOUBLE_HIGH (el); + } + } + + /* Sanity check. 
*/ + gcc_assert (idx == GET_MODE_SIZE (mode)); + + do + { + CHECK (4, 32, 0, bytes[i] == bytes[0] && bytes[i + 1] == 0 + && bytes[i + 2] == 0 && bytes[i + 3] == 0, 0, 0); + + CHECK (4, 32, 1, bytes[i] == 0 && bytes[i + 1] == bytes[1] + && bytes[i + 2] == 0 && bytes[i + 3] == 0, 8, 0); + + CHECK (4, 32, 2, bytes[i] == 0 && bytes[i + 1] == 0 + && bytes[i + 2] == bytes[2] && bytes[i + 3] == 0, 16, 0); + + CHECK (4, 32, 3, bytes[i] == 0 && bytes[i + 1] == 0 + && bytes[i + 2] == 0 && bytes[i + 3] == bytes[3], 24, 0); + + CHECK (2, 16, 4, bytes[i] == bytes[0] && bytes[i + 1] == 0, 0, 0); + + CHECK (2, 16, 5, bytes[i] == 0 && bytes[i + 1] == bytes[1], 8, 0); + + CHECK (4, 32, 6, bytes[i] == bytes[0] && bytes[i + 1] == 0xff + && bytes[i + 2] == 0xff && bytes[i + 3] == 0xff, 0, 1); + + CHECK (4, 32, 7, bytes[i] == 0xff && bytes[i + 1] == bytes[1] + && bytes[i + 2] == 0xff && bytes[i + 3] == 0xff, 8, 1); + + CHECK (4, 32, 8, bytes[i] == 0xff && bytes[i + 1] == 0xff + && bytes[i + 2] == bytes[2] && bytes[i + 3] == 0xff, 16, 1); + + CHECK (4, 32, 9, bytes[i] == 0xff && bytes[i + 1] == 0xff + && bytes[i + 2] == 0xff && bytes[i + 3] == bytes[3], 24, 1); + + CHECK (2, 16, 10, bytes[i] == bytes[0] && bytes[i + 1] == 0xff, 0, 1); + + CHECK (2, 16, 11, bytes[i] == 0xff && bytes[i + 1] == bytes[1], 8, 1); + + CHECK (4, 32, 12, bytes[i] == 0xff && bytes[i + 1] == bytes[1] + && bytes[i + 2] == 0 && bytes[i + 3] == 0, 8, 0); + + CHECK (4, 32, 13, bytes[i] == 0 && bytes[i + 1] == bytes[1] + && bytes[i + 2] == 0xff && bytes[i + 3] == 0xff, 8, 1); + + CHECK (4, 32, 14, bytes[i] == 0xff && bytes[i + 1] == 0xff + && bytes[i + 2] == bytes[2] && bytes[i + 3] == 0, 16, 0); + + CHECK (4, 32, 15, bytes[i] == 0 && bytes[i + 1] == 0 + && bytes[i + 2] == bytes[2] && bytes[i + 3] == 0xff, 16, 1); + + CHECK (1, 8, 16, bytes[i] == bytes[0], 0, 0); + + CHECK (1, 64, 17, (bytes[i] == 0 || bytes[i] == 0xff) + && bytes[i] == bytes[(i + 8) % idx], 0, 0); + } + while (0); + + if (immtype == -1) + return false; + + if (info) + { + info->element_width = elsize; + info->mvn = emvn != 0; + info->shift = eshift; + + unsigned HOST_WIDE_INT imm = 0; + + if (immtype >= 12 && immtype <= 15) + info->msl = true; + + /* Un-invert bytes of recognized vector, if necessary. */ + if (invmask != 0) + for (i = 0; i < idx; i++) + bytes[i] ^= invmask; + + if (immtype == 17) + { + /* FIXME: Broken on 32-bit H_W_I hosts. */ + gcc_assert (sizeof (HOST_WIDE_INT) == 8); + + for (i = 0; i < 8; i++) + imm |= (unsigned HOST_WIDE_INT) (bytes[i] ? 0xff : 0) + << (i * BITS_PER_UNIT); + + + info->value = GEN_INT (imm); + } + else + { + for (i = 0; i < elsize / BITS_PER_UNIT; i++) + imm |= (unsigned HOST_WIDE_INT) bytes[i] << (i * BITS_PER_UNIT); + + /* Construct 'abcdefgh' because the assembler cannot handle + generic constants. */ + if (info->mvn) + imm = ~imm; + imm = (imm >> info->shift) & 0xff; + info->value = GEN_INT (imm); + } + } + + return true; +#undef CHECK +} + +static bool +aarch64_const_vec_all_same_int_p (rtx x, + HOST_WIDE_INT minval, + HOST_WIDE_INT maxval) +{ + HOST_WIDE_INT firstval; + int count, i; + + if (GET_CODE (x) != CONST_VECTOR + || GET_MODE_CLASS (GET_MODE (x)) != MODE_VECTOR_INT) + return false; + + firstval = INTVAL (CONST_VECTOR_ELT (x, 0)); + if (firstval < minval || firstval > maxval) + return false; + + count = CONST_VECTOR_NUNITS (x); + for (i = 1; i < count; i++) + if (INTVAL (CONST_VECTOR_ELT (x, i)) != firstval) + return false; + + return true; +} + +/* Check of immediate shift constants are within range. 
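As a concrete instance of the byte-pattern rows checked above (the constants are arbitrary examples): a V4SI vector whose lanes are all 0x45 matches the first row and is a plain MOVI immediate, lanes of 0x4500 match the shifted-by-8 row, and a lane value such as 0x12345678 matches none of the rows and has to be materialised some other way. The snippet uses GCC's generic vector extension so it needs no AdvSIMD header.

typedef int v4si __attribute__ ((vector_size (16)));

v4si
movi_byte (void)
{
  return (v4si) { 0x45, 0x45, 0x45, 0x45 };           /* byte 0x45, shift 0.  */
}

v4si
movi_shifted (void)
{
  return (v4si) { 0x4500, 0x4500, 0x4500, 0x4500 };   /* byte 0x45, shift 8.  */
}

v4si
not_an_immediate (void)
{
  return (v4si) { 0x12345678, 0x12345678, 0x12345678, 0x12345678 };
}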
*/ +bool +aarch64_simd_shift_imm_p (rtx x, enum machine_mode mode, bool left) +{ + int bit_width = GET_MODE_UNIT_SIZE (mode) * BITS_PER_UNIT; + if (left) + return aarch64_const_vec_all_same_int_p (x, 0, bit_width - 1); + else + return aarch64_const_vec_all_same_int_p (x, 1, bit_width); +} + +/* Return true if X is a uniform vector where all elements + are either the floating-point constant 0.0 or the + integer constant 0. */ +bool +aarch64_simd_imm_zero_p (rtx x, enum machine_mode mode) +{ + return x == CONST0_RTX (mode); +} + +bool +aarch64_simd_imm_scalar_p (rtx x, enum machine_mode mode ATTRIBUTE_UNUSED) +{ + HOST_WIDE_INT imm = INTVAL (x); + int i; + + for (i = 0; i < 8; i++) + { + unsigned int byte = imm & 0xff; + if (byte != 0xff && byte != 0) + return false; + imm >>= 8; + } + + return true; +} + +bool +aarch64_mov_operand_p (rtx x, + enum aarch64_symbol_context context, + enum machine_mode mode) +{ + if (GET_CODE (x) == HIGH + && aarch64_valid_symref (XEXP (x, 0), GET_MODE (XEXP (x, 0)))) + return true; + + if (CONST_INT_P (x) && aarch64_move_imm (INTVAL (x), mode)) + return true; + + if (GET_CODE (x) == SYMBOL_REF && mode == DImode && CONSTANT_ADDRESS_P (x)) + return true; + + return aarch64_classify_symbolic_expression (x, context) + == SYMBOL_TINY_ABSOLUTE; +} + +/* Return a const_int vector of VAL. */ +rtx +aarch64_simd_gen_const_vector_dup (enum machine_mode mode, int val) +{ + int nunits = GET_MODE_NUNITS (mode); + rtvec v = rtvec_alloc (nunits); + int i; + + for (i=0; i < nunits; i++) + RTVEC_ELT (v, i) = GEN_INT (val); + + return gen_rtx_CONST_VECTOR (mode, v); +} + +/* Check OP is a legal scalar immediate for the MOVI instruction. */ + +bool +aarch64_simd_scalar_immediate_valid_for_move (rtx op, enum machine_mode mode) +{ + enum machine_mode vmode; + + gcc_assert (!VECTOR_MODE_P (mode)); + vmode = aarch64_preferred_simd_mode (mode); + rtx op_v = aarch64_simd_gen_const_vector_dup (vmode, INTVAL (op)); + return aarch64_simd_valid_immediate (op_v, vmode, false, NULL); +} + +/* Construct and return a PARALLEL RTX vector. */ +rtx +aarch64_simd_vect_par_cnst_half (enum machine_mode mode, bool high) +{ + int nunits = GET_MODE_NUNITS (mode); + rtvec v = rtvec_alloc (nunits / 2); + int base = high ? nunits / 2 : 0; + rtx t1; + int i; + + for (i=0; i < nunits / 2; i++) + RTVEC_ELT (v, i) = GEN_INT (base + i); + + t1 = gen_rtx_PARALLEL (mode, v); + return t1; +} + +/* Bounds-check lanes. Ensure OPERAND lies between LOW (inclusive) and + HIGH (exclusive). */ +void +aarch64_simd_lane_bounds (rtx operand, HOST_WIDE_INT low, HOST_WIDE_INT high) +{ + HOST_WIDE_INT lane; + gcc_assert (GET_CODE (operand) == CONST_INT); + lane = INTVAL (operand); + + if (lane < low || lane >= high) + error ("lane out of range"); +} + +void +aarch64_simd_const_bounds (rtx operand, HOST_WIDE_INT low, HOST_WIDE_INT high) +{ + gcc_assert (GET_CODE (operand) == CONST_INT); + HOST_WIDE_INT lane = INTVAL (operand); + + if (lane < low || lane >= high) + error ("constant out of range"); +} + +/* Emit code to reinterpret one AdvSIMD type as another, + without altering bits. */ +void +aarch64_simd_reinterpret (rtx dest, rtx src) +{ + emit_move_insn (dest, gen_lowpart (GET_MODE (dest), src)); +} + +/* Emit code to place a AdvSIMD pair result in memory locations (with equal + registers). 
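The range check in aarch64_simd_shift_imm_p above is deliberately asymmetric: for B-bit elements an immediate left shift must lie in [0, B-1], while an immediate right shift must lie in [1, B], matching the SHL and SSHR/USHR encodings, which have no right-shift-by-zero. Two small examples with 16-bit lanes (generic vector extension, invented function names):

typedef short v8hi __attribute__ ((vector_size (16)));

/* Left shifts encode counts 0..15 for 16-bit lanes.  */
v8hi
shl15 (v8hi x)
{
  return x << 15;
}

/* Right shifts encode counts 1..16; 8 is comfortably in range.  */
v8hi
shr8 (v8hi x)
{
  return x >> 8;
}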
*/ +void +aarch64_simd_emit_pair_result_insn (enum machine_mode mode, + rtx (*intfn) (rtx, rtx, rtx), rtx destaddr, + rtx op1) +{ + rtx mem = gen_rtx_MEM (mode, destaddr); + rtx tmp1 = gen_reg_rtx (mode); + rtx tmp2 = gen_reg_rtx (mode); + + emit_insn (intfn (tmp1, op1, tmp2)); + + emit_move_insn (mem, tmp1); + mem = adjust_address (mem, mode, GET_MODE_SIZE (mode)); + emit_move_insn (mem, tmp2); +} + +/* Return TRUE if OP is a valid vector addressing mode. */ +bool +aarch64_simd_mem_operand_p (rtx op) +{ + return MEM_P (op) && (GET_CODE (XEXP (op, 0)) == POST_INC + || GET_CODE (XEXP (op, 0)) == REG); +} + +/* Set up OPERANDS for a register copy from SRC to DEST, taking care + not to early-clobber SRC registers in the process. + + We assume that the operands described by SRC and DEST represent a + decomposed copy of OPERANDS[1] into OPERANDS[0]. COUNT is the + number of components into which the copy has been decomposed. */ +void +aarch64_simd_disambiguate_copy (rtx *operands, rtx *dest, + rtx *src, unsigned int count) +{ + unsigned int i; + + if (!reg_overlap_mentioned_p (operands[0], operands[1]) + || REGNO (operands[0]) < REGNO (operands[1])) + { + for (i = 0; i < count; i++) + { + operands[2 * i] = dest[i]; + operands[2 * i + 1] = src[i]; + } + } + else + { + for (i = 0; i < count; i++) + { + operands[2 * i] = dest[count - i - 1]; + operands[2 * i + 1] = src[count - i - 1]; + } + } +} + +/* Compute and return the length of aarch64_simd_mov<mode>, where <mode> is + one of VSTRUCT modes: OI, CI or XI. */ +int +aarch64_simd_attr_length_move (rtx insn) +{ + enum machine_mode mode; + + extract_insn_cached (insn); + + if (REG_P (recog_data.operand[0]) && REG_P (recog_data.operand[1])) + { + mode = GET_MODE (recog_data.operand[0]); + switch (mode) + { + case OImode: + return 8; + case CImode: + return 12; + case XImode: + return 16; + default: + gcc_unreachable (); + } + } + return 4; +} + +/* Implement target hook TARGET_VECTOR_ALIGNMENT. The AAPCS64 sets the maximum + alignment of a vector to 128 bits. */ +static HOST_WIDE_INT +aarch64_simd_vector_alignment (const_tree type) +{ + HOST_WIDE_INT align = tree_to_shwi (TYPE_SIZE (type)); + return MIN (align, 128); +} + +/* Implement target hook TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE. */ +static bool +aarch64_simd_vector_alignment_reachable (const_tree type, bool is_packed) +{ + if (is_packed) + return false; + + /* We guarantee alignment for vectors up to 128-bits. */ + if (tree_int_cst_compare (TYPE_SIZE (type), + bitsize_int (BIGGEST_ALIGNMENT)) > 0) + return false; + + /* Vectors whose size is <= BIGGEST_ALIGNMENT are naturally aligned. */ + return true; +} + +/* If VALS is a vector constant that can be loaded into a register + using DUP, generate instructions to do so and return an RTX to + assign to the register. Otherwise return NULL_RTX. */ +static rtx +aarch64_simd_dup_constant (rtx vals) +{ + enum machine_mode mode = GET_MODE (vals); + enum machine_mode inner_mode = GET_MODE_INNER (mode); + int n_elts = GET_MODE_NUNITS (mode); + bool all_same = true; + rtx x; + int i; + + if (GET_CODE (vals) != CONST_VECTOR) + return NULL_RTX; + + for (i = 1; i < n_elts; ++i) + { + x = CONST_VECTOR_ELT (vals, i); + if (!rtx_equal_p (x, CONST_VECTOR_ELT (vals, 0))) + all_same = false; + } + + if (!all_same) + return NULL_RTX; + + /* We can load this constant by using DUP and a constant in a + single ARM register. This will be cheaper than a vector + load. 
*/ + x = copy_to_mode_reg (inner_mode, CONST_VECTOR_ELT (vals, 0)); + return gen_rtx_VEC_DUPLICATE (mode, x); +} + + +/* Generate code to load VALS, which is a PARALLEL containing only + constants (for vec_init) or CONST_VECTOR, efficiently into a + register. Returns an RTX to copy into the register, or NULL_RTX + for a PARALLEL that can not be converted into a CONST_VECTOR. */ +static rtx +aarch64_simd_make_constant (rtx vals) +{ + enum machine_mode mode = GET_MODE (vals); + rtx const_dup; + rtx const_vec = NULL_RTX; + int n_elts = GET_MODE_NUNITS (mode); + int n_const = 0; + int i; + + if (GET_CODE (vals) == CONST_VECTOR) + const_vec = vals; + else if (GET_CODE (vals) == PARALLEL) + { + /* A CONST_VECTOR must contain only CONST_INTs and + CONST_DOUBLEs, but CONSTANT_P allows more (e.g. SYMBOL_REF). + Only store valid constants in a CONST_VECTOR. */ + for (i = 0; i < n_elts; ++i) + { + rtx x = XVECEXP (vals, 0, i); + if (CONST_INT_P (x) || CONST_DOUBLE_P (x)) + n_const++; + } + if (n_const == n_elts) + const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)); + } + else + gcc_unreachable (); + + if (const_vec != NULL_RTX + && aarch64_simd_valid_immediate (const_vec, mode, false, NULL)) + /* Load using MOVI/MVNI. */ + return const_vec; + else if ((const_dup = aarch64_simd_dup_constant (vals)) != NULL_RTX) + /* Loaded using DUP. */ + return const_dup; + else if (const_vec != NULL_RTX) + /* Load from constant pool. We can not take advantage of single-cycle + LD1 because we need a PC-relative addressing mode. */ + return const_vec; + else + /* A PARALLEL containing something not valid inside CONST_VECTOR. + We can not construct an initializer. */ + return NULL_RTX; +} + +void +aarch64_expand_vector_init (rtx target, rtx vals) +{ + enum machine_mode mode = GET_MODE (target); + enum machine_mode inner_mode = GET_MODE_INNER (mode); + int n_elts = GET_MODE_NUNITS (mode); + int n_var = 0, one_var = -1; + bool all_same = true; + rtx x, mem; + int i; + + x = XVECEXP (vals, 0, 0); + if (!CONST_INT_P (x) && !CONST_DOUBLE_P (x)) + n_var = 1, one_var = 0; + + for (i = 1; i < n_elts; ++i) + { + x = XVECEXP (vals, 0, i); + if (!CONST_INT_P (x) && !CONST_DOUBLE_P (x)) + ++n_var, one_var = i; + + if (!rtx_equal_p (x, XVECEXP (vals, 0, 0))) + all_same = false; + } + + if (n_var == 0) + { + rtx constant = aarch64_simd_make_constant (vals); + if (constant != NULL_RTX) + { + emit_move_insn (target, constant); + return; + } + } + + /* Splat a single non-constant element if we can. */ + if (all_same) + { + x = copy_to_mode_reg (inner_mode, XVECEXP (vals, 0, 0)); + aarch64_emit_move (target, gen_rtx_VEC_DUPLICATE (mode, x)); + return; + } + + /* One field is non-constant. Load constant then overwrite varying + field. This is more efficient than using the stack. */ + if (n_var == 1) + { + rtx copy = copy_rtx (vals); + rtx index = GEN_INT (one_var); + enum insn_code icode; + + /* Load constant part of vector, substitute neighboring value for + varying element. */ + XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, one_var ^ 1); + aarch64_expand_vector_init (target, copy); + + /* Insert variable. */ + x = copy_to_mode_reg (inner_mode, XVECEXP (vals, 0, one_var)); + icode = optab_handler (vec_set_optab, mode); + gcc_assert (icode != CODE_FOR_nothing); + emit_insn (GEN_FCN (icode) (target, x, index)); + return; + } + + /* Construct the vector in memory one field at a time + and load the whole vector. 
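The strategies chosen by aarch64_expand_vector_init can be provoked from user code; the sketch below (generic vector extension, invented function names) only claims the broad shape of each strategy, not the exact instruction sequence: an all-equal variable initializer becomes a DUP, a single varying lane is inserted into an otherwise constant vector, and fully variable initializers fall back to the element-by-element construction that the code just below this comment implements.

typedef int v4si_t __attribute__ ((vector_size (16)));

/* All lanes equal and non-constant: splat with DUP.  */
v4si_t
splat (int x)
{
  return (v4si_t) { x, x, x, x };
}

/* Exactly one non-constant lane: load the constant part, then insert
   the varying element rather than going through the stack.  */
v4si_t
one_variable (int x)
{
  return (v4si_t) { 1, 2, 3, x };
}

/* Several unrelated variable lanes: the general fallback.  */
v4si_t
general (int a, int b, int c, int d)
{
  return (v4si_t) { a, b, c, d };
}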
*/ + mem = assign_stack_temp (mode, GET_MODE_SIZE (mode)); + for (i = 0; i < n_elts; i++) + emit_move_insn (adjust_address_nv (mem, inner_mode, + i * GET_MODE_SIZE (inner_mode)), + XVECEXP (vals, 0, i)); + emit_move_insn (target, mem); + +} + +static unsigned HOST_WIDE_INT +aarch64_shift_truncation_mask (enum machine_mode mode) +{ + return + (aarch64_vector_mode_supported_p (mode) + || aarch64_vect_struct_mode_p (mode)) ? 0 : (GET_MODE_BITSIZE (mode) - 1); +} + +#ifndef TLS_SECTION_ASM_FLAG +#define TLS_SECTION_ASM_FLAG 'T' +#endif + +void +aarch64_elf_asm_named_section (const char *name, unsigned int flags, + tree decl ATTRIBUTE_UNUSED) +{ + char flagchars[10], *f = flagchars; + + /* If we have already declared this section, we can use an + abbreviated form to switch back to it -- unless this section is + part of a COMDAT groups, in which case GAS requires the full + declaration every time. */ + if (!(HAVE_COMDAT_GROUP && (flags & SECTION_LINKONCE)) + && (flags & SECTION_DECLARED)) + { + fprintf (asm_out_file, "\t.section\t%s\n", name); + return; + } + + if (!(flags & SECTION_DEBUG)) + *f++ = 'a'; + if (flags & SECTION_WRITE) + *f++ = 'w'; + if (flags & SECTION_CODE) + *f++ = 'x'; + if (flags & SECTION_SMALL) + *f++ = 's'; + if (flags & SECTION_MERGE) + *f++ = 'M'; + if (flags & SECTION_STRINGS) + *f++ = 'S'; + if (flags & SECTION_TLS) + *f++ = TLS_SECTION_ASM_FLAG; + if (HAVE_COMDAT_GROUP && (flags & SECTION_LINKONCE)) + *f++ = 'G'; + *f = '\0'; + + fprintf (asm_out_file, "\t.section\t%s,\"%s\"", name, flagchars); + + if (!(flags & SECTION_NOTYPE)) + { + const char *type; + const char *format; + + if (flags & SECTION_BSS) + type = "nobits"; + else + type = "progbits"; + +#ifdef TYPE_OPERAND_FMT + format = "," TYPE_OPERAND_FMT; +#else + format = ",@%s"; +#endif + + fprintf (asm_out_file, format, type); + + if (flags & SECTION_ENTSIZE) + fprintf (asm_out_file, ",%d", flags & SECTION_ENTSIZE); + if (HAVE_COMDAT_GROUP && (flags & SECTION_LINKONCE)) + { + if (TREE_CODE (decl) == IDENTIFIER_NODE) + fprintf (asm_out_file, ",%s,comdat", IDENTIFIER_POINTER (decl)); + else + fprintf (asm_out_file, ",%s,comdat", + IDENTIFIER_POINTER (DECL_COMDAT_GROUP (decl))); + } + } + + putc ('\n', asm_out_file); +} + +/* Select a format to encode pointers in exception handling data. */ +int +aarch64_asm_preferred_eh_data_format (int code ATTRIBUTE_UNUSED, int global) +{ + int type; + switch (aarch64_cmodel) + { + case AARCH64_CMODEL_TINY: + case AARCH64_CMODEL_TINY_PIC: + case AARCH64_CMODEL_SMALL: + case AARCH64_CMODEL_SMALL_PIC: + /* text+got+data < 4Gb. 4-byte signed relocs are sufficient + for everything. */ + type = DW_EH_PE_sdata4; + break; + default: + /* No assumptions here. 8-byte relocs required. */ + type = DW_EH_PE_sdata8; + break; + } + return (global ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | type; +} + +/* Emit load exclusive. */ + +static void +aarch64_emit_load_exclusive (enum machine_mode mode, rtx rval, + rtx mem, rtx model_rtx) +{ + rtx (*gen) (rtx, rtx, rtx); + + switch (mode) + { + case QImode: gen = gen_aarch64_load_exclusiveqi; break; + case HImode: gen = gen_aarch64_load_exclusivehi; break; + case SImode: gen = gen_aarch64_load_exclusivesi; break; + case DImode: gen = gen_aarch64_load_exclusivedi; break; + default: + gcc_unreachable (); + } + + emit_insn (gen (rval, mem, model_rtx)); +} + +/* Emit store exclusive. 
*/ + +static void +aarch64_emit_store_exclusive (enum machine_mode mode, rtx bval, + rtx rval, rtx mem, rtx model_rtx) +{ + rtx (*gen) (rtx, rtx, rtx, rtx); + + switch (mode) + { + case QImode: gen = gen_aarch64_store_exclusiveqi; break; + case HImode: gen = gen_aarch64_store_exclusivehi; break; + case SImode: gen = gen_aarch64_store_exclusivesi; break; + case DImode: gen = gen_aarch64_store_exclusivedi; break; + default: + gcc_unreachable (); + } + + emit_insn (gen (bval, rval, mem, model_rtx)); +} + +/* Mark the previous jump instruction as unlikely. */ + +static void +aarch64_emit_unlikely_jump (rtx insn) +{ + int very_unlikely = REG_BR_PROB_BASE / 100 - 1; + + insn = emit_jump_insn (insn); + add_int_reg_note (insn, REG_BR_PROB, very_unlikely); +} + +/* Expand a compare and swap pattern. */ + +void +aarch64_expand_compare_and_swap (rtx operands[]) +{ + rtx bval, rval, mem, oldval, newval, is_weak, mod_s, mod_f, x; + enum machine_mode mode, cmp_mode; + rtx (*gen) (rtx, rtx, rtx, rtx, rtx, rtx, rtx); + + bval = operands[0]; + rval = operands[1]; + mem = operands[2]; + oldval = operands[3]; + newval = operands[4]; + is_weak = operands[5]; + mod_s = operands[6]; + mod_f = operands[7]; + mode = GET_MODE (mem); + cmp_mode = mode; + + /* Normally the succ memory model must be stronger than fail, but in the + unlikely event of fail being ACQUIRE and succ being RELEASE we need to + promote succ to ACQ_REL so that we don't lose the acquire semantics. */ + + if (INTVAL (mod_f) == MEMMODEL_ACQUIRE + && INTVAL (mod_s) == MEMMODEL_RELEASE) + mod_s = GEN_INT (MEMMODEL_ACQ_REL); + + switch (mode) + { + case QImode: + case HImode: + /* For short modes, we're going to perform the comparison in SImode, + so do the zero-extension now. */ + cmp_mode = SImode; + rval = gen_reg_rtx (SImode); + oldval = convert_modes (SImode, mode, oldval, true); + /* Fall through. */ + + case SImode: + case DImode: + /* Force the value into a register if needed. */ + if (!aarch64_plus_operand (oldval, mode)) + oldval = force_reg (cmp_mode, oldval); + break; + + default: + gcc_unreachable (); + } + + switch (mode) + { + case QImode: gen = gen_atomic_compare_and_swapqi_1; break; + case HImode: gen = gen_atomic_compare_and_swaphi_1; break; + case SImode: gen = gen_atomic_compare_and_swapsi_1; break; + case DImode: gen = gen_atomic_compare_and_swapdi_1; break; + default: + gcc_unreachable (); + } + + emit_insn (gen (rval, mem, oldval, newval, is_weak, mod_s, mod_f)); + + if (mode == QImode || mode == HImode) + emit_move_insn (operands[1], gen_lowpart (mode, rval)); + + x = gen_rtx_REG (CCmode, CC_REGNUM); + x = gen_rtx_EQ (SImode, x, const0_rtx); + emit_insn (gen_rtx_SET (VOIDmode, bval, x)); +} + +/* Split a compare and swap pattern. 
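From the user's side, the expansion above is what a strong __atomic_compare_exchange_n turns into on this target: an exclusive load, a compare, an exclusive store and a retry branch, as the split routine that follows lays out. The example below is a sketch of the builtin's use; the assembly shape in the comment is indicative only.

#include <stdbool.h>

/* Strong compare-and-swap on an int via the GCC builtin lowered by the
   patterns above.  Roughly:
       loop:  ldaxr  w1, [x0]
              cmp    w1, w_expected
              b.ne   done
              stlxr  w2, w_desired, [x0]
              cbnz   w2, loop
       done:  ...
   (register choices are illustrative).  */
static bool
cas_int (int *addr, int expected, int desired)
{
  return __atomic_compare_exchange_n (addr, &expected, desired,
                                      false /* strong */,
                                      __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}

int
main (void)
{
  int x = 1;
  return (cas_int (&x, 1, 2) && x == 2) ? 0 : 1;
}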
*/ + +void +aarch64_split_compare_and_swap (rtx operands[]) +{ + rtx rval, mem, oldval, newval, scratch; + enum machine_mode mode; + bool is_weak; + rtx label1, label2, x, cond; + + rval = operands[0]; + mem = operands[1]; + oldval = operands[2]; + newval = operands[3]; + is_weak = (operands[4] != const0_rtx); + scratch = operands[7]; + mode = GET_MODE (mem); + + label1 = NULL_RTX; + if (!is_weak) + { + label1 = gen_label_rtx (); + emit_label (label1); + } + label2 = gen_label_rtx (); + + aarch64_emit_load_exclusive (mode, rval, mem, operands[5]); + + cond = aarch64_gen_compare_reg (NE, rval, oldval); + x = gen_rtx_NE (VOIDmode, cond, const0_rtx); + x = gen_rtx_IF_THEN_ELSE (VOIDmode, x, + gen_rtx_LABEL_REF (Pmode, label2), pc_rtx); + aarch64_emit_unlikely_jump (gen_rtx_SET (VOIDmode, pc_rtx, x)); + + aarch64_emit_store_exclusive (mode, scratch, mem, newval, operands[5]); + + if (!is_weak) + { + x = gen_rtx_NE (VOIDmode, scratch, const0_rtx); + x = gen_rtx_IF_THEN_ELSE (VOIDmode, x, + gen_rtx_LABEL_REF (Pmode, label1), pc_rtx); + aarch64_emit_unlikely_jump (gen_rtx_SET (VOIDmode, pc_rtx, x)); + } + else + { + cond = gen_rtx_REG (CCmode, CC_REGNUM); + x = gen_rtx_COMPARE (CCmode, scratch, const0_rtx); + emit_insn (gen_rtx_SET (VOIDmode, cond, x)); + } + + emit_label (label2); +} + +/* Split an atomic operation. */ + +void +aarch64_split_atomic_op (enum rtx_code code, rtx old_out, rtx new_out, rtx mem, + rtx value, rtx model_rtx, rtx cond) +{ + enum machine_mode mode = GET_MODE (mem); + enum machine_mode wmode = (mode == DImode ? DImode : SImode); + rtx label, x; + + label = gen_label_rtx (); + emit_label (label); + + if (new_out) + new_out = gen_lowpart (wmode, new_out); + if (old_out) + old_out = gen_lowpart (wmode, old_out); + else + old_out = new_out; + value = simplify_gen_subreg (wmode, value, mode, 0); + + aarch64_emit_load_exclusive (mode, old_out, mem, model_rtx); + + switch (code) + { + case SET: + new_out = value; + break; + + case NOT: + x = gen_rtx_AND (wmode, old_out, value); + emit_insn (gen_rtx_SET (VOIDmode, new_out, x)); + x = gen_rtx_NOT (wmode, new_out); + emit_insn (gen_rtx_SET (VOIDmode, new_out, x)); + break; + + case MINUS: + if (CONST_INT_P (value)) + { + value = GEN_INT (-INTVAL (value)); + code = PLUS; + } + /* Fall through. */ + + default: + x = gen_rtx_fmt_ee (code, wmode, old_out, value); + emit_insn (gen_rtx_SET (VOIDmode, new_out, x)); + break; + } + + aarch64_emit_store_exclusive (mode, cond, mem, + gen_lowpart (mode, new_out), model_rtx); + + x = gen_rtx_NE (VOIDmode, cond, const0_rtx); + x = gen_rtx_IF_THEN_ELSE (VOIDmode, x, + gen_rtx_LABEL_REF (Pmode, label), pc_rtx); + aarch64_emit_unlikely_jump (gen_rtx_SET (VOIDmode, pc_rtx, x)); +} + +static void +aarch64_print_extension (void) +{ + const struct aarch64_option_extension *opt = NULL; + + for (opt = all_extensions; opt->name != NULL; opt++) + if ((aarch64_isa_flags & opt->flags_on) == opt->flags_on) + asm_fprintf (asm_out_file, "+%s", opt->name); + + asm_fprintf (asm_out_file, "\n"); +} + +static void +aarch64_start_file (void) +{ + if (selected_arch) + { + asm_fprintf (asm_out_file, "\t.arch %s", selected_arch->name); + aarch64_print_extension (); + } + else if (selected_cpu) + { + const char *truncated_name + = aarch64_rewrite_selected_cpu (selected_cpu->name); + asm_fprintf (asm_out_file, "\t.cpu %s", truncated_name); + aarch64_print_extension (); + } + default_file_start(); +} + +/* Target hook for c_mode_for_suffix. 
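   Returning TFmode below tells the C front end that the 'q'/'Q'
   floating-constant suffix (e.g. 1.0q) selects the 128-bit IEEE mode, the
   same mode as long double on this target, while every other letter returns
   VOIDmode and so has no target-specific meaning.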
*/ +static enum machine_mode +aarch64_c_mode_for_suffix (char suffix) +{ + if (suffix == 'q') + return TFmode; + + return VOIDmode; +} + +/* We can only represent floating point constants which will fit in + "quarter-precision" values. These values are characterised by + a sign bit, a 4-bit mantissa and a 3-bit exponent. And are given + by: + + (-1)^s * (n/16) * 2^r + + Where: + 's' is the sign bit. + 'n' is an integer in the range 16 <= n <= 31. + 'r' is an integer in the range -3 <= r <= 4. */ + +/* Return true iff X can be represented by a quarter-precision + floating point immediate operand X. Note, we cannot represent 0.0. */ +bool +aarch64_float_const_representable_p (rtx x) +{ + /* This represents our current view of how many bits + make up the mantissa. */ + int point_pos = 2 * HOST_BITS_PER_WIDE_INT - 1; + int exponent; + unsigned HOST_WIDE_INT mantissa, mask; + HOST_WIDE_INT m1, m2; + REAL_VALUE_TYPE r, m; + + if (!CONST_DOUBLE_P (x)) + return false; + + REAL_VALUE_FROM_CONST_DOUBLE (r, x); + + /* We cannot represent infinities, NaNs or +/-zero. We won't + know if we have +zero until we analyse the mantissa, but we + can reject the other invalid values. */ + if (REAL_VALUE_ISINF (r) || REAL_VALUE_ISNAN (r) + || REAL_VALUE_MINUS_ZERO (r)) + return false; + + /* Extract exponent. */ + r = real_value_abs (&r); + exponent = REAL_EXP (&r); + + /* For the mantissa, we expand into two HOST_WIDE_INTS, apart from the + highest (sign) bit, with a fixed binary point at bit point_pos. + m1 holds the low part of the mantissa, m2 the high part. + WARNING: If we ever have a representation using more than 2 * H_W_I - 1 + bits for the mantissa, this can fail (low bits will be lost). */ + real_ldexp (&m, &r, point_pos - exponent); + REAL_VALUE_TO_INT (&m1, &m2, m); + + /* If the low part of the mantissa has bits set we cannot represent + the value. */ + if (m1 != 0) + return false; + /* We have rejected the lower HOST_WIDE_INT, so update our + understanding of how many bits lie in the mantissa and + look only at the high HOST_WIDE_INT. */ + mantissa = m2; + point_pos -= HOST_BITS_PER_WIDE_INT; + + /* We can only represent values with a mantissa of the form 1.xxxx. */ + mask = ((unsigned HOST_WIDE_INT)1 << (point_pos - 5)) - 1; + if ((mantissa & mask) != 0) + return false; + + /* Having filtered unrepresentable values, we may now remove all + but the highest 5 bits. */ + mantissa >>= point_pos - 5; + + /* We cannot represent the value 0.0, so reject it. This is handled + elsewhere. */ + if (mantissa == 0) + return false; + + /* Then, as bit 4 is always set, we can mask it off, leaving + the mantissa in the range [0, 15]. */ + mantissa &= ~(1 << 4); + gcc_assert (mantissa <= 15); + + /* GCC internally does not use IEEE754-like encoding (where normalized + significands are in the range [1, 2). GCC uses [0.5, 1) (see real.c). + Our mantissa values are shifted 4 places to the left relative to + normalized IEEE754 so we must modify the exponent returned by REAL_EXP + by 5 places to correct for GCC's representation. 
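     After this adjustment the accepted exponents 0..7 correspond exactly to
     r in the range -3..4 of the formula above.  Some concrete examples of
     representable values: 0.125 (16/16 * 2^-3, the smallest magnitude), 0.5,
     1.0, 1.5 (24/16 * 2^0) and 31.0 (31/16 * 2^4, the largest); 0.0 and
     values such as 0.1 are not representable and have to be loaded some
     other way.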
*/ + exponent = 5 - exponent; + + return (exponent >= 0 && exponent <= 7); +} + +char* +aarch64_output_simd_mov_immediate (rtx const_vector, + enum machine_mode mode, + unsigned width) +{ + bool is_valid; + static char templ[40]; + const char *mnemonic; + const char *shift_op; + unsigned int lane_count = 0; + char element_char; + + struct simd_immediate_info info = { NULL_RTX, 0, 0, false, false }; + + /* This will return true to show const_vector is legal for use as either + a AdvSIMD MOVI instruction (or, implicitly, MVNI) immediate. It will + also update INFO to show how the immediate should be generated. */ + is_valid = aarch64_simd_valid_immediate (const_vector, mode, false, &info); + gcc_assert (is_valid); + + element_char = sizetochar (info.element_width); + lane_count = width / info.element_width; + + mode = GET_MODE_INNER (mode); + if (mode == SFmode || mode == DFmode) + { + gcc_assert (info.shift == 0 && ! info.mvn); + if (aarch64_float_const_zero_rtx_p (info.value)) + info.value = GEN_INT (0); + else + { +#define buf_size 20 + REAL_VALUE_TYPE r; + REAL_VALUE_FROM_CONST_DOUBLE (r, info.value); + char float_buf[buf_size] = {'\0'}; + real_to_decimal_for_mode (float_buf, &r, buf_size, buf_size, 1, mode); +#undef buf_size + + if (lane_count == 1) + snprintf (templ, sizeof (templ), "fmov\t%%d0, %s", float_buf); + else + snprintf (templ, sizeof (templ), "fmov\t%%0.%d%c, %s", + lane_count, element_char, float_buf); + return templ; + } + } + + mnemonic = info.mvn ? "mvni" : "movi"; + shift_op = info.msl ? "msl" : "lsl"; + + if (lane_count == 1) + snprintf (templ, sizeof (templ), "%s\t%%d0, " HOST_WIDE_INT_PRINT_HEX, + mnemonic, UINTVAL (info.value)); + else if (info.shift) + snprintf (templ, sizeof (templ), "%s\t%%0.%d%c, " HOST_WIDE_INT_PRINT_HEX + ", %s %d", mnemonic, lane_count, element_char, + UINTVAL (info.value), shift_op, info.shift); + else + snprintf (templ, sizeof (templ), "%s\t%%0.%d%c, " HOST_WIDE_INT_PRINT_HEX, + mnemonic, lane_count, element_char, UINTVAL (info.value)); + return templ; +} + +char* +aarch64_output_scalar_simd_mov_immediate (rtx immediate, + enum machine_mode mode) +{ + enum machine_mode vmode; + + gcc_assert (!VECTOR_MODE_P (mode)); + vmode = aarch64_simd_container_mode (mode, 64); + rtx v_op = aarch64_simd_gen_const_vector_dup (vmode, INTVAL (immediate)); + return aarch64_output_simd_mov_immediate (v_op, vmode, 64); +} + +/* Split operands into moves from op[1] + op[2] into op[0]. */ + +void +aarch64_split_combinev16qi (rtx operands[3]) +{ + unsigned int dest = REGNO (operands[0]); + unsigned int src1 = REGNO (operands[1]); + unsigned int src2 = REGNO (operands[2]); + enum machine_mode halfmode = GET_MODE (operands[1]); + unsigned int halfregs = HARD_REGNO_NREGS (src1, halfmode); + rtx destlo, desthi; + + gcc_assert (halfmode == V16QImode); + + if (src1 == dest && src2 == dest + halfregs) + { + /* No-op move. Can't split to nothing; emit something. */ + emit_note (NOTE_INSN_DELETED); + return; + } + + /* Preserve register attributes for variable tracking. */ + destlo = gen_rtx_REG_offset (operands[0], halfmode, dest, 0); + desthi = gen_rtx_REG_offset (operands[0], halfmode, dest + halfregs, + GET_MODE_SIZE (halfmode)); + + /* Special case of reversed high/low parts. 
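     Here the low half of the destination overlaps the second source and the
     high half overlaps the first.  The three EORs emitted below are the
     classic XOR swap

       a ^= b;  b ^= a;  a ^= b;

     which exchanges the two source registers in place, and that is all that
     is needed because each source already sits in the other's half of the
     destination.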
*/ + if (reg_overlap_mentioned_p (operands[2], destlo) + && reg_overlap_mentioned_p (operands[1], desthi)) + { + emit_insn (gen_xorv16qi3 (operands[1], operands[1], operands[2])); + emit_insn (gen_xorv16qi3 (operands[2], operands[1], operands[2])); + emit_insn (gen_xorv16qi3 (operands[1], operands[1], operands[2])); + } + else if (!reg_overlap_mentioned_p (operands[2], destlo)) + { + /* Try to avoid unnecessary moves if part of the result + is in the right place already. */ + if (src1 != dest) + emit_move_insn (destlo, operands[1]); + if (src2 != dest + halfregs) + emit_move_insn (desthi, operands[2]); + } + else + { + if (src2 != dest + halfregs) + emit_move_insn (desthi, operands[2]); + if (src1 != dest) + emit_move_insn (destlo, operands[1]); + } +} + +/* vec_perm support. */ + +#define MAX_VECT_LEN 16 + +struct expand_vec_perm_d +{ + rtx target, op0, op1; + unsigned char perm[MAX_VECT_LEN]; + enum machine_mode vmode; + unsigned char nelt; + bool one_vector_p; + bool testing_p; +}; + +/* Generate a variable permutation. */ + +static void +aarch64_expand_vec_perm_1 (rtx target, rtx op0, rtx op1, rtx sel) +{ + enum machine_mode vmode = GET_MODE (target); + bool one_vector_p = rtx_equal_p (op0, op1); + + gcc_checking_assert (vmode == V8QImode || vmode == V16QImode); + gcc_checking_assert (GET_MODE (op0) == vmode); + gcc_checking_assert (GET_MODE (op1) == vmode); + gcc_checking_assert (GET_MODE (sel) == vmode); + gcc_checking_assert (TARGET_SIMD); + + if (one_vector_p) + { + if (vmode == V8QImode) + { + /* Expand the argument to a V16QI mode by duplicating it. */ + rtx pair = gen_reg_rtx (V16QImode); + emit_insn (gen_aarch64_combinev8qi (pair, op0, op0)); + emit_insn (gen_aarch64_tbl1v8qi (target, pair, sel)); + } + else + { + emit_insn (gen_aarch64_tbl1v16qi (target, op0, sel)); + } + } + else + { + rtx pair; + + if (vmode == V8QImode) + { + pair = gen_reg_rtx (V16QImode); + emit_insn (gen_aarch64_combinev8qi (pair, op0, op1)); + emit_insn (gen_aarch64_tbl1v8qi (target, pair, sel)); + } + else + { + pair = gen_reg_rtx (OImode); + emit_insn (gen_aarch64_combinev16qi (pair, op0, op1)); + emit_insn (gen_aarch64_tbl2v16qi (target, pair, sel)); + } + } +} + +void +aarch64_expand_vec_perm (rtx target, rtx op0, rtx op1, rtx sel) +{ + enum machine_mode vmode = GET_MODE (target); + unsigned int i, nelt = GET_MODE_NUNITS (vmode); + bool one_vector_p = rtx_equal_p (op0, op1); + rtx rmask[MAX_VECT_LEN], mask; + + gcc_checking_assert (!BYTES_BIG_ENDIAN); + + /* The TBL instruction does not use a modulo index, so we must take care + of that ourselves. */ + mask = GEN_INT (one_vector_p ? nelt - 1 : 2 * nelt - 1); + for (i = 0; i < nelt; ++i) + rmask[i] = mask; + mask = gen_rtx_CONST_VECTOR (vmode, gen_rtvec_v (nelt, rmask)); + sel = expand_simple_binop (vmode, AND, sel, mask, NULL, 0, OPTAB_LIB_WIDEN); + + aarch64_expand_vec_perm_1 (target, op0, op1, sel); +} + +/* Recognize patterns suitable for the TRN instructions. */ +static bool +aarch64_evpc_trn (struct expand_vec_perm_d *d) +{ + unsigned int i, odd, mask, nelt = d->nelt; + rtx out, in0, in1, x; + rtx (*gen) (rtx, rtx, rtx); + enum machine_mode vmode = d->vmode; + + if (GET_MODE_UNIT_SIZE (vmode) > 8) + return false; + + /* Note that these are little-endian tests. + We correct for big-endian later. */ + if (d->perm[0] == 0) + odd = 0; + else if (d->perm[0] == 1) + odd = 1; + else + return false; + mask = (d->one_vector_p ? 
nelt - 1 : 2 * nelt - 1); + + for (i = 0; i < nelt; i += 2) + { + if (d->perm[i] != i + odd) + return false; + if (d->perm[i + 1] != ((i + nelt + odd) & mask)) + return false; + } + + /* Success! */ + if (d->testing_p) + return true; + + in0 = d->op0; + in1 = d->op1; + if (BYTES_BIG_ENDIAN) + { + x = in0, in0 = in1, in1 = x; + odd = !odd; + } + out = d->target; + + if (odd) + { + switch (vmode) + { + case V16QImode: gen = gen_aarch64_trn2v16qi; break; + case V8QImode: gen = gen_aarch64_trn2v8qi; break; + case V8HImode: gen = gen_aarch64_trn2v8hi; break; + case V4HImode: gen = gen_aarch64_trn2v4hi; break; + case V4SImode: gen = gen_aarch64_trn2v4si; break; + case V2SImode: gen = gen_aarch64_trn2v2si; break; + case V2DImode: gen = gen_aarch64_trn2v2di; break; + case V4SFmode: gen = gen_aarch64_trn2v4sf; break; + case V2SFmode: gen = gen_aarch64_trn2v2sf; break; + case V2DFmode: gen = gen_aarch64_trn2v2df; break; + default: + return false; + } + } + else + { + switch (vmode) + { + case V16QImode: gen = gen_aarch64_trn1v16qi; break; + case V8QImode: gen = gen_aarch64_trn1v8qi; break; + case V8HImode: gen = gen_aarch64_trn1v8hi; break; + case V4HImode: gen = gen_aarch64_trn1v4hi; break; + case V4SImode: gen = gen_aarch64_trn1v4si; break; + case V2SImode: gen = gen_aarch64_trn1v2si; break; + case V2DImode: gen = gen_aarch64_trn1v2di; break; + case V4SFmode: gen = gen_aarch64_trn1v4sf; break; + case V2SFmode: gen = gen_aarch64_trn1v2sf; break; + case V2DFmode: gen = gen_aarch64_trn1v2df; break; + default: + return false; + } + } + + emit_insn (gen (out, in0, in1)); + return true; +} + +/* Recognize patterns suitable for the UZP instructions. */ +static bool +aarch64_evpc_uzp (struct expand_vec_perm_d *d) +{ + unsigned int i, odd, mask, nelt = d->nelt; + rtx out, in0, in1, x; + rtx (*gen) (rtx, rtx, rtx); + enum machine_mode vmode = d->vmode; + + if (GET_MODE_UNIT_SIZE (vmode) > 8) + return false; + + /* Note that these are little-endian tests. + We correct for big-endian later. */ + if (d->perm[0] == 0) + odd = 0; + else if (d->perm[0] == 1) + odd = 1; + else + return false; + mask = (d->one_vector_p ? nelt - 1 : 2 * nelt - 1); + + for (i = 0; i < nelt; i++) + { + unsigned elt = (i * 2 + odd) & mask; + if (d->perm[i] != elt) + return false; + } + + /* Success! 
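     For V4SI, for example, this accepts the index vectors
     {0, 2, 4, 6} (uzp1, the even elements of the concatenated inputs) and
     {1, 3, 5, 7} (uzp2, the odd elements).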
*/ + if (d->testing_p) + return true; + + in0 = d->op0; + in1 = d->op1; + if (BYTES_BIG_ENDIAN) + { + x = in0, in0 = in1, in1 = x; + odd = !odd; + } + out = d->target; + + if (odd) + { + switch (vmode) + { + case V16QImode: gen = gen_aarch64_uzp2v16qi; break; + case V8QImode: gen = gen_aarch64_uzp2v8qi; break; + case V8HImode: gen = gen_aarch64_uzp2v8hi; break; + case V4HImode: gen = gen_aarch64_uzp2v4hi; break; + case V4SImode: gen = gen_aarch64_uzp2v4si; break; + case V2SImode: gen = gen_aarch64_uzp2v2si; break; + case V2DImode: gen = gen_aarch64_uzp2v2di; break; + case V4SFmode: gen = gen_aarch64_uzp2v4sf; break; + case V2SFmode: gen = gen_aarch64_uzp2v2sf; break; + case V2DFmode: gen = gen_aarch64_uzp2v2df; break; + default: + return false; + } + } + else + { + switch (vmode) + { + case V16QImode: gen = gen_aarch64_uzp1v16qi; break; + case V8QImode: gen = gen_aarch64_uzp1v8qi; break; + case V8HImode: gen = gen_aarch64_uzp1v8hi; break; + case V4HImode: gen = gen_aarch64_uzp1v4hi; break; + case V4SImode: gen = gen_aarch64_uzp1v4si; break; + case V2SImode: gen = gen_aarch64_uzp1v2si; break; + case V2DImode: gen = gen_aarch64_uzp1v2di; break; + case V4SFmode: gen = gen_aarch64_uzp1v4sf; break; + case V2SFmode: gen = gen_aarch64_uzp1v2sf; break; + case V2DFmode: gen = gen_aarch64_uzp1v2df; break; + default: + return false; + } + } + + emit_insn (gen (out, in0, in1)); + return true; +} + +/* Recognize patterns suitable for the ZIP instructions. */ +static bool +aarch64_evpc_zip (struct expand_vec_perm_d *d) +{ + unsigned int i, high, mask, nelt = d->nelt; + rtx out, in0, in1, x; + rtx (*gen) (rtx, rtx, rtx); + enum machine_mode vmode = d->vmode; + + if (GET_MODE_UNIT_SIZE (vmode) > 8) + return false; + + /* Note that these are little-endian tests. + We correct for big-endian later. */ + high = nelt / 2; + if (d->perm[0] == high) + /* Do Nothing. */ + ; + else if (d->perm[0] == 0) + high = 0; + else + return false; + mask = (d->one_vector_p ? nelt - 1 : 2 * nelt - 1); + + for (i = 0; i < nelt / 2; i++) + { + unsigned elt = (i + high) & mask; + if (d->perm[i * 2] != elt) + return false; + elt = (elt + nelt) & mask; + if (d->perm[i * 2 + 1] != elt) + return false; + } + + /* Success! 
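     For V4SI, for example, this accepts the index vectors
     {0, 4, 1, 5} (zip1, interleaving the low halves of the two inputs) and
     {2, 6, 3, 7} (zip2, interleaving the high halves).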
*/ + if (d->testing_p) + return true; + + in0 = d->op0; + in1 = d->op1; + if (BYTES_BIG_ENDIAN) + { + x = in0, in0 = in1, in1 = x; + high = !high; + } + out = d->target; + + if (high) + { + switch (vmode) + { + case V16QImode: gen = gen_aarch64_zip2v16qi; break; + case V8QImode: gen = gen_aarch64_zip2v8qi; break; + case V8HImode: gen = gen_aarch64_zip2v8hi; break; + case V4HImode: gen = gen_aarch64_zip2v4hi; break; + case V4SImode: gen = gen_aarch64_zip2v4si; break; + case V2SImode: gen = gen_aarch64_zip2v2si; break; + case V2DImode: gen = gen_aarch64_zip2v2di; break; + case V4SFmode: gen = gen_aarch64_zip2v4sf; break; + case V2SFmode: gen = gen_aarch64_zip2v2sf; break; + case V2DFmode: gen = gen_aarch64_zip2v2df; break; + default: + return false; + } + } + else + { + switch (vmode) + { + case V16QImode: gen = gen_aarch64_zip1v16qi; break; + case V8QImode: gen = gen_aarch64_zip1v8qi; break; + case V8HImode: gen = gen_aarch64_zip1v8hi; break; + case V4HImode: gen = gen_aarch64_zip1v4hi; break; + case V4SImode: gen = gen_aarch64_zip1v4si; break; + case V2SImode: gen = gen_aarch64_zip1v2si; break; + case V2DImode: gen = gen_aarch64_zip1v2di; break; + case V4SFmode: gen = gen_aarch64_zip1v4sf; break; + case V2SFmode: gen = gen_aarch64_zip1v2sf; break; + case V2DFmode: gen = gen_aarch64_zip1v2df; break; + default: + return false; + } + } + + emit_insn (gen (out, in0, in1)); + return true; +} + +static bool +aarch64_evpc_dup (struct expand_vec_perm_d *d) +{ + rtx (*gen) (rtx, rtx, rtx); + rtx out = d->target; + rtx in0; + enum machine_mode vmode = d->vmode; + unsigned int i, elt, nelt = d->nelt; + rtx lane; + + /* TODO: This may not be big-endian safe. */ + if (BYTES_BIG_ENDIAN) + return false; + + elt = d->perm[0]; + for (i = 1; i < nelt; i++) + { + if (elt != d->perm[i]) + return false; + } + + /* The generic preparation in aarch64_expand_vec_perm_const_1 + swaps the operand order and the permute indices if it finds + d->perm[0] to be in the second operand. Thus, we can always + use d->op0 and need not do any extra arithmetic to get the + correct lane number. */ + in0 = d->op0; + lane = GEN_INT (elt); + + switch (vmode) + { + case V16QImode: gen = gen_aarch64_dup_lanev16qi; break; + case V8QImode: gen = gen_aarch64_dup_lanev8qi; break; + case V8HImode: gen = gen_aarch64_dup_lanev8hi; break; + case V4HImode: gen = gen_aarch64_dup_lanev4hi; break; + case V4SImode: gen = gen_aarch64_dup_lanev4si; break; + case V2SImode: gen = gen_aarch64_dup_lanev2si; break; + case V2DImode: gen = gen_aarch64_dup_lanev2di; break; + case V4SFmode: gen = gen_aarch64_dup_lanev4sf; break; + case V2SFmode: gen = gen_aarch64_dup_lanev2sf; break; + case V2DFmode: gen = gen_aarch64_dup_lanev2df; break; + default: + return false; + } + + emit_insn (gen (out, in0, lane)); + return true; +} + +static bool +aarch64_evpc_tbl (struct expand_vec_perm_d *d) +{ + rtx rperm[MAX_VECT_LEN], sel; + enum machine_mode vmode = d->vmode; + unsigned int i, nelt = d->nelt; + + /* TODO: ARM's TBL indexing is little-endian. In order to handle GCC's + numbering of elements for big-endian, we must reverse the order. */ + if (BYTES_BIG_ENDIAN) + return false; + + if (d->testing_p) + return true; + + /* Generic code will try constant permutation twice. Once with the + original mode and again with the elements lowered to QImode. + So wait and don't do the selector expansion ourselves. 
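     Once the elements have been lowered to QImode we simply materialise the
     index vector as a V8QI/V16QI constant, force it into a register and let
     aarch64_expand_vec_perm_1 emit a single TBL (combining the two inputs
     into a register pair first when necessary).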
*/ + if (vmode != V8QImode && vmode != V16QImode) + return false; + + for (i = 0; i < nelt; ++i) + rperm[i] = GEN_INT (d->perm[i]); + sel = gen_rtx_CONST_VECTOR (vmode, gen_rtvec_v (nelt, rperm)); + sel = force_reg (vmode, sel); + + aarch64_expand_vec_perm_1 (d->target, d->op0, d->op1, sel); + return true; +} + +static bool +aarch64_expand_vec_perm_const_1 (struct expand_vec_perm_d *d) +{ + /* The pattern matching functions above are written to look for a small + number to begin the sequence (0, 1, N/2). If we begin with an index + from the second operand, we can swap the operands. */ + if (d->perm[0] >= d->nelt) + { + unsigned i, nelt = d->nelt; + rtx x; + + for (i = 0; i < nelt; ++i) + d->perm[i] = (d->perm[i] + nelt) & (2 * nelt - 1); + + x = d->op0; + d->op0 = d->op1; + d->op1 = x; + } + + if (TARGET_SIMD) + { + if (aarch64_evpc_zip (d)) + return true; + else if (aarch64_evpc_uzp (d)) + return true; + else if (aarch64_evpc_trn (d)) + return true; + else if (aarch64_evpc_dup (d)) + return true; + return aarch64_evpc_tbl (d); + } + return false; +} + +/* Expand a vec_perm_const pattern. */ + +bool +aarch64_expand_vec_perm_const (rtx target, rtx op0, rtx op1, rtx sel) +{ + struct expand_vec_perm_d d; + int i, nelt, which; + + d.target = target; + d.op0 = op0; + d.op1 = op1; + + d.vmode = GET_MODE (target); + gcc_assert (VECTOR_MODE_P (d.vmode)); + d.nelt = nelt = GET_MODE_NUNITS (d.vmode); + d.testing_p = false; + + for (i = which = 0; i < nelt; ++i) + { + rtx e = XVECEXP (sel, 0, i); + int ei = INTVAL (e) & (2 * nelt - 1); + which |= (ei < nelt ? 1 : 2); + d.perm[i] = ei; + } + + switch (which) + { + default: + gcc_unreachable (); + + case 3: + d.one_vector_p = false; + if (!rtx_equal_p (op0, op1)) + break; + + /* The elements of PERM do not suggest that only the first operand + is used, but both operands are identical. Allow easier matching + of the permutation by folding the permutation into the single + input vector. */ + /* Fall Through. */ + case 2: + for (i = 0; i < nelt; ++i) + d.perm[i] &= nelt - 1; + d.op0 = op1; + d.one_vector_p = true; + break; + + case 1: + d.op1 = op0; + d.one_vector_p = true; + break; + } + + return aarch64_expand_vec_perm_const_1 (&d); +} + +static bool +aarch64_vectorize_vec_perm_const_ok (enum machine_mode vmode, + const unsigned char *sel) +{ + struct expand_vec_perm_d d; + unsigned int i, nelt, which; + bool ret; + + d.vmode = vmode; + d.nelt = nelt = GET_MODE_NUNITS (d.vmode); + d.testing_p = true; + memcpy (d.perm, sel, nelt); + + /* Calculate whether all elements are in one vector. */ + for (i = which = 0; i < nelt; ++i) + { + unsigned char e = d.perm[i]; + gcc_assert (e < 2 * nelt); + which |= (e < nelt ? 1 : 2); + } + + /* If all elements are from the second vector, reindex as if from the + first vector. */ + if (which == 2) + for (i = 0; i < nelt; ++i) + d.perm[i] -= nelt; + + /* Check whether the mask can be applied to a single vector. */ + d.one_vector_p = (which != 3); + + d.target = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 1); + d.op1 = d.op0 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 2); + if (!d.one_vector_p) + d.op1 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 3); + + start_sequence (); + ret = aarch64_expand_vec_perm_const_1 (&d); + end_sequence (); + + return ret; +} + +/* Implement target hook CANNOT_CHANGE_MODE_CLASS. 
*/ +bool +aarch64_cannot_change_mode_class (enum machine_mode from, + enum machine_mode to, + enum reg_class rclass) +{ + /* Full-reg subregs are allowed on general regs or any class if they are + the same size. */ + if (GET_MODE_SIZE (from) == GET_MODE_SIZE (to) + || !reg_classes_intersect_p (FP_REGS, rclass)) + return false; + + /* Limited combinations of subregs are safe on FPREGs. Particularly, + 1. Vector Mode to Scalar mode where 1 unit of the vector is accessed. + 2. Scalar to Scalar for integer modes or same size float modes. + 3. Vector to Vector modes. */ + if (GET_MODE_SIZE (from) > GET_MODE_SIZE (to)) + { + if (aarch64_vector_mode_supported_p (from) + && GET_MODE_SIZE (GET_MODE_INNER (from)) == GET_MODE_SIZE (to)) + return false; + + if (GET_MODE_NUNITS (from) == 1 + && GET_MODE_NUNITS (to) == 1 + && (GET_MODE_CLASS (from) == MODE_INT + || from == to)) + return false; + + if (aarch64_vector_mode_supported_p (from) + && aarch64_vector_mode_supported_p (to)) + return false; + } + + return true; +} + +#undef TARGET_ADDRESS_COST +#define TARGET_ADDRESS_COST aarch64_address_cost + +/* This hook will determines whether unnamed bitfields affect the alignment + of the containing structure. The hook returns true if the structure + should inherit the alignment requirements of an unnamed bitfield's + type. */ +#undef TARGET_ALIGN_ANON_BITFIELD +#define TARGET_ALIGN_ANON_BITFIELD hook_bool_void_true + +#undef TARGET_ASM_ALIGNED_DI_OP +#define TARGET_ASM_ALIGNED_DI_OP "\t.xword\t" + +#undef TARGET_ASM_ALIGNED_HI_OP +#define TARGET_ASM_ALIGNED_HI_OP "\t.hword\t" + +#undef TARGET_ASM_ALIGNED_SI_OP +#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t" + +#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK +#define TARGET_ASM_CAN_OUTPUT_MI_THUNK \ + hook_bool_const_tree_hwi_hwi_const_tree_true + +#undef TARGET_ASM_FILE_START +#define TARGET_ASM_FILE_START aarch64_start_file + +#undef TARGET_ASM_OUTPUT_MI_THUNK +#define TARGET_ASM_OUTPUT_MI_THUNK aarch64_output_mi_thunk + +#undef TARGET_ASM_SELECT_RTX_SECTION +#define TARGET_ASM_SELECT_RTX_SECTION aarch64_select_rtx_section + +#undef TARGET_ASM_TRAMPOLINE_TEMPLATE +#define TARGET_ASM_TRAMPOLINE_TEMPLATE aarch64_asm_trampoline_template + +#undef TARGET_BUILD_BUILTIN_VA_LIST +#define TARGET_BUILD_BUILTIN_VA_LIST aarch64_build_builtin_va_list + +#undef TARGET_CALLEE_COPIES +#define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_false + +#undef TARGET_CAN_ELIMINATE +#define TARGET_CAN_ELIMINATE aarch64_can_eliminate + +#undef TARGET_CANNOT_FORCE_CONST_MEM +#define TARGET_CANNOT_FORCE_CONST_MEM aarch64_cannot_force_const_mem + +#undef TARGET_CONDITIONAL_REGISTER_USAGE +#define TARGET_CONDITIONAL_REGISTER_USAGE aarch64_conditional_register_usage + +/* Only the least significant bit is used for initialization guard + variables. 
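   So the inline fast path for a function-local static tests just bit 0 of
   the guard word before calling into the runtime; roughly (a sketch using
   the Itanium C++ ABI entry points, with the acquire barrier omitted):

     if ((guard & 1) == 0)
       {
         if (__cxa_guard_acquire (&guard))
           {
             ... run the constructor ...
             __cxa_guard_release (&guard);
           }
       }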
*/ +#undef TARGET_CXX_GUARD_MASK_BIT +#define TARGET_CXX_GUARD_MASK_BIT hook_bool_void_true + +#undef TARGET_C_MODE_FOR_SUFFIX +#define TARGET_C_MODE_FOR_SUFFIX aarch64_c_mode_for_suffix + +#ifdef TARGET_BIG_ENDIAN_DEFAULT +#undef TARGET_DEFAULT_TARGET_FLAGS +#define TARGET_DEFAULT_TARGET_FLAGS (MASK_BIG_END) +#endif + +#undef TARGET_CLASS_MAX_NREGS +#define TARGET_CLASS_MAX_NREGS aarch64_class_max_nregs + +#undef TARGET_BUILTIN_DECL +#define TARGET_BUILTIN_DECL aarch64_builtin_decl + +#undef TARGET_EXPAND_BUILTIN +#define TARGET_EXPAND_BUILTIN aarch64_expand_builtin + +#undef TARGET_EXPAND_BUILTIN_VA_START +#define TARGET_EXPAND_BUILTIN_VA_START aarch64_expand_builtin_va_start + +#undef TARGET_FOLD_BUILTIN +#define TARGET_FOLD_BUILTIN aarch64_fold_builtin + +#undef TARGET_FUNCTION_ARG +#define TARGET_FUNCTION_ARG aarch64_function_arg + +#undef TARGET_FUNCTION_ARG_ADVANCE +#define TARGET_FUNCTION_ARG_ADVANCE aarch64_function_arg_advance + +#undef TARGET_FUNCTION_ARG_BOUNDARY +#define TARGET_FUNCTION_ARG_BOUNDARY aarch64_function_arg_boundary + +#undef TARGET_FUNCTION_OK_FOR_SIBCALL +#define TARGET_FUNCTION_OK_FOR_SIBCALL aarch64_function_ok_for_sibcall + +#undef TARGET_FUNCTION_VALUE +#define TARGET_FUNCTION_VALUE aarch64_function_value + +#undef TARGET_FUNCTION_VALUE_REGNO_P +#define TARGET_FUNCTION_VALUE_REGNO_P aarch64_function_value_regno_p + +#undef TARGET_FRAME_POINTER_REQUIRED +#define TARGET_FRAME_POINTER_REQUIRED aarch64_frame_pointer_required + +#undef TARGET_GIMPLE_FOLD_BUILTIN +#define TARGET_GIMPLE_FOLD_BUILTIN aarch64_gimple_fold_builtin + +#undef TARGET_GIMPLIFY_VA_ARG_EXPR +#define TARGET_GIMPLIFY_VA_ARG_EXPR aarch64_gimplify_va_arg_expr + +#undef TARGET_INIT_BUILTINS +#define TARGET_INIT_BUILTINS aarch64_init_builtins + +#undef TARGET_LEGITIMATE_ADDRESS_P +#define TARGET_LEGITIMATE_ADDRESS_P aarch64_legitimate_address_hook_p + +#undef TARGET_LEGITIMATE_CONSTANT_P +#define TARGET_LEGITIMATE_CONSTANT_P aarch64_legitimate_constant_p + +#undef TARGET_LIBGCC_CMP_RETURN_MODE +#define TARGET_LIBGCC_CMP_RETURN_MODE aarch64_libgcc_cmp_return_mode + +#undef TARGET_LRA_P +#define TARGET_LRA_P aarch64_lra_p + +#undef TARGET_MANGLE_TYPE +#define TARGET_MANGLE_TYPE aarch64_mangle_type + +#undef TARGET_MEMORY_MOVE_COST +#define TARGET_MEMORY_MOVE_COST aarch64_memory_move_cost + +#undef TARGET_MUST_PASS_IN_STACK +#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size + +/* This target hook should return true if accesses to volatile bitfields + should use the narrowest mode possible. It should return false if these + accesses should use the bitfield container type. 
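   With the hook returning false, given for example

     struct dev_reg { volatile unsigned int ready : 1; };

   a read or write of the 'ready' field is normally done with a 32-bit access
   to the containing unsigned int rather than with the narrowest (byte)
   access.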
*/ +#undef TARGET_NARROW_VOLATILE_BITFIELD +#define TARGET_NARROW_VOLATILE_BITFIELD hook_bool_void_false + +#undef TARGET_OPTION_OVERRIDE +#define TARGET_OPTION_OVERRIDE aarch64_override_options + +#undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE +#define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE \ + aarch64_override_options_after_change + +#undef TARGET_PASS_BY_REFERENCE +#define TARGET_PASS_BY_REFERENCE aarch64_pass_by_reference + +#undef TARGET_PREFERRED_RELOAD_CLASS +#define TARGET_PREFERRED_RELOAD_CLASS aarch64_preferred_reload_class + +#undef TARGET_SECONDARY_RELOAD +#define TARGET_SECONDARY_RELOAD aarch64_secondary_reload + +#undef TARGET_SHIFT_TRUNCATION_MASK +#define TARGET_SHIFT_TRUNCATION_MASK aarch64_shift_truncation_mask + +#undef TARGET_SETUP_INCOMING_VARARGS +#define TARGET_SETUP_INCOMING_VARARGS aarch64_setup_incoming_varargs + +#undef TARGET_STRUCT_VALUE_RTX +#define TARGET_STRUCT_VALUE_RTX aarch64_struct_value_rtx + +#undef TARGET_REGISTER_MOVE_COST +#define TARGET_REGISTER_MOVE_COST aarch64_register_move_cost + +#undef TARGET_RETURN_IN_MEMORY +#define TARGET_RETURN_IN_MEMORY aarch64_return_in_memory + +#undef TARGET_RETURN_IN_MSB +#define TARGET_RETURN_IN_MSB aarch64_return_in_msb + +#undef TARGET_RTX_COSTS +#define TARGET_RTX_COSTS aarch64_rtx_costs + +#undef TARGET_SCHED_ISSUE_RATE +#define TARGET_SCHED_ISSUE_RATE aarch64_sched_issue_rate + +#undef TARGET_TRAMPOLINE_INIT +#define TARGET_TRAMPOLINE_INIT aarch64_trampoline_init + +#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P +#define TARGET_USE_BLOCKS_FOR_CONSTANT_P aarch64_use_blocks_for_constant_p + +#undef TARGET_VECTOR_MODE_SUPPORTED_P +#define TARGET_VECTOR_MODE_SUPPORTED_P aarch64_vector_mode_supported_p + +#undef TARGET_ARRAY_MODE_SUPPORTED_P +#define TARGET_ARRAY_MODE_SUPPORTED_P aarch64_array_mode_supported_p + +#undef TARGET_VECTORIZE_ADD_STMT_COST +#define TARGET_VECTORIZE_ADD_STMT_COST aarch64_add_stmt_cost + +#undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST +#define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \ + aarch64_builtin_vectorization_cost + +#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE +#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE aarch64_preferred_simd_mode + +#undef TARGET_VECTORIZE_BUILTINS +#define TARGET_VECTORIZE_BUILTINS + +#undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION +#define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \ + aarch64_builtin_vectorized_function + +#undef TARGET_VECTORIZE_AUTOVECTORIZE_VECTOR_SIZES +#define TARGET_VECTORIZE_AUTOVECTORIZE_VECTOR_SIZES \ + aarch64_autovectorize_vector_sizes + +/* Section anchor support. */ + +#undef TARGET_MIN_ANCHOR_OFFSET +#define TARGET_MIN_ANCHOR_OFFSET -256 + +/* Limit the maximum anchor offset to 4k-1, since that's the limit for a + byte offset; we can do much more for larger data types, but have no way + to determine the size of the access. We assume accesses are aligned. */ +#undef TARGET_MAX_ANCHOR_OFFSET +#define TARGET_MAX_ANCHOR_OFFSET 4095 + +#undef TARGET_VECTOR_ALIGNMENT +#define TARGET_VECTOR_ALIGNMENT aarch64_simd_vector_alignment + +#undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE +#define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE \ + aarch64_simd_vector_alignment_reachable + +/* vec_perm support. 
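   Constant permutations, whether written by hand with __builtin_shuffle or
   produced by the vectorizer, are what the aarch64_evpc_* routines above try
   to match; as an illustrative sketch,

     typedef int v4si __attribute__ ((vector_size (16)));
     v4si f (v4si a, v4si b)
     {
       return __builtin_shuffle (a, b, (v4si) {0, 4, 2, 6});
     }

   should be recognised by aarch64_evpc_trn and come out as a single trn1,
   while selectors that match none of the routines fall back to the TBL path.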
*/ + +#undef TARGET_VECTORIZE_VEC_PERM_CONST_OK +#define TARGET_VECTORIZE_VEC_PERM_CONST_OK \ + aarch64_vectorize_vec_perm_const_ok + + +#undef TARGET_FIXED_CONDITION_CODE_REGS +#define TARGET_FIXED_CONDITION_CODE_REGS aarch64_fixed_condition_code_regs + +struct gcc_target targetm = TARGET_INITIALIZER; + +#include "gt-aarch64.h" diff --git a/gcc-4.9/gcc/config/aarch64/aarch64.h b/gcc-4.9/gcc/config/aarch64/aarch64.h new file mode 100644 index 000000000..7962aa472 --- /dev/null +++ b/gcc-4.9/gcc/config/aarch64/aarch64.h @@ -0,0 +1,873 @@ +/* Machine description for AArch64 architecture. + Copyright (C) 2009-2014 Free Software Foundation, Inc. + Contributed by ARM Ltd. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING3. If not see + <http://www.gnu.org/licenses/>. */ + + +#ifndef GCC_AARCH64_H +#define GCC_AARCH64_H + +/* Target CPU builtins. */ +#define TARGET_CPU_CPP_BUILTINS() \ + do \ + { \ + builtin_define ("__aarch64__"); \ + if (TARGET_BIG_END) \ + builtin_define ("__AARCH64EB__"); \ + else \ + builtin_define ("__AARCH64EL__"); \ + \ + if (!TARGET_GENERAL_REGS_ONLY) \ + builtin_define ("__ARM_NEON"); \ + \ + switch (aarch64_cmodel) \ + { \ + case AARCH64_CMODEL_TINY: \ + case AARCH64_CMODEL_TINY_PIC: \ + builtin_define ("__AARCH64_CMODEL_TINY__"); \ + break; \ + case AARCH64_CMODEL_SMALL: \ + case AARCH64_CMODEL_SMALL_PIC: \ + builtin_define ("__AARCH64_CMODEL_SMALL__");\ + break; \ + case AARCH64_CMODEL_LARGE: \ + builtin_define ("__AARCH64_CMODEL_LARGE__"); \ + break; \ + default: \ + break; \ + } \ + \ + if (TARGET_ILP32) \ + { \ + cpp_define (parse_in, "_ILP32"); \ + cpp_define (parse_in, "__ILP32__"); \ + } \ + if (TARGET_CRYPTO) \ + builtin_define ("__ARM_FEATURE_CRYPTO"); \ + } while (0) + + + +/* Target machine storage layout. */ + +#define PROMOTE_MODE(MODE, UNSIGNEDP, TYPE) \ + if (GET_MODE_CLASS (MODE) == MODE_INT \ + && GET_MODE_SIZE (MODE) < 4) \ + { \ + if (MODE == QImode || MODE == HImode) \ + { \ + MODE = SImode; \ + } \ + } + +/* Bits are always numbered from the LSBit. */ +#define BITS_BIG_ENDIAN 0 + +/* Big/little-endian flavour. */ +#define BYTES_BIG_ENDIAN (TARGET_BIG_END != 0) +#define WORDS_BIG_ENDIAN (BYTES_BIG_ENDIAN) + +/* AdvSIMD is supported in the default configuration, unless disabled by + -mgeneral-regs-only. */ +#define TARGET_SIMD !TARGET_GENERAL_REGS_ONLY +#define TARGET_FLOAT !TARGET_GENERAL_REGS_ONLY + +#define UNITS_PER_WORD 8 + +#define UNITS_PER_VREG 16 + +#define PARM_BOUNDARY 64 + +#define STACK_BOUNDARY 128 + +#define FUNCTION_BOUNDARY 32 + +#define EMPTY_FIELD_BOUNDARY 32 + +#define BIGGEST_ALIGNMENT 128 + +#define SHORT_TYPE_SIZE 16 + +#define INT_TYPE_SIZE 32 + +#define LONG_TYPE_SIZE (TARGET_ILP32 ? 32 : 64) + +#define POINTER_SIZE (TARGET_ILP32 ? 
32 : 64) + +#define LONG_LONG_TYPE_SIZE 64 + +#define FLOAT_TYPE_SIZE 32 + +#define DOUBLE_TYPE_SIZE 64 + +#define LONG_DOUBLE_TYPE_SIZE 128 + +/* The architecture reserves all bits of the address for hardware use, + so the vbit must go into the delta field of pointers to member + functions. This is the same config as that in the AArch32 + port. */ +#define TARGET_PTRMEMFUNC_VBIT_LOCATION ptrmemfunc_vbit_in_delta + +/* Make strings word-aligned so that strcpy from constants will be + faster. */ +#define CONSTANT_ALIGNMENT(EXP, ALIGN) \ + ((TREE_CODE (EXP) == STRING_CST \ + && !optimize_size \ + && (ALIGN) < BITS_PER_WORD) \ + ? BITS_PER_WORD : ALIGN) + +#define DATA_ALIGNMENT(EXP, ALIGN) \ + ((((ALIGN) < BITS_PER_WORD) \ + && (TREE_CODE (EXP) == ARRAY_TYPE \ + || TREE_CODE (EXP) == UNION_TYPE \ + || TREE_CODE (EXP) == RECORD_TYPE)) \ + ? BITS_PER_WORD : (ALIGN)) + +#define LOCAL_ALIGNMENT(EXP, ALIGN) DATA_ALIGNMENT(EXP, ALIGN) + +#define STRUCTURE_SIZE_BOUNDARY 8 + +/* Defined by the ABI */ +#define WCHAR_TYPE "unsigned int" +#define WCHAR_TYPE_SIZE 32 + +/* Using long long breaks -ansi and -std=c90, so these will need to be + made conditional for an LLP64 ABI. */ + +#define SIZE_TYPE "long unsigned int" + +#define PTRDIFF_TYPE "long int" + +#define PCC_BITFIELD_TYPE_MATTERS 1 + + +/* Instruction tuning/selection flags. */ + +/* Bit values used to identify processor capabilities. */ +#define AARCH64_FL_SIMD (1 << 0) /* Has SIMD instructions. */ +#define AARCH64_FL_FP (1 << 1) /* Has FP. */ +#define AARCH64_FL_CRYPTO (1 << 2) /* Has crypto. */ +#define AARCH64_FL_SLOWMUL (1 << 3) /* A slow multiply core. */ +#define AARCH64_FL_CRC (1 << 4) /* Has CRC. */ + +/* Has FP and SIMD. */ +#define AARCH64_FL_FPSIMD (AARCH64_FL_FP | AARCH64_FL_SIMD) + +/* Has FP without SIMD. */ +#define AARCH64_FL_FPQ16 (AARCH64_FL_FP & ~AARCH64_FL_SIMD) + +/* Architecture flags that effect instruction selection. */ +#define AARCH64_FL_FOR_ARCH8 (AARCH64_FL_FPSIMD) + +/* Macros to test ISA flags. */ +extern unsigned long aarch64_isa_flags; +#define AARCH64_ISA_CRC (aarch64_isa_flags & AARCH64_FL_CRC) +#define AARCH64_ISA_CRYPTO (aarch64_isa_flags & AARCH64_FL_CRYPTO) +#define AARCH64_ISA_FP (aarch64_isa_flags & AARCH64_FL_FP) +#define AARCH64_ISA_SIMD (aarch64_isa_flags & AARCH64_FL_SIMD) + +/* Macros to test tuning flags. */ +extern unsigned long aarch64_tune_flags; +#define AARCH64_TUNE_SLOWMUL (aarch64_tune_flags & AARCH64_FL_SLOWMUL) + +/* Crypto is an optional feature. */ +#define TARGET_CRYPTO AARCH64_ISA_CRYPTO + +/* Standard register usage. */ + +/* 31 64-bit general purpose registers R0-R30: + R30 LR (link register) + R29 FP (frame pointer) + R19-R28 Callee-saved registers + R18 The platform register; use as temporary register. + R17 IP1 The second intra-procedure-call temporary register + (can be used by call veneers and PLT code); otherwise use + as a temporary register + R16 IP0 The first intra-procedure-call temporary register (can + be used by call veneers and PLT code); otherwise use as a + temporary register + R9-R15 Temporary registers + R8 Structure value parameter / temporary register + R0-R7 Parameter/result registers + + SP stack pointer, encoded as X/R31 where permitted. + ZR zero register, encoded as X/R31 elsewhere + + 32 x 128-bit floating-point/vector registers + V16-V31 Caller-saved (temporary) registers + V8-V15 Callee-saved registers + V0-V7 Parameter/result registers + + The vector register V0 holds scalar B0, H0, S0 and D0 in its least + significant bits. 
Unlike AArch32 S1 is not packed into D0, + etc. */ + +/* Note that we don't mark X30 as a call-clobbered register. The idea is + that it's really the call instructions themselves which clobber X30. + We don't care what the called function does with it afterwards. + + This approach makes it easier to implement sibcalls. Unlike normal + calls, sibcalls don't clobber X30, so the register reaches the + called function intact. EPILOGUE_USES says that X30 is useful + to the called function. */ + +#define FIXED_REGISTERS \ + { \ + 0, 0, 0, 0, 0, 0, 0, 0, /* R0 - R7 */ \ + 0, 0, 0, 0, 0, 0, 0, 0, /* R8 - R15 */ \ + 0, 0, 0, 0, 0, 0, 0, 0, /* R16 - R23 */ \ + 0, 0, 0, 0, 0, 1, 0, 1, /* R24 - R30, SP */ \ + 0, 0, 0, 0, 0, 0, 0, 0, /* V0 - V7 */ \ + 0, 0, 0, 0, 0, 0, 0, 0, /* V8 - V15 */ \ + 0, 0, 0, 0, 0, 0, 0, 0, /* V16 - V23 */ \ + 0, 0, 0, 0, 0, 0, 0, 0, /* V24 - V31 */ \ + 1, 1, 1, /* SFP, AP, CC */ \ + } + +#define CALL_USED_REGISTERS \ + { \ + 1, 1, 1, 1, 1, 1, 1, 1, /* R0 - R7 */ \ + 1, 1, 1, 1, 1, 1, 1, 1, /* R8 - R15 */ \ + 1, 1, 1, 0, 0, 0, 0, 0, /* R16 - R23 */ \ + 0, 0, 0, 0, 0, 1, 0, 1, /* R24 - R30, SP */ \ + 1, 1, 1, 1, 1, 1, 1, 1, /* V0 - V7 */ \ + 0, 0, 0, 0, 0, 0, 0, 0, /* V8 - V15 */ \ + 1, 1, 1, 1, 1, 1, 1, 1, /* V16 - V23 */ \ + 1, 1, 1, 1, 1, 1, 1, 1, /* V24 - V31 */ \ + 1, 1, 1, /* SFP, AP, CC */ \ + } + +#define REGISTER_NAMES \ + { \ + "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", \ + "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", \ + "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23", \ + "x24", "x25", "x26", "x27", "x28", "x29", "x30", "sp", \ + "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", \ + "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", \ + "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", \ + "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", \ + "sfp", "ap", "cc", \ + } + +/* Generate the register aliases for core register N */ +#define R_ALIASES(N) {"r" # N, R0_REGNUM + (N)}, \ + {"w" # N, R0_REGNUM + (N)} + +#define V_ALIASES(N) {"q" # N, V0_REGNUM + (N)}, \ + {"d" # N, V0_REGNUM + (N)}, \ + {"s" # N, V0_REGNUM + (N)}, \ + {"h" # N, V0_REGNUM + (N)}, \ + {"b" # N, V0_REGNUM + (N)} + +/* Provide aliases for all of the ISA defined register name forms. + These aliases are convenient for use in the clobber lists of inline + asm statements. */ + +#define ADDITIONAL_REGISTER_NAMES \ + { R_ALIASES(0), R_ALIASES(1), R_ALIASES(2), R_ALIASES(3), \ + R_ALIASES(4), R_ALIASES(5), R_ALIASES(6), R_ALIASES(7), \ + R_ALIASES(8), R_ALIASES(9), R_ALIASES(10), R_ALIASES(11), \ + R_ALIASES(12), R_ALIASES(13), R_ALIASES(14), R_ALIASES(15), \ + R_ALIASES(16), R_ALIASES(17), R_ALIASES(18), R_ALIASES(19), \ + R_ALIASES(20), R_ALIASES(21), R_ALIASES(22), R_ALIASES(23), \ + R_ALIASES(24), R_ALIASES(25), R_ALIASES(26), R_ALIASES(27), \ + R_ALIASES(28), R_ALIASES(29), R_ALIASES(30), {"wsp", R0_REGNUM + 31}, \ + V_ALIASES(0), V_ALIASES(1), V_ALIASES(2), V_ALIASES(3), \ + V_ALIASES(4), V_ALIASES(5), V_ALIASES(6), V_ALIASES(7), \ + V_ALIASES(8), V_ALIASES(9), V_ALIASES(10), V_ALIASES(11), \ + V_ALIASES(12), V_ALIASES(13), V_ALIASES(14), V_ALIASES(15), \ + V_ALIASES(16), V_ALIASES(17), V_ALIASES(18), V_ALIASES(19), \ + V_ALIASES(20), V_ALIASES(21), V_ALIASES(22), V_ALIASES(23), \ + V_ALIASES(24), V_ALIASES(25), V_ALIASES(26), V_ALIASES(27), \ + V_ALIASES(28), V_ALIASES(29), V_ALIASES(30), V_ALIASES(31) \ + } + +/* Say that the epilogue uses the return address register. 
Note that + in the case of sibcalls, the values "used by the epilogue" are + considered live at the start of the called function. */ + +#define EPILOGUE_USES(REGNO) \ + ((REGNO) == LR_REGNUM) + +/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function, + the stack pointer does not matter. The value is tested only in + functions that have frame pointers. */ +#define EXIT_IGNORE_STACK 1 + +#define STATIC_CHAIN_REGNUM R18_REGNUM +#define HARD_FRAME_POINTER_REGNUM R29_REGNUM +#define FRAME_POINTER_REGNUM SFP_REGNUM +#define STACK_POINTER_REGNUM SP_REGNUM +#define ARG_POINTER_REGNUM AP_REGNUM +#define FIRST_PSEUDO_REGISTER 67 + +/* The number of (integer) argument register available. */ +#define NUM_ARG_REGS 8 +#define NUM_FP_ARG_REGS 8 + +/* A Homogeneous Floating-Point or Short-Vector Aggregate may have at most + four members. */ +#define HA_MAX_NUM_FLDS 4 + +/* External dwarf register number scheme. These number are used to + identify registers in dwarf debug information, the values are + defined by the AArch64 ABI. The numbering scheme is independent of + GCC's internal register numbering scheme. */ + +#define AARCH64_DWARF_R0 0 + +/* The number of R registers, note 31! not 32. */ +#define AARCH64_DWARF_NUMBER_R 31 + +#define AARCH64_DWARF_SP 31 +#define AARCH64_DWARF_V0 64 + +/* The number of V registers. */ +#define AARCH64_DWARF_NUMBER_V 32 + +/* For signal frames we need to use an alternative return column. This + value must not correspond to a hard register and must be out of the + range of DWARF_FRAME_REGNUM(). */ +#define DWARF_ALT_FRAME_RETURN_COLUMN \ + (AARCH64_DWARF_V0 + AARCH64_DWARF_NUMBER_V) + +/* We add 1 extra frame register for use as the + DWARF_ALT_FRAME_RETURN_COLUMN. */ +#define DWARF_FRAME_REGISTERS (DWARF_ALT_FRAME_RETURN_COLUMN + 1) + + +#define DBX_REGISTER_NUMBER(REGNO) aarch64_dbx_register_number (REGNO) +/* Provide a definition of DWARF_FRAME_REGNUM here so that fallback unwinders + can use DWARF_ALT_FRAME_RETURN_COLUMN defined below. This is just the same + as the default definition in dwarf2out.c. */ +#undef DWARF_FRAME_REGNUM +#define DWARF_FRAME_REGNUM(REGNO) DBX_REGISTER_NUMBER (REGNO) + +#define DWARF_FRAME_RETURN_COLUMN DWARF_FRAME_REGNUM (LR_REGNUM) + +#define HARD_REGNO_NREGS(REGNO, MODE) aarch64_hard_regno_nregs (REGNO, MODE) + +#define HARD_REGNO_MODE_OK(REGNO, MODE) aarch64_hard_regno_mode_ok (REGNO, MODE) + +#define MODES_TIEABLE_P(MODE1, MODE2) \ + (GET_MODE_CLASS (MODE1) == GET_MODE_CLASS (MODE2)) + +#define DWARF2_UNWIND_INFO 1 + +/* Use R0 through R3 to pass exception handling information. */ +#define EH_RETURN_DATA_REGNO(N) \ + ((N) < 4 ? ((unsigned int) R0_REGNUM + (N)) : INVALID_REGNUM) + +/* Select a format to encode pointers in exception handling data. */ +#define ASM_PREFERRED_EH_DATA_FORMAT(CODE, GLOBAL) \ + aarch64_asm_preferred_eh_data_format ((CODE), (GLOBAL)) + +/* The register that holds the return address in exception handlers. */ +#define AARCH64_EH_STACKADJ_REGNUM (R0_REGNUM + 4) +#define EH_RETURN_STACKADJ_RTX gen_rtx_REG (Pmode, AARCH64_EH_STACKADJ_REGNUM) + +/* Don't use __builtin_setjmp until we've defined it. */ +#undef DONT_USE_BUILTIN_SETJMP +#define DONT_USE_BUILTIN_SETJMP 1 + +/* Register in which the structure value is to be returned. */ +#define AARCH64_STRUCT_VALUE_REGNUM R8_REGNUM + +/* Non-zero if REGNO is part of the Core register set. + + The rather unusual way of expressing this check is to avoid + warnings when building the compiler when R0_REGNUM is 0 and REGNO + is unsigned. 
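   The subtract-and-compare-unsigned form is the usual single-test range
   check: for unsigned x, (x - LO) <= (HI - LO) is equivalent to
   LO <= x && x <= HI, because values below LO wrap around to something
   large.  Written as two comparisons, the REGNO >= R0_REGNUM half would be
   always true for an unsigned REGNO when R0_REGNUM is 0 and would provoke
   the warning mentioned above.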
*/ +#define GP_REGNUM_P(REGNO) \ + (((unsigned) (REGNO - R0_REGNUM)) <= (R30_REGNUM - R0_REGNUM)) + +#define FP_REGNUM_P(REGNO) \ + (((unsigned) (REGNO - V0_REGNUM)) <= (V31_REGNUM - V0_REGNUM)) + +#define FP_LO_REGNUM_P(REGNO) \ + (((unsigned) (REGNO - V0_REGNUM)) <= (V15_REGNUM - V0_REGNUM)) + + +/* Register and constant classes. */ + +enum reg_class +{ + NO_REGS, + CORE_REGS, + GENERAL_REGS, + STACK_REG, + POINTER_REGS, + FP_LO_REGS, + FP_REGS, + ALL_REGS, + LIM_REG_CLASSES /* Last */ +}; + +#define N_REG_CLASSES ((int) LIM_REG_CLASSES) + +#define REG_CLASS_NAMES \ +{ \ + "NO_REGS", \ + "CORE_REGS", \ + "GENERAL_REGS", \ + "STACK_REG", \ + "POINTER_REGS", \ + "FP_LO_REGS", \ + "FP_REGS", \ + "ALL_REGS" \ +} + +#define REG_CLASS_CONTENTS \ +{ \ + { 0x00000000, 0x00000000, 0x00000000 }, /* NO_REGS */ \ + { 0x7fffffff, 0x00000000, 0x00000003 }, /* CORE_REGS */ \ + { 0x7fffffff, 0x00000000, 0x00000003 }, /* GENERAL_REGS */ \ + { 0x80000000, 0x00000000, 0x00000000 }, /* STACK_REG */ \ + { 0xffffffff, 0x00000000, 0x00000003 }, /* POINTER_REGS */ \ + { 0x00000000, 0x0000ffff, 0x00000000 }, /* FP_LO_REGS */ \ + { 0x00000000, 0xffffffff, 0x00000000 }, /* FP_REGS */ \ + { 0xffffffff, 0xffffffff, 0x00000007 } /* ALL_REGS */ \ +} + +#define REGNO_REG_CLASS(REGNO) aarch64_regno_regclass (REGNO) + +#define INDEX_REG_CLASS CORE_REGS +#define BASE_REG_CLASS POINTER_REGS + +/* Register pairs used to eliminate unneeded registers that point into + the stack frame. */ +#define ELIMINABLE_REGS \ +{ \ + { ARG_POINTER_REGNUM, STACK_POINTER_REGNUM }, \ + { ARG_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM }, \ + { FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM }, \ + { FRAME_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM }, \ +} + +#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \ + (OFFSET) = aarch64_initial_elimination_offset (FROM, TO) + +/* CPU/ARCH option handling. */ +#include "config/aarch64/aarch64-opts.h" + +enum target_cpus +{ +#define AARCH64_CORE(NAME, INTERNAL_IDENT, IDENT, ARCH, FLAGS, COSTS) \ + TARGET_CPU_##INTERNAL_IDENT, +#include "aarch64-cores.def" +#undef AARCH64_CORE + TARGET_CPU_generic +}; + +/* If there is no CPU defined at configure, use generic as default. */ +#ifndef TARGET_CPU_DEFAULT +#define TARGET_CPU_DEFAULT \ + (TARGET_CPU_generic | (AARCH64_CPU_DEFAULT_FLAGS << 6)) +#endif + +/* The processor for which instructions should be scheduled. */ +extern enum aarch64_processor aarch64_tune; + +/* RTL generation support. */ +#define INIT_EXPANDERS aarch64_init_expanders () + + +/* Stack layout; function entry, exit and calling. */ +#define STACK_GROWS_DOWNWARD 1 + +#define FRAME_GROWS_DOWNWARD 1 + +#define STARTING_FRAME_OFFSET 0 + +#define ACCUMULATE_OUTGOING_ARGS 1 + +#define FIRST_PARM_OFFSET(FNDECL) 0 + +/* Fix for VFP */ +#define LIBCALL_VALUE(MODE) \ + gen_rtx_REG (MODE, FLOAT_MODE_P (MODE) ? V0_REGNUM : R0_REGNUM) + +#define DEFAULT_PCC_STRUCT_RETURN 0 + +#define AARCH64_ROUND_UP(X, ALIGNMENT) \ + (((X) + ((ALIGNMENT) - 1)) & ~((ALIGNMENT) - 1)) + +#define AARCH64_ROUND_DOWN(X, ALIGNMENT) \ + ((X) & ~((ALIGNMENT) - 1)) + +#ifdef HOST_WIDE_INT +struct GTY (()) aarch64_frame +{ + HOST_WIDE_INT reg_offset[FIRST_PSEUDO_REGISTER]; + HOST_WIDE_INT saved_regs_size; + /* Padding if needed after the all the callee save registers have + been saved. 
*/ + HOST_WIDE_INT padding0; + HOST_WIDE_INT hardfp_offset; /* HARD_FRAME_POINTER_REGNUM */ + HOST_WIDE_INT fp_lr_offset; /* Space needed for saving fp and/or lr */ + + bool laid_out; +}; + +typedef struct GTY (()) machine_function +{ + struct aarch64_frame frame; + + /* The number of extra stack bytes taken up by register varargs. + This area is allocated by the callee at the very top of the frame. */ + HOST_WIDE_INT saved_varargs_size; + +} machine_function; +#endif + +/* Which ABI to use. */ +enum aarch64_abi_type +{ + AARCH64_ABI_LP64 = 0, + AARCH64_ABI_ILP32 = 1 +}; + +#ifndef AARCH64_ABI_DEFAULT +#define AARCH64_ABI_DEFAULT AARCH64_ABI_LP64 +#endif + +#define TARGET_ILP32 (aarch64_abi & AARCH64_ABI_ILP32) + +enum arm_pcs +{ + ARM_PCS_AAPCS64, /* Base standard AAPCS for 64 bit. */ + ARM_PCS_UNKNOWN +}; + + +extern enum arm_pcs arm_pcs_variant; + +#ifndef ARM_DEFAULT_PCS +#define ARM_DEFAULT_PCS ARM_PCS_AAPCS64 +#endif + +/* We can't use enum machine_mode inside a generator file because it + hasn't been created yet; we shouldn't be using any code that + needs the real definition though, so this ought to be safe. */ +#ifdef GENERATOR_FILE +#define MACHMODE int +#else +#include "insn-modes.h" +#define MACHMODE enum machine_mode +#endif + + +/* AAPCS related state tracking. */ +typedef struct +{ + enum arm_pcs pcs_variant; + int aapcs_arg_processed; /* No need to lay out this argument again. */ + int aapcs_ncrn; /* Next Core register number. */ + int aapcs_nextncrn; /* Next next core register number. */ + int aapcs_nvrn; /* Next Vector register number. */ + int aapcs_nextnvrn; /* Next Next Vector register number. */ + rtx aapcs_reg; /* Register assigned to this argument. This + is NULL_RTX if this parameter goes on + the stack. */ + MACHMODE aapcs_vfp_rmode; + int aapcs_stack_words; /* If the argument is passed on the stack, this + is the number of words needed, after rounding + up. Only meaningful when + aapcs_reg == NULL_RTX. */ + int aapcs_stack_size; /* The total size (in words, per 8 byte) of the + stack arg area so far. */ +} CUMULATIVE_ARGS; + +#define FUNCTION_ARG_PADDING(MODE, TYPE) \ + (aarch64_pad_arg_upward (MODE, TYPE) ? upward : downward) + +#define BLOCK_REG_PADDING(MODE, TYPE, FIRST) \ + (aarch64_pad_reg_upward (MODE, TYPE, FIRST) ? upward : downward) + +#define PAD_VARARGS_DOWN 0 + +#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, FNDECL, N_NAMED_ARGS) \ + aarch64_init_cumulative_args (&(CUM), FNTYPE, LIBNAME, FNDECL, N_NAMED_ARGS) + +#define FUNCTION_ARG_REGNO_P(REGNO) \ + aarch64_function_arg_regno_p(REGNO) + + +/* ISA Features. */ + +/* Addressing modes, etc. */ +#define HAVE_POST_INCREMENT 1 +#define HAVE_PRE_INCREMENT 1 +#define HAVE_POST_DECREMENT 1 +#define HAVE_PRE_DECREMENT 1 +#define HAVE_POST_MODIFY_DISP 1 +#define HAVE_PRE_MODIFY_DISP 1 + +#define MAX_REGS_PER_ADDRESS 2 + +#define CONSTANT_ADDRESS_P(X) aarch64_constant_address_p(X) + +/* Try a machine-dependent way of reloading an illegitimate address + operand. If we find one, push the reload and jump to WIN. This + macro is used in only one place: `find_reloads_address' in reload.c. 
*/ + +#define LEGITIMIZE_RELOAD_ADDRESS(X, MODE, OPNUM, TYPE, IND_L, WIN) \ +do { \ + rtx new_x = aarch64_legitimize_reload_address (&(X), MODE, OPNUM, TYPE, \ + IND_L); \ + if (new_x) \ + { \ + X = new_x; \ + goto WIN; \ + } \ +} while (0) + +#define REGNO_OK_FOR_BASE_P(REGNO) \ + aarch64_regno_ok_for_base_p (REGNO, true) + +#define REGNO_OK_FOR_INDEX_P(REGNO) \ + aarch64_regno_ok_for_index_p (REGNO, true) + +#define LEGITIMATE_PIC_OPERAND_P(X) \ + aarch64_legitimate_pic_operand_p (X) + +#define CASE_VECTOR_MODE Pmode + +#define DEFAULT_SIGNED_CHAR 0 + +/* An integer expression for the size in bits of the largest integer machine + mode that should actually be used. We allow pairs of registers. */ +#define MAX_FIXED_MODE_SIZE GET_MODE_BITSIZE (TImode) + +/* Maximum bytes moved by a single instruction (load/store pair). */ +#define MOVE_MAX (UNITS_PER_WORD * 2) + +/* The base cost overhead of a memcpy call, for MOVE_RATIO and friends. */ +#define AARCH64_CALL_RATIO 8 + +/* When optimizing for size, give a better estimate of the length of a memcpy + call, but use the default otherwise. But move_by_pieces_ninsns() counts + memory-to-memory moves, and we'll have to generate a load & store for each, + so halve the value to take that into account. */ +#define MOVE_RATIO(speed) \ + (((speed) ? 15 : AARCH64_CALL_RATIO) / 2) + +/* For CLEAR_RATIO, when optimizing for size, give a better estimate + of the length of a memset call, but use the default otherwise. */ +#define CLEAR_RATIO(speed) \ + ((speed) ? 15 : AARCH64_CALL_RATIO) + +/* SET_RATIO is similar to CLEAR_RATIO, but for a non-zero constant, so when + optimizing for size adjust the ratio to account for the overhead of loading + the constant. */ +#define SET_RATIO(speed) \ + ((speed) ? 15 : AARCH64_CALL_RATIO - 2) + +/* STORE_BY_PIECES_P can be used when copying a constant string, but + in that case each 64-bit chunk takes 5 insns instead of 2 (LDR/STR). + For now we always fail this and let the move_by_pieces code copy + the string from read-only memory. */ +#define STORE_BY_PIECES_P(SIZE, ALIGN) 0 + +/* Disable auto-increment in move_by_pieces et al. Use of auto-increment is + rarely a good idea in straight-line code since it adds an extra address + dependency between each instruction. Better to use incrementing offsets. */ +#define USE_LOAD_POST_INCREMENT(MODE) 0 +#define USE_LOAD_POST_DECREMENT(MODE) 0 +#define USE_LOAD_PRE_INCREMENT(MODE) 0 +#define USE_LOAD_PRE_DECREMENT(MODE) 0 +#define USE_STORE_POST_INCREMENT(MODE) 0 +#define USE_STORE_POST_DECREMENT(MODE) 0 +#define USE_STORE_PRE_INCREMENT(MODE) 0 +#define USE_STORE_PRE_DECREMENT(MODE) 0 + +/* ?? #define WORD_REGISTER_OPERATIONS */ + +/* Define if loading from memory in MODE, an integral mode narrower than + BITS_PER_WORD will either zero-extend or sign-extend. The value of this + macro should be the code that says which one of the two operations is + implicitly done, or UNKNOWN if none. */ +#define LOAD_EXTEND_OP(MODE) ZERO_EXTEND + +/* Define this macro to be non-zero if instructions will fail to work + if given data not on the nominal alignment. */ +#define STRICT_ALIGNMENT TARGET_STRICT_ALIGN + +/* Define this macro to be non-zero if accessing less than a word of + memory is no faster than accessing a word of memory, i.e., if such + accesses require more than one instruction or if there is no + difference in cost. 
+ Although there's no difference in instruction count or cycles, + in AArch64 we don't want to expand to a sub-word to a 64-bit access + if we don't have to, for power-saving reasons. */ +#define SLOW_BYTE_ACCESS 0 + +#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) 1 + +#define NO_FUNCTION_CSE 1 + +/* Specify the machine mode that the hardware addresses have. + After generation of rtl, the compiler makes no further distinction + between pointers and any other objects of this machine mode. */ +#define Pmode DImode + +/* A C expression whose value is zero if pointers that need to be extended + from being `POINTER_SIZE' bits wide to `Pmode' are sign-extended and + greater then zero if they are zero-extended and less then zero if the + ptr_extend instruction should be used. */ +#define POINTERS_EXTEND_UNSIGNED 1 + +/* Mode of a function address in a call instruction (for indexing purposes). */ +#define FUNCTION_MODE Pmode + +#define SELECT_CC_MODE(OP, X, Y) aarch64_select_cc_mode (OP, X, Y) + +#define REVERSIBLE_CC_MODE(MODE) 1 + +#define REVERSE_CONDITION(CODE, MODE) \ + (((MODE) == CCFPmode || (MODE) == CCFPEmode) \ + ? reverse_condition_maybe_unordered (CODE) \ + : reverse_condition (CODE)) + +#define CLZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) \ + ((VALUE) = GET_MODE_UNIT_BITSIZE (MODE)) +#define CTZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) \ + ((VALUE) = ((MODE) == SImode ? 32 : 64), 2) + +#define INCOMING_RETURN_ADDR_RTX gen_rtx_REG (Pmode, LR_REGNUM) + +#define RETURN_ADDR_RTX aarch64_return_addr + +/* 3 insns + padding + 2 pointer-sized entries. */ +#define TRAMPOLINE_SIZE (TARGET_ILP32 ? 24 : 32) + +/* Trampolines contain dwords, so must be dword aligned. */ +#define TRAMPOLINE_ALIGNMENT 64 + +/* Put trampolines in the text section so that mapping symbols work + correctly. */ +#define TRAMPOLINE_SECTION text_section + +/* To start with. */ +#define BRANCH_COST(SPEED_P, PREDICTABLE_P) 2 + + +/* Assembly output. */ + +/* For now we'll make all jump tables pc-relative. */ +#define CASE_VECTOR_PC_RELATIVE 1 + +#define CASE_VECTOR_SHORTEN_MODE(min, max, body) \ + ((min < -0x1fff0 || max > 0x1fff0) ? SImode \ + : (min < -0x1f0 || max > 0x1f0) ? HImode \ + : QImode) + +/* Jump table alignment is explicit in ASM_OUTPUT_CASE_LABEL. */ +#define ADDR_VEC_ALIGN(JUMPTABLE) 0 + +#define PRINT_OPERAND(STREAM, X, CODE) aarch64_print_operand (STREAM, X, CODE) + +#define PRINT_OPERAND_ADDRESS(STREAM, X) \ + aarch64_print_operand_address (STREAM, X) + +#define MCOUNT_NAME "_mcount" + +#define NO_PROFILE_COUNTERS 1 + +/* Emit rtl for profiling. Output assembler code to FILE + to call "_mcount" for profiling a function entry. */ +#define PROFILE_HOOK(LABEL) \ + { \ + rtx fun, lr; \ + lr = get_hard_reg_initial_val (Pmode, LR_REGNUM); \ + fun = gen_rtx_SYMBOL_REF (Pmode, MCOUNT_NAME); \ + emit_library_call (fun, LCT_NORMAL, VOIDmode, 1, lr, Pmode); \ + } + +/* All the work done in PROFILE_HOOK, but still required. */ +#define FUNCTION_PROFILER(STREAM, LABELNO) do { } while (0) + +/* For some reason, the Linux headers think they know how to define + these macros. They don't!!! */ +#undef ASM_APP_ON +#undef ASM_APP_OFF +#define ASM_APP_ON "\t" ASM_COMMENT_START " Start of user assembly\n" +#define ASM_APP_OFF "\t" ASM_COMMENT_START " End of user assembly\n" + +#define CONSTANT_POOL_BEFORE_FUNCTION 0 + +/* This definition should be relocated to aarch64-elf-raw.h. 
+   This macro should be undefined in aarch64-linux.h and a clear_cache
+   pattern implemented to emit either the call to
+   __aarch64_sync_cache_range() directly, or preferably the appropriate
+   syscall or cache-clear instructions inline.  */
+#define CLEAR_INSN_CACHE(beg, end)				\
+  extern void __aarch64_sync_cache_range (void *, void *);	\
+  __aarch64_sync_cache_range (beg, end)
+
+#define CANNOT_CHANGE_MODE_CLASS(FROM, TO, CLASS) \
+  aarch64_cannot_change_mode_class (FROM, TO, CLASS)
+
+#define SHIFT_COUNT_TRUNCATED !TARGET_SIMD
+
+/* Callee only saves lower 64-bits of a 128-bit register.  Tell the
+   compiler the callee clobbers the top 64-bits when restoring the
+   bottom 64-bits.  */
+#define HARD_REGNO_CALL_PART_CLOBBERED(REGNO, MODE) \
+  (FP_REGNUM_P (REGNO) && GET_MODE_SIZE (MODE) > 8)
+
+/* Check that the TLS Descriptors mechanism is selected.  */
+#define TARGET_TLS_DESC (aarch64_tls_dialect == TLS_DESCRIPTORS)
+
+extern enum aarch64_code_model aarch64_cmodel;
+
+/* When using the tiny addressing model, conditional and unconditional
+   branches can span the whole of the available address space (1MB).  */
+#define HAS_LONG_COND_BRANCH				\
+  (aarch64_cmodel == AARCH64_CMODEL_TINY		\
+   || aarch64_cmodel == AARCH64_CMODEL_TINY_PIC)
+
+#define HAS_LONG_UNCOND_BRANCH				\
+  (aarch64_cmodel == AARCH64_CMODEL_TINY		\
+   || aarch64_cmodel == AARCH64_CMODEL_TINY_PIC)
+
+/* Modes valid for AdvSIMD Q registers.  */
+#define AARCH64_VALID_SIMD_QREG_MODE(MODE) \
+  ((MODE) == V4SImode || (MODE) == V8HImode || (MODE) == V16QImode \
+   || (MODE) == V4SFmode || (MODE) == V2DImode || (MODE) == V2DFmode)
+
+#define ENDIAN_LANE_N(mode, n) \
+  (BYTES_BIG_ENDIAN ? GET_MODE_NUNITS (mode) - 1 - n : n)
+
+#define BIG_LITTLE_SPEC \
+   " %{mcpu=*:-mcpu=%:rewrite_mcpu(%{mcpu=*:%*})}"
+
+extern const char *aarch64_rewrite_mcpu (int argc, const char **argv);
+#define BIG_LITTLE_CPU_SPEC_FUNCTIONS \
+  { "rewrite_mcpu", aarch64_rewrite_mcpu },
+
+#define ASM_CPU_SPEC \
+   BIG_LITTLE_SPEC
+
+#define EXTRA_SPEC_FUNCTIONS BIG_LITTLE_CPU_SPEC_FUNCTIONS
+
+#define EXTRA_SPECS \
+  { "asm_cpu_spec", ASM_CPU_SPEC }
+
+#endif /* GCC_AARCH64_H */
diff --git a/gcc-4.9/gcc/config/aarch64/aarch64.md b/gcc-4.9/gcc/config/aarch64/aarch64.md
new file mode 100644
index 000000000..c86a29d8e
--- /dev/null
+++ b/gcc-4.9/gcc/config/aarch64/aarch64.md
@@ -0,0 +1,3642 @@
+;; Machine description for AArch64 architecture.
+;; Copyright (C) 2009-2014 Free Software Foundation, Inc.
+;; Contributed by ARM Ltd.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful, but
+;; WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+;; General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3.  If not see
+;; <http://www.gnu.org/licenses/>.
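Before moving from the target header into the machine description, a brief illustration of how the CLEAR_INSN_CACHE definition above is normally reached from user code may be useful. The sketch below is editorial and not part of the checkin: the function and parameter names (publish_code, buf, insns) are invented, and it assumes a bare-metal (aarch64-elf-raw style) build in which GCC's __builtin___clear_cache is expected to expand into the call to __aarch64_sync_cache_range declared by that macro; on a Linux target the comment above anticipates a clear_cache pattern or a system call instead.

#include <stdint.h>
#include <string.h>

/* Hypothetical JIT-style helper: copy freshly generated instructions into
   an executable buffer and make them visible to the instruction stream
   before anything branches there.  The builtin is what ultimately relies
   on CLEAR_INSN_CACHE (or a clear_cache pattern) in this port.  */
void
publish_code (void *buf, const uint32_t *insns, size_t n)
{
  size_t bytes = n * sizeof (uint32_t);

  memcpy (buf, insns, bytes);
  __builtin___clear_cache ((char *) buf, (char *) buf + bytes);
}

The explicit synchronisation matters on AArch64 because data and instruction caches are not kept coherent automatically; using the builtin rather than calling __aarch64_sync_cache_range directly keeps such code portable across the raw-ELF and Linux configurations.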
+
+;; Register numbers
+(define_constants
+  [
+    (R0_REGNUM		0)
+    (R1_REGNUM		1)
+    (R2_REGNUM		2)
+    (R3_REGNUM		3)
+    (R4_REGNUM		4)
+    (R5_REGNUM		5)
+    (R6_REGNUM		6)
+    (R7_REGNUM		7)
+    (R8_REGNUM		8)
+    (R9_REGNUM		9)
+    (R10_REGNUM		10)
+    (R11_REGNUM		11)
+    (R12_REGNUM		12)
+    (R13_REGNUM		13)
+    (R14_REGNUM		14)
+    (R15_REGNUM		15)
+    (R16_REGNUM		16)
+    (IP0_REGNUM		16)
+    (R17_REGNUM		17)
+    (IP1_REGNUM		17)
+    (R18_REGNUM		18)
+    (R19_REGNUM		19)
+    (R20_REGNUM		20)
+    (R21_REGNUM		21)
+    (R22_REGNUM		22)
+    (R23_REGNUM		23)
+    (R24_REGNUM		24)
+    (R25_REGNUM		25)
+    (R26_REGNUM		26)
+    (R27_REGNUM		27)
+    (R28_REGNUM		28)
+    (R29_REGNUM		29)
+    (R30_REGNUM		30)
+    (LR_REGNUM		30)
+    (SP_REGNUM		31)
+    (V0_REGNUM		32)
+    (V15_REGNUM		47)
+    (V31_REGNUM		63)
+    (SFP_REGNUM		64)
+    (AP_REGNUM		65)
+    (CC_REGNUM		66)
+  ]
+)
+
+(define_c_enum "unspec" [
+    UNSPEC_CASESI
+    UNSPEC_CLS
+    UNSPEC_FRECPE
+    UNSPEC_FRECPS
+    UNSPEC_FRECPX
+    UNSPEC_FRINTA
+    UNSPEC_FRINTI
+    UNSPEC_FRINTM
+    UNSPEC_FRINTN
+    UNSPEC_FRINTP
+    UNSPEC_FRINTX
+    UNSPEC_FRINTZ
+    UNSPEC_GOTSMALLPIC
+    UNSPEC_GOTSMALLTLS
+    UNSPEC_GOTTINYPIC
+    UNSPEC_LD1
+    UNSPEC_LD2
+    UNSPEC_LD3
+    UNSPEC_LD4
+    UNSPEC_MB
+    UNSPEC_NOP
+    UNSPEC_PRLG_STK
+    UNSPEC_RBIT
+    UNSPEC_SISD_NEG
+    UNSPEC_SISD_SSHL
+    UNSPEC_SISD_USHL
+    UNSPEC_SSHL_2S
+    UNSPEC_SSHR64
+    UNSPEC_ST1
+    UNSPEC_ST2
+    UNSPEC_ST3
+    UNSPEC_ST4
+    UNSPEC_TLS
+    UNSPEC_TLSDESC
+    UNSPEC_USHL_2S
+    UNSPEC_USHR64
+    UNSPEC_VSTRUCTDUMMY
+])
+
+(define_c_enum "unspecv" [
+    UNSPECV_EH_RETURN		; Represent EH_RETURN
+  ]
+)
+
+;; If further include files are added, the definition of MD_INCLUDES
+;; must be updated.
+
+(include "constraints.md")
+(include "predicates.md")
+(include "iterators.md")
+
+;; -------------------------------------------------------------------
+;; Instruction types and attributes
+;; -------------------------------------------------------------------
+
+; The "type" attribute is included here from the AArch32 backend to be able
+; to share pipeline descriptions.
+(include "../arm/types.md")
+
+;; Attribute that specifies whether or not the instruction touches fp
+;; registers.
+(define_attr "fp" "no,yes" (const_string "no"))
+
+;; Attribute that specifies whether or not the instruction touches simd
+;; registers.
+(define_attr "simd" "no,yes" (const_string "no"))
+
+(define_attr "length" ""
+  (const_int 4))
+
+;; Attribute that controls whether an alternative is enabled or not.
+;; Currently it is only used to disable alternatives which touch fp or simd
+;; registers when -mgeneral-regs-only is specified.
+(define_attr "enabled" "no,yes"
+  (cond [(ior
+	    (and (eq_attr "fp" "yes")
+		 (eq (symbol_ref "TARGET_FLOAT") (const_int 0)))
+	    (and (eq_attr "simd" "yes")
+		 (eq (symbol_ref "TARGET_SIMD") (const_int 0))))
+	 (const_string "no")
+	] (const_string "yes")))
+
+;; -------------------------------------------------------------------
+;; Pipeline descriptions and scheduling
+;; -------------------------------------------------------------------
+
+;; Processor types.
+(include "aarch64-tune.md")
+
+;; True if the generic scheduling description should be used.
+ +(define_attr "generic_sched" "yes,no" + (const (if_then_else + (eq_attr "tune" "cortexa53,cortexa15") + (const_string "no") + (const_string "yes")))) + +;; Scheduling +(include "../arm/cortex-a53.md") +(include "../arm/cortex-a15.md") + +;; ------------------------------------------------------------------- +;; Jumps and other miscellaneous insns +;; ------------------------------------------------------------------- + +(define_insn "indirect_jump" + [(set (pc) (match_operand:DI 0 "register_operand" "r"))] + "" + "br\\t%0" + [(set_attr "type" "branch")] +) + +(define_insn "jump" + [(set (pc) (label_ref (match_operand 0 "" "")))] + "" + "b\\t%l0" + [(set_attr "type" "branch")] +) + +(define_expand "cbranch<mode>4" + [(set (pc) (if_then_else (match_operator 0 "aarch64_comparison_operator" + [(match_operand:GPI 1 "register_operand" "") + (match_operand:GPI 2 "aarch64_plus_operand" "")]) + (label_ref (match_operand 3 "" "")) + (pc)))] + "" + " + operands[1] = aarch64_gen_compare_reg (GET_CODE (operands[0]), operands[1], + operands[2]); + operands[2] = const0_rtx; + " +) + +(define_expand "cbranch<mode>4" + [(set (pc) (if_then_else (match_operator 0 "aarch64_comparison_operator" + [(match_operand:GPF 1 "register_operand" "") + (match_operand:GPF 2 "aarch64_reg_or_zero" "")]) + (label_ref (match_operand 3 "" "")) + (pc)))] + "" + " + operands[1] = aarch64_gen_compare_reg (GET_CODE (operands[0]), operands[1], + operands[2]); + operands[2] = const0_rtx; + " +) + +(define_insn "*condjump" + [(set (pc) (if_then_else (match_operator 0 "aarch64_comparison_operator" + [(match_operand 1 "cc_register" "") (const_int 0)]) + (label_ref (match_operand 2 "" "")) + (pc)))] + "" + "b%m0\\t%l2" + [(set_attr "type" "branch")] +) + +(define_expand "casesi" + [(match_operand:SI 0 "register_operand" "") ; Index + (match_operand:SI 1 "const_int_operand" "") ; Lower bound + (match_operand:SI 2 "const_int_operand" "") ; Total range + (match_operand:DI 3 "" "") ; Table label + (match_operand:DI 4 "" "")] ; Out of range label + "" + { + if (operands[1] != const0_rtx) + { + rtx reg = gen_reg_rtx (SImode); + + /* Canonical RTL says that if you have: + + (minus (X) (CONST)) + + then this should be emitted as: + + (plus (X) (-CONST)) + + The use of trunc_int_for_mode ensures that the resulting + constant can be represented in SImode, this is important + for the corner case where operand[1] is INT_MIN. 
*/ + + operands[1] = GEN_INT (trunc_int_for_mode (-INTVAL (operands[1]), SImode)); + + if (!(*insn_data[CODE_FOR_addsi3].operand[2].predicate) + (operands[1], SImode)) + operands[1] = force_reg (SImode, operands[1]); + emit_insn (gen_addsi3 (reg, operands[0], operands[1])); + operands[0] = reg; + } + + if (!aarch64_plus_operand (operands[2], SImode)) + operands[2] = force_reg (SImode, operands[2]); + emit_jump_insn (gen_cbranchsi4 (gen_rtx_GTU (SImode, const0_rtx, + const0_rtx), + operands[0], operands[2], operands[4])); + + operands[2] = force_reg (DImode, gen_rtx_LABEL_REF (VOIDmode, operands[3])); + emit_jump_insn (gen_casesi_dispatch (operands[2], operands[0], + operands[3])); + DONE; + } +) + +(define_insn "casesi_dispatch" + [(parallel + [(set (pc) + (mem:DI (unspec [(match_operand:DI 0 "register_operand" "r") + (match_operand:SI 1 "register_operand" "r")] + UNSPEC_CASESI))) + (clobber (reg:CC CC_REGNUM)) + (clobber (match_scratch:DI 3 "=r")) + (clobber (match_scratch:DI 4 "=r")) + (use (label_ref (match_operand 2 "" "")))])] + "" + "* + return aarch64_output_casesi (operands); + " + [(set_attr "length" "16") + (set_attr "type" "branch")] +) + +(define_insn "nop" + [(unspec[(const_int 0)] UNSPEC_NOP)] + "" + "nop" + [(set_attr "type" "no_insn")] +) + +(define_insn "trap" + [(trap_if (const_int 1) (const_int 8))] + "" + "brk #1000" + [(set_attr "type" "trap")]) + +(define_expand "prologue" + [(clobber (const_int 0))] + "" + " + aarch64_expand_prologue (); + DONE; + " +) + +(define_expand "epilogue" + [(clobber (const_int 0))] + "" + " + aarch64_expand_epilogue (false); + DONE; + " +) + +(define_expand "sibcall_epilogue" + [(clobber (const_int 0))] + "" + " + aarch64_expand_epilogue (true); + DONE; + " +) + +(define_insn "*do_return" + [(return)] + "" + "ret" + [(set_attr "type" "branch")] +) + +(define_insn "eh_return" + [(unspec_volatile [(match_operand:DI 0 "register_operand" "r")] + UNSPECV_EH_RETURN)] + "" + "#" + [(set_attr "type" "branch")] + +) + +(define_split + [(unspec_volatile [(match_operand:DI 0 "register_operand" "")] + UNSPECV_EH_RETURN)] + "reload_completed" + [(set (match_dup 1) (match_dup 0))] + { + operands[1] = aarch64_final_eh_return_addr (); + } +) + +(define_insn "*cb<optab><mode>1" + [(set (pc) (if_then_else (EQL (match_operand:GPI 0 "register_operand" "r") + (const_int 0)) + (label_ref (match_operand 1 "" "")) + (pc)))] + "" + "<cbz>\\t%<w>0, %l1" + [(set_attr "type" "branch")] + +) + +(define_insn "*tb<optab><mode>1" + [(set (pc) (if_then_else + (EQL (zero_extract:DI (match_operand:GPI 0 "register_operand" "r") + (const_int 1) + (match_operand 1 "const_int_operand" "n")) + (const_int 0)) + (label_ref (match_operand 2 "" "")) + (pc))) + (clobber (match_scratch:DI 3 "=r"))] + "" + "* + if (get_attr_length (insn) == 8) + return \"ubfx\\t%<w>3, %<w>0, %1, #1\;<cbz>\\t%<w>3, %l2\"; + return \"<tbz>\\t%<w>0, %1, %l2\"; + " + [(set_attr "type" "branch") + (set (attr "length") + (if_then_else (and (ge (minus (match_dup 2) (pc)) (const_int -32768)) + (lt (minus (match_dup 2) (pc)) (const_int 32764))) + (const_int 4) + (const_int 8)))] +) + +(define_insn "*cb<optab><mode>1" + [(set (pc) (if_then_else (LTGE (match_operand:ALLI 0 "register_operand" "r") + (const_int 0)) + (label_ref (match_operand 1 "" "")) + (pc))) + (clobber (match_scratch:DI 2 "=r"))] + "" + "* + if (get_attr_length (insn) == 8) + return \"ubfx\\t%<w>2, %<w>0, <sizem1>, #1\;<cbz>\\t%<w>2, %l1\"; + return \"<tbz>\\t%<w>0, <sizem1>, %l1\"; + " + [(set_attr "type" "branch") + (set (attr "length") + 
(if_then_else (and (ge (minus (match_dup 1) (pc)) (const_int -32768)) + (lt (minus (match_dup 1) (pc)) (const_int 32764))) + (const_int 4) + (const_int 8)))] +) + +;; ------------------------------------------------------------------- +;; Subroutine calls and sibcalls +;; ------------------------------------------------------------------- + +(define_expand "call" + [(parallel [(call (match_operand 0 "memory_operand" "") + (match_operand 1 "general_operand" "")) + (use (match_operand 2 "" "")) + (clobber (reg:DI LR_REGNUM))])] + "" + " + { + rtx callee; + + /* In an untyped call, we can get NULL for operand 2. */ + if (operands[2] == NULL) + operands[2] = const0_rtx; + + /* Decide if we should generate indirect calls by loading the + 64-bit address of the callee into a register before performing + the branch-and-link. */ + callee = XEXP (operands[0], 0); + if (GET_CODE (callee) == SYMBOL_REF + ? aarch64_is_long_call_p (callee) + : !REG_P (callee)) + XEXP (operands[0], 0) = force_reg (Pmode, callee); + }" +) + +(define_insn "*call_reg" + [(call (mem:DI (match_operand:DI 0 "register_operand" "r")) + (match_operand 1 "" "")) + (use (match_operand 2 "" "")) + (clobber (reg:DI LR_REGNUM))] + "" + "blr\\t%0" + [(set_attr "type" "call")] +) + +(define_insn "*call_symbol" + [(call (mem:DI (match_operand:DI 0 "" "")) + (match_operand 1 "" "")) + (use (match_operand 2 "" "")) + (clobber (reg:DI LR_REGNUM))] + "GET_CODE (operands[0]) == SYMBOL_REF + && !aarch64_is_long_call_p (operands[0])" + "bl\\t%a0" + [(set_attr "type" "call")] +) + +(define_expand "call_value" + [(parallel [(set (match_operand 0 "" "") + (call (match_operand 1 "memory_operand" "") + (match_operand 2 "general_operand" ""))) + (use (match_operand 3 "" "")) + (clobber (reg:DI LR_REGNUM))])] + "" + " + { + rtx callee; + + /* In an untyped call, we can get NULL for operand 3. */ + if (operands[3] == NULL) + operands[3] = const0_rtx; + + /* Decide if we should generate indirect calls by loading the + 64-bit address of the callee into a register before performing + the branch-and-link. */ + callee = XEXP (operands[1], 0); + if (GET_CODE (callee) == SYMBOL_REF + ? 
aarch64_is_long_call_p (callee) + : !REG_P (callee)) + XEXP (operands[1], 0) = force_reg (Pmode, callee); + }" +) + +(define_insn "*call_value_reg" + [(set (match_operand 0 "" "") + (call (mem:DI (match_operand:DI 1 "register_operand" "r")) + (match_operand 2 "" ""))) + (use (match_operand 3 "" "")) + (clobber (reg:DI LR_REGNUM))] + "" + "blr\\t%1" + [(set_attr "type" "call")] + +) + +(define_insn "*call_value_symbol" + [(set (match_operand 0 "" "") + (call (mem:DI (match_operand:DI 1 "" "")) + (match_operand 2 "" ""))) + (use (match_operand 3 "" "")) + (clobber (reg:DI LR_REGNUM))] + "GET_CODE (operands[1]) == SYMBOL_REF + && !aarch64_is_long_call_p (operands[1])" + "bl\\t%a1" + [(set_attr "type" "call")] +) + +(define_expand "sibcall" + [(parallel [(call (match_operand 0 "memory_operand" "") + (match_operand 1 "general_operand" "")) + (return) + (use (match_operand 2 "" ""))])] + "" + { + if (operands[2] == NULL_RTX) + operands[2] = const0_rtx; + } +) + +(define_expand "sibcall_value" + [(parallel [(set (match_operand 0 "" "") + (call (match_operand 1 "memory_operand" "") + (match_operand 2 "general_operand" ""))) + (return) + (use (match_operand 3 "" ""))])] + "" + { + if (operands[3] == NULL_RTX) + operands[3] = const0_rtx; + } +) + +(define_insn "*sibcall_insn" + [(call (mem:DI (match_operand:DI 0 "" "X")) + (match_operand 1 "" "")) + (return) + (use (match_operand 2 "" ""))] + "GET_CODE (operands[0]) == SYMBOL_REF" + "b\\t%a0" + [(set_attr "type" "branch")] + +) + +(define_insn "*sibcall_value_insn" + [(set (match_operand 0 "" "") + (call (mem:DI (match_operand 1 "" "X")) + (match_operand 2 "" ""))) + (return) + (use (match_operand 3 "" ""))] + "GET_CODE (operands[1]) == SYMBOL_REF" + "b\\t%a1" + [(set_attr "type" "branch")] +) + +;; Call subroutine returning any type. + +(define_expand "untyped_call" + [(parallel [(call (match_operand 0 "") + (const_int 0)) + (match_operand 1 "") + (match_operand 2 "")])] + "" +{ + int i; + + emit_call_insn (GEN_CALL (operands[0], const0_rtx, NULL, const0_rtx)); + + for (i = 0; i < XVECLEN (operands[2], 0); i++) + { + rtx set = XVECEXP (operands[2], 0, i); + emit_move_insn (SET_DEST (set), SET_SRC (set)); + } + + /* The optimizer does not know that the call sets the function value + registers we stored in the result block. We avoid problems by + claiming that all hard registers are used and clobbered at this + point. 
*/ + emit_insn (gen_blockage ()); + DONE; +}) + +;; ------------------------------------------------------------------- +;; Moves +;; ------------------------------------------------------------------- + +(define_expand "mov<mode>" + [(set (match_operand:SHORT 0 "nonimmediate_operand" "") + (match_operand:SHORT 1 "general_operand" ""))] + "" + " + if (GET_CODE (operands[0]) == MEM && operands[1] != const0_rtx) + operands[1] = force_reg (<MODE>mode, operands[1]); + " +) + +(define_insn "*mov<mode>_aarch64" + [(set (match_operand:SHORT 0 "nonimmediate_operand" "=r,r, *w,r,*w, m, m, r,*w,*w") + (match_operand:SHORT 1 "general_operand" " r,M,D<hq>,m, m,rZ,*w,*w, r,*w"))] + "(register_operand (operands[0], <MODE>mode) + || aarch64_reg_or_zero (operands[1], <MODE>mode))" +{ + switch (which_alternative) + { + case 0: + return "mov\t%w0, %w1"; + case 1: + return "mov\t%w0, %1"; + case 2: + return aarch64_output_scalar_simd_mov_immediate (operands[1], + <MODE>mode); + case 3: + return "ldr<size>\t%w0, %1"; + case 4: + return "ldr\t%<size>0, %1"; + case 5: + return "str<size>\t%w1, %0"; + case 6: + return "str\t%<size>1, %0"; + case 7: + return "umov\t%w0, %1.<v>[0]"; + case 8: + return "dup\t%0.<Vallxd>, %w1"; + case 9: + return "dup\t%<Vetype>0, %1.<v>[0]"; + default: + gcc_unreachable (); + } +} + [(set_attr "type" "mov_reg,mov_imm,mov_imm,load1,load1,store1,store1,\ + neon_from_gp<q>,neon_from_gp<q>, neon_dup") + (set_attr "simd" "*,*,yes,*,*,*,*,yes,yes,yes")] +) + +(define_expand "mov<mode>" + [(set (match_operand:GPI 0 "nonimmediate_operand" "") + (match_operand:GPI 1 "general_operand" ""))] + "" + " + if (GET_CODE (operands[0]) == MEM && operands[1] != const0_rtx) + operands[1] = force_reg (<MODE>mode, operands[1]); + + if (CONSTANT_P (operands[1])) + { + aarch64_expand_mov_immediate (operands[0], operands[1]); + DONE; + } + " +) + +(define_insn "*movsi_aarch64" + [(set (match_operand:SI 0 "nonimmediate_operand" "=r,k,r,r,r,*w,m, m,r,r ,*w, r,*w") + (match_operand:SI 1 "aarch64_mov_operand" " r,r,k,M,m, m,rZ,*w,S,Ush,rZ,*w,*w"))] + "(register_operand (operands[0], SImode) + || aarch64_reg_or_zero (operands[1], SImode))" + "@ + mov\\t%w0, %w1 + mov\\t%w0, %w1 + mov\\t%w0, %w1 + mov\\t%w0, %1 + ldr\\t%w0, %1 + ldr\\t%s0, %1 + str\\t%w1, %0 + str\\t%s1, %0 + adr\\t%x0, %a1 + adrp\\t%x0, %A1 + fmov\\t%s0, %w1 + fmov\\t%w0, %s1 + fmov\\t%s0, %s1" + [(set_attr "type" "mov_reg,mov_reg,mov_reg,mov_imm,load1,load1,store1,store1,\ + adr,adr,fmov,fmov,fmov") + (set_attr "fp" "*,*,*,*,*,yes,*,yes,*,*,yes,yes,yes")] +) + +(define_insn "*movdi_aarch64" + [(set (match_operand:DI 0 "nonimmediate_operand" "=r,k,r,r,r,*w,m, m,r,r, *w, r,*w,w") + (match_operand:DI 1 "aarch64_mov_operand" " r,r,k,N,m, m,rZ,*w,S,Ush,rZ,*w,*w,Dd"))] + "(register_operand (operands[0], DImode) + || aarch64_reg_or_zero (operands[1], DImode))" + "@ + mov\\t%x0, %x1 + mov\\t%0, %x1 + mov\\t%x0, %1 + mov\\t%x0, %1 + ldr\\t%x0, %1 + ldr\\t%d0, %1 + str\\t%x1, %0 + str\\t%d1, %0 + adr\\t%x0, %a1 + adrp\\t%x0, %A1 + fmov\\t%d0, %x1 + fmov\\t%x0, %d1 + fmov\\t%d0, %d1 + movi\\t%d0, %1" + [(set_attr "type" "mov_reg,mov_reg,mov_reg,mov_imm,load1,load1,store1,store1,\ + adr,adr,fmov,fmov,fmov,fmov") + (set_attr "fp" "*,*,*,*,*,yes,*,yes,*,*,yes,yes,yes,*") + (set_attr "simd" "*,*,*,*,*,*,*,*,*,*,*,*,*,yes")] +) + +(define_insn "insv_imm<mode>" + [(set (zero_extract:GPI (match_operand:GPI 0 "register_operand" "+r") + (const_int 16) + (match_operand:GPI 1 "const_int_operand" "n")) + (match_operand:GPI 2 "const_int_operand" "n"))] + "UINTVAL 
(operands[1]) < GET_MODE_BITSIZE (<MODE>mode) + && UINTVAL (operands[1]) % 16 == 0" + "movk\\t%<w>0, %X2, lsl %1" + [(set_attr "type" "mov_imm")] +) + +(define_expand "movti" + [(set (match_operand:TI 0 "nonimmediate_operand" "") + (match_operand:TI 1 "general_operand" ""))] + "" + " + if (GET_CODE (operands[0]) == MEM && operands[1] != const0_rtx) + operands[1] = force_reg (TImode, operands[1]); + " +) + +(define_insn "*movti_aarch64" + [(set (match_operand:TI 0 + "nonimmediate_operand" "=r, *w,r ,*w,r ,Ump,Ump,*w,m") + (match_operand:TI 1 + "aarch64_movti_operand" " rn,r ,*w,*w,Ump,r ,Z , m,*w"))] + "(register_operand (operands[0], TImode) + || aarch64_reg_or_zero (operands[1], TImode))" + "@ + # + # + # + orr\\t%0.16b, %1.16b, %1.16b + ldp\\t%0, %H0, %1 + stp\\t%1, %H1, %0 + stp\\txzr, xzr, %0 + ldr\\t%q0, %1 + str\\t%q1, %0" + [(set_attr "type" "multiple,f_mcr,f_mrc,neon_logic_q, \ + load2,store2,store2,f_loadd,f_stored") + (set_attr "length" "8,8,8,4,4,4,4,4,4") + (set_attr "simd" "*,*,*,yes,*,*,*,*,*") + (set_attr "fp" "*,*,*,*,*,*,*,yes,yes")] +) + +;; Split a TImode register-register or register-immediate move into +;; its component DImode pieces, taking care to handle overlapping +;; source and dest registers. +(define_split + [(set (match_operand:TI 0 "register_operand" "") + (match_operand:TI 1 "aarch64_reg_or_imm" ""))] + "reload_completed && aarch64_split_128bit_move_p (operands[0], operands[1])" + [(const_int 0)] +{ + aarch64_split_128bit_move (operands[0], operands[1]); + DONE; +}) + +(define_expand "mov<mode>" + [(set (match_operand:GPF 0 "nonimmediate_operand" "") + (match_operand:GPF 1 "general_operand" ""))] + "" + " + if (!TARGET_FLOAT) + { + sorry (\"%qs and floating point code\", \"-mgeneral-regs-only\"); + FAIL; + } + + if (GET_CODE (operands[0]) == MEM) + operands[1] = force_reg (<MODE>mode, operands[1]); + " +) + +(define_insn "*movsf_aarch64" + [(set (match_operand:SF 0 "nonimmediate_operand" "=w, ?r,w,w ,w,m,r,m ,r") + (match_operand:SF 1 "general_operand" "?rY, w,w,Ufc,m,w,m,rY,r"))] + "TARGET_FLOAT && (register_operand (operands[0], SFmode) + || register_operand (operands[1], SFmode))" + "@ + fmov\\t%s0, %w1 + fmov\\t%w0, %s1 + fmov\\t%s0, %s1 + fmov\\t%s0, %1 + ldr\\t%s0, %1 + str\\t%s1, %0 + ldr\\t%w0, %1 + str\\t%w1, %0 + mov\\t%w0, %w1" + [(set_attr "type" "f_mcr,f_mrc,fmov,fconsts,\ + f_loads,f_stores,f_loads,f_stores,fmov")] +) + +(define_insn "*movdf_aarch64" + [(set (match_operand:DF 0 "nonimmediate_operand" "=w, ?r,w,w ,w,m,r,m ,r") + (match_operand:DF 1 "general_operand" "?rY, w,w,Ufc,m,w,m,rY,r"))] + "TARGET_FLOAT && (register_operand (operands[0], DFmode) + || register_operand (operands[1], DFmode))" + "@ + fmov\\t%d0, %x1 + fmov\\t%x0, %d1 + fmov\\t%d0, %d1 + fmov\\t%d0, %1 + ldr\\t%d0, %1 + str\\t%d1, %0 + ldr\\t%x0, %1 + str\\t%x1, %0 + mov\\t%x0, %x1" + [(set_attr "type" "f_mcr,f_mrc,fmov,fconstd,\ + f_loadd,f_stored,f_loadd,f_stored,mov_reg")] +) + +(define_expand "movtf" + [(set (match_operand:TF 0 "nonimmediate_operand" "") + (match_operand:TF 1 "general_operand" ""))] + "" + " + if (!TARGET_FLOAT) + { + sorry (\"%qs and floating point code\", \"-mgeneral-regs-only\"); + FAIL; + } + + if (GET_CODE (operands[0]) == MEM) + operands[1] = force_reg (TFmode, operands[1]); + " +) + +(define_insn "*movtf_aarch64" + [(set (match_operand:TF 0 + "nonimmediate_operand" "=w,?&r,w ,?r,w,?w,w,m,?r ,Ump") + (match_operand:TF 1 + "general_operand" " w,?r, ?r,w ,Y,Y ,m,w,Ump,?rY"))] + "TARGET_FLOAT && (register_operand (operands[0], TFmode) + || 
register_operand (operands[1], TFmode))" + "@ + orr\\t%0.16b, %1.16b, %1.16b + # + # + # + movi\\t%0.2d, #0 + fmov\\t%s0, wzr + ldr\\t%q0, %1 + str\\t%q1, %0 + ldp\\t%0, %H0, %1 + stp\\t%1, %H1, %0" + [(set_attr "type" "logic_reg,multiple,f_mcr,f_mrc,fconstd,fconstd,\ + f_loadd,f_stored,neon_load1_2reg,neon_store1_2reg") + (set_attr "length" "4,8,8,8,4,4,4,4,4,4") + (set_attr "fp" "*,*,yes,yes,*,yes,yes,yes,*,*") + (set_attr "simd" "yes,*,*,*,yes,*,*,*,*,*")] +) + +(define_split + [(set (match_operand:TF 0 "register_operand" "") + (match_operand:TF 1 "aarch64_reg_or_imm" ""))] + "reload_completed && aarch64_split_128bit_move_p (operands[0], operands[1])" + [(const_int 0)] + { + aarch64_split_128bit_move (operands[0], operands[1]); + DONE; + } +) + +;; Operands 1 and 3 are tied together by the final condition; so we allow +;; fairly lax checking on the second memory operation. +(define_insn "load_pair<mode>" + [(set (match_operand:GPI 0 "register_operand" "=r") + (match_operand:GPI 1 "aarch64_mem_pair_operand" "Ump")) + (set (match_operand:GPI 2 "register_operand" "=r") + (match_operand:GPI 3 "memory_operand" "m"))] + "rtx_equal_p (XEXP (operands[3], 0), + plus_constant (Pmode, + XEXP (operands[1], 0), + GET_MODE_SIZE (<MODE>mode)))" + "ldp\\t%<w>0, %<w>2, %1" + [(set_attr "type" "load2")] +) + +;; Operands 0 and 2 are tied together by the final condition; so we allow +;; fairly lax checking on the second memory operation. +(define_insn "store_pair<mode>" + [(set (match_operand:GPI 0 "aarch64_mem_pair_operand" "=Ump") + (match_operand:GPI 1 "register_operand" "r")) + (set (match_operand:GPI 2 "memory_operand" "=m") + (match_operand:GPI 3 "register_operand" "r"))] + "rtx_equal_p (XEXP (operands[2], 0), + plus_constant (Pmode, + XEXP (operands[0], 0), + GET_MODE_SIZE (<MODE>mode)))" + "stp\\t%<w>1, %<w>3, %0" + [(set_attr "type" "store2")] +) + +;; Operands 1 and 3 are tied together by the final condition; so we allow +;; fairly lax checking on the second memory operation. +(define_insn "load_pair<mode>" + [(set (match_operand:GPF 0 "register_operand" "=w") + (match_operand:GPF 1 "aarch64_mem_pair_operand" "Ump")) + (set (match_operand:GPF 2 "register_operand" "=w") + (match_operand:GPF 3 "memory_operand" "m"))] + "rtx_equal_p (XEXP (operands[3], 0), + plus_constant (Pmode, + XEXP (operands[1], 0), + GET_MODE_SIZE (<MODE>mode)))" + "ldp\\t%<w>0, %<w>2, %1" + [(set_attr "type" "neon_load1_2reg<q>")] +) + +;; Operands 0 and 2 are tied together by the final condition; so we allow +;; fairly lax checking on the second memory operation. +(define_insn "store_pair<mode>" + [(set (match_operand:GPF 0 "aarch64_mem_pair_operand" "=Ump") + (match_operand:GPF 1 "register_operand" "w")) + (set (match_operand:GPF 2 "memory_operand" "=m") + (match_operand:GPF 3 "register_operand" "w"))] + "rtx_equal_p (XEXP (operands[2], 0), + plus_constant (Pmode, + XEXP (operands[0], 0), + GET_MODE_SIZE (<MODE>mode)))" + "stp\\t%<w>1, %<w>3, %0" + [(set_attr "type" "neon_store1_2reg<q>")] +) + +;; Load pair with writeback. 
This is primarily used in function epilogues +;; when restoring [fp,lr] +(define_insn "loadwb_pair<GPI:mode>_<P:mode>" + [(parallel + [(set (match_operand:P 0 "register_operand" "=k") + (plus:P (match_operand:P 1 "register_operand" "0") + (match_operand:P 4 "const_int_operand" "n"))) + (set (match_operand:GPI 2 "register_operand" "=r") + (mem:GPI (plus:P (match_dup 1) + (match_dup 4)))) + (set (match_operand:GPI 3 "register_operand" "=r") + (mem:GPI (plus:P (match_dup 1) + (match_operand:P 5 "const_int_operand" "n"))))])] + "INTVAL (operands[5]) == INTVAL (operands[4]) + GET_MODE_SIZE (<GPI:MODE>mode)" + "ldp\\t%<w>2, %<w>3, [%1], %4" + [(set_attr "type" "load2")] +) + +;; Store pair with writeback. This is primarily used in function prologues +;; when saving [fp,lr] +(define_insn "storewb_pair<GPI:mode>_<P:mode>" + [(parallel + [(set (match_operand:P 0 "register_operand" "=&k") + (plus:P (match_operand:P 1 "register_operand" "0") + (match_operand:P 4 "const_int_operand" "n"))) + (set (mem:GPI (plus:P (match_dup 0) + (match_dup 4))) + (match_operand:GPI 2 "register_operand" "r")) + (set (mem:GPI (plus:P (match_dup 0) + (match_operand:P 5 "const_int_operand" "n"))) + (match_operand:GPI 3 "register_operand" "r"))])] + "INTVAL (operands[5]) == INTVAL (operands[4]) + GET_MODE_SIZE (<GPI:MODE>mode)" + "stp\\t%<w>2, %<w>3, [%0, %4]!" + [(set_attr "type" "store2")] +) + +;; ------------------------------------------------------------------- +;; Sign/Zero extension +;; ------------------------------------------------------------------- + +(define_expand "<optab>sidi2" + [(set (match_operand:DI 0 "register_operand") + (ANY_EXTEND:DI (match_operand:SI 1 "nonimmediate_operand")))] + "" +) + +(define_insn "*extendsidi2_aarch64" + [(set (match_operand:DI 0 "register_operand" "=r,r") + (sign_extend:DI (match_operand:SI 1 "nonimmediate_operand" "r,m")))] + "" + "@ + sxtw\t%0, %w1 + ldrsw\t%0, %1" + [(set_attr "type" "extend,load1")] +) + +(define_insn "*zero_extendsidi2_aarch64" + [(set (match_operand:DI 0 "register_operand" "=r,r") + (zero_extend:DI (match_operand:SI 1 "nonimmediate_operand" "r,m")))] + "" + "@ + uxtw\t%0, %w1 + ldr\t%w0, %1" + [(set_attr "type" "extend,load1")] +) + +(define_expand "<ANY_EXTEND:optab><SHORT:mode><GPI:mode>2" + [(set (match_operand:GPI 0 "register_operand") + (ANY_EXTEND:GPI (match_operand:SHORT 1 "nonimmediate_operand")))] + "" +) + +(define_insn "*extend<SHORT:mode><GPI:mode>2_aarch64" + [(set (match_operand:GPI 0 "register_operand" "=r,r") + (sign_extend:GPI (match_operand:SHORT 1 "nonimmediate_operand" "r,m")))] + "" + "@ + sxt<SHORT:size>\t%<GPI:w>0, %w1 + ldrs<SHORT:size>\t%<GPI:w>0, %1" + [(set_attr "type" "extend,load1")] +) + +(define_insn "*zero_extend<SHORT:mode><GPI:mode>2_aarch64" + [(set (match_operand:GPI 0 "register_operand" "=r,r,*w") + (zero_extend:GPI (match_operand:SHORT 1 "nonimmediate_operand" "r,m,m")))] + "" + "@ + uxt<SHORT:size>\t%<GPI:w>0, %w1 + ldr<SHORT:size>\t%w0, %1 + ldr\t%<SHORT:size>0, %1" + [(set_attr "type" "extend,load1,load1")] +) + +(define_expand "<optab>qihi2" + [(set (match_operand:HI 0 "register_operand") + (ANY_EXTEND:HI (match_operand:QI 1 "nonimmediate_operand")))] + "" +) + +(define_insn "*<optab>qihi2_aarch64" + [(set (match_operand:HI 0 "register_operand" "=r,r") + (ANY_EXTEND:HI (match_operand:QI 1 "nonimmediate_operand" "r,m")))] + "" + "@ + <su>xtb\t%w0, %w1 + <ldrxt>b\t%w0, %1" + [(set_attr "type" "extend,load1")] +) + +;; ------------------------------------------------------------------- +;; Simple arithmetic +;; 
------------------------------------------------------------------- + +(define_expand "add<mode>3" + [(set + (match_operand:GPI 0 "register_operand" "") + (plus:GPI (match_operand:GPI 1 "register_operand" "") + (match_operand:GPI 2 "aarch64_pluslong_operand" "")))] + "" + " + if (! aarch64_plus_operand (operands[2], VOIDmode)) + { + rtx subtarget = ((optimize && can_create_pseudo_p ()) + ? gen_reg_rtx (<MODE>mode) : operands[0]); + HOST_WIDE_INT imm = INTVAL (operands[2]); + + if (imm < 0) + imm = -(-imm & ~0xfff); + else + imm &= ~0xfff; + + emit_insn (gen_add<mode>3 (subtarget, operands[1], GEN_INT (imm))); + operands[1] = subtarget; + operands[2] = GEN_INT (INTVAL (operands[2]) - imm); + } + " +) + +(define_insn "*addsi3_aarch64" + [(set + (match_operand:SI 0 "register_operand" "=rk,rk,rk") + (plus:SI + (match_operand:SI 1 "register_operand" "%rk,rk,rk") + (match_operand:SI 2 "aarch64_plus_operand" "I,r,J")))] + "" + "@ + add\\t%w0, %w1, %2 + add\\t%w0, %w1, %w2 + sub\\t%w0, %w1, #%n2" + [(set_attr "type" "alu_imm,alu_reg,alu_imm")] +) + +;; zero_extend version of above +(define_insn "*addsi3_aarch64_uxtw" + [(set + (match_operand:DI 0 "register_operand" "=rk,rk,rk") + (zero_extend:DI + (plus:SI (match_operand:SI 1 "register_operand" "%rk,rk,rk") + (match_operand:SI 2 "aarch64_plus_operand" "I,r,J"))))] + "" + "@ + add\\t%w0, %w1, %2 + add\\t%w0, %w1, %w2 + sub\\t%w0, %w1, #%n2" + [(set_attr "type" "alu_imm,alu_reg,alu_imm")] +) + +(define_insn "*adddi3_aarch64" + [(set + (match_operand:DI 0 "register_operand" "=rk,rk,rk,!w") + (plus:DI + (match_operand:DI 1 "register_operand" "%rk,rk,rk,!w") + (match_operand:DI 2 "aarch64_plus_operand" "I,r,J,!w")))] + "" + "@ + add\\t%x0, %x1, %2 + add\\t%x0, %x1, %x2 + sub\\t%x0, %x1, #%n2 + add\\t%d0, %d1, %d2" + [(set_attr "type" "alu_imm,alu_reg,alu_imm,alu_reg") + (set_attr "simd" "*,*,*,yes")] +) + +(define_insn "*add<mode>3_compare0" + [(set (reg:CC_NZ CC_REGNUM) + (compare:CC_NZ + (plus:GPI (match_operand:GPI 1 "register_operand" "%r,r,r") + (match_operand:GPI 2 "aarch64_plus_operand" "r,I,J")) + (const_int 0))) + (set (match_operand:GPI 0 "register_operand" "=r,r,r") + (plus:GPI (match_dup 1) (match_dup 2)))] + "" + "@ + adds\\t%<w>0, %<w>1, %<w>2 + adds\\t%<w>0, %<w>1, %<w>2 + subs\\t%<w>0, %<w>1, #%n2" + [(set_attr "type" "alus_reg,alus_imm,alus_imm")] +) + +;; zero_extend version of above +(define_insn "*addsi3_compare0_uxtw" + [(set (reg:CC_NZ CC_REGNUM) + (compare:CC_NZ + (plus:SI (match_operand:SI 1 "register_operand" "%r,r,r") + (match_operand:SI 2 "aarch64_plus_operand" "r,I,J")) + (const_int 0))) + (set (match_operand:DI 0 "register_operand" "=r,r,r") + (zero_extend:DI (plus:SI (match_dup 1) (match_dup 2))))] + "" + "@ + adds\\t%w0, %w1, %w2 + adds\\t%w0, %w1, %w2 + subs\\t%w0, %w1, #%n2" + [(set_attr "type" "alus_reg,alus_imm,alus_imm")] +) + +(define_insn "*adds_mul_imm_<mode>" + [(set (reg:CC_NZ CC_REGNUM) + (compare:CC_NZ + (plus:GPI (mult:GPI + (match_operand:GPI 1 "register_operand" "r") + (match_operand:QI 2 "aarch64_pwr_2_<mode>" "n")) + (match_operand:GPI 3 "register_operand" "r")) + (const_int 0))) + (set (match_operand:GPI 0 "register_operand" "=r") + (plus:GPI (mult:GPI (match_dup 1) (match_dup 2)) + (match_dup 3)))] + "" + "adds\\t%<w>0, %<w>3, %<w>1, lsl %p2" + [(set_attr "type" "alus_shift_imm")] +) + +(define_insn "*subs_mul_imm_<mode>" + [(set (reg:CC_NZ CC_REGNUM) + (compare:CC_NZ + (minus:GPI (match_operand:GPI 1 "register_operand" "r") + (mult:GPI + (match_operand:GPI 2 "register_operand" "r") + (match_operand:QI 
3 "aarch64_pwr_2_<mode>" "n"))) + (const_int 0))) + (set (match_operand:GPI 0 "register_operand" "=r") + (minus:GPI (match_dup 1) + (mult:GPI (match_dup 2) (match_dup 3))))] + "" + "subs\\t%<w>0, %<w>1, %<w>2, lsl %p3" + [(set_attr "type" "alus_shift_imm")] +) + +(define_insn "*adds_<optab><ALLX:mode>_<GPI:mode>" + [(set (reg:CC_NZ CC_REGNUM) + (compare:CC_NZ + (plus:GPI + (ANY_EXTEND:GPI (match_operand:ALLX 1 "register_operand" "r")) + (match_operand:GPI 2 "register_operand" "r")) + (const_int 0))) + (set (match_operand:GPI 0 "register_operand" "=r") + (plus:GPI (ANY_EXTEND:GPI (match_dup 1)) (match_dup 2)))] + "" + "adds\\t%<GPI:w>0, %<GPI:w>2, %<GPI:w>1, <su>xt<ALLX:size>" + [(set_attr "type" "alus_ext")] +) + +(define_insn "*subs_<optab><ALLX:mode>_<GPI:mode>" + [(set (reg:CC_NZ CC_REGNUM) + (compare:CC_NZ + (minus:GPI (match_operand:GPI 1 "register_operand" "r") + (ANY_EXTEND:GPI + (match_operand:ALLX 2 "register_operand" "r"))) + (const_int 0))) + (set (match_operand:GPI 0 "register_operand" "=r") + (minus:GPI (match_dup 1) (ANY_EXTEND:GPI (match_dup 2))))] + "" + "subs\\t%<GPI:w>0, %<GPI:w>1, %<GPI:w>2, <su>xt<ALLX:size>" + [(set_attr "type" "alus_ext")] +) + +(define_insn "*adds_<optab><mode>_multp2" + [(set (reg:CC_NZ CC_REGNUM) + (compare:CC_NZ + (plus:GPI (ANY_EXTRACT:GPI + (mult:GPI (match_operand:GPI 1 "register_operand" "r") + (match_operand 2 "aarch64_pwr_imm3" "Up3")) + (match_operand 3 "const_int_operand" "n") + (const_int 0)) + (match_operand:GPI 4 "register_operand" "r")) + (const_int 0))) + (set (match_operand:GPI 0 "register_operand" "=r") + (plus:GPI (ANY_EXTRACT:GPI (mult:GPI (match_dup 1) (match_dup 2)) + (match_dup 3) + (const_int 0)) + (match_dup 4)))] + "aarch64_is_extend_from_extract (<MODE>mode, operands[2], operands[3])" + "adds\\t%<w>0, %<w>4, %<w>1, <su>xt%e3 %p2" + [(set_attr "type" "alus_ext")] +) + +(define_insn "*subs_<optab><mode>_multp2" + [(set (reg:CC_NZ CC_REGNUM) + (compare:CC_NZ + (minus:GPI (match_operand:GPI 4 "register_operand" "r") + (ANY_EXTRACT:GPI + (mult:GPI (match_operand:GPI 1 "register_operand" "r") + (match_operand 2 "aarch64_pwr_imm3" "Up3")) + (match_operand 3 "const_int_operand" "n") + (const_int 0))) + (const_int 0))) + (set (match_operand:GPI 0 "register_operand" "=r") + (minus:GPI (match_dup 4) (ANY_EXTRACT:GPI + (mult:GPI (match_dup 1) (match_dup 2)) + (match_dup 3) + (const_int 0))))] + "aarch64_is_extend_from_extract (<MODE>mode, operands[2], operands[3])" + "subs\\t%<w>0, %<w>4, %<w>1, <su>xt%e3 %p2" + [(set_attr "type" "alus_ext")] +) + +(define_insn "*add<mode>3nr_compare0" + [(set (reg:CC_NZ CC_REGNUM) + (compare:CC_NZ + (plus:GPI (match_operand:GPI 0 "register_operand" "%r,r,r") + (match_operand:GPI 1 "aarch64_plus_operand" "r,I,J")) + (const_int 0)))] + "" + "@ + cmn\\t%<w>0, %<w>1 + cmn\\t%<w>0, %<w>1 + cmp\\t%<w>0, #%n1" + [(set_attr "type" "alus_reg,alus_imm,alus_imm")] +) + +(define_insn "*compare_neg<mode>" + [(set (reg:CC_Z CC_REGNUM) + (compare:CC_Z + (neg:GPI (match_operand:GPI 0 "register_operand" "r")) + (match_operand:GPI 1 "register_operand" "r")))] + "" + "cmn\\t%<w>1, %<w>0" + [(set_attr "type" "alus_reg")] +) + +(define_insn "*add_<shift>_<mode>" + [(set (match_operand:GPI 0 "register_operand" "=r") + (plus:GPI (ASHIFT:GPI (match_operand:GPI 1 "register_operand" "r") + (match_operand:QI 2 "aarch64_shift_imm_<mode>" "n")) + (match_operand:GPI 3 "register_operand" "r")))] + "" + "add\\t%<w>0, %<w>3, %<w>1, <shift> %2" + [(set_attr "type" "alu_shift_imm")] +) + +;; zero_extend version of above +(define_insn 
"*add_<shift>_si_uxtw" + [(set (match_operand:DI 0 "register_operand" "=r") + (zero_extend:DI + (plus:SI (ASHIFT:SI (match_operand:SI 1 "register_operand" "r") + (match_operand:QI 2 "aarch64_shift_imm_si" "n")) + (match_operand:SI 3 "register_operand" "r"))))] + "" + "add\\t%w0, %w3, %w1, <shift> %2" + [(set_attr "type" "alu_shift_imm")] +) + +(define_insn "*add_mul_imm_<mode>" + [(set (match_operand:GPI 0 "register_operand" "=r") + (plus:GPI (mult:GPI (match_operand:GPI 1 "register_operand" "r") + (match_operand:QI 2 "aarch64_pwr_2_<mode>" "n")) + (match_operand:GPI 3 "register_operand" "r")))] + "" + "add\\t%<w>0, %<w>3, %<w>1, lsl %p2" + [(set_attr "type" "alu_shift_imm")] +) + +(define_insn "*add_<optab><ALLX:mode>_<GPI:mode>" + [(set (match_operand:GPI 0 "register_operand" "=rk") + (plus:GPI (ANY_EXTEND:GPI (match_operand:ALLX 1 "register_operand" "r")) + (match_operand:GPI 2 "register_operand" "r")))] + "" + "add\\t%<GPI:w>0, %<GPI:w>2, %<GPI:w>1, <su>xt<ALLX:size>" + [(set_attr "type" "alu_ext")] +) + +;; zero_extend version of above +(define_insn "*add_<optab><SHORT:mode>_si_uxtw" + [(set (match_operand:DI 0 "register_operand" "=rk") + (zero_extend:DI + (plus:SI (ANY_EXTEND:SI (match_operand:SHORT 1 "register_operand" "r")) + (match_operand:GPI 2 "register_operand" "r"))))] + "" + "add\\t%w0, %w2, %w1, <su>xt<SHORT:size>" + [(set_attr "type" "alu_ext")] +) + +(define_insn "*add_<optab><ALLX:mode>_shft_<GPI:mode>" + [(set (match_operand:GPI 0 "register_operand" "=rk") + (plus:GPI (ashift:GPI (ANY_EXTEND:GPI + (match_operand:ALLX 1 "register_operand" "r")) + (match_operand 2 "aarch64_imm3" "Ui3")) + (match_operand:GPI 3 "register_operand" "r")))] + "" + "add\\t%<GPI:w>0, %<GPI:w>3, %<GPI:w>1, <su>xt<ALLX:size> %2" + [(set_attr "type" "alu_ext")] +) + +;; zero_extend version of above +(define_insn "*add_<optab><SHORT:mode>_shft_si_uxtw" + [(set (match_operand:DI 0 "register_operand" "=rk") + (zero_extend:DI + (plus:SI (ashift:SI (ANY_EXTEND:SI + (match_operand:SHORT 1 "register_operand" "r")) + (match_operand 2 "aarch64_imm3" "Ui3")) + (match_operand:SI 3 "register_operand" "r"))))] + "" + "add\\t%w0, %w3, %w1, <su>xt<SHORT:size> %2" + [(set_attr "type" "alu_ext")] +) + +(define_insn "*add_<optab><ALLX:mode>_mult_<GPI:mode>" + [(set (match_operand:GPI 0 "register_operand" "=rk") + (plus:GPI (mult:GPI (ANY_EXTEND:GPI + (match_operand:ALLX 1 "register_operand" "r")) + (match_operand 2 "aarch64_pwr_imm3" "Up3")) + (match_operand:GPI 3 "register_operand" "r")))] + "" + "add\\t%<GPI:w>0, %<GPI:w>3, %<GPI:w>1, <su>xt<ALLX:size> %p2" + [(set_attr "type" "alu_ext")] +) + +;; zero_extend version of above +(define_insn "*add_<optab><SHORT:mode>_mult_si_uxtw" + [(set (match_operand:DI 0 "register_operand" "=rk") + (zero_extend:DI (plus:SI (mult:SI (ANY_EXTEND:SI + (match_operand:SHORT 1 "register_operand" "r")) + (match_operand 2 "aarch64_pwr_imm3" "Up3")) + (match_operand:SI 3 "register_operand" "r"))))] + "" + "add\\t%w0, %w3, %w1, <su>xt<SHORT:size> %p2" + [(set_attr "type" "alu_ext")] +) + +(define_insn "*add_<optab><mode>_multp2" + [(set (match_operand:GPI 0 "register_operand" "=rk") + (plus:GPI (ANY_EXTRACT:GPI + (mult:GPI (match_operand:GPI 1 "register_operand" "r") + (match_operand 2 "aarch64_pwr_imm3" "Up3")) + (match_operand 3 "const_int_operand" "n") + (const_int 0)) + (match_operand:GPI 4 "register_operand" "r")))] + "aarch64_is_extend_from_extract (<MODE>mode, operands[2], operands[3])" + "add\\t%<w>0, %<w>4, %<w>1, <su>xt%e3 %p2" + [(set_attr "type" "alu_ext")] +) + +;; zero_extend 
version of above +(define_insn "*add_<optab>si_multp2_uxtw" + [(set (match_operand:DI 0 "register_operand" "=rk") + (zero_extend:DI + (plus:SI (ANY_EXTRACT:SI + (mult:SI (match_operand:SI 1 "register_operand" "r") + (match_operand 2 "aarch64_pwr_imm3" "Up3")) + (match_operand 3 "const_int_operand" "n") + (const_int 0)) + (match_operand:SI 4 "register_operand" "r"))))] + "aarch64_is_extend_from_extract (SImode, operands[2], operands[3])" + "add\\t%w0, %w4, %w1, <su>xt%e3 %p2" + [(set_attr "type" "alu_ext")] +) + +(define_insn "*add<mode>3_carryin" + [(set + (match_operand:GPI 0 "register_operand" "=r") + (plus:GPI (geu:GPI (reg:CC CC_REGNUM) (const_int 0)) + (plus:GPI + (match_operand:GPI 1 "register_operand" "r") + (match_operand:GPI 2 "register_operand" "r"))))] + "" + "adc\\t%<w>0, %<w>1, %<w>2" + [(set_attr "type" "adc_reg")] +) + +;; zero_extend version of above +(define_insn "*addsi3_carryin_uxtw" + [(set + (match_operand:DI 0 "register_operand" "=r") + (zero_extend:DI + (plus:SI (geu:SI (reg:CC CC_REGNUM) (const_int 0)) + (plus:SI + (match_operand:SI 1 "register_operand" "r") + (match_operand:SI 2 "register_operand" "r")))))] + "" + "adc\\t%w0, %w1, %w2" + [(set_attr "type" "adc_reg")] +) + +(define_insn "*add<mode>3_carryin_alt1" + [(set + (match_operand:GPI 0 "register_operand" "=r") + (plus:GPI (plus:GPI + (match_operand:GPI 1 "register_operand" "r") + (match_operand:GPI 2 "register_operand" "r")) + (geu:GPI (reg:CC CC_REGNUM) (const_int 0))))] + "" + "adc\\t%<w>0, %<w>1, %<w>2" + [(set_attr "type" "adc_reg")] +) + +;; zero_extend version of above +(define_insn "*addsi3_carryin_alt1_uxtw" + [(set + (match_operand:DI 0 "register_operand" "=r") + (zero_extend:DI + (plus:SI (plus:SI + (match_operand:SI 1 "register_operand" "r") + (match_operand:SI 2 "register_operand" "r")) + (geu:SI (reg:CC CC_REGNUM) (const_int 0)))))] + "" + "adc\\t%w0, %w1, %w2" + [(set_attr "type" "adc_reg")] +) + +(define_insn "*add<mode>3_carryin_alt2" + [(set + (match_operand:GPI 0 "register_operand" "=r") + (plus:GPI (plus:GPI + (geu:GPI (reg:CC CC_REGNUM) (const_int 0)) + (match_operand:GPI 1 "register_operand" "r")) + (match_operand:GPI 2 "register_operand" "r")))] + "" + "adc\\t%<w>0, %<w>1, %<w>2" + [(set_attr "type" "adc_reg")] +) + +;; zero_extend version of above +(define_insn "*addsi3_carryin_alt2_uxtw" + [(set + (match_operand:DI 0 "register_operand" "=r") + (zero_extend:DI + (plus:SI (plus:SI + (geu:SI (reg:CC CC_REGNUM) (const_int 0)) + (match_operand:SI 1 "register_operand" "r")) + (match_operand:SI 2 "register_operand" "r"))))] + "" + "adc\\t%w0, %w1, %w2" + [(set_attr "type" "adc_reg")] +) + +(define_insn "*add<mode>3_carryin_alt3" + [(set + (match_operand:GPI 0 "register_operand" "=r") + (plus:GPI (plus:GPI + (geu:GPI (reg:CC CC_REGNUM) (const_int 0)) + (match_operand:GPI 2 "register_operand" "r")) + (match_operand:GPI 1 "register_operand" "r")))] + "" + "adc\\t%<w>0, %<w>1, %<w>2" + [(set_attr "type" "adc_reg")] +) + +;; zero_extend version of above +(define_insn "*addsi3_carryin_alt3_uxtw" + [(set + (match_operand:DI 0 "register_operand" "=r") + (zero_extend:DI + (plus:SI (plus:SI + (geu:SI (reg:CC CC_REGNUM) (const_int 0)) + (match_operand:SI 2 "register_operand" "r")) + (match_operand:SI 1 "register_operand" "r"))))] + "" + "adc\\t%w0, %w1, %w2" + [(set_attr "type" "adc_reg")] +) + +(define_insn "*add_uxt<mode>_multp2" + [(set (match_operand:GPI 0 "register_operand" "=rk") + (plus:GPI (and:GPI + (mult:GPI (match_operand:GPI 1 "register_operand" "r") + (match_operand 2 "aarch64_pwr_imm3" 
"Up3")) + (match_operand 3 "const_int_operand" "n")) + (match_operand:GPI 4 "register_operand" "r")))] + "aarch64_uxt_size (exact_log2 (INTVAL (operands[2])), INTVAL (operands[3])) != 0" + "* + operands[3] = GEN_INT (aarch64_uxt_size (exact_log2 (INTVAL (operands[2])), + INTVAL (operands[3]))); + return \"add\t%<w>0, %<w>4, %<w>1, uxt%e3 %p2\";" + [(set_attr "type" "alu_ext")] +) + +;; zero_extend version of above +(define_insn "*add_uxtsi_multp2_uxtw" + [(set (match_operand:DI 0 "register_operand" "=rk") + (zero_extend:DI + (plus:SI (and:SI + (mult:SI (match_operand:SI 1 "register_operand" "r") + (match_operand 2 "aarch64_pwr_imm3" "Up3")) + (match_operand 3 "const_int_operand" "n")) + (match_operand:SI 4 "register_operand" "r"))))] + "aarch64_uxt_size (exact_log2 (INTVAL (operands[2])), INTVAL (operands[3])) != 0" + "* + operands[3] = GEN_INT (aarch64_uxt_size (exact_log2 (INTVAL (operands[2])), + INTVAL (operands[3]))); + return \"add\t%w0, %w4, %w1, uxt%e3 %p2\";" + [(set_attr "type" "alu_ext")] +) + +(define_insn "subsi3" + [(set (match_operand:SI 0 "register_operand" "=rk") + (minus:SI (match_operand:SI 1 "register_operand" "r") + (match_operand:SI 2 "register_operand" "r")))] + "" + "sub\\t%w0, %w1, %w2" + [(set_attr "type" "alu_reg")] +) + +;; zero_extend version of above +(define_insn "*subsi3_uxtw" + [(set (match_operand:DI 0 "register_operand" "=rk") + (zero_extend:DI + (minus:SI (match_operand:SI 1 "register_operand" "r") + (match_operand:SI 2 "register_operand" "r"))))] + "" + "sub\\t%w0, %w1, %w2" + [(set_attr "type" "alu_reg")] +) + +(define_insn "subdi3" + [(set (match_operand:DI 0 "register_operand" "=rk,!w") + (minus:DI (match_operand:DI 1 "register_operand" "r,!w") + (match_operand:DI 2 "register_operand" "r,!w")))] + "" + "@ + sub\\t%x0, %x1, %x2 + sub\\t%d0, %d1, %d2" + [(set_attr "type" "alu_reg, neon_sub") + (set_attr "simd" "*,yes")] +) + + +(define_insn "*sub<mode>3_compare0" + [(set (reg:CC_NZ CC_REGNUM) + (compare:CC_NZ (minus:GPI (match_operand:GPI 1 "register_operand" "r") + (match_operand:GPI 2 "register_operand" "r")) + (const_int 0))) + (set (match_operand:GPI 0 "register_operand" "=r") + (minus:GPI (match_dup 1) (match_dup 2)))] + "" + "subs\\t%<w>0, %<w>1, %<w>2" + [(set_attr "type" "alus_reg")] +) + +;; zero_extend version of above +(define_insn "*subsi3_compare0_uxtw" + [(set (reg:CC_NZ CC_REGNUM) + (compare:CC_NZ (minus:SI (match_operand:SI 1 "register_operand" "r") + (match_operand:SI 2 "register_operand" "r")) + (const_int 0))) + (set (match_operand:DI 0 "register_operand" "=r") + (zero_extend:DI (minus:SI (match_dup 1) (match_dup 2))))] + "" + "subs\\t%w0, %w1, %w2" + [(set_attr "type" "alus_reg")] +) + +(define_insn "*sub_<shift>_<mode>" + [(set (match_operand:GPI 0 "register_operand" "=r") + (minus:GPI (match_operand:GPI 3 "register_operand" "r") + (ASHIFT:GPI + (match_operand:GPI 1 "register_operand" "r") + (match_operand:QI 2 "aarch64_shift_imm_<mode>" "n"))))] + "" + "sub\\t%<w>0, %<w>3, %<w>1, <shift> %2" + [(set_attr "type" "alu_shift_imm")] +) + +;; zero_extend version of above +(define_insn "*sub_<shift>_si_uxtw" + [(set (match_operand:DI 0 "register_operand" "=r") + (zero_extend:DI + (minus:SI (match_operand:SI 3 "register_operand" "r") + (ASHIFT:SI + (match_operand:SI 1 "register_operand" "r") + (match_operand:QI 2 "aarch64_shift_imm_si" "n")))))] + "" + "sub\\t%w0, %w3, %w1, <shift> %2" + [(set_attr "type" "alu_shift_imm")] +) + +(define_insn "*sub_mul_imm_<mode>" + [(set (match_operand:GPI 0 "register_operand" "=r") + (minus:GPI 
(match_operand:GPI 3 "register_operand" "r") + (mult:GPI + (match_operand:GPI 1 "register_operand" "r") + (match_operand:QI 2 "aarch64_pwr_2_<mode>" "n"))))] + "" + "sub\\t%<w>0, %<w>3, %<w>1, lsl %p2" + [(set_attr "type" "alu_shift_imm")] +) + +;; zero_extend version of above +(define_insn "*sub_mul_imm_si_uxtw" + [(set (match_operand:DI 0 "register_operand" "=r") + (zero_extend:DI + (minus:SI (match_operand:SI 3 "register_operand" "r") + (mult:SI + (match_operand:SI 1 "register_operand" "r") + (match_operand:QI 2 "aarch64_pwr_2_si" "n")))))] + "" + "sub\\t%w0, %w3, %w1, lsl %p2" + [(set_attr "type" "alu_shift_imm")] +) + +(define_insn "*sub_<optab><ALLX:mode>_<GPI:mode>" + [(set (match_operand:GPI 0 "register_operand" "=rk") + (minus:GPI (match_operand:GPI 1 "register_operand" "r") + (ANY_EXTEND:GPI + (match_operand:ALLX 2 "register_operand" "r"))))] + "" + "sub\\t%<GPI:w>0, %<GPI:w>1, %<GPI:w>2, <su>xt<ALLX:size>" + [(set_attr "type" "alu_ext")] +) + +;; zero_extend version of above +(define_insn "*sub_<optab><SHORT:mode>_si_uxtw" + [(set (match_operand:DI 0 "register_operand" "=rk") + (zero_extend:DI + (minus:SI (match_operand:SI 1 "register_operand" "r") + (ANY_EXTEND:SI + (match_operand:SHORT 2 "register_operand" "r")))))] + "" + "sub\\t%w0, %w1, %w2, <su>xt<SHORT:size>" + [(set_attr "type" "alu_ext")] +) + +(define_insn "*sub_<optab><ALLX:mode>_shft_<GPI:mode>" + [(set (match_operand:GPI 0 "register_operand" "=rk") + (minus:GPI (match_operand:GPI 1 "register_operand" "r") + (ashift:GPI (ANY_EXTEND:GPI + (match_operand:ALLX 2 "register_operand" "r")) + (match_operand 3 "aarch64_imm3" "Ui3"))))] + "" + "sub\\t%<GPI:w>0, %<GPI:w>1, %<GPI:w>2, <su>xt<ALLX:size> %3" + [(set_attr "type" "alu_ext")] +) + +;; zero_extend version of above +(define_insn "*sub_<optab><SHORT:mode>_shft_si_uxtw" + [(set (match_operand:DI 0 "register_operand" "=rk") + (zero_extend:DI + (minus:SI (match_operand:SI 1 "register_operand" "r") + (ashift:SI (ANY_EXTEND:SI + (match_operand:SHORT 2 "register_operand" "r")) + (match_operand 3 "aarch64_imm3" "Ui3")))))] + "" + "sub\\t%w0, %w1, %w2, <su>xt<SHORT:size> %3" + [(set_attr "type" "alu_ext")] +) + +(define_insn "*sub_<optab><mode>_multp2" + [(set (match_operand:GPI 0 "register_operand" "=rk") + (minus:GPI (match_operand:GPI 4 "register_operand" "r") + (ANY_EXTRACT:GPI + (mult:GPI (match_operand:GPI 1 "register_operand" "r") + (match_operand 2 "aarch64_pwr_imm3" "Up3")) + (match_operand 3 "const_int_operand" "n") + (const_int 0))))] + "aarch64_is_extend_from_extract (<MODE>mode, operands[2], operands[3])" + "sub\\t%<w>0, %<w>4, %<w>1, <su>xt%e3 %p2" + [(set_attr "type" "alu_ext")] +) + +;; zero_extend version of above +(define_insn "*sub_<optab>si_multp2_uxtw" + [(set (match_operand:DI 0 "register_operand" "=rk") + (zero_extend:DI + (minus:SI (match_operand:SI 4 "register_operand" "r") + (ANY_EXTRACT:SI + (mult:SI (match_operand:SI 1 "register_operand" "r") + (match_operand 2 "aarch64_pwr_imm3" "Up3")) + (match_operand 3 "const_int_operand" "n") + (const_int 0)))))] + "aarch64_is_extend_from_extract (SImode, operands[2], operands[3])" + "sub\\t%w0, %w4, %w1, <su>xt%e3 %p2" + [(set_attr "type" "alu_ext")] +) + +(define_insn "*sub<mode>3_carryin" + [(set + (match_operand:GPI 0 "register_operand" "=r") + (minus:GPI (minus:GPI + (match_operand:GPI 1 "register_operand" "r") + (ltu:GPI (reg:CC CC_REGNUM) (const_int 0))) + (match_operand:GPI 2 "register_operand" "r")))] + "" + "sbc\\t%<w>0, %<w>1, %<w>2" + [(set_attr "type" "adc_reg")] +) + +;; zero_extend version of the 
above +(define_insn "*subsi3_carryin_uxtw" + [(set + (match_operand:DI 0 "register_operand" "=r") + (zero_extend:DI + (minus:SI (minus:SI + (match_operand:SI 1 "register_operand" "r") + (ltu:SI (reg:CC CC_REGNUM) (const_int 0))) + (match_operand:SI 2 "register_operand" "r"))))] + "" + "sbc\\t%w0, %w1, %w2" + [(set_attr "type" "adc_reg")] +) + +(define_insn "*sub_uxt<mode>_multp2" + [(set (match_operand:GPI 0 "register_operand" "=rk") + (minus:GPI (match_operand:GPI 4 "register_operand" "r") + (and:GPI + (mult:GPI (match_operand:GPI 1 "register_operand" "r") + (match_operand 2 "aarch64_pwr_imm3" "Up3")) + (match_operand 3 "const_int_operand" "n"))))] + "aarch64_uxt_size (exact_log2 (INTVAL (operands[2])),INTVAL (operands[3])) != 0" + "* + operands[3] = GEN_INT (aarch64_uxt_size (exact_log2 (INTVAL (operands[2])), + INTVAL (operands[3]))); + return \"sub\t%<w>0, %<w>4, %<w>1, uxt%e3 %p2\";" + [(set_attr "type" "alu_ext")] +) + +;; zero_extend version of above +(define_insn "*sub_uxtsi_multp2_uxtw" + [(set (match_operand:DI 0 "register_operand" "=rk") + (zero_extend:DI + (minus:SI (match_operand:SI 4 "register_operand" "r") + (and:SI + (mult:SI (match_operand:SI 1 "register_operand" "r") + (match_operand 2 "aarch64_pwr_imm3" "Up3")) + (match_operand 3 "const_int_operand" "n")))))] + "aarch64_uxt_size (exact_log2 (INTVAL (operands[2])),INTVAL (operands[3])) != 0" + "* + operands[3] = GEN_INT (aarch64_uxt_size (exact_log2 (INTVAL (operands[2])), + INTVAL (operands[3]))); + return \"sub\t%w0, %w4, %w1, uxt%e3 %p2\";" + [(set_attr "type" "alu_ext")] +) + +(define_insn_and_split "absdi2" + [(set (match_operand:DI 0 "register_operand" "=r,w") + (abs:DI (match_operand:DI 1 "register_operand" "r,w"))) + (clobber (match_scratch:DI 2 "=&r,X"))] + "" + "@ + # + abs\\t%d0, %d1" + "reload_completed + && GP_REGNUM_P (REGNO (operands[0])) + && GP_REGNUM_P (REGNO (operands[1]))" + [(const_int 0)] + { + emit_insn (gen_rtx_SET (VOIDmode, operands[2], + gen_rtx_XOR (DImode, + gen_rtx_ASHIFTRT (DImode, + operands[1], + GEN_INT (63)), + operands[1]))); + emit_insn (gen_rtx_SET (VOIDmode, + operands[0], + gen_rtx_MINUS (DImode, + operands[2], + gen_rtx_ASHIFTRT (DImode, + operands[1], + GEN_INT (63))))); + DONE; + } + [(set_attr "type" "alu_reg")] +) + +(define_insn "neg<mode>2" + [(set (match_operand:GPI 0 "register_operand" "=r,w") + (neg:GPI (match_operand:GPI 1 "register_operand" "r,w")))] + "" + "@ + neg\\t%<w>0, %<w>1 + neg\\t%<rtn>0<vas>, %<rtn>1<vas>" + [(set_attr "type" "alu_reg, neon_neg<q>") + (set_attr "simd" "*,yes")] +) + +;; zero_extend version of above +(define_insn "*negsi2_uxtw" + [(set (match_operand:DI 0 "register_operand" "=r") + (zero_extend:DI (neg:SI (match_operand:SI 1 "register_operand" "r"))))] + "" + "neg\\t%w0, %w1" + [(set_attr "type" "alu_reg")] +) + +(define_insn "*ngc<mode>" + [(set (match_operand:GPI 0 "register_operand" "=r") + (minus:GPI (neg:GPI (ltu:GPI (reg:CC CC_REGNUM) (const_int 0))) + (match_operand:GPI 1 "register_operand" "r")))] + "" + "ngc\\t%<w>0, %<w>1" + [(set_attr "type" "adc_reg")] +) + +(define_insn "*ngcsi_uxtw" + [(set (match_operand:DI 0 "register_operand" "=r") + (zero_extend:DI + (minus:SI (neg:SI (ltu:SI (reg:CC CC_REGNUM) (const_int 0))) + (match_operand:SI 1 "register_operand" "r"))))] + "" + "ngc\\t%w0, %w1" + [(set_attr "type" "adc_reg")] +) + +(define_insn "*neg<mode>2_compare0" + [(set (reg:CC_NZ CC_REGNUM) + (compare:CC_NZ (neg:GPI (match_operand:GPI 1 "register_operand" "r")) + (const_int 0))) + (set (match_operand:GPI 0 "register_operand" "=r") + 
(neg:GPI (match_dup 1)))] + "" + "negs\\t%<w>0, %<w>1" + [(set_attr "type" "alus_reg")] +) + +;; zero_extend version of above +(define_insn "*negsi2_compare0_uxtw" + [(set (reg:CC_NZ CC_REGNUM) + (compare:CC_NZ (neg:SI (match_operand:SI 1 "register_operand" "r")) + (const_int 0))) + (set (match_operand:DI 0 "register_operand" "=r") + (zero_extend:DI (neg:SI (match_dup 1))))] + "" + "negs\\t%w0, %w1" + [(set_attr "type" "alus_reg")] +) + +(define_insn "*neg_<shift><mode>3_compare0" + [(set (reg:CC_NZ CC_REGNUM) + (compare:CC_NZ + (neg:GPI (ASHIFT:GPI + (match_operand:GPI 1 "register_operand" "r") + (match_operand:QI 2 "aarch64_shift_imm_<mode>" "n"))) + (const_int 0))) + (set (match_operand:GPI 0 "register_operand" "=r") + (neg:GPI (ASHIFT:GPI (match_dup 1) (match_dup 2))))] + "" + "negs\\t%<w>0, %<w>1, <shift> %2" + [(set_attr "type" "alus_shift_imm")] +) + +(define_insn "*neg_<shift>_<mode>2" + [(set (match_operand:GPI 0 "register_operand" "=r") + (neg:GPI (ASHIFT:GPI + (match_operand:GPI 1 "register_operand" "r") + (match_operand:QI 2 "aarch64_shift_imm_<mode>" "n"))))] + "" + "neg\\t%<w>0, %<w>1, <shift> %2" + [(set_attr "type" "alu_shift_imm")] +) + +;; zero_extend version of above +(define_insn "*neg_<shift>_si2_uxtw" + [(set (match_operand:DI 0 "register_operand" "=r") + (zero_extend:DI + (neg:SI (ASHIFT:SI + (match_operand:SI 1 "register_operand" "r") + (match_operand:QI 2 "aarch64_shift_imm_si" "n")))))] + "" + "neg\\t%w0, %w1, <shift> %2" + [(set_attr "type" "alu_shift_imm")] +) + +(define_insn "*neg_mul_imm_<mode>2" + [(set (match_operand:GPI 0 "register_operand" "=r") + (neg:GPI (mult:GPI + (match_operand:GPI 1 "register_operand" "r") + (match_operand:QI 2 "aarch64_pwr_2_<mode>" "n"))))] + "" + "neg\\t%<w>0, %<w>1, lsl %p2" + [(set_attr "type" "alu_shift_imm")] +) + +;; zero_extend version of above +(define_insn "*neg_mul_imm_si2_uxtw" + [(set (match_operand:DI 0 "register_operand" "=r") + (zero_extend:DI + (neg:SI (mult:SI + (match_operand:SI 1 "register_operand" "r") + (match_operand:QI 2 "aarch64_pwr_2_si" "n")))))] + "" + "neg\\t%w0, %w1, lsl %p2" + [(set_attr "type" "alu_shift_imm")] +) + +(define_insn "mul<mode>3" + [(set (match_operand:GPI 0 "register_operand" "=r") + (mult:GPI (match_operand:GPI 1 "register_operand" "r") + (match_operand:GPI 2 "register_operand" "r")))] + "" + "mul\\t%<w>0, %<w>1, %<w>2" + [(set_attr "type" "mul")] +) + +;; zero_extend version of above +(define_insn "*mulsi3_uxtw" + [(set (match_operand:DI 0 "register_operand" "=r") + (zero_extend:DI + (mult:SI (match_operand:SI 1 "register_operand" "r") + (match_operand:SI 2 "register_operand" "r"))))] + "" + "mul\\t%w0, %w1, %w2" + [(set_attr "type" "mul")] +) + +(define_insn "*madd<mode>" + [(set (match_operand:GPI 0 "register_operand" "=r") + (plus:GPI (mult:GPI (match_operand:GPI 1 "register_operand" "r") + (match_operand:GPI 2 "register_operand" "r")) + (match_operand:GPI 3 "register_operand" "r")))] + "" + "madd\\t%<w>0, %<w>1, %<w>2, %<w>3" + [(set_attr "type" "mla")] +) + +;; zero_extend version of above +(define_insn "*maddsi_uxtw" + [(set (match_operand:DI 0 "register_operand" "=r") + (zero_extend:DI + (plus:SI (mult:SI (match_operand:SI 1 "register_operand" "r") + (match_operand:SI 2 "register_operand" "r")) + (match_operand:SI 3 "register_operand" "r"))))] + "" + "madd\\t%w0, %w1, %w2, %w3" + [(set_attr "type" "mla")] +) + +(define_insn "*msub<mode>" + [(set (match_operand:GPI 0 "register_operand" "=r") + (minus:GPI (match_operand:GPI 3 "register_operand" "r") + (mult:GPI (match_operand:GPI 
1 "register_operand" "r") + (match_operand:GPI 2 "register_operand" "r"))))] + + "" + "msub\\t%<w>0, %<w>1, %<w>2, %<w>3" + [(set_attr "type" "mla")] +) + +;; zero_extend version of above +(define_insn "*msubsi_uxtw" + [(set (match_operand:DI 0 "register_operand" "=r") + (zero_extend:DI + (minus:SI (match_operand:SI 3 "register_operand" "r") + (mult:SI (match_operand:SI 1 "register_operand" "r") + (match_operand:SI 2 "register_operand" "r")))))] + + "" + "msub\\t%w0, %w1, %w2, %w3" + [(set_attr "type" "mla")] +) + +(define_insn "*mul<mode>_neg" + [(set (match_operand:GPI 0 "register_operand" "=r") + (mult:GPI (neg:GPI (match_operand:GPI 1 "register_operand" "r")) + (match_operand:GPI 2 "register_operand" "r")))] + + "" + "mneg\\t%<w>0, %<w>1, %<w>2" + [(set_attr "type" "mul")] +) + +;; zero_extend version of above +(define_insn "*mulsi_neg_uxtw" + [(set (match_operand:DI 0 "register_operand" "=r") + (zero_extend:DI + (mult:SI (neg:SI (match_operand:SI 1 "register_operand" "r")) + (match_operand:SI 2 "register_operand" "r"))))] + + "" + "mneg\\t%w0, %w1, %w2" + [(set_attr "type" "mul")] +) + +(define_insn "<su_optab>mulsidi3" + [(set (match_operand:DI 0 "register_operand" "=r") + (mult:DI (ANY_EXTEND:DI (match_operand:SI 1 "register_operand" "r")) + (ANY_EXTEND:DI (match_operand:SI 2 "register_operand" "r"))))] + "" + "<su>mull\\t%0, %w1, %w2" + [(set_attr "type" "<su>mull")] +) + +(define_insn "<su_optab>maddsidi4" + [(set (match_operand:DI 0 "register_operand" "=r") + (plus:DI (mult:DI + (ANY_EXTEND:DI (match_operand:SI 1 "register_operand" "r")) + (ANY_EXTEND:DI (match_operand:SI 2 "register_operand" "r"))) + (match_operand:DI 3 "register_operand" "r")))] + "" + "<su>maddl\\t%0, %w1, %w2, %3" + [(set_attr "type" "<su>mlal")] +) + +(define_insn "<su_optab>msubsidi4" + [(set (match_operand:DI 0 "register_operand" "=r") + (minus:DI + (match_operand:DI 3 "register_operand" "r") + (mult:DI (ANY_EXTEND:DI (match_operand:SI 1 "register_operand" "r")) + (ANY_EXTEND:DI + (match_operand:SI 2 "register_operand" "r")))))] + "" + "<su>msubl\\t%0, %w1, %w2, %3" + [(set_attr "type" "<su>mlal")] +) + +(define_insn "*<su_optab>mulsidi_neg" + [(set (match_operand:DI 0 "register_operand" "=r") + (mult:DI (neg:DI + (ANY_EXTEND:DI (match_operand:SI 1 "register_operand" "r"))) + (ANY_EXTEND:DI (match_operand:SI 2 "register_operand" "r"))))] + "" + "<su>mnegl\\t%0, %w1, %w2" + [(set_attr "type" "<su>mull")] +) + +(define_insn "<su>muldi3_highpart" + [(set (match_operand:DI 0 "register_operand" "=r") + (truncate:DI + (lshiftrt:TI + (mult:TI + (ANY_EXTEND:TI (match_operand:DI 1 "register_operand" "r")) + (ANY_EXTEND:TI (match_operand:DI 2 "register_operand" "r"))) + (const_int 64))))] + "" + "<su>mulh\\t%0, %1, %2" + [(set_attr "type" "<su>mull")] +) + +(define_insn "<su_optab>div<mode>3" + [(set (match_operand:GPI 0 "register_operand" "=r") + (ANY_DIV:GPI (match_operand:GPI 1 "register_operand" "r") + (match_operand:GPI 2 "register_operand" "r")))] + "" + "<su>div\\t%<w>0, %<w>1, %<w>2" + [(set_attr "type" "<su>div")] +) + +;; zero_extend version of above +(define_insn "*<su_optab>divsi3_uxtw" + [(set (match_operand:DI 0 "register_operand" "=r") + (zero_extend:DI + (ANY_DIV:SI (match_operand:SI 1 "register_operand" "r") + (match_operand:SI 2 "register_operand" "r"))))] + "" + "<su>div\\t%w0, %w1, %w2" + [(set_attr "type" "<su>div")] +) + +;; ------------------------------------------------------------------- +;; Comparison insns +;; ------------------------------------------------------------------- + 
+(define_insn "*cmp<mode>" + [(set (reg:CC CC_REGNUM) + (compare:CC (match_operand:GPI 0 "register_operand" "r,r,r") + (match_operand:GPI 1 "aarch64_plus_operand" "r,I,J")))] + "" + "@ + cmp\\t%<w>0, %<w>1 + cmp\\t%<w>0, %<w>1 + cmn\\t%<w>0, #%n1" + [(set_attr "type" "alus_reg,alus_imm,alus_imm")] +) + +(define_insn "*cmp<mode>" + [(set (reg:CCFP CC_REGNUM) + (compare:CCFP (match_operand:GPF 0 "register_operand" "w,w") + (match_operand:GPF 1 "aarch64_fp_compare_operand" "Y,w")))] + "TARGET_FLOAT" + "@ + fcmp\\t%<s>0, #0.0 + fcmp\\t%<s>0, %<s>1" + [(set_attr "type" "fcmp<s>")] +) + +(define_insn "*cmpe<mode>" + [(set (reg:CCFPE CC_REGNUM) + (compare:CCFPE (match_operand:GPF 0 "register_operand" "w,w") + (match_operand:GPF 1 "aarch64_fp_compare_operand" "Y,w")))] + "TARGET_FLOAT" + "@ + fcmpe\\t%<s>0, #0.0 + fcmpe\\t%<s>0, %<s>1" + [(set_attr "type" "fcmp<s>")] +) + +(define_insn "*cmp_swp_<shift>_reg<mode>" + [(set (reg:CC_SWP CC_REGNUM) + (compare:CC_SWP (ASHIFT:GPI + (match_operand:GPI 0 "register_operand" "r") + (match_operand:QI 1 "aarch64_shift_imm_<mode>" "n")) + (match_operand:GPI 2 "aarch64_reg_or_zero" "rZ")))] + "" + "cmp\\t%<w>2, %<w>0, <shift> %1" + [(set_attr "type" "alus_shift_imm")] +) + +(define_insn "*cmp_swp_<optab><ALLX:mode>_reg<GPI:mode>" + [(set (reg:CC_SWP CC_REGNUM) + (compare:CC_SWP (ANY_EXTEND:GPI + (match_operand:ALLX 0 "register_operand" "r")) + (match_operand:GPI 1 "register_operand" "r")))] + "" + "cmp\\t%<GPI:w>1, %<GPI:w>0, <su>xt<ALLX:size>" + [(set_attr "type" "alus_ext")] +) + +(define_insn "*cmp_swp_<optab><ALLX:mode>_shft_<GPI:mode>" + [(set (reg:CC_SWP CC_REGNUM) + (compare:CC_SWP (ashift:GPI + (ANY_EXTEND:GPI + (match_operand:ALLX 0 "register_operand" "r")) + (match_operand 1 "aarch64_imm3" "Ui3")) + (match_operand:GPI 2 "register_operand" "r")))] + "" + "cmp\\t%<GPI:w>2, %<GPI:w>0, <su>xt<ALLX:size> %1" + [(set_attr "type" "alus_ext")] +) + +;; ------------------------------------------------------------------- +;; Store-flag and conditional select insns +;; ------------------------------------------------------------------- + +(define_expand "cstore<mode>4" + [(set (match_operand:SI 0 "register_operand" "") + (match_operator:SI 1 "aarch64_comparison_operator" + [(match_operand:GPI 2 "register_operand" "") + (match_operand:GPI 3 "aarch64_plus_operand" "")]))] + "" + " + operands[2] = aarch64_gen_compare_reg (GET_CODE (operands[1]), operands[2], + operands[3]); + operands[3] = const0_rtx; + " +) + +(define_expand "cstore<mode>4" + [(set (match_operand:SI 0 "register_operand" "") + (match_operator:SI 1 "aarch64_comparison_operator" + [(match_operand:GPF 2 "register_operand" "") + (match_operand:GPF 3 "register_operand" "")]))] + "" + " + operands[2] = aarch64_gen_compare_reg (GET_CODE (operands[1]), operands[2], + operands[3]); + operands[3] = const0_rtx; + " +) + +(define_insn "*cstore<mode>_insn" + [(set (match_operand:ALLI 0 "register_operand" "=r") + (match_operator:ALLI 1 "aarch64_comparison_operator" + [(match_operand 2 "cc_register" "") (const_int 0)]))] + "" + "cset\\t%<w>0, %m1" + [(set_attr "type" "csel")] +) + +;; zero_extend version of the above +(define_insn "*cstoresi_insn_uxtw" + [(set (match_operand:DI 0 "register_operand" "=r") + (zero_extend:DI + (match_operator:SI 1 "aarch64_comparison_operator" + [(match_operand 2 "cc_register" "") (const_int 0)])))] + "" + "cset\\t%w0, %m1" + [(set_attr "type" "csel")] +) + +(define_insn "cstore<mode>_neg" + [(set (match_operand:ALLI 0 "register_operand" "=r") + (neg:ALLI (match_operator:ALLI 1 
"aarch64_comparison_operator" + [(match_operand 2 "cc_register" "") (const_int 0)])))] + "" + "csetm\\t%<w>0, %m1" + [(set_attr "type" "csel")] +) + +;; zero_extend version of the above +(define_insn "*cstoresi_neg_uxtw" + [(set (match_operand:DI 0 "register_operand" "=r") + (zero_extend:DI + (neg:SI (match_operator:SI 1 "aarch64_comparison_operator" + [(match_operand 2 "cc_register" "") (const_int 0)]))))] + "" + "csetm\\t%w0, %m1" + [(set_attr "type" "csel")] +) + +(define_expand "cmov<mode>6" + [(set (match_operand:GPI 0 "register_operand" "") + (if_then_else:GPI + (match_operator 1 "aarch64_comparison_operator" + [(match_operand:GPI 2 "register_operand" "") + (match_operand:GPI 3 "aarch64_plus_operand" "")]) + (match_operand:GPI 4 "register_operand" "") + (match_operand:GPI 5 "register_operand" "")))] + "" + " + operands[2] = aarch64_gen_compare_reg (GET_CODE (operands[1]), operands[2], + operands[3]); + operands[3] = const0_rtx; + " +) + +(define_expand "cmov<mode>6" + [(set (match_operand:GPF 0 "register_operand" "") + (if_then_else:GPF + (match_operator 1 "aarch64_comparison_operator" + [(match_operand:GPF 2 "register_operand" "") + (match_operand:GPF 3 "register_operand" "")]) + (match_operand:GPF 4 "register_operand" "") + (match_operand:GPF 5 "register_operand" "")))] + "" + " + operands[2] = aarch64_gen_compare_reg (GET_CODE (operands[1]), operands[2], + operands[3]); + operands[3] = const0_rtx; + " +) + +(define_insn "*cmov<mode>_insn" + [(set (match_operand:ALLI 0 "register_operand" "=r,r,r,r,r,r,r") + (if_then_else:ALLI + (match_operator 1 "aarch64_comparison_operator" + [(match_operand 2 "cc_register" "") (const_int 0)]) + (match_operand:ALLI 3 "aarch64_reg_zero_or_m1_or_1" "rZ,rZ,UsM,rZ,Ui1,UsM,Ui1") + (match_operand:ALLI 4 "aarch64_reg_zero_or_m1_or_1" "rZ,UsM,rZ,Ui1,rZ,UsM,Ui1")))] + "!((operands[3] == const1_rtx && operands[4] == constm1_rtx) + || (operands[3] == constm1_rtx && operands[4] == const1_rtx))" + ;; Final two alternatives should be unreachable, but included for completeness + "@ + csel\\t%<w>0, %<w>3, %<w>4, %m1 + csinv\\t%<w>0, %<w>3, <w>zr, %m1 + csinv\\t%<w>0, %<w>4, <w>zr, %M1 + csinc\\t%<w>0, %<w>3, <w>zr, %m1 + csinc\\t%<w>0, %<w>4, <w>zr, %M1 + mov\\t%<w>0, -1 + mov\\t%<w>0, 1" + [(set_attr "type" "csel")] +) + +;; zero_extend version of above +(define_insn "*cmovsi_insn_uxtw" + [(set (match_operand:DI 0 "register_operand" "=r,r,r,r,r,r,r") + (zero_extend:DI + (if_then_else:SI + (match_operator 1 "aarch64_comparison_operator" + [(match_operand 2 "cc_register" "") (const_int 0)]) + (match_operand:SI 3 "aarch64_reg_zero_or_m1_or_1" "rZ,rZ,UsM,rZ,Ui1,UsM,Ui1") + (match_operand:SI 4 "aarch64_reg_zero_or_m1_or_1" "rZ,UsM,rZ,Ui1,rZ,UsM,Ui1"))))] + "!((operands[3] == const1_rtx && operands[4] == constm1_rtx) + || (operands[3] == constm1_rtx && operands[4] == const1_rtx))" + ;; Final two alternatives should be unreachable, but included for completeness + "@ + csel\\t%w0, %w3, %w4, %m1 + csinv\\t%w0, %w3, wzr, %m1 + csinv\\t%w0, %w4, wzr, %M1 + csinc\\t%w0, %w3, wzr, %m1 + csinc\\t%w0, %w4, wzr, %M1 + mov\\t%w0, -1 + mov\\t%w0, 1" + [(set_attr "type" "csel")] +) + +(define_insn "*cmov<mode>_insn" + [(set (match_operand:GPF 0 "register_operand" "=w") + (if_then_else:GPF + (match_operator 1 "aarch64_comparison_operator" + [(match_operand 2 "cc_register" "") (const_int 0)]) + (match_operand:GPF 3 "register_operand" "w") + (match_operand:GPF 4 "register_operand" "w")))] + "TARGET_FLOAT" + "fcsel\\t%<s>0, %<s>3, %<s>4, %m1" + [(set_attr "type" "fcsel")] +) + 
+(define_expand "mov<mode>cc" + [(set (match_operand:ALLI 0 "register_operand" "") + (if_then_else:ALLI (match_operand 1 "aarch64_comparison_operator" "") + (match_operand:ALLI 2 "register_operand" "") + (match_operand:ALLI 3 "register_operand" "")))] + "" + { + rtx ccreg; + enum rtx_code code = GET_CODE (operands[1]); + + if (code == UNEQ || code == LTGT) + FAIL; + + ccreg = aarch64_gen_compare_reg (code, XEXP (operands[1], 0), + XEXP (operands[1], 1)); + operands[1] = gen_rtx_fmt_ee (code, VOIDmode, ccreg, const0_rtx); + } +) + +(define_expand "mov<GPF:mode><GPI:mode>cc" + [(set (match_operand:GPI 0 "register_operand" "") + (if_then_else:GPI (match_operand 1 "aarch64_comparison_operator" "") + (match_operand:GPF 2 "register_operand" "") + (match_operand:GPF 3 "register_operand" "")))] + "" + { + rtx ccreg; + enum rtx_code code = GET_CODE (operands[1]); + + if (code == UNEQ || code == LTGT) + FAIL; + + ccreg = aarch64_gen_compare_reg (code, XEXP (operands[1], 0), + XEXP (operands[1], 1)); + operands[1] = gen_rtx_fmt_ee (code, VOIDmode, ccreg, const0_rtx); + } +) + +(define_insn "*csinc2<mode>_insn" + [(set (match_operand:GPI 0 "register_operand" "=r") + (plus:GPI (match_operator:GPI 2 "aarch64_comparison_operator" + [(match_operand:CC 3 "cc_register" "") (const_int 0)]) + (match_operand:GPI 1 "register_operand" "r")))] + "" + "csinc\\t%<w>0, %<w>1, %<w>1, %M2" + [(set_attr "type" "csel")] +) + +(define_insn "csinc3<mode>_insn" + [(set (match_operand:GPI 0 "register_operand" "=r") + (if_then_else:GPI + (match_operator:GPI 1 "aarch64_comparison_operator" + [(match_operand:CC 2 "cc_register" "") (const_int 0)]) + (plus:GPI (match_operand:GPI 3 "register_operand" "r") + (const_int 1)) + (match_operand:GPI 4 "aarch64_reg_or_zero" "rZ")))] + "" + "csinc\\t%<w>0, %<w>4, %<w>3, %M1" + [(set_attr "type" "csel")] +) + +(define_insn "*csinv3<mode>_insn" + [(set (match_operand:GPI 0 "register_operand" "=r") + (if_then_else:GPI + (match_operator:GPI 1 "aarch64_comparison_operator" + [(match_operand:CC 2 "cc_register" "") (const_int 0)]) + (not:GPI (match_operand:GPI 3 "register_operand" "r")) + (match_operand:GPI 4 "aarch64_reg_or_zero" "rZ")))] + "" + "csinv\\t%<w>0, %<w>4, %<w>3, %M1" + [(set_attr "type" "csel")] +) + +(define_insn "*csneg3<mode>_insn" + [(set (match_operand:GPI 0 "register_operand" "=r") + (if_then_else:GPI + (match_operator:GPI 1 "aarch64_comparison_operator" + [(match_operand:CC 2 "cc_register" "") (const_int 0)]) + (neg:GPI (match_operand:GPI 3 "register_operand" "r")) + (match_operand:GPI 4 "aarch64_reg_or_zero" "rZ")))] + "" + "csneg\\t%<w>0, %<w>4, %<w>3, %M1" + [(set_attr "type" "csel")] +) + +;; ------------------------------------------------------------------- +;; Logical operations +;; ------------------------------------------------------------------- + +(define_insn "<optab><mode>3" + [(set (match_operand:GPI 0 "register_operand" "=r,rk") + (LOGICAL:GPI (match_operand:GPI 1 "register_operand" "%r,r") + (match_operand:GPI 2 "aarch64_logical_operand" "r,<lconst>")))] + "" + "<logical>\\t%<w>0, %<w>1, %<w>2" + [(set_attr "type" "logic_reg,logic_imm")] +) + +;; zero_extend version of above +(define_insn "*<optab>si3_uxtw" + [(set (match_operand:DI 0 "register_operand" "=r,rk") + (zero_extend:DI + (LOGICAL:SI (match_operand:SI 1 "register_operand" "%r,r") + (match_operand:SI 2 "aarch64_logical_operand" "r,K"))))] + "" + "<logical>\\t%w0, %w1, %w2" + [(set_attr "type" "logic_reg,logic_imm")] +) + +(define_insn "*and<mode>3_compare0" + [(set (reg:CC_NZ CC_REGNUM) + 
(compare:CC_NZ + (and:GPI (match_operand:GPI 1 "register_operand" "%r,r") + (match_operand:GPI 2 "aarch64_logical_operand" "r,<lconst>")) + (const_int 0))) + (set (match_operand:GPI 0 "register_operand" "=r,r") + (and:GPI (match_dup 1) (match_dup 2)))] + "" + "ands\\t%<w>0, %<w>1, %<w>2" + [(set_attr "type" "logics_reg,logics_imm")] +) + +;; zero_extend version of above +(define_insn "*andsi3_compare0_uxtw" + [(set (reg:CC_NZ CC_REGNUM) + (compare:CC_NZ + (and:SI (match_operand:SI 1 "register_operand" "%r,r") + (match_operand:SI 2 "aarch64_logical_operand" "r,K")) + (const_int 0))) + (set (match_operand:DI 0 "register_operand" "=r,r") + (zero_extend:DI (and:SI (match_dup 1) (match_dup 2))))] + "" + "ands\\t%w0, %w1, %w2" + [(set_attr "type" "logics_reg,logics_imm")] +) + +(define_insn "*and_<SHIFT:optab><mode>3_compare0" + [(set (reg:CC_NZ CC_REGNUM) + (compare:CC_NZ + (and:GPI (SHIFT:GPI + (match_operand:GPI 1 "register_operand" "r") + (match_operand:QI 2 "aarch64_shift_imm_<mode>" "n")) + (match_operand:GPI 3 "register_operand" "r")) + (const_int 0))) + (set (match_operand:GPI 0 "register_operand" "=r") + (and:GPI (SHIFT:GPI (match_dup 1) (match_dup 2)) (match_dup 3)))] + "" + "ands\\t%<w>0, %<w>3, %<w>1, <SHIFT:shift> %2" + [(set_attr "type" "logics_shift_imm")] +) + +;; zero_extend version of above +(define_insn "*and_<SHIFT:optab>si3_compare0_uxtw" + [(set (reg:CC_NZ CC_REGNUM) + (compare:CC_NZ + (and:SI (SHIFT:SI + (match_operand:SI 1 "register_operand" "r") + (match_operand:QI 2 "aarch64_shift_imm_si" "n")) + (match_operand:SI 3 "register_operand" "r")) + (const_int 0))) + (set (match_operand:DI 0 "register_operand" "=r") + (zero_extend:DI (and:SI (SHIFT:SI (match_dup 1) (match_dup 2)) + (match_dup 3))))] + "" + "ands\\t%w0, %w3, %w1, <SHIFT:shift> %2" + [(set_attr "type" "logics_shift_imm")] +) + +(define_insn "*<LOGICAL:optab>_<SHIFT:optab><mode>3" + [(set (match_operand:GPI 0 "register_operand" "=r") + (LOGICAL:GPI (SHIFT:GPI + (match_operand:GPI 1 "register_operand" "r") + (match_operand:QI 2 "aarch64_shift_imm_<mode>" "n")) + (match_operand:GPI 3 "register_operand" "r")))] + "" + "<LOGICAL:logical>\\t%<w>0, %<w>3, %<w>1, <SHIFT:shift> %2" + [(set_attr "type" "logic_shift_imm")] +) + +;; zero_extend version of above +(define_insn "*<LOGICAL:optab>_<SHIFT:optab>si3_uxtw" + [(set (match_operand:DI 0 "register_operand" "=r") + (zero_extend:DI + (LOGICAL:SI (SHIFT:SI + (match_operand:SI 1 "register_operand" "r") + (match_operand:QI 2 "aarch64_shift_imm_si" "n")) + (match_operand:SI 3 "register_operand" "r"))))] + "" + "<LOGICAL:logical>\\t%w0, %w3, %w1, <SHIFT:shift> %2" + [(set_attr "type" "logic_shift_imm")] +) + +(define_insn "one_cmpl<mode>2" + [(set (match_operand:GPI 0 "register_operand" "=r") + (not:GPI (match_operand:GPI 1 "register_operand" "r")))] + "" + "mvn\\t%<w>0, %<w>1" + [(set_attr "type" "logic_reg")] +) + +(define_insn "*one_cmpl_<optab><mode>2" + [(set (match_operand:GPI 0 "register_operand" "=r") + (not:GPI (SHIFT:GPI (match_operand:GPI 1 "register_operand" "r") + (match_operand:QI 2 "aarch64_shift_imm_<mode>" "n"))))] + "" + "mvn\\t%<w>0, %<w>1, <shift> %2" + [(set_attr "type" "logic_shift_imm")] +) + +(define_insn "*<LOGICAL:optab>_one_cmpl<mode>3" + [(set (match_operand:GPI 0 "register_operand" "=r") + (LOGICAL:GPI (not:GPI + (match_operand:GPI 1 "register_operand" "r")) + (match_operand:GPI 2 "register_operand" "r")))] + "" + "<LOGICAL:nlogical>\\t%<w>0, %<w>2, %<w>1" + [(set_attr "type" "logic_reg")] +) + +(define_insn "*and_one_cmpl<mode>3_compare0" + [(set 
(reg:CC_NZ CC_REGNUM) + (compare:CC_NZ + (and:GPI (not:GPI + (match_operand:GPI 1 "register_operand" "r")) + (match_operand:GPI 2 "register_operand" "r")) + (const_int 0))) + (set (match_operand:GPI 0 "register_operand" "=r") + (and:GPI (not:GPI (match_dup 1)) (match_dup 2)))] + "" + "bics\\t%<w>0, %<w>2, %<w>1" + [(set_attr "type" "logics_reg")] +) + +;; zero_extend version of above +(define_insn "*and_one_cmplsi3_compare0_uxtw" + [(set (reg:CC_NZ CC_REGNUM) + (compare:CC_NZ + (and:SI (not:SI + (match_operand:SI 1 "register_operand" "r")) + (match_operand:SI 2 "register_operand" "r")) + (const_int 0))) + (set (match_operand:DI 0 "register_operand" "=r") + (zero_extend:DI (and:SI (not:SI (match_dup 1)) (match_dup 2))))] + "" + "bics\\t%w0, %w2, %w1" + [(set_attr "type" "logics_reg")] +) + +(define_insn "*<LOGICAL:optab>_one_cmpl_<SHIFT:optab><mode>3" + [(set (match_operand:GPI 0 "register_operand" "=r") + (LOGICAL:GPI (not:GPI + (SHIFT:GPI + (match_operand:GPI 1 "register_operand" "r") + (match_operand:QI 2 "aarch64_shift_imm_<mode>" "n"))) + (match_operand:GPI 3 "register_operand" "r")))] + "" + "<LOGICAL:nlogical>\\t%<w>0, %<w>3, %<w>1, <SHIFT:shift> %2" + [(set_attr "type" "logics_shift_imm")] +) + +(define_insn "*and_one_cmpl_<SHIFT:optab><mode>3_compare0" + [(set (reg:CC_NZ CC_REGNUM) + (compare:CC_NZ + (and:GPI (not:GPI + (SHIFT:GPI + (match_operand:GPI 1 "register_operand" "r") + (match_operand:QI 2 "aarch64_shift_imm_<mode>" "n"))) + (match_operand:GPI 3 "register_operand" "r")) + (const_int 0))) + (set (match_operand:GPI 0 "register_operand" "=r") + (and:GPI (not:GPI + (SHIFT:GPI + (match_dup 1) (match_dup 2))) (match_dup 3)))] + "" + "bics\\t%<w>0, %<w>3, %<w>1, <SHIFT:shift> %2" + [(set_attr "type" "logics_shift_imm")] +) + +;; zero_extend version of above +(define_insn "*and_one_cmpl_<SHIFT:optab>si3_compare0_uxtw" + [(set (reg:CC_NZ CC_REGNUM) + (compare:CC_NZ + (and:SI (not:SI + (SHIFT:SI + (match_operand:SI 1 "register_operand" "r") + (match_operand:QI 2 "aarch64_shift_imm_si" "n"))) + (match_operand:SI 3 "register_operand" "r")) + (const_int 0))) + (set (match_operand:DI 0 "register_operand" "=r") + (zero_extend:DI (and:SI + (not:SI + (SHIFT:SI (match_dup 1) (match_dup 2))) (match_dup 3))))] + "" + "bics\\t%w0, %w3, %w1, <SHIFT:shift> %2" + [(set_attr "type" "logics_shift_imm")] +) + +(define_insn "clz<mode>2" + [(set (match_operand:GPI 0 "register_operand" "=r") + (clz:GPI (match_operand:GPI 1 "register_operand" "r")))] + "" + "clz\\t%<w>0, %<w>1" + [(set_attr "type" "clz")] +) + +(define_expand "ffs<mode>2" + [(match_operand:GPI 0 "register_operand") + (match_operand:GPI 1 "register_operand")] + "" + { + rtx ccreg = aarch64_gen_compare_reg (EQ, operands[1], const0_rtx); + rtx x = gen_rtx_NE (VOIDmode, ccreg, const0_rtx); + + emit_insn (gen_rbit<mode>2 (operands[0], operands[1])); + emit_insn (gen_clz<mode>2 (operands[0], operands[0])); + emit_insn (gen_csinc3<mode>_insn (operands[0], x, ccreg, operands[0], const0_rtx)); + DONE; + } +) + +(define_insn "clrsb<mode>2" + [(set (match_operand:GPI 0 "register_operand" "=r") + (unspec:GPI [(match_operand:GPI 1 "register_operand" "r")] UNSPEC_CLS))] + "" + "cls\\t%<w>0, %<w>1" + [(set_attr "type" "clz")] +) + +(define_insn "rbit<mode>2" + [(set (match_operand:GPI 0 "register_operand" "=r") + (unspec:GPI [(match_operand:GPI 1 "register_operand" "r")] UNSPEC_RBIT))] + "" + "rbit\\t%<w>0, %<w>1" + [(set_attr "type" "rbit")] +) + +(define_expand "ctz<mode>2" + [(match_operand:GPI 0 "register_operand") + (match_operand:GPI 1 
"register_operand")] + "" + { + emit_insn (gen_rbit<mode>2 (operands[0], operands[1])); + emit_insn (gen_clz<mode>2 (operands[0], operands[0])); + DONE; + } +) + +(define_insn "*and<mode>3nr_compare0" + [(set (reg:CC_NZ CC_REGNUM) + (compare:CC_NZ + (and:GPI (match_operand:GPI 0 "register_operand" "%r,r") + (match_operand:GPI 1 "aarch64_logical_operand" "r,<lconst>")) + (const_int 0)))] + "" + "tst\\t%<w>0, %<w>1" + [(set_attr "type" "logics_reg")] +) + +(define_insn "*and_<SHIFT:optab><mode>3nr_compare0" + [(set (reg:CC_NZ CC_REGNUM) + (compare:CC_NZ + (and:GPI (SHIFT:GPI + (match_operand:GPI 0 "register_operand" "r") + (match_operand:QI 1 "aarch64_shift_imm_<mode>" "n")) + (match_operand:GPI 2 "register_operand" "r")) + (const_int 0)))] + "" + "tst\\t%<w>2, %<w>0, <SHIFT:shift> %1" + [(set_attr "type" "logics_shift_imm")] +) + +;; ------------------------------------------------------------------- +;; Shifts +;; ------------------------------------------------------------------- + +(define_expand "<optab><mode>3" + [(set (match_operand:GPI 0 "register_operand") + (ASHIFT:GPI (match_operand:GPI 1 "register_operand") + (match_operand:QI 2 "nonmemory_operand")))] + ""< |