;; Machine description for AArch64 architecture.
;; Copyright (C) 2009-2014 Free Software Foundation, Inc.
;; Contributed by ARM Ltd.
;;
;; This file is part of GCC.
;;
;; GCC is free software; you can redistribute it and/or modify it
;; under the terms of the GNU General Public License as published by
;; the Free Software Foundation; either version 3, or (at your option)
;; any later version.
;;
;; GCC is distributed in the hope that it will be useful, but
;; WITHOUT ANY WARRANTY; without even the implied warranty of
;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
;; General Public License for more details.
;;
;; You should have received a copy of the GNU General Public License
;; along with GCC; see the file COPYING3.  If not see
;; <http://www.gnu.org/licenses/>.

;; Register numbers
(define_constants
  [
    (R0_REGNUM    0)
    (R1_REGNUM    1)
    (R2_REGNUM    2)
    (R3_REGNUM    3)
    (R4_REGNUM    4)
    (R5_REGNUM    5)
    (R6_REGNUM    6)
    (R7_REGNUM    7)
    (R8_REGNUM    8)
    (R9_REGNUM    9)
    (R10_REGNUM   10)
    (R11_REGNUM   11)
    (R12_REGNUM   12)
    (R13_REGNUM   13)
    (R14_REGNUM   14)
    (R15_REGNUM   15)
    (R16_REGNUM   16)
    (IP0_REGNUM   16)
    (R17_REGNUM   17)
    (IP1_REGNUM   17)
    (R18_REGNUM   18)
    (R19_REGNUM   19)
    (R20_REGNUM   20)
    (R21_REGNUM   21)
    (R22_REGNUM   22)
    (R23_REGNUM   23)
    (R24_REGNUM   24)
    (R25_REGNUM   25)
    (R26_REGNUM   26)
    (R27_REGNUM   27)
    (R28_REGNUM   28)
    (R29_REGNUM   29)
    (R30_REGNUM   30)
    (LR_REGNUM    30)
    (SP_REGNUM    31)
    (V0_REGNUM    32)
    (V15_REGNUM   47)
    (V31_REGNUM   63)
    (SFP_REGNUM   64)
    (AP_REGNUM    65)
    (CC_REGNUM    66)
  ]
)

(define_c_enum "unspec" [
    UNSPEC_CASESI
    UNSPEC_CLS
    UNSPEC_FRECPE
    UNSPEC_FRECPS
    UNSPEC_FRECPX
    UNSPEC_FRINTA
    UNSPEC_FRINTI
    UNSPEC_FRINTM
    UNSPEC_FRINTN
    UNSPEC_FRINTP
    UNSPEC_FRINTX
    UNSPEC_FRINTZ
    UNSPEC_GOTSMALLPIC
    UNSPEC_GOTSMALLTLS
    UNSPEC_GOTTINYPIC
    UNSPEC_LD1
    UNSPEC_LD2
    UNSPEC_LD3
    UNSPEC_LD4
    UNSPEC_MB
    UNSPEC_NOP
    UNSPEC_PRLG_STK
    UNSPEC_RBIT
    UNSPEC_SISD_NEG
    UNSPEC_SISD_SSHL
    UNSPEC_SISD_USHL
    UNSPEC_SSHL_2S
    UNSPEC_SSHR64
    UNSPEC_ST1
    UNSPEC_ST2
    UNSPEC_ST3
    UNSPEC_ST4
    UNSPEC_TLS
    UNSPEC_TLSDESC
    UNSPEC_USHL_2S
    UNSPEC_USHR64
    UNSPEC_VSTRUCTDUMMY
])

(define_c_enum "unspecv" [
    UNSPECV_EH_RETURN		; Represent EH_RETURN
  ]
)

;; If further include files are added, the definition of MD_INCLUDES
;; must be updated.

(include "constraints.md")
(include "predicates.md")
(include "iterators.md")

;; -------------------------------------------------------------------
;; Instruction types and attributes
;; -------------------------------------------------------------------

; The "type" attribute is included here from the AArch32 backend to be able
; to share pipeline descriptions.
(include "../arm/types.md")

;; Attribute that specifies whether or not the instruction touches fp
;; registers.
(define_attr "fp" "no,yes" (const_string "no"))

;; Attribute that specifies whether or not the instruction touches simd
;; registers.
(define_attr "simd" "no,yes" (const_string "no"))

(define_attr "length" ""
  (const_int 4))

;; Attribute that controls whether an alternative is enabled or not.
;; Currently it is only used to disable alternatives which touch fp or simd
;; registers when -mgeneral-regs-only is specified.
(define_attr "enabled" "no,yes"
  (cond [(ior
	    (and (eq_attr "fp" "yes")
		 (eq (symbol_ref "TARGET_FLOAT") (const_int 0)))
	    (and (eq_attr "simd" "yes")
		 (eq (symbol_ref "TARGET_SIMD") (const_int 0))))
	    (const_string "no")
	] (const_string "yes")))

;; -------------------------------------------------------------------
;; Pipeline descriptions and scheduling
;; -------------------------------------------------------------------

;; Processor types.
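;; The "tune" attribute used below is not defined in this file; it comes
;; from aarch64-tune.md, which is regenerated from the core list in
;; aarch64-cores.def.  As a rough sketch only (the exact contents are in
;; the generated file, values here are illustrative), it looks like:
;;
;;   (define_attr "tune" "cortexa53,cortexa15"
;;     (const (symbol_ref "((enum attr_tune) aarch64_tune)")))
;;
;; "generic_sched" below tests this attribute to decide whether one of
;; the core-specific pipeline descriptions applies.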
(include "aarch64-tune.md") ;; True if the generic scheduling description should be used. (define_attr "generic_sched" "yes,no" (const (if_then_else (eq_attr "tune" "cortexa53,cortexa15") (const_string "no") (const_string "yes")))) ;; Scheduling (include "../arm/cortex-a53.md") (include "../arm/cortex-a15.md") ;; ------------------------------------------------------------------- ;; Jumps and other miscellaneous insns ;; ------------------------------------------------------------------- (define_insn "indirect_jump" [(set (pc) (match_operand:DI 0 "register_operand" "r"))] "" "br\\t%0" [(set_attr "type" "branch")] ) (define_insn "jump" [(set (pc) (label_ref (match_operand 0 "" "")))] "" "b\\t%l0" [(set_attr "type" "branch")] ) (define_expand "cbranch4" [(set (pc) (if_then_else (match_operator 0 "aarch64_comparison_operator" [(match_operand:GPI 1 "register_operand" "") (match_operand:GPI 2 "aarch64_plus_operand" "")]) (label_ref (match_operand 3 "" "")) (pc)))] "" " operands[1] = aarch64_gen_compare_reg (GET_CODE (operands[0]), operands[1], operands[2]); operands[2] = const0_rtx; " ) (define_expand "cbranch4" [(set (pc) (if_then_else (match_operator 0 "aarch64_comparison_operator" [(match_operand:GPF 1 "register_operand" "") (match_operand:GPF 2 "aarch64_reg_or_zero" "")]) (label_ref (match_operand 3 "" "")) (pc)))] "" " operands[1] = aarch64_gen_compare_reg (GET_CODE (operands[0]), operands[1], operands[2]); operands[2] = const0_rtx; " ) (define_insn "*condjump" [(set (pc) (if_then_else (match_operator 0 "aarch64_comparison_operator" [(match_operand 1 "cc_register" "") (const_int 0)]) (label_ref (match_operand 2 "" "")) (pc)))] "" "b%m0\\t%l2" [(set_attr "type" "branch")] ) (define_expand "casesi" [(match_operand:SI 0 "register_operand" "") ; Index (match_operand:SI 1 "const_int_operand" "") ; Lower bound (match_operand:SI 2 "const_int_operand" "") ; Total range (match_operand:DI 3 "" "") ; Table label (match_operand:DI 4 "" "")] ; Out of range label "" { if (operands[1] != const0_rtx) { rtx reg = gen_reg_rtx (SImode); /* Canonical RTL says that if you have: (minus (X) (CONST)) then this should be emitted as: (plus (X) (-CONST)) The use of trunc_int_for_mode ensures that the resulting constant can be represented in SImode, this is important for the corner case where operand[1] is INT_MIN. 
	*/

	operands[1] = GEN_INT (trunc_int_for_mode (-INTVAL (operands[1]),
						   SImode));

	if (!(*insn_data[CODE_FOR_addsi3].operand[2].predicate)
	      (operands[1], SImode))
	  operands[1] = force_reg (SImode, operands[1]);
	emit_insn (gen_addsi3 (reg, operands[0], operands[1]));
	operands[0] = reg;
      }

    if (!aarch64_plus_operand (operands[2], SImode))
      operands[2] = force_reg (SImode, operands[2]);
    emit_jump_insn (gen_cbranchsi4 (gen_rtx_GTU (SImode, const0_rtx,
						 const0_rtx),
				    operands[0], operands[2], operands[4]));

    operands[2] = force_reg (DImode, gen_rtx_LABEL_REF (VOIDmode, operands[3]));
    emit_jump_insn (gen_casesi_dispatch (operands[2], operands[0],
					 operands[3]));
    DONE;
  }
)

(define_insn "casesi_dispatch"
  [(parallel
    [(set (pc)
	  (mem:DI (unspec [(match_operand:DI 0 "register_operand" "r")
			   (match_operand:SI 1 "register_operand" "r")]
			UNSPEC_CASESI)))
     (clobber (reg:CC CC_REGNUM))
     (clobber (match_scratch:DI 3 "=r"))
     (clobber (match_scratch:DI 4 "=r"))
     (use (label_ref (match_operand 2 "" "")))])]
  ""
  "*
  return aarch64_output_casesi (operands);
  "
  [(set_attr "length" "16")
   (set_attr "type" "branch")]
)

(define_insn "nop"
  [(unspec [(const_int 0)] UNSPEC_NOP)]
  ""
  "nop"
  [(set_attr "type" "no_insn")]
)

(define_insn "trap"
  [(trap_if (const_int 1) (const_int 8))]
  ""
  "brk #1000"
  [(set_attr "type" "trap")])

(define_expand "prologue"
  [(clobber (const_int 0))]
  ""
  "
  aarch64_expand_prologue ();
  DONE;
  "
)

(define_expand "epilogue"
  [(clobber (const_int 0))]
  ""
  "
  aarch64_expand_epilogue (false);
  DONE;
  "
)

(define_expand "sibcall_epilogue"
  [(clobber (const_int 0))]
  ""
  "
  aarch64_expand_epilogue (true);
  DONE;
  "
)

(define_insn "*do_return"
  [(return)]
  ""
  "ret"
  [(set_attr "type" "branch")]
)

(define_insn "eh_return"
  [(unspec_volatile [(match_operand:DI 0 "register_operand" "r")]
    UNSPECV_EH_RETURN)]
  ""
  "#"
  [(set_attr "type" "branch")]
)

(define_split
  [(unspec_volatile [(match_operand:DI 0 "register_operand" "")]
    UNSPECV_EH_RETURN)]
  "reload_completed"
  [(set (match_dup 1) (match_dup 0))]
  {
    operands[1] = aarch64_final_eh_return_addr ();
  }
)

(define_insn "*cb<optab><mode>1"
  [(set (pc) (if_then_else (EQL (match_operand:GPI 0 "register_operand" "r")
				(const_int 0))
			   (label_ref (match_operand 1 "" ""))
			   (pc)))]
  ""
  "<cbz>\\t%<w>0, %l1"
  [(set_attr "type" "branch")]
)

(define_insn "*tb<optab><mode>1"
  [(set (pc) (if_then_else
	      (EQL (zero_extract:DI (match_operand:GPI 0 "register_operand" "r")
				    (const_int 1)
				    (match_operand 1 "const_int_operand" "n"))
		   (const_int 0))
	     (label_ref (match_operand 2 "" ""))
	     (pc)))
   (clobber (match_scratch:DI 3 "=r"))]
  ""
  "*
  if (get_attr_length (insn) == 8)
    return \"ubfx\\t%<w>3, %<w>0, %1, #1\;<cbz>\\t%<w>3, %l2\";
  return \"<tbz>\\t%<w>0, %1, %l2\";
  "
  [(set_attr "type" "branch")
   (set (attr "length")
	(if_then_else (and (ge (minus (match_dup 2) (pc)) (const_int -32768))
			   (lt (minus (match_dup 2) (pc)) (const_int 32764)))
		      (const_int 4)
		      (const_int 8)))]
)

(define_insn "*cb<optab><mode>1"
  [(set (pc) (if_then_else (LTGE (match_operand:ALLI 0 "register_operand" "r")
				 (const_int 0))
			   (label_ref (match_operand 1 "" ""))
			   (pc)))
   (clobber (match_scratch:DI 2 "=r"))]
  ""
  "*
  if (get_attr_length (insn) == 8)
    return \"ubfx\\t%<w>2, %<w>0, <sizem1>, #1\;<cbz>\\t%<w>2, %l1\";
  return \"<tbz>\\t%<w>0, <sizem1>, %l1\";
  "
  [(set_attr "type" "branch")
   (set (attr "length")
	(if_then_else (and (ge (minus (match_dup 1) (pc)) (const_int -32768))
			   (lt (minus (match_dup 1) (pc)) (const_int 32764)))
		      (const_int 4)
		      (const_int 8)))]
)

;; -------------------------------------------------------------------
;; Subroutine calls and sibcalls
;; -------------------------------------------------------------------

(define_expand "call"
  [(parallel [(call (match_operand 0
"memory_operand" "") (match_operand 1 "general_operand" "")) (use (match_operand 2 "" "")) (clobber (reg:DI LR_REGNUM))])] "" " { rtx callee; /* In an untyped call, we can get NULL for operand 2. */ if (operands[2] == NULL) operands[2] = const0_rtx; /* Decide if we should generate indirect calls by loading the 64-bit address of the callee into a register before performing the branch-and-link. */ callee = XEXP (operands[0], 0); if (GET_CODE (callee) == SYMBOL_REF ? aarch64_is_long_call_p (callee) : !REG_P (callee)) XEXP (operands[0], 0) = force_reg (Pmode, callee); }" ) (define_insn "*call_reg" [(call (mem:DI (match_operand:DI 0 "register_operand" "r")) (match_operand 1 "" "")) (use (match_operand 2 "" "")) (clobber (reg:DI LR_REGNUM))] "" "blr\\t%0" [(set_attr "type" "call")] ) (define_insn "*call_symbol" [(call (mem:DI (match_operand:DI 0 "" "")) (match_operand 1 "" "")) (use (match_operand 2 "" "")) (clobber (reg:DI LR_REGNUM))] "GET_CODE (operands[0]) == SYMBOL_REF && !aarch64_is_long_call_p (operands[0])" "bl\\t%a0" [(set_attr "type" "call")] ) (define_expand "call_value" [(parallel [(set (match_operand 0 "" "") (call (match_operand 1 "memory_operand" "") (match_operand 2 "general_operand" ""))) (use (match_operand 3 "" "")) (clobber (reg:DI LR_REGNUM))])] "" " { rtx callee; /* In an untyped call, we can get NULL for operand 3. */ if (operands[3] == NULL) operands[3] = const0_rtx; /* Decide if we should generate indirect calls by loading the 64-bit address of the callee into a register before performing the branch-and-link. */ callee = XEXP (operands[1], 0); if (GET_CODE (callee) == SYMBOL_REF ? aarch64_is_long_call_p (callee) : !REG_P (callee)) XEXP (operands[1], 0) = force_reg (Pmode, callee); }" ) (define_insn "*call_value_reg" [(set (match_operand 0 "" "") (call (mem:DI (match_operand:DI 1 "register_operand" "r")) (match_operand 2 "" ""))) (use (match_operand 3 "" "")) (clobber (reg:DI LR_REGNUM))] "" "blr\\t%1" [(set_attr "type" "call")] ) (define_insn "*call_value_symbol" [(set (match_operand 0 "" "") (call (mem:DI (match_operand:DI 1 "" "")) (match_operand 2 "" ""))) (use (match_operand 3 "" "")) (clobber (reg:DI LR_REGNUM))] "GET_CODE (operands[1]) == SYMBOL_REF && !aarch64_is_long_call_p (operands[1])" "bl\\t%a1" [(set_attr "type" "call")] ) (define_expand "sibcall" [(parallel [(call (match_operand 0 "memory_operand" "") (match_operand 1 "general_operand" "")) (return) (use (match_operand 2 "" ""))])] "" { if (operands[2] == NULL_RTX) operands[2] = const0_rtx; } ) (define_expand "sibcall_value" [(parallel [(set (match_operand 0 "" "") (call (match_operand 1 "memory_operand" "") (match_operand 2 "general_operand" ""))) (return) (use (match_operand 3 "" ""))])] "" { if (operands[3] == NULL_RTX) operands[3] = const0_rtx; } ) (define_insn "*sibcall_insn" [(call (mem:DI (match_operand:DI 0 "" "X")) (match_operand 1 "" "")) (return) (use (match_operand 2 "" ""))] "GET_CODE (operands[0]) == SYMBOL_REF" "b\\t%a0" [(set_attr "type" "branch")] ) (define_insn "*sibcall_value_insn" [(set (match_operand 0 "" "") (call (mem:DI (match_operand 1 "" "X")) (match_operand 2 "" ""))) (return) (use (match_operand 3 "" ""))] "GET_CODE (operands[1]) == SYMBOL_REF" "b\\t%a1" [(set_attr "type" "branch")] ) ;; Call subroutine returning any type. 
(define_expand "untyped_call" [(parallel [(call (match_operand 0 "") (const_int 0)) (match_operand 1 "") (match_operand 2 "")])] "" { int i; emit_call_insn (GEN_CALL (operands[0], const0_rtx, NULL, const0_rtx)); for (i = 0; i < XVECLEN (operands[2], 0); i++) { rtx set = XVECEXP (operands[2], 0, i); emit_move_insn (SET_DEST (set), SET_SRC (set)); } /* The optimizer does not know that the call sets the function value registers we stored in the result block. We avoid problems by claiming that all hard registers are used and clobbered at this point. */ emit_insn (gen_blockage ()); DONE; }) ;; ------------------------------------------------------------------- ;; Moves ;; ------------------------------------------------------------------- (define_expand "mov" [(set (match_operand:SHORT 0 "nonimmediate_operand" "") (match_operand:SHORT 1 "general_operand" ""))] "" " if (GET_CODE (operands[0]) == MEM && operands[1] != const0_rtx) operands[1] = force_reg (mode, operands[1]); " ) (define_insn "*mov_aarch64" [(set (match_operand:SHORT 0 "nonimmediate_operand" "=r,r, *w,r,*w, m, m, r,*w,*w") (match_operand:SHORT 1 "general_operand" " r,M,D,m, m,rZ,*w,*w, r,*w"))] "(register_operand (operands[0], mode) || aarch64_reg_or_zero (operands[1], mode))" { switch (which_alternative) { case 0: return "mov\t%w0, %w1"; case 1: return "mov\t%w0, %1"; case 2: return aarch64_output_scalar_simd_mov_immediate (operands[1], mode); case 3: return "ldr\t%w0, %1"; case 4: return "ldr\t%0, %1"; case 5: return "str\t%w1, %0"; case 6: return "str\t%1, %0"; case 7: return "umov\t%w0, %1.[0]"; case 8: return "dup\t%0., %w1"; case 9: return "dup\t%0, %1.[0]"; default: gcc_unreachable (); } } [(set_attr "type" "mov_reg,mov_imm,mov_imm,load1,load1,store1,store1,\ neon_from_gp,neon_from_gp, neon_dup") (set_attr "simd" "*,*,yes,*,*,*,*,yes,yes,yes")] ) (define_expand "mov" [(set (match_operand:GPI 0 "nonimmediate_operand" "") (match_operand:GPI 1 "general_operand" ""))] "" " if (GET_CODE (operands[0]) == MEM && operands[1] != const0_rtx) operands[1] = force_reg (mode, operands[1]); if (CONSTANT_P (operands[1])) { aarch64_expand_mov_immediate (operands[0], operands[1]); DONE; } " ) (define_insn "*movsi_aarch64" [(set (match_operand:SI 0 "nonimmediate_operand" "=r,k,r,r,r,*w,m, m,r,r ,*w, r,*w") (match_operand:SI 1 "aarch64_mov_operand" " r,r,k,M,m, m,rZ,*w,S,Ush,rZ,*w,*w"))] "(register_operand (operands[0], SImode) || aarch64_reg_or_zero (operands[1], SImode))" "@ mov\\t%w0, %w1 mov\\t%w0, %w1 mov\\t%w0, %w1 mov\\t%w0, %1 ldr\\t%w0, %1 ldr\\t%s0, %1 str\\t%w1, %0 str\\t%s1, %0 adr\\t%x0, %a1 adrp\\t%x0, %A1 fmov\\t%s0, %w1 fmov\\t%w0, %s1 fmov\\t%s0, %s1" [(set_attr "type" "mov_reg,mov_reg,mov_reg,mov_imm,load1,load1,store1,store1,\ adr,adr,fmov,fmov,fmov") (set_attr "fp" "*,*,*,*,*,yes,*,yes,*,*,yes,yes,yes")] ) (define_insn "*movdi_aarch64" [(set (match_operand:DI 0 "nonimmediate_operand" "=r,k,r,r,r,*w,m, m,r,r, *w, r,*w,w") (match_operand:DI 1 "aarch64_mov_operand" " r,r,k,N,m, m,rZ,*w,S,Ush,rZ,*w,*w,Dd"))] "(register_operand (operands[0], DImode) || aarch64_reg_or_zero (operands[1], DImode))" "@ mov\\t%x0, %x1 mov\\t%0, %x1 mov\\t%x0, %1 mov\\t%x0, %1 ldr\\t%x0, %1 ldr\\t%d0, %1 str\\t%x1, %0 str\\t%d1, %0 adr\\t%x0, %a1 adrp\\t%x0, %A1 fmov\\t%d0, %x1 fmov\\t%x0, %d1 fmov\\t%d0, %d1 movi\\t%d0, %1" [(set_attr "type" "mov_reg,mov_reg,mov_reg,mov_imm,load1,load1,store1,store1,\ adr,adr,fmov,fmov,fmov,fmov") (set_attr "fp" "*,*,*,*,*,yes,*,yes,*,*,yes,yes,yes,*") (set_attr "simd" "*,*,*,*,*,*,*,*,*,*,*,*,*,yes")] ) (define_insn 
"insv_imm" [(set (zero_extract:GPI (match_operand:GPI 0 "register_operand" "+r") (const_int 16) (match_operand:GPI 1 "const_int_operand" "n")) (match_operand:GPI 2 "const_int_operand" "n"))] "UINTVAL (operands[1]) < GET_MODE_BITSIZE (mode) && UINTVAL (operands[1]) % 16 == 0" "movk\\t%0, %X2, lsl %1" [(set_attr "type" "mov_imm")] ) (define_expand "movti" [(set (match_operand:TI 0 "nonimmediate_operand" "") (match_operand:TI 1 "general_operand" ""))] "" " if (GET_CODE (operands[0]) == MEM && operands[1] != const0_rtx) operands[1] = force_reg (TImode, operands[1]); " ) (define_insn "*movti_aarch64" [(set (match_operand:TI 0 "nonimmediate_operand" "=r, *w,r ,*w,r ,Ump,Ump,*w,m") (match_operand:TI 1 "aarch64_movti_operand" " rn,r ,*w,*w,Ump,r ,Z , m,*w"))] "(register_operand (operands[0], TImode) || aarch64_reg_or_zero (operands[1], TImode))" "@ # # # orr\\t%0.16b, %1.16b, %1.16b ldp\\t%0, %H0, %1 stp\\t%1, %H1, %0 stp\\txzr, xzr, %0 ldr\\t%q0, %1 str\\t%q1, %0" [(set_attr "type" "multiple,f_mcr,f_mrc,neon_logic_q, \ load2,store2,store2,f_loadd,f_stored") (set_attr "length" "8,8,8,4,4,4,4,4,4") (set_attr "simd" "*,*,*,yes,*,*,*,*,*") (set_attr "fp" "*,*,*,*,*,*,*,yes,yes")] ) ;; Split a TImode register-register or register-immediate move into ;; its component DImode pieces, taking care to handle overlapping ;; source and dest registers. (define_split [(set (match_operand:TI 0 "register_operand" "") (match_operand:TI 1 "aarch64_reg_or_imm" ""))] "reload_completed && aarch64_split_128bit_move_p (operands[0], operands[1])" [(const_int 0)] { aarch64_split_128bit_move (operands[0], operands[1]); DONE; }) (define_expand "mov" [(set (match_operand:GPF 0 "nonimmediate_operand" "") (match_operand:GPF 1 "general_operand" ""))] "" " if (!TARGET_FLOAT) { sorry (\"%qs and floating point code\", \"-mgeneral-regs-only\"); FAIL; } if (GET_CODE (operands[0]) == MEM) operands[1] = force_reg (mode, operands[1]); " ) (define_insn "*movsf_aarch64" [(set (match_operand:SF 0 "nonimmediate_operand" "=w, ?r,w,w ,w,m,r,m ,r") (match_operand:SF 1 "general_operand" "?rY, w,w,Ufc,m,w,m,rY,r"))] "TARGET_FLOAT && (register_operand (operands[0], SFmode) || register_operand (operands[1], SFmode))" "@ fmov\\t%s0, %w1 fmov\\t%w0, %s1 fmov\\t%s0, %s1 fmov\\t%s0, %1 ldr\\t%s0, %1 str\\t%s1, %0 ldr\\t%w0, %1 str\\t%w1, %0 mov\\t%w0, %w1" [(set_attr "type" "f_mcr,f_mrc,fmov,fconsts,\ f_loads,f_stores,f_loads,f_stores,fmov")] ) (define_insn "*movdf_aarch64" [(set (match_operand:DF 0 "nonimmediate_operand" "=w, ?r,w,w ,w,m,r,m ,r") (match_operand:DF 1 "general_operand" "?rY, w,w,Ufc,m,w,m,rY,r"))] "TARGET_FLOAT && (register_operand (operands[0], DFmode) || register_operand (operands[1], DFmode))" "@ fmov\\t%d0, %x1 fmov\\t%x0, %d1 fmov\\t%d0, %d1 fmov\\t%d0, %1 ldr\\t%d0, %1 str\\t%d1, %0 ldr\\t%x0, %1 str\\t%x1, %0 mov\\t%x0, %x1" [(set_attr "type" "f_mcr,f_mrc,fmov,fconstd,\ f_loadd,f_stored,f_loadd,f_stored,mov_reg")] ) (define_expand "movtf" [(set (match_operand:TF 0 "nonimmediate_operand" "") (match_operand:TF 1 "general_operand" ""))] "" " if (!TARGET_FLOAT) { sorry (\"%qs and floating point code\", \"-mgeneral-regs-only\"); FAIL; } if (GET_CODE (operands[0]) == MEM) operands[1] = force_reg (TFmode, operands[1]); " ) (define_insn "*movtf_aarch64" [(set (match_operand:TF 0 "nonimmediate_operand" "=w,?&r,w ,?r,w,?w,w,m,?r ,Ump") (match_operand:TF 1 "general_operand" " w,?r, ?r,w ,Y,Y ,m,w,Ump,?rY"))] "TARGET_FLOAT && (register_operand (operands[0], TFmode) || register_operand (operands[1], TFmode))" "@ orr\\t%0.16b, %1.16b, %1.16b 
# # # movi\\t%0.2d, #0 fmov\\t%s0, wzr ldr\\t%q0, %1 str\\t%q1, %0 ldp\\t%0, %H0, %1 stp\\t%1, %H1, %0" [(set_attr "type" "logic_reg,multiple,f_mcr,f_mrc,fconstd,fconstd,\ f_loadd,f_stored,neon_load1_2reg,neon_store1_2reg") (set_attr "length" "4,8,8,8,4,4,4,4,4,4") (set_attr "fp" "*,*,yes,yes,*,yes,yes,yes,*,*") (set_attr "simd" "yes,*,*,*,yes,*,*,*,*,*")] ) (define_split [(set (match_operand:TF 0 "register_operand" "") (match_operand:TF 1 "aarch64_reg_or_imm" ""))] "reload_completed && aarch64_split_128bit_move_p (operands[0], operands[1])" [(const_int 0)] { aarch64_split_128bit_move (operands[0], operands[1]); DONE; } ) ;; Operands 1 and 3 are tied together by the final condition; so we allow ;; fairly lax checking on the second memory operation. (define_insn "load_pair" [(set (match_operand:GPI 0 "register_operand" "=r") (match_operand:GPI 1 "aarch64_mem_pair_operand" "Ump")) (set (match_operand:GPI 2 "register_operand" "=r") (match_operand:GPI 3 "memory_operand" "m"))] "rtx_equal_p (XEXP (operands[3], 0), plus_constant (Pmode, XEXP (operands[1], 0), GET_MODE_SIZE (mode)))" "ldp\\t%0, %2, %1" [(set_attr "type" "load2")] ) ;; Operands 0 and 2 are tied together by the final condition; so we allow ;; fairly lax checking on the second memory operation. (define_insn "store_pair" [(set (match_operand:GPI 0 "aarch64_mem_pair_operand" "=Ump") (match_operand:GPI 1 "register_operand" "r")) (set (match_operand:GPI 2 "memory_operand" "=m") (match_operand:GPI 3 "register_operand" "r"))] "rtx_equal_p (XEXP (operands[2], 0), plus_constant (Pmode, XEXP (operands[0], 0), GET_MODE_SIZE (mode)))" "stp\\t%1, %3, %0" [(set_attr "type" "store2")] ) ;; Operands 1 and 3 are tied together by the final condition; so we allow ;; fairly lax checking on the second memory operation. (define_insn "load_pair" [(set (match_operand:GPF 0 "register_operand" "=w") (match_operand:GPF 1 "aarch64_mem_pair_operand" "Ump")) (set (match_operand:GPF 2 "register_operand" "=w") (match_operand:GPF 3 "memory_operand" "m"))] "rtx_equal_p (XEXP (operands[3], 0), plus_constant (Pmode, XEXP (operands[1], 0), GET_MODE_SIZE (mode)))" "ldp\\t%0, %2, %1" [(set_attr "type" "neon_load1_2reg")] ) ;; Operands 0 and 2 are tied together by the final condition; so we allow ;; fairly lax checking on the second memory operation. (define_insn "store_pair" [(set (match_operand:GPF 0 "aarch64_mem_pair_operand" "=Ump") (match_operand:GPF 1 "register_operand" "w")) (set (match_operand:GPF 2 "memory_operand" "=m") (match_operand:GPF 3 "register_operand" "w"))] "rtx_equal_p (XEXP (operands[2], 0), plus_constant (Pmode, XEXP (operands[0], 0), GET_MODE_SIZE (mode)))" "stp\\t%1, %3, %0" [(set_attr "type" "neon_store1_2reg")] ) ;; Load pair with writeback. This is primarily used in function epilogues ;; when restoring [fp,lr] (define_insn "loadwb_pair_" [(parallel [(set (match_operand:P 0 "register_operand" "=k") (plus:P (match_operand:P 1 "register_operand" "0") (match_operand:P 4 "const_int_operand" "n"))) (set (match_operand:GPI 2 "register_operand" "=r") (mem:GPI (plus:P (match_dup 1) (match_dup 4)))) (set (match_operand:GPI 3 "register_operand" "=r") (mem:GPI (plus:P (match_dup 1) (match_operand:P 5 "const_int_operand" "n"))))])] "INTVAL (operands[5]) == INTVAL (operands[4]) + GET_MODE_SIZE (mode)" "ldp\\t%2, %3, [%1], %4" [(set_attr "type" "load2")] ) ;; Store pair with writeback. 
This is primarily used in function prologues ;; when saving [fp,lr] (define_insn "storewb_pair_" [(parallel [(set (match_operand:P 0 "register_operand" "=&k") (plus:P (match_operand:P 1 "register_operand" "0") (match_operand:P 4 "const_int_operand" "n"))) (set (mem:GPI (plus:P (match_dup 0) (match_dup 4))) (match_operand:GPI 2 "register_operand" "r")) (set (mem:GPI (plus:P (match_dup 0) (match_operand:P 5 "const_int_operand" "n"))) (match_operand:GPI 3 "register_operand" "r"))])] "INTVAL (operands[5]) == INTVAL (operands[4]) + GET_MODE_SIZE (mode)" "stp\\t%2, %3, [%0, %4]!" [(set_attr "type" "store2")] ) ;; ------------------------------------------------------------------- ;; Sign/Zero extension ;; ------------------------------------------------------------------- (define_expand "sidi2" [(set (match_operand:DI 0 "register_operand") (ANY_EXTEND:DI (match_operand:SI 1 "nonimmediate_operand")))] "" ) (define_insn "*extendsidi2_aarch64" [(set (match_operand:DI 0 "register_operand" "=r,r") (sign_extend:DI (match_operand:SI 1 "nonimmediate_operand" "r,m")))] "" "@ sxtw\t%0, %w1 ldrsw\t%0, %1" [(set_attr "type" "extend,load1")] ) (define_insn "*zero_extendsidi2_aarch64" [(set (match_operand:DI 0 "register_operand" "=r,r") (zero_extend:DI (match_operand:SI 1 "nonimmediate_operand" "r,m")))] "" "@ uxtw\t%0, %w1 ldr\t%w0, %1" [(set_attr "type" "extend,load1")] ) (define_expand "2" [(set (match_operand:GPI 0 "register_operand") (ANY_EXTEND:GPI (match_operand:SHORT 1 "nonimmediate_operand")))] "" ) (define_insn "*extend2_aarch64" [(set (match_operand:GPI 0 "register_operand" "=r,r") (sign_extend:GPI (match_operand:SHORT 1 "nonimmediate_operand" "r,m")))] "" "@ sxt\t%0, %w1 ldrs\t%0, %1" [(set_attr "type" "extend,load1")] ) (define_insn "*zero_extend2_aarch64" [(set (match_operand:GPI 0 "register_operand" "=r,r,*w") (zero_extend:GPI (match_operand:SHORT 1 "nonimmediate_operand" "r,m,m")))] "" "@ uxt\t%0, %w1 ldr\t%w0, %1 ldr\t%0, %1" [(set_attr "type" "extend,load1,load1")] ) (define_expand "qihi2" [(set (match_operand:HI 0 "register_operand") (ANY_EXTEND:HI (match_operand:QI 1 "nonimmediate_operand")))] "" ) (define_insn "*qihi2_aarch64" [(set (match_operand:HI 0 "register_operand" "=r,r") (ANY_EXTEND:HI (match_operand:QI 1 "nonimmediate_operand" "r,m")))] "" "@ xtb\t%w0, %w1 b\t%w0, %1" [(set_attr "type" "extend,load1")] ) ;; ------------------------------------------------------------------- ;; Simple arithmetic ;; ------------------------------------------------------------------- (define_expand "add3" [(set (match_operand:GPI 0 "register_operand" "") (plus:GPI (match_operand:GPI 1 "register_operand" "") (match_operand:GPI 2 "aarch64_pluslong_operand" "")))] "" " if (! aarch64_plus_operand (operands[2], VOIDmode)) { rtx subtarget = ((optimize && can_create_pseudo_p ()) ? 
gen_reg_rtx (mode) : operands[0]); HOST_WIDE_INT imm = INTVAL (operands[2]); if (imm < 0) imm = -(-imm & ~0xfff); else imm &= ~0xfff; emit_insn (gen_add3 (subtarget, operands[1], GEN_INT (imm))); operands[1] = subtarget; operands[2] = GEN_INT (INTVAL (operands[2]) - imm); } " ) (define_insn "*addsi3_aarch64" [(set (match_operand:SI 0 "register_operand" "=rk,rk,rk") (plus:SI (match_operand:SI 1 "register_operand" "%rk,rk,rk") (match_operand:SI 2 "aarch64_plus_operand" "I,r,J")))] "" "@ add\\t%w0, %w1, %2 add\\t%w0, %w1, %w2 sub\\t%w0, %w1, #%n2" [(set_attr "type" "alu_imm,alu_reg,alu_imm")] ) ;; zero_extend version of above (define_insn "*addsi3_aarch64_uxtw" [(set (match_operand:DI 0 "register_operand" "=rk,rk,rk") (zero_extend:DI (plus:SI (match_operand:SI 1 "register_operand" "%rk,rk,rk") (match_operand:SI 2 "aarch64_plus_operand" "I,r,J"))))] "" "@ add\\t%w0, %w1, %2 add\\t%w0, %w1, %w2 sub\\t%w0, %w1, #%n2" [(set_attr "type" "alu_imm,alu_reg,alu_imm")] ) (define_insn "*adddi3_aarch64" [(set (match_operand:DI 0 "register_operand" "=rk,rk,rk,!w") (plus:DI (match_operand:DI 1 "register_operand" "%rk,rk,rk,!w") (match_operand:DI 2 "aarch64_plus_operand" "I,r,J,!w")))] "" "@ add\\t%x0, %x1, %2 add\\t%x0, %x1, %x2 sub\\t%x0, %x1, #%n2 add\\t%d0, %d1, %d2" [(set_attr "type" "alu_imm,alu_reg,alu_imm,alu_reg") (set_attr "simd" "*,*,*,yes")] ) (define_insn "*add3_compare0" [(set (reg:CC_NZ CC_REGNUM) (compare:CC_NZ (plus:GPI (match_operand:GPI 1 "register_operand" "%r,r,r") (match_operand:GPI 2 "aarch64_plus_operand" "r,I,J")) (const_int 0))) (set (match_operand:GPI 0 "register_operand" "=r,r,r") (plus:GPI (match_dup 1) (match_dup 2)))] "" "@ adds\\t%0, %1, %2 adds\\t%0, %1, %2 subs\\t%0, %1, #%n2" [(set_attr "type" "alus_reg,alus_imm,alus_imm")] ) ;; zero_extend version of above (define_insn "*addsi3_compare0_uxtw" [(set (reg:CC_NZ CC_REGNUM) (compare:CC_NZ (plus:SI (match_operand:SI 1 "register_operand" "%r,r,r") (match_operand:SI 2 "aarch64_plus_operand" "r,I,J")) (const_int 0))) (set (match_operand:DI 0 "register_operand" "=r,r,r") (zero_extend:DI (plus:SI (match_dup 1) (match_dup 2))))] "" "@ adds\\t%w0, %w1, %w2 adds\\t%w0, %w1, %w2 subs\\t%w0, %w1, #%n2" [(set_attr "type" "alus_reg,alus_imm,alus_imm")] ) (define_insn "*adds_mul_imm_" [(set (reg:CC_NZ CC_REGNUM) (compare:CC_NZ (plus:GPI (mult:GPI (match_operand:GPI 1 "register_operand" "r") (match_operand:QI 2 "aarch64_pwr_2_" "n")) (match_operand:GPI 3 "register_operand" "r")) (const_int 0))) (set (match_operand:GPI 0 "register_operand" "=r") (plus:GPI (mult:GPI (match_dup 1) (match_dup 2)) (match_dup 3)))] "" "adds\\t%0, %3, %1, lsl %p2" [(set_attr "type" "alus_shift_imm")] ) (define_insn "*subs_mul_imm_" [(set (reg:CC_NZ CC_REGNUM) (compare:CC_NZ (minus:GPI (match_operand:GPI 1 "register_operand" "r") (mult:GPI (match_operand:GPI 2 "register_operand" "r") (match_operand:QI 3 "aarch64_pwr_2_" "n"))) (const_int 0))) (set (match_operand:GPI 0 "register_operand" "=r") (minus:GPI (match_dup 1) (mult:GPI (match_dup 2) (match_dup 3))))] "" "subs\\t%0, %1, %2, lsl %p3" [(set_attr "type" "alus_shift_imm")] ) (define_insn "*adds__" [(set (reg:CC_NZ CC_REGNUM) (compare:CC_NZ (plus:GPI (ANY_EXTEND:GPI (match_operand:ALLX 1 "register_operand" "r")) (match_operand:GPI 2 "register_operand" "r")) (const_int 0))) (set (match_operand:GPI 0 "register_operand" "=r") (plus:GPI (ANY_EXTEND:GPI (match_dup 1)) (match_dup 2)))] "" "adds\\t%0, %2, %1, xt" [(set_attr "type" "alus_ext")] ) (define_insn "*subs__" [(set (reg:CC_NZ CC_REGNUM) (compare:CC_NZ 
(minus:GPI (match_operand:GPI 1 "register_operand" "r") (ANY_EXTEND:GPI (match_operand:ALLX 2 "register_operand" "r"))) (const_int 0))) (set (match_operand:GPI 0 "register_operand" "=r") (minus:GPI (match_dup 1) (ANY_EXTEND:GPI (match_dup 2))))] "" "subs\\t%0, %1, %2, xt" [(set_attr "type" "alus_ext")] ) (define_insn "*adds__multp2" [(set (reg:CC_NZ CC_REGNUM) (compare:CC_NZ (plus:GPI (ANY_EXTRACT:GPI (mult:GPI (match_operand:GPI 1 "register_operand" "r") (match_operand 2 "aarch64_pwr_imm3" "Up3")) (match_operand 3 "const_int_operand" "n") (const_int 0)) (match_operand:GPI 4 "register_operand" "r")) (const_int 0))) (set (match_operand:GPI 0 "register_operand" "=r") (plus:GPI (ANY_EXTRACT:GPI (mult:GPI (match_dup 1) (match_dup 2)) (match_dup 3) (const_int 0)) (match_dup 4)))] "aarch64_is_extend_from_extract (mode, operands[2], operands[3])" "adds\\t%0, %4, %1, xt%e3 %p2" [(set_attr "type" "alus_ext")] ) (define_insn "*subs__multp2" [(set (reg:CC_NZ CC_REGNUM) (compare:CC_NZ (minus:GPI (match_operand:GPI 4 "register_operand" "r") (ANY_EXTRACT:GPI (mult:GPI (match_operand:GPI 1 "register_operand" "r") (match_operand 2 "aarch64_pwr_imm3" "Up3")) (match_operand 3 "const_int_operand" "n") (const_int 0))) (const_int 0))) (set (match_operand:GPI 0 "register_operand" "=r") (minus:GPI (match_dup 4) (ANY_EXTRACT:GPI (mult:GPI (match_dup 1) (match_dup 2)) (match_dup 3) (const_int 0))))] "aarch64_is_extend_from_extract (mode, operands[2], operands[3])" "subs\\t%0, %4, %1, xt%e3 %p2" [(set_attr "type" "alus_ext")] ) (define_insn "*add3nr_compare0" [(set (reg:CC_NZ CC_REGNUM) (compare:CC_NZ (plus:GPI (match_operand:GPI 0 "register_operand" "%r,r,r") (match_operand:GPI 1 "aarch64_plus_operand" "r,I,J")) (const_int 0)))] "" "@ cmn\\t%0, %1 cmn\\t%0, %1 cmp\\t%0, #%n1" [(set_attr "type" "alus_reg,alus_imm,alus_imm")] ) (define_insn "*compare_neg" [(set (reg:CC_Z CC_REGNUM) (compare:CC_Z (neg:GPI (match_operand:GPI 0 "register_operand" "r")) (match_operand:GPI 1 "register_operand" "r")))] "" "cmn\\t%1, %0" [(set_attr "type" "alus_reg")] ) (define_insn "*add__" [(set (match_operand:GPI 0 "register_operand" "=r") (plus:GPI (ASHIFT:GPI (match_operand:GPI 1 "register_operand" "r") (match_operand:QI 2 "aarch64_shift_imm_" "n")) (match_operand:GPI 3 "register_operand" "r")))] "" "add\\t%0, %3, %1, %2" [(set_attr "type" "alu_shift_imm")] ) ;; zero_extend version of above (define_insn "*add__si_uxtw" [(set (match_operand:DI 0 "register_operand" "=r") (zero_extend:DI (plus:SI (ASHIFT:SI (match_operand:SI 1 "register_operand" "r") (match_operand:QI 2 "aarch64_shift_imm_si" "n")) (match_operand:SI 3 "register_operand" "r"))))] "" "add\\t%w0, %w3, %w1, %2" [(set_attr "type" "alu_shift_imm")] ) (define_insn "*add_mul_imm_" [(set (match_operand:GPI 0 "register_operand" "=r") (plus:GPI (mult:GPI (match_operand:GPI 1 "register_operand" "r") (match_operand:QI 2 "aarch64_pwr_2_" "n")) (match_operand:GPI 3 "register_operand" "r")))] "" "add\\t%0, %3, %1, lsl %p2" [(set_attr "type" "alu_shift_imm")] ) (define_insn "*add__" [(set (match_operand:GPI 0 "register_operand" "=rk") (plus:GPI (ANY_EXTEND:GPI (match_operand:ALLX 1 "register_operand" "r")) (match_operand:GPI 2 "register_operand" "r")))] "" "add\\t%0, %2, %1, xt" [(set_attr "type" "alu_ext")] ) ;; zero_extend version of above (define_insn "*add__si_uxtw" [(set (match_operand:DI 0 "register_operand" "=rk") (zero_extend:DI (plus:SI (ANY_EXTEND:SI (match_operand:SHORT 1 "register_operand" "r")) (match_operand:GPI 2 "register_operand" "r"))))] "" "add\\t%w0, %w2, %w1, xt" 
[(set_attr "type" "alu_ext")] ) (define_insn "*add__shft_" [(set (match_operand:GPI 0 "register_operand" "=rk") (plus:GPI (ashift:GPI (ANY_EXTEND:GPI (match_operand:ALLX 1 "register_operand" "r")) (match_operand 2 "aarch64_imm3" "Ui3")) (match_operand:GPI 3 "register_operand" "r")))] "" "add\\t%0, %3, %1, xt %2" [(set_attr "type" "alu_ext")] ) ;; zero_extend version of above (define_insn "*add__shft_si_uxtw" [(set (match_operand:DI 0 "register_operand" "=rk") (zero_extend:DI (plus:SI (ashift:SI (ANY_EXTEND:SI (match_operand:SHORT 1 "register_operand" "r")) (match_operand 2 "aarch64_imm3" "Ui3")) (match_operand:SI 3 "register_operand" "r"))))] "" "add\\t%w0, %w3, %w1, xt %2" [(set_attr "type" "alu_ext")] ) (define_insn "*add__mult_" [(set (match_operand:GPI 0 "register_operand" "=rk") (plus:GPI (mult:GPI (ANY_EXTEND:GPI (match_operand:ALLX 1 "register_operand" "r")) (match_operand 2 "aarch64_pwr_imm3" "Up3")) (match_operand:GPI 3 "register_operand" "r")))] "" "add\\t%0, %3, %1, xt %p2" [(set_attr "type" "alu_ext")] ) ;; zero_extend version of above (define_insn "*add__mult_si_uxtw" [(set (match_operand:DI 0 "register_operand" "=rk") (zero_extend:DI (plus:SI (mult:SI (ANY_EXTEND:SI (match_operand:SHORT 1 "register_operand" "r")) (match_operand 2 "aarch64_pwr_imm3" "Up3")) (match_operand:SI 3 "register_operand" "r"))))] "" "add\\t%w0, %w3, %w1, xt %p2" [(set_attr "type" "alu_ext")] ) (define_insn "*add__multp2" [(set (match_operand:GPI 0 "register_operand" "=rk") (plus:GPI (ANY_EXTRACT:GPI (mult:GPI (match_operand:GPI 1 "register_operand" "r") (match_operand 2 "aarch64_pwr_imm3" "Up3")) (match_operand 3 "const_int_operand" "n") (const_int 0)) (match_operand:GPI 4 "register_operand" "r")))] "aarch64_is_extend_from_extract (mode, operands[2], operands[3])" "add\\t%0, %4, %1, xt%e3 %p2" [(set_attr "type" "alu_ext")] ) ;; zero_extend version of above (define_insn "*add_si_multp2_uxtw" [(set (match_operand:DI 0 "register_operand" "=rk") (zero_extend:DI (plus:SI (ANY_EXTRACT:SI (mult:SI (match_operand:SI 1 "register_operand" "r") (match_operand 2 "aarch64_pwr_imm3" "Up3")) (match_operand 3 "const_int_operand" "n") (const_int 0)) (match_operand:SI 4 "register_operand" "r"))))] "aarch64_is_extend_from_extract (SImode, operands[2], operands[3])" "add\\t%w0, %w4, %w1, xt%e3 %p2" [(set_attr "type" "alu_ext")] ) (define_insn "*add3_carryin" [(set (match_operand:GPI 0 "register_operand" "=r") (plus:GPI (geu:GPI (reg:CC CC_REGNUM) (const_int 0)) (plus:GPI (match_operand:GPI 1 "register_operand" "r") (match_operand:GPI 2 "register_operand" "r"))))] "" "adc\\t%0, %1, %2" [(set_attr "type" "adc_reg")] ) ;; zero_extend version of above (define_insn "*addsi3_carryin_uxtw" [(set (match_operand:DI 0 "register_operand" "=r") (zero_extend:DI (plus:SI (geu:SI (reg:CC CC_REGNUM) (const_int 0)) (plus:SI (match_operand:SI 1 "register_operand" "r") (match_operand:SI 2 "register_operand" "r")))))] "" "adc\\t%w0, %w1, %w2" [(set_attr "type" "adc_reg")] ) (define_insn "*add3_carryin_alt1" [(set (match_operand:GPI 0 "register_operand" "=r") (plus:GPI (plus:GPI (match_operand:GPI 1 "register_operand" "r") (match_operand:GPI 2 "register_operand" "r")) (geu:GPI (reg:CC CC_REGNUM) (const_int 0))))] "" "adc\\t%0, %1, %2" [(set_attr "type" "adc_reg")] ) ;; zero_extend version of above (define_insn "*addsi3_carryin_alt1_uxtw" [(set (match_operand:DI 0 "register_operand" "=r") (zero_extend:DI (plus:SI (plus:SI (match_operand:SI 1 "register_operand" "r") (match_operand:SI 2 "register_operand" "r")) (geu:SI (reg:CC CC_REGNUM) 
(const_int 0)))))] "" "adc\\t%w0, %w1, %w2" [(set_attr "type" "adc_reg")] ) (define_insn "*add3_carryin_alt2" [(set (match_operand:GPI 0 "register_operand" "=r") (plus:GPI (plus:GPI (geu:GPI (reg:CC CC_REGNUM) (const_int 0)) (match_operand:GPI 1 "register_operand" "r")) (match_operand:GPI 2 "register_operand" "r")))] "" "adc\\t%0, %1, %2" [(set_attr "type" "adc_reg")] ) ;; zero_extend version of above (define_insn "*addsi3_carryin_alt2_uxtw" [(set (match_operand:DI 0 "register_operand" "=r") (zero_extend:DI (plus:SI (plus:SI (geu:SI (reg:CC CC_REGNUM) (const_int 0)) (match_operand:SI 1 "register_operand" "r")) (match_operand:SI 2 "register_operand" "r"))))] "" "adc\\t%w0, %w1, %w2" [(set_attr "type" "adc_reg")] ) (define_insn "*add3_carryin_alt3" [(set (match_operand:GPI 0 "register_operand" "=r") (plus:GPI (plus:GPI (geu:GPI (reg:CC CC_REGNUM) (const_int 0)) (match_operand:GPI 2 "register_operand" "r")) (match_operand:GPI 1 "register_operand" "r")))] "" "adc\\t%0, %1, %2" [(set_attr "type" "adc_reg")] ) ;; zero_extend version of above (define_insn "*addsi3_carryin_alt3_uxtw" [(set (match_operand:DI 0 "register_operand" "=r") (zero_extend:DI (plus:SI (plus:SI (geu:SI (reg:CC CC_REGNUM) (const_int 0)) (match_operand:SI 2 "register_operand" "r")) (match_operand:SI 1 "register_operand" "r"))))] "" "adc\\t%w0, %w1, %w2" [(set_attr "type" "adc_reg")] ) (define_insn "*add_uxt_multp2" [(set (match_operand:GPI 0 "register_operand" "=rk") (plus:GPI (and:GPI (mult:GPI (match_operand:GPI 1 "register_operand" "r") (match_operand 2 "aarch64_pwr_imm3" "Up3")) (match_operand 3 "const_int_operand" "n")) (match_operand:GPI 4 "register_operand" "r")))] "aarch64_uxt_size (exact_log2 (INTVAL (operands[2])), INTVAL (operands[3])) != 0" "* operands[3] = GEN_INT (aarch64_uxt_size (exact_log2 (INTVAL (operands[2])), INTVAL (operands[3]))); return \"add\t%0, %4, %1, uxt%e3 %p2\";" [(set_attr "type" "alu_ext")] ) ;; zero_extend version of above (define_insn "*add_uxtsi_multp2_uxtw" [(set (match_operand:DI 0 "register_operand" "=rk") (zero_extend:DI (plus:SI (and:SI (mult:SI (match_operand:SI 1 "register_operand" "r") (match_operand 2 "aarch64_pwr_imm3" "Up3")) (match_operand 3 "const_int_operand" "n")) (match_operand:SI 4 "register_operand" "r"))))] "aarch64_uxt_size (exact_log2 (INTVAL (operands[2])), INTVAL (operands[3])) != 0" "* operands[3] = GEN_INT (aarch64_uxt_size (exact_log2 (INTVAL (operands[2])), INTVAL (operands[3]))); return \"add\t%w0, %w4, %w1, uxt%e3 %p2\";" [(set_attr "type" "alu_ext")] ) (define_insn "subsi3" [(set (match_operand:SI 0 "register_operand" "=rk") (minus:SI (match_operand:SI 1 "register_operand" "r") (match_operand:SI 2 "register_operand" "r")))] "" "sub\\t%w0, %w1, %w2" [(set_attr "type" "alu_reg")] ) ;; zero_extend version of above (define_insn "*subsi3_uxtw" [(set (match_operand:DI 0 "register_operand" "=rk") (zero_extend:DI (minus:SI (match_operand:SI 1 "register_operand" "r") (match_operand:SI 2 "register_operand" "r"))))] "" "sub\\t%w0, %w1, %w2" [(set_attr "type" "alu_reg")] ) (define_insn "subdi3" [(set (match_operand:DI 0 "register_operand" "=rk,!w") (minus:DI (match_operand:DI 1 "register_operand" "r,!w") (match_operand:DI 2 "register_operand" "r,!w")))] "" "@ sub\\t%x0, %x1, %x2 sub\\t%d0, %d1, %d2" [(set_attr "type" "alu_reg, neon_sub") (set_attr "simd" "*,yes")] ) (define_insn "*sub3_compare0" [(set (reg:CC_NZ CC_REGNUM) (compare:CC_NZ (minus:GPI (match_operand:GPI 1 "register_operand" "r") (match_operand:GPI 2 "register_operand" "r")) (const_int 0))) (set 
(match_operand:GPI 0 "register_operand" "=r") (minus:GPI (match_dup 1) (match_dup 2)))] "" "subs\\t%0, %1, %2" [(set_attr "type" "alus_reg")] ) ;; zero_extend version of above (define_insn "*subsi3_compare0_uxtw" [(set (reg:CC_NZ CC_REGNUM) (compare:CC_NZ (minus:SI (match_operand:SI 1 "register_operand" "r") (match_operand:SI 2 "register_operand" "r")) (const_int 0))) (set (match_operand:DI 0 "register_operand" "=r") (zero_extend:DI (minus:SI (match_dup 1) (match_dup 2))))] "" "subs\\t%w0, %w1, %w2" [(set_attr "type" "alus_reg")] ) (define_insn "*sub__" [(set (match_operand:GPI 0 "register_operand" "=r") (minus:GPI (match_operand:GPI 3 "register_operand" "r") (ASHIFT:GPI (match_operand:GPI 1 "register_operand" "r") (match_operand:QI 2 "aarch64_shift_imm_" "n"))))] "" "sub\\t%0, %3, %1, %2" [(set_attr "type" "alu_shift_imm")] ) ;; zero_extend version of above (define_insn "*sub__si_uxtw" [(set (match_operand:DI 0 "register_operand" "=r") (zero_extend:DI (minus:SI (match_operand:SI 3 "register_operand" "r") (ASHIFT:SI (match_operand:SI 1 "register_operand" "r") (match_operand:QI 2 "aarch64_shift_imm_si" "n")))))] "" "sub\\t%w0, %w3, %w1, %2" [(set_attr "type" "alu_shift_imm")] ) (define_insn "*sub_mul_imm_" [(set (match_operand:GPI 0 "register_operand" "=r") (minus:GPI (match_operand:GPI 3 "register_operand" "r") (mult:GPI (match_operand:GPI 1 "register_operand" "r") (match_operand:QI 2 "aarch64_pwr_2_" "n"))))] "" "sub\\t%0, %3, %1, lsl %p2" [(set_attr "type" "alu_shift_imm")] ) ;; zero_extend version of above (define_insn "*sub_mul_imm_si_uxtw" [(set (match_operand:DI 0 "register_operand" "=r") (zero_extend:DI (minus:SI (match_operand:SI 3 "register_operand" "r") (mult:SI (match_operand:SI 1 "register_operand" "r") (match_operand:QI 2 "aarch64_pwr_2_si" "n")))))] "" "sub\\t%w0, %w3, %w1, lsl %p2" [(set_attr "type" "alu_shift_imm")] ) (define_insn "*sub__" [(set (match_operand:GPI 0 "register_operand" "=rk") (minus:GPI (match_operand:GPI 1 "register_operand" "r") (ANY_EXTEND:GPI (match_operand:ALLX 2 "register_operand" "r"))))] "" "sub\\t%0, %1, %2, xt" [(set_attr "type" "alu_ext")] ) ;; zero_extend version of above (define_insn "*sub__si_uxtw" [(set (match_operand:DI 0 "register_operand" "=rk") (zero_extend:DI (minus:SI (match_operand:SI 1 "register_operand" "r") (ANY_EXTEND:SI (match_operand:SHORT 2 "register_operand" "r")))))] "" "sub\\t%w0, %w1, %w2, xt" [(set_attr "type" "alu_ext")] ) (define_insn "*sub__shft_" [(set (match_operand:GPI 0 "register_operand" "=rk") (minus:GPI (match_operand:GPI 1 "register_operand" "r") (ashift:GPI (ANY_EXTEND:GPI (match_operand:ALLX 2 "register_operand" "r")) (match_operand 3 "aarch64_imm3" "Ui3"))))] "" "sub\\t%0, %1, %2, xt %3" [(set_attr "type" "alu_ext")] ) ;; zero_extend version of above (define_insn "*sub__shft_si_uxtw" [(set (match_operand:DI 0 "register_operand" "=rk") (zero_extend:DI (minus:SI (match_operand:SI 1 "register_operand" "r") (ashift:SI (ANY_EXTEND:SI (match_operand:SHORT 2 "register_operand" "r")) (match_operand 3 "aarch64_imm3" "Ui3")))))] "" "sub\\t%w0, %w1, %w2, xt %3" [(set_attr "type" "alu_ext")] ) (define_insn "*sub__multp2" [(set (match_operand:GPI 0 "register_operand" "=rk") (minus:GPI (match_operand:GPI 4 "register_operand" "r") (ANY_EXTRACT:GPI (mult:GPI (match_operand:GPI 1 "register_operand" "r") (match_operand 2 "aarch64_pwr_imm3" "Up3")) (match_operand 3 "const_int_operand" "n") (const_int 0))))] "aarch64_is_extend_from_extract (mode, operands[2], operands[3])" "sub\\t%0, %4, %1, xt%e3 %p2" [(set_attr "type" 
"alu_ext")] ) ;; zero_extend version of above (define_insn "*sub_si_multp2_uxtw" [(set (match_operand:DI 0 "register_operand" "=rk") (zero_extend:DI (minus:SI (match_operand:SI 4 "register_operand" "r") (ANY_EXTRACT:SI (mult:SI (match_operand:SI 1 "register_operand" "r") (match_operand 2 "aarch64_pwr_imm3" "Up3")) (match_operand 3 "const_int_operand" "n") (const_int 0)))))] "aarch64_is_extend_from_extract (SImode, operands[2], operands[3])" "sub\\t%w0, %w4, %w1, xt%e3 %p2" [(set_attr "type" "alu_ext")] ) (define_insn "*sub3_carryin" [(set (match_operand:GPI 0 "register_operand" "=r") (minus:GPI (minus:GPI (match_operand:GPI 1 "register_operand" "r") (ltu:GPI (reg:CC CC_REGNUM) (const_int 0))) (match_operand:GPI 2 "register_operand" "r")))] "" "sbc\\t%0, %1, %2" [(set_attr "type" "adc_reg")] ) ;; zero_extend version of the above (define_insn "*subsi3_carryin_uxtw" [(set (match_operand:DI 0 "register_operand" "=r") (zero_extend:DI (minus:SI (minus:SI (match_operand:SI 1 "register_operand" "r") (ltu:SI (reg:CC CC_REGNUM) (const_int 0))) (match_operand:SI 2 "register_operand" "r"))))] "" "sbc\\t%w0, %w1, %w2" [(set_attr "type" "adc_reg")] ) (define_insn "*sub_uxt_multp2" [(set (match_operand:GPI 0 "register_operand" "=rk") (minus:GPI (match_operand:GPI 4 "register_operand" "r") (and:GPI (mult:GPI (match_operand:GPI 1 "register_operand" "r") (match_operand 2 "aarch64_pwr_imm3" "Up3")) (match_operand 3 "const_int_operand" "n"))))] "aarch64_uxt_size (exact_log2 (INTVAL (operands[2])),INTVAL (operands[3])) != 0" "* operands[3] = GEN_INT (aarch64_uxt_size (exact_log2 (INTVAL (operands[2])), INTVAL (operands[3]))); return \"sub\t%0, %4, %1, uxt%e3 %p2\";" [(set_attr "type" "alu_ext")] ) ;; zero_extend version of above (define_insn "*sub_uxtsi_multp2_uxtw" [(set (match_operand:DI 0 "register_operand" "=rk") (zero_extend:DI (minus:SI (match_operand:SI 4 "register_operand" "r") (and:SI (mult:SI (match_operand:SI 1 "register_operand" "r") (match_operand 2 "aarch64_pwr_imm3" "Up3")) (match_operand 3 "const_int_operand" "n")))))] "aarch64_uxt_size (exact_log2 (INTVAL (operands[2])),INTVAL (operands[3])) != 0" "* operands[3] = GEN_INT (aarch64_uxt_size (exact_log2 (INTVAL (operands[2])), INTVAL (operands[3]))); return \"sub\t%w0, %w4, %w1, uxt%e3 %p2\";" [(set_attr "type" "alu_ext")] ) (define_insn_and_split "absdi2" [(set (match_operand:DI 0 "register_operand" "=r,w") (abs:DI (match_operand:DI 1 "register_operand" "r,w"))) (clobber (match_scratch:DI 2 "=&r,X"))] "" "@ # abs\\t%d0, %d1" "reload_completed && GP_REGNUM_P (REGNO (operands[0])) && GP_REGNUM_P (REGNO (operands[1]))" [(const_int 0)] { emit_insn (gen_rtx_SET (VOIDmode, operands[2], gen_rtx_XOR (DImode, gen_rtx_ASHIFTRT (DImode, operands[1], GEN_INT (63)), operands[1]))); emit_insn (gen_rtx_SET (VOIDmode, operands[0], gen_rtx_MINUS (DImode, operands[2], gen_rtx_ASHIFTRT (DImode, operands[1], GEN_INT (63))))); DONE; } [(set_attr "type" "alu_reg")] ) (define_insn "neg2" [(set (match_operand:GPI 0 "register_operand" "=r,w") (neg:GPI (match_operand:GPI 1 "register_operand" "r,w")))] "" "@ neg\\t%0, %1 neg\\t%0, %1" [(set_attr "type" "alu_reg, neon_neg") (set_attr "simd" "*,yes")] ) ;; zero_extend version of above (define_insn "*negsi2_uxtw" [(set (match_operand:DI 0 "register_operand" "=r") (zero_extend:DI (neg:SI (match_operand:SI 1 "register_operand" "r"))))] "" "neg\\t%w0, %w1" [(set_attr "type" "alu_reg")] ) (define_insn "*ngc" [(set (match_operand:GPI 0 "register_operand" "=r") (minus:GPI (neg:GPI (ltu:GPI (reg:CC CC_REGNUM) (const_int 0))) 
(match_operand:GPI 1 "register_operand" "r")))] "" "ngc\\t%0, %1" [(set_attr "type" "adc_reg")] ) (define_insn "*ngcsi_uxtw" [(set (match_operand:DI 0 "register_operand" "=r") (zero_extend:DI (minus:SI (neg:SI (ltu:SI (reg:CC CC_REGNUM) (const_int 0))) (match_operand:SI 1 "register_operand" "r"))))] "" "ngc\\t%w0, %w1" [(set_attr "type" "adc_reg")] ) (define_insn "*neg2_compare0" [(set (reg:CC_NZ CC_REGNUM) (compare:CC_NZ (neg:GPI (match_operand:GPI 1 "register_operand" "r")) (const_int 0))) (set (match_operand:GPI 0 "register_operand" "=r") (neg:GPI (match_dup 1)))] "" "negs\\t%0, %1" [(set_attr "type" "alus_reg")] ) ;; zero_extend version of above (define_insn "*negsi2_compare0_uxtw" [(set (reg:CC_NZ CC_REGNUM) (compare:CC_NZ (neg:SI (match_operand:SI 1 "register_operand" "r")) (const_int 0))) (set (match_operand:DI 0 "register_operand" "=r") (zero_extend:DI (neg:SI (match_dup 1))))] "" "negs\\t%w0, %w1" [(set_attr "type" "alus_reg")] ) (define_insn "*neg_3_compare0" [(set (reg:CC_NZ CC_REGNUM) (compare:CC_NZ (neg:GPI (ASHIFT:GPI (match_operand:GPI 1 "register_operand" "r") (match_operand:QI 2 "aarch64_shift_imm_" "n"))) (const_int 0))) (set (match_operand:GPI 0 "register_operand" "=r") (neg:GPI (ASHIFT:GPI (match_dup 1) (match_dup 2))))] "" "negs\\t%0, %1, %2" [(set_attr "type" "alus_shift_imm")] ) (define_insn "*neg__2" [(set (match_operand:GPI 0 "register_operand" "=r") (neg:GPI (ASHIFT:GPI (match_operand:GPI 1 "register_operand" "r") (match_operand:QI 2 "aarch64_shift_imm_" "n"))))] "" "neg\\t%0, %1, %2" [(set_attr "type" "alu_shift_imm")] ) ;; zero_extend version of above (define_insn "*neg__si2_uxtw" [(set (match_operand:DI 0 "register_operand" "=r") (zero_extend:DI (neg:SI (ASHIFT:SI (match_operand:SI 1 "register_operand" "r") (match_operand:QI 2 "aarch64_shift_imm_si" "n")))))] "" "neg\\t%w0, %w1, %2" [(set_attr "type" "alu_shift_imm")] ) (define_insn "*neg_mul_imm_2" [(set (match_operand:GPI 0 "register_operand" "=r") (neg:GPI (mult:GPI (match_operand:GPI 1 "register_operand" "r") (match_operand:QI 2 "aarch64_pwr_2_" "n"))))] "" "neg\\t%0, %1, lsl %p2" [(set_attr "type" "alu_shift_imm")] ) ;; zero_extend version of above (define_insn "*neg_mul_imm_si2_uxtw" [(set (match_operand:DI 0 "register_operand" "=r") (zero_extend:DI (neg:SI (mult:SI (match_operand:SI 1 "register_operand" "r") (match_operand:QI 2 "aarch64_pwr_2_si" "n")))))] "" "neg\\t%w0, %w1, lsl %p2" [(set_attr "type" "alu_shift_imm")] ) (define_insn "mul3" [(set (match_operand:GPI 0 "register_operand" "=r") (mult:GPI (match_operand:GPI 1 "register_operand" "r") (match_operand:GPI 2 "register_operand" "r")))] "" "mul\\t%0, %1, %2" [(set_attr "type" "mul")] ) ;; zero_extend version of above (define_insn "*mulsi3_uxtw" [(set (match_operand:DI 0 "register_operand" "=r") (zero_extend:DI (mult:SI (match_operand:SI 1 "register_operand" "r") (match_operand:SI 2 "register_operand" "r"))))] "" "mul\\t%w0, %w1, %w2" [(set_attr "type" "mul")] ) (define_insn "*madd" [(set (match_operand:GPI 0 "register_operand" "=r") (plus:GPI (mult:GPI (match_operand:GPI 1 "register_operand" "r") (match_operand:GPI 2 "register_operand" "r")) (match_operand:GPI 3 "register_operand" "r")))] "" "madd\\t%0, %1, %2, %3" [(set_attr "type" "mla")] ) ;; zero_extend version of above (define_insn "*maddsi_uxtw" [(set (match_operand:DI 0 "register_operand" "=r") (zero_extend:DI (plus:SI (mult:SI (match_operand:SI 1 "register_operand" "r") (match_operand:SI 2 "register_operand" "r")) (match_operand:SI 3 "register_operand" "r"))))] "" "madd\\t%w0, %w1, %w2, 
%w3" [(set_attr "type" "mla")] ) (define_insn "*msub" [(set (match_operand:GPI 0 "register_operand" "=r") (minus:GPI (match_operand:GPI 3 "register_operand" "r") (mult:GPI (match_operand:GPI 1 "register_operand" "r") (match_operand:GPI 2 "register_operand" "r"))))] "" "msub\\t%0, %1, %2, %3" [(set_attr "type" "mla")] ) ;; zero_extend version of above (define_insn "*msubsi_uxtw" [(set (match_operand:DI 0 "register_operand" "=r") (zero_extend:DI (minus:SI (match_operand:SI 3 "register_operand" "r") (mult:SI (match_operand:SI 1 "register_operand" "r") (match_operand:SI 2 "register_operand" "r")))))] "" "msub\\t%w0, %w1, %w2, %w3" [(set_attr "type" "mla")] ) (define_insn "*mul_neg" [(set (match_operand:GPI 0 "register_operand" "=r") (mult:GPI (neg:GPI (match_operand:GPI 1 "register_operand" "r")) (match_operand:GPI 2 "register_operand" "r")))] "" "mneg\\t%0, %1, %2" [(set_attr "type" "mul")] ) ;; zero_extend version of above (define_insn "*mulsi_neg_uxtw" [(set (match_operand:DI 0 "register_operand" "=r") (zero_extend:DI (mult:SI (neg:SI (match_operand:SI 1 "register_operand" "r")) (match_operand:SI 2 "register_operand" "r"))))] "" "mneg\\t%w0, %w1, %w2" [(set_attr "type" "mul")] ) (define_insn "mulsidi3" [(set (match_operand:DI 0 "register_operand" "=r") (mult:DI (ANY_EXTEND:DI (match_operand:SI 1 "register_operand" "r")) (ANY_EXTEND:DI (match_operand:SI 2 "register_operand" "r"))))] "" "mull\\t%0, %w1, %w2" [(set_attr "type" "mull")] ) (define_insn "maddsidi4" [(set (match_operand:DI 0 "register_operand" "=r") (plus:DI (mult:DI (ANY_EXTEND:DI (match_operand:SI 1 "register_operand" "r")) (ANY_EXTEND:DI (match_operand:SI 2 "register_operand" "r"))) (match_operand:DI 3 "register_operand" "r")))] "" "maddl\\t%0, %w1, %w2, %3" [(set_attr "type" "mlal")] ) (define_insn "msubsidi4" [(set (match_operand:DI 0 "register_operand" "=r") (minus:DI (match_operand:DI 3 "register_operand" "r") (mult:DI (ANY_EXTEND:DI (match_operand:SI 1 "register_operand" "r")) (ANY_EXTEND:DI (match_operand:SI 2 "register_operand" "r")))))] "" "msubl\\t%0, %w1, %w2, %3" [(set_attr "type" "mlal")] ) (define_insn "*mulsidi_neg" [(set (match_operand:DI 0 "register_operand" "=r") (mult:DI (neg:DI (ANY_EXTEND:DI (match_operand:SI 1 "register_operand" "r"))) (ANY_EXTEND:DI (match_operand:SI 2 "register_operand" "r"))))] "" "mnegl\\t%0, %w1, %w2" [(set_attr "type" "mull")] ) (define_insn "muldi3_highpart" [(set (match_operand:DI 0 "register_operand" "=r") (truncate:DI (lshiftrt:TI (mult:TI (ANY_EXTEND:TI (match_operand:DI 1 "register_operand" "r")) (ANY_EXTEND:TI (match_operand:DI 2 "register_operand" "r"))) (const_int 64))))] "" "mulh\\t%0, %1, %2" [(set_attr "type" "mull")] ) (define_insn "div3" [(set (match_operand:GPI 0 "register_operand" "=r") (ANY_DIV:GPI (match_operand:GPI 1 "register_operand" "r") (match_operand:GPI 2 "register_operand" "r")))] "" "div\\t%0, %1, %2" [(set_attr "type" "div")] ) ;; zero_extend version of above (define_insn "*divsi3_uxtw" [(set (match_operand:DI 0 "register_operand" "=r") (zero_extend:DI (ANY_DIV:SI (match_operand:SI 1 "register_operand" "r") (match_operand:SI 2 "register_operand" "r"))))] "" "div\\t%w0, %w1, %w2" [(set_attr "type" "div")] ) ;; ------------------------------------------------------------------- ;; Comparison insns ;; ------------------------------------------------------------------- (define_insn "*cmp" [(set (reg:CC CC_REGNUM) (compare:CC (match_operand:GPI 0 "register_operand" "r,r,r") (match_operand:GPI 1 "aarch64_plus_operand" "r,I,J")))] "" "@ cmp\\t%0, %1 cmp\\t%0, %1 
cmn\\t%0, #%n1" [(set_attr "type" "alus_reg,alus_imm,alus_imm")] ) (define_insn "*cmp" [(set (reg:CCFP CC_REGNUM) (compare:CCFP (match_operand:GPF 0 "register_operand" "w,w") (match_operand:GPF 1 "aarch64_fp_compare_operand" "Y,w")))] "TARGET_FLOAT" "@ fcmp\\t%0, #0.0 fcmp\\t%0, %1" [(set_attr "type" "fcmp")] ) (define_insn "*cmpe" [(set (reg:CCFPE CC_REGNUM) (compare:CCFPE (match_operand:GPF 0 "register_operand" "w,w") (match_operand:GPF 1 "aarch64_fp_compare_operand" "Y,w")))] "TARGET_FLOAT" "@ fcmpe\\t%0, #0.0 fcmpe\\t%0, %1" [(set_attr "type" "fcmp")] ) (define_insn "*cmp_swp__reg" [(set (reg:CC_SWP CC_REGNUM) (compare:CC_SWP (ASHIFT:GPI (match_operand:GPI 0 "register_operand" "r") (match_operand:QI 1 "aarch64_shift_imm_" "n")) (match_operand:GPI 2 "aarch64_reg_or_zero" "rZ")))] "" "cmp\\t%2, %0, %1" [(set_attr "type" "alus_shift_imm")] ) (define_insn "*cmp_swp__reg" [(set (reg:CC_SWP CC_REGNUM) (compare:CC_SWP (ANY_EXTEND:GPI (match_operand:ALLX 0 "register_operand" "r")) (match_operand:GPI 1 "register_operand" "r")))] "" "cmp\\t%1, %0, xt" [(set_attr "type" "alus_ext")] ) (define_insn "*cmp_swp__shft_" [(set (reg:CC_SWP CC_REGNUM) (compare:CC_SWP (ashift:GPI (ANY_EXTEND:GPI (match_operand:ALLX 0 "register_operand" "r")) (match_operand 1 "aarch64_imm3" "Ui3")) (match_operand:GPI 2 "register_operand" "r")))] "" "cmp\\t%2, %0, xt %1" [(set_attr "type" "alus_ext")] ) ;; ------------------------------------------------------------------- ;; Store-flag and conditional select insns ;; ------------------------------------------------------------------- (define_expand "cstore4" [(set (match_operand:SI 0 "register_operand" "") (match_operator:SI 1 "aarch64_comparison_operator" [(match_operand:GPI 2 "register_operand" "") (match_operand:GPI 3 "aarch64_plus_operand" "")]))] "" " operands[2] = aarch64_gen_compare_reg (GET_CODE (operands[1]), operands[2], operands[3]); operands[3] = const0_rtx; " ) (define_expand "cstore4" [(set (match_operand:SI 0 "register_operand" "") (match_operator:SI 1 "aarch64_comparison_operator" [(match_operand:GPF 2 "register_operand" "") (match_operand:GPF 3 "register_operand" "")]))] "" " operands[2] = aarch64_gen_compare_reg (GET_CODE (operands[1]), operands[2], operands[3]); operands[3] = const0_rtx; " ) (define_insn "*cstore_insn" [(set (match_operand:ALLI 0 "register_operand" "=r") (match_operator:ALLI 1 "aarch64_comparison_operator" [(match_operand 2 "cc_register" "") (const_int 0)]))] "" "cset\\t%0, %m1" [(set_attr "type" "csel")] ) ;; zero_extend version of the above (define_insn "*cstoresi_insn_uxtw" [(set (match_operand:DI 0 "register_operand" "=r") (zero_extend:DI (match_operator:SI 1 "aarch64_comparison_operator" [(match_operand 2 "cc_register" "") (const_int 0)])))] "" "cset\\t%w0, %m1" [(set_attr "type" "csel")] ) (define_insn "cstore_neg" [(set (match_operand:ALLI 0 "register_operand" "=r") (neg:ALLI (match_operator:ALLI 1 "aarch64_comparison_operator" [(match_operand 2 "cc_register" "") (const_int 0)])))] "" "csetm\\t%0, %m1" [(set_attr "type" "csel")] ) ;; zero_extend version of the above (define_insn "*cstoresi_neg_uxtw" [(set (match_operand:DI 0 "register_operand" "=r") (zero_extend:DI (neg:SI (match_operator:SI 1 "aarch64_comparison_operator" [(match_operand 2 "cc_register" "") (const_int 0)]))))] "" "csetm\\t%w0, %m1" [(set_attr "type" "csel")] ) (define_expand "cmov6" [(set (match_operand:GPI 0 "register_operand" "") (if_then_else:GPI (match_operator 1 "aarch64_comparison_operator" [(match_operand:GPI 2 "register_operand" "") (match_operand:GPI 3 
"aarch64_plus_operand" "")]) (match_operand:GPI 4 "register_operand" "") (match_operand:GPI 5 "register_operand" "")))] "" " operands[2] = aarch64_gen_compare_reg (GET_CODE (operands[1]), operands[2], operands[3]); operands[3] = const0_rtx; " ) (define_expand "cmov6" [(set (match_operand:GPF 0 "register_operand" "") (if_then_else:GPF (match_operator 1 "aarch64_comparison_operator" [(match_operand:GPF 2 "register_operand" "") (match_operand:GPF 3 "register_operand" "")]) (match_operand:GPF 4 "register_operand" "") (match_operand:GPF 5 "register_operand" "")))] "" " operands[2] = aarch64_gen_compare_reg (GET_CODE (operands[1]), operands[2], operands[3]); operands[3] = const0_rtx; " ) (define_insn "*cmov_insn" [(set (match_operand:ALLI 0 "register_operand" "=r,r,r,r,r,r,r") (if_then_else:ALLI (match_operator 1 "aarch64_comparison_operator" [(match_operand 2 "cc_register" "") (const_int 0)]) (match_operand:ALLI 3 "aarch64_reg_zero_or_m1_or_1" "rZ,rZ,UsM,rZ,Ui1,UsM,Ui1") (match_operand:ALLI 4 "aarch64_reg_zero_or_m1_or_1" "rZ,UsM,rZ,Ui1,rZ,UsM,Ui1")))] "!((operands[3] == const1_rtx && operands[4] == constm1_rtx) || (operands[3] == constm1_rtx && operands[4] == const1_rtx))" ;; Final two alternatives should be unreachable, but included for completeness "@ csel\\t%0, %3, %4, %m1 csinv\\t%0, %3, zr, %m1 csinv\\t%0, %4, zr, %M1 csinc\\t%0, %3, zr, %m1 csinc\\t%0, %4, zr, %M1 mov\\t%0, -1 mov\\t%0, 1" [(set_attr "type" "csel")] ) ;; zero_extend version of above (define_insn "*cmovsi_insn_uxtw" [(set (match_operand:DI 0 "register_operand" "=r,r,r,r,r,r,r") (zero_extend:DI (if_then_else:SI (match_operator 1 "aarch64_comparison_operator" [(match_operand 2 "cc_register" "") (const_int 0)]) (match_operand:SI 3 "aarch64_reg_zero_or_m1_or_1" "rZ,rZ,UsM,rZ,Ui1,UsM,Ui1") (match_operand:SI 4 "aarch64_reg_zero_or_m1_or_1" "rZ,UsM,rZ,Ui1,rZ,UsM,Ui1"))))] "!((operands[3] == const1_rtx && operands[4] == constm1_rtx) || (operands[3] == constm1_rtx && operands[4] == const1_rtx))" ;; Final two alternatives should be unreachable, but included for completeness "@ csel\\t%w0, %w3, %w4, %m1 csinv\\t%w0, %w3, wzr, %m1 csinv\\t%w0, %w4, wzr, %M1 csinc\\t%w0, %w3, wzr, %m1 csinc\\t%w0, %w4, wzr, %M1 mov\\t%w0, -1 mov\\t%w0, 1" [(set_attr "type" "csel")] ) (define_insn "*cmov_insn" [(set (match_operand:GPF 0 "register_operand" "=w") (if_then_else:GPF (match_operator 1 "aarch64_comparison_operator" [(match_operand 2 "cc_register" "") (const_int 0)]) (match_operand:GPF 3 "register_operand" "w") (match_operand:GPF 4 "register_operand" "w")))] "TARGET_FLOAT" "fcsel\\t%0, %3, %4, %m1" [(set_attr "type" "fcsel")] ) (define_expand "movcc" [(set (match_operand:ALLI 0 "register_operand" "") (if_then_else:ALLI (match_operand 1 "aarch64_comparison_operator" "") (match_operand:ALLI 2 "register_operand" "") (match_operand:ALLI 3 "register_operand" "")))] "" { rtx ccreg; enum rtx_code code = GET_CODE (operands[1]); if (code == UNEQ || code == LTGT) FAIL; ccreg = aarch64_gen_compare_reg (code, XEXP (operands[1], 0), XEXP (operands[1], 1)); operands[1] = gen_rtx_fmt_ee (code, VOIDmode, ccreg, const0_rtx); } ) (define_expand "movcc" [(set (match_operand:GPI 0 "register_operand" "") (if_then_else:GPI (match_operand 1 "aarch64_comparison_operator" "") (match_operand:GPF 2 "register_operand" "") (match_operand:GPF 3 "register_operand" "")))] "" { rtx ccreg; enum rtx_code code = GET_CODE (operands[1]); if (code == UNEQ || code == LTGT) FAIL; ccreg = aarch64_gen_compare_reg (code, XEXP (operands[1], 0), XEXP (operands[1], 1)); operands[1] = 
gen_rtx_fmt_ee (code, VOIDmode, ccreg, const0_rtx); } ) (define_insn "*csinc2_insn" [(set (match_operand:GPI 0 "register_operand" "=r") (plus:GPI (match_operator:GPI 2 "aarch64_comparison_operator" [(match_operand:CC 3 "cc_register" "") (const_int 0)]) (match_operand:GPI 1 "register_operand" "r")))] "" "csinc\\t%0, %1, %1, %M2" [(set_attr "type" "csel")] ) (define_insn "csinc3_insn" [(set (match_operand:GPI 0 "register_operand" "=r") (if_then_else:GPI (match_operator:GPI 1 "aarch64_comparison_operator" [(match_operand:CC 2 "cc_register" "") (const_int 0)]) (plus:GPI (match_operand:GPI 3 "register_operand" "r") (const_int 1)) (match_operand:GPI 4 "aarch64_reg_or_zero" "rZ")))] "" "csinc\\t%0, %4, %3, %M1" [(set_attr "type" "csel")] ) (define_insn "*csinv3_insn" [(set (match_operand:GPI 0 "register_operand" "=r") (if_then_else:GPI (match_operator:GPI 1 "aarch64_comparison_operator" [(match_operand:CC 2 "cc_register" "") (const_int 0)]) (not:GPI (match_operand:GPI 3 "register_operand" "r")) (match_operand:GPI 4 "aarch64_reg_or_zero" "rZ")))] "" "csinv\\t%0, %4, %3, %M1" [(set_attr "type" "csel")] ) (define_insn "*csneg3_insn" [(set (match_operand:GPI 0 "register_operand" "=r") (if_then_else:GPI (match_operator:GPI 1 "aarch64_comparison_operator" [(match_operand:CC 2 "cc_register" "") (const_int 0)]) (neg:GPI (match_operand:GPI 3 "register_operand" "r")) (match_operand:GPI 4 "aarch64_reg_or_zero" "rZ")))] "" "csneg\\t%0, %4, %3, %M1" [(set_attr "type" "csel")] ) ;; ------------------------------------------------------------------- ;; Logical operations ;; ------------------------------------------------------------------- (define_insn "3" [(set (match_operand:GPI 0 "register_operand" "=r,rk") (LOGICAL:GPI (match_operand:GPI 1 "register_operand" "%r,r") (match_operand:GPI 2 "aarch64_logical_operand" "r,")))] "" "\\t%0, %1, %2" [(set_attr "type" "logic_reg,logic_imm")] ) ;; zero_extend version of above (define_insn "*si3_uxtw" [(set (match_operand:DI 0 "register_operand" "=r,rk") (zero_extend:DI (LOGICAL:SI (match_operand:SI 1 "register_operand" "%r,r") (match_operand:SI 2 "aarch64_logical_operand" "r,K"))))] "" "\\t%w0, %w1, %w2" [(set_attr "type" "logic_reg,logic_imm")] ) (define_insn "*and3_compare0" [(set (reg:CC_NZ CC_REGNUM) (compare:CC_NZ (and:GPI (match_operand:GPI 1 "register_operand" "%r,r") (match_operand:GPI 2 "aarch64_logical_operand" "r,")) (const_int 0))) (set (match_operand:GPI 0 "register_operand" "=r,r") (and:GPI (match_dup 1) (match_dup 2)))] "" "ands\\t%0, %1, %2" [(set_attr "type" "logics_reg,logics_imm")] ) ;; zero_extend version of above (define_insn "*andsi3_compare0_uxtw" [(set (reg:CC_NZ CC_REGNUM) (compare:CC_NZ (and:SI (match_operand:SI 1 "register_operand" "%r,r") (match_operand:SI 2 "aarch64_logical_operand" "r,K")) (const_int 0))) (set (match_operand:DI 0 "register_operand" "=r,r") (zero_extend:DI (and:SI (match_dup 1) (match_dup 2))))] "" "ands\\t%w0, %w1, %w2" [(set_attr "type" "logics_reg,logics_imm")] ) (define_insn "*and_3_compare0" [(set (reg:CC_NZ CC_REGNUM) (compare:CC_NZ (and:GPI (SHIFT:GPI (match_operand:GPI 1 "register_operand" "r") (match_operand:QI 2 "aarch64_shift_imm_" "n")) (match_operand:GPI 3 "register_operand" "r")) (const_int 0))) (set (match_operand:GPI 0 "register_operand" "=r") (and:GPI (SHIFT:GPI (match_dup 1) (match_dup 2)) (match_dup 3)))] "" "ands\\t%0, %3, %1, %2" [(set_attr "type" "logics_shift_imm")] ) ;; zero_extend version of above (define_insn "*and_si3_compare0_uxtw" [(set (reg:CC_NZ CC_REGNUM) (compare:CC_NZ (and:SI (SHIFT:SI 
(match_operand:SI 1 "register_operand" "r") (match_operand:QI 2 "aarch64_shift_imm_si" "n")) (match_operand:SI 3 "register_operand" "r")) (const_int 0))) (set (match_operand:DI 0 "register_operand" "=r") (zero_extend:DI (and:SI (SHIFT:SI (match_dup 1) (match_dup 2)) (match_dup 3))))] "" "ands\\t%w0, %w3, %w1, %2" [(set_attr "type" "logics_shift_imm")] ) (define_insn "*_3" [(set (match_operand:GPI 0 "register_operand" "=r") (LOGICAL:GPI (SHIFT:GPI (match_operand:GPI 1 "register_operand" "r") (match_operand:QI 2 "aarch64_shift_imm_" "n")) (match_operand:GPI 3 "register_operand" "r")))] "" "\\t%0, %3, %1, %2" [(set_attr "type" "logic_shift_imm")] ) ;; zero_extend version of above (define_insn "*_si3_uxtw" [(set (match_operand:DI 0 "register_operand" "=r") (zero_extend:DI (LOGICAL:SI (SHIFT:SI (match_operand:SI 1 "register_operand" "r") (match_operand:QI 2 "aarch64_shift_imm_si" "n")) (match_operand:SI 3 "register_operand" "r"))))] "" "\\t%w0, %w3, %w1, %2" [(set_attr "type" "logic_shift_imm")] ) (define_insn "one_cmpl2" [(set (match_operand:GPI 0 "register_operand" "=r") (not:GPI (match_operand:GPI 1 "register_operand" "r")))] "" "mvn\\t%0, %1" [(set_attr "type" "logic_reg")] ) (define_insn "*one_cmpl_2" [(set (match_operand:GPI 0 "register_operand" "=r") (not:GPI (SHIFT:GPI (match_operand:GPI 1 "register_operand" "r") (match_operand:QI 2 "aarch64_shift_imm_" "n"))))] "" "mvn\\t%0, %1, %2" [(set_attr "type" "logic_shift_imm")] ) (define_insn "*_one_cmpl3" [(set (match_operand:GPI 0 "register_operand" "=r") (LOGICAL:GPI (not:GPI (match_operand:GPI 1 "register_operand" "r")) (match_operand:GPI 2 "register_operand" "r")))] "" "\\t%0, %2, %1" [(set_attr "type" "logic_reg")] ) (define_insn "*and_one_cmpl3_compare0" [(set (reg:CC_NZ CC_REGNUM) (compare:CC_NZ (and:GPI (not:GPI (match_operand:GPI 1 "register_operand" "r")) (match_operand:GPI 2 "register_operand" "r")) (const_int 0))) (set (match_operand:GPI 0 "register_operand" "=r") (and:GPI (not:GPI (match_dup 1)) (match_dup 2)))] "" "bics\\t%0, %2, %1" [(set_attr "type" "logics_reg")] ) ;; zero_extend version of above (define_insn "*and_one_cmplsi3_compare0_uxtw" [(set (reg:CC_NZ CC_REGNUM) (compare:CC_NZ (and:SI (not:SI (match_operand:SI 1 "register_operand" "r")) (match_operand:SI 2 "register_operand" "r")) (const_int 0))) (set (match_operand:DI 0 "register_operand" "=r") (zero_extend:DI (and:SI (not:SI (match_dup 1)) (match_dup 2))))] "" "bics\\t%w0, %w2, %w1" [(set_attr "type" "logics_reg")] ) (define_insn "*_one_cmpl_3" [(set (match_operand:GPI 0 "register_operand" "=r") (LOGICAL:GPI (not:GPI (SHIFT:GPI (match_operand:GPI 1 "register_operand" "r") (match_operand:QI 2 "aarch64_shift_imm_" "n"))) (match_operand:GPI 3 "register_operand" "r")))] "" "\\t%0, %3, %1, %2" [(set_attr "type" "logics_shift_imm")] ) (define_insn "*and_one_cmpl_3_compare0" [(set (reg:CC_NZ CC_REGNUM) (compare:CC_NZ (and:GPI (not:GPI (SHIFT:GPI (match_operand:GPI 1 "register_operand" "r") (match_operand:QI 2 "aarch64_shift_imm_" "n"))) (match_operand:GPI 3 "register_operand" "r")) (const_int 0))) (set (match_operand:GPI 0 "register_operand" "=r") (and:GPI (not:GPI (SHIFT:GPI (match_dup 1) (match_dup 2))) (match_dup 3)))] "" "bics\\t%0, %3, %1, %2" [(set_attr "type" "logics_shift_imm")] ) ;; zero_extend version of above (define_insn "*and_one_cmpl_si3_compare0_uxtw" [(set (reg:CC_NZ CC_REGNUM) (compare:CC_NZ (and:SI (not:SI (SHIFT:SI (match_operand:SI 1 "register_operand" "r") (match_operand:QI 2 "aarch64_shift_imm_si" "n"))) (match_operand:SI 3 "register_operand" "r")) 
(const_int 0))) (set (match_operand:DI 0 "register_operand" "=r") (zero_extend:DI (and:SI (not:SI (SHIFT:SI (match_dup 1) (match_dup 2))) (match_dup 3))))] "" "bics\\t%w0, %w3, %w1, %2" [(set_attr "type" "logics_shift_imm")] ) (define_insn "clz2" [(set (match_operand:GPI 0 "register_operand" "=r") (clz:GPI (match_operand:GPI 1 "register_operand" "r")))] "" "clz\\t%0, %1" [(set_attr "type" "clz")] ) (define_expand "ffs2" [(match_operand:GPI 0 "register_operand") (match_operand:GPI 1 "register_operand")] "" { rtx ccreg = aarch64_gen_compare_reg (EQ, operands[1], const0_rtx); rtx x = gen_rtx_NE (VOIDmode, ccreg, const0_rtx); emit_insn (gen_rbit2 (operands[0], operands[1])); emit_insn (gen_clz2 (operands[0], operands[0])); emit_insn (gen_csinc3_insn (operands[0], x, ccreg, operands[0], const0_rtx)); DONE; } ) (define_insn "clrsb2" [(set (match_operand:GPI 0 "register_operand" "=r") (unspec:GPI [(match_operand:GPI 1 "register_operand" "r")] UNSPEC_CLS))] "" "cls\\t%0, %1" [(set_attr "type" "clz")] ) (define_insn "rbit2" [(set (match_operand:GPI 0 "register_operand" "=r") (unspec:GPI [(match_operand:GPI 1 "register_operand" "r")] UNSPEC_RBIT))] "" "rbit\\t%0, %1" [(set_attr "type" "rbit")] ) (define_expand "ctz2" [(match_operand:GPI 0 "register_operand") (match_operand:GPI 1 "register_operand")] "" { emit_insn (gen_rbit2 (operands[0], operands[1])); emit_insn (gen_clz2 (operands[0], operands[0])); DONE; } ) (define_insn "*and3nr_compare0" [(set (reg:CC_NZ CC_REGNUM) (compare:CC_NZ (and:GPI (match_operand:GPI 0 "register_operand" "%r,r") (match_operand:GPI 1 "aarch64_logical_operand" "r,")) (const_int 0)))] "" "tst\\t%0, %1" [(set_attr "type" "logics_reg")] ) (define_insn "*and_3nr_compare0" [(set (reg:CC_NZ CC_REGNUM) (compare:CC_NZ (and:GPI (SHIFT:GPI (match_operand:GPI 0 "register_operand" "r") (match_operand:QI 1 "aarch64_shift_imm_" "n")) (match_operand:GPI 2 "register_operand" "r")) (const_int 0)))] "" "tst\\t%2, %0, %1" [(set_attr "type" "logics_shift_imm")] ) ;; ------------------------------------------------------------------- ;; Shifts ;; ------------------------------------------------------------------- (define_expand "3" [(set (match_operand:GPI 0 "register_operand") (ASHIFT:GPI (match_operand:GPI 1 "register_operand") (match_operand:QI 2 "nonmemory_operand")))] "" { if (CONST_INT_P (operands[2])) { operands[2] = GEN_INT (INTVAL (operands[2]) & (GET_MODE_BITSIZE (mode) - 1)); if (operands[2] == const0_rtx) { emit_insn (gen_mov (operands[0], operands[1])); DONE; } } } ) (define_expand "ashl3" [(set (match_operand:SHORT 0 "register_operand") (ashift:SHORT (match_operand:SHORT 1 "register_operand") (match_operand:QI 2 "nonmemory_operand")))] "" { if (CONST_INT_P (operands[2])) { operands[2] = GEN_INT (INTVAL (operands[2]) & (GET_MODE_BITSIZE (mode) - 1)); if (operands[2] == const0_rtx) { emit_insn (gen_mov (operands[0], operands[1])); DONE; } } } ) (define_expand "rotr3" [(set (match_operand:GPI 0 "register_operand") (rotatert:GPI (match_operand:GPI 1 "register_operand") (match_operand:QI 2 "nonmemory_operand")))] "" { if (CONST_INT_P (operands[2])) { operands[2] = GEN_INT (INTVAL (operands[2]) & (GET_MODE_BITSIZE (mode) - 1)); if (operands[2] == const0_rtx) { emit_insn (gen_mov (operands[0], operands[1])); DONE; } } } ) (define_expand "rotl3" [(set (match_operand:GPI 0 "register_operand") (rotatert:GPI (match_operand:GPI 1 "register_operand") (match_operand:QI 2 "nonmemory_operand")))] "" { /* (SZ - cnt) % SZ == -cnt % SZ */ if (CONST_INT_P (operands[2])) { operands[2] = GEN_INT 
((-INTVAL (operands[2])) & (GET_MODE_BITSIZE (mode) - 1)); if (operands[2] == const0_rtx) { emit_insn (gen_mov (operands[0], operands[1])); DONE; } } else operands[2] = expand_simple_unop (QImode, NEG, operands[2], NULL_RTX, 1); } ) ;; Logical left shift using SISD or Integer instruction (define_insn "*aarch64_ashl_sisd_or_int_3" [(set (match_operand:GPI 0 "register_operand" "=w,w,r") (ashift:GPI (match_operand:GPI 1 "register_operand" "w,w,r") (match_operand:QI 2 "aarch64_reg_or_shift_imm_" "Us,w,rUs")))] "" "@ shl\t%0, %1, %2 ushl\t%0, %1, %2 lsl\t%0, %1, %2" [(set_attr "simd" "yes,yes,no") (set_attr "type" "neon_shift_imm, neon_shift_reg,shift_reg")] ) ;; Logical right shift using SISD or Integer instruction (define_insn "*aarch64_lshr_sisd_or_int_3" [(set (match_operand:GPI 0 "register_operand" "=w,w,r") (lshiftrt:GPI (match_operand:GPI 1 "register_operand" "w,w,r") (match_operand:QI 2 "aarch64_reg_or_shift_imm_" "Us,w,rUs")))] "" "@ ushr\t%0, %1, %2 # lsr\t%0, %1, %2" [(set_attr "simd" "yes,yes,no") (set_attr "type" "neon_shift_imm,neon_shift_reg,shift_reg")] ) (define_split [(set (match_operand:DI 0 "aarch64_simd_register") (lshiftrt:DI (match_operand:DI 1 "aarch64_simd_register") (match_operand:QI 2 "aarch64_simd_register")))] "TARGET_SIMD && reload_completed" [(set (match_dup 2) (unspec:QI [(match_dup 2)] UNSPEC_SISD_NEG)) (set (match_dup 0) (unspec:DI [(match_dup 1) (match_dup 2)] UNSPEC_SISD_USHL))] "" ) (define_split [(set (match_operand:SI 0 "aarch64_simd_register") (lshiftrt:SI (match_operand:SI 1 "aarch64_simd_register") (match_operand:QI 2 "aarch64_simd_register")))] "TARGET_SIMD && reload_completed" [(set (match_dup 2) (unspec:QI [(match_dup 2)] UNSPEC_SISD_NEG)) (set (match_dup 0) (unspec:SI [(match_dup 1) (match_dup 2)] UNSPEC_USHL_2S))] "" ) ;; Arithmetic right shift using SISD or Integer instruction (define_insn "*aarch64_ashr_sisd_or_int_3" [(set (match_operand:GPI 0 "register_operand" "=w,&w,&w,r") (ashiftrt:GPI (match_operand:GPI 1 "register_operand" "w,w,w,r") (match_operand:QI 2 "aarch64_reg_or_shift_imm_di" "Us,w,0,rUs")))] "" "@ sshr\t%0, %1, %2 # # asr\t%0, %1, %2" [(set_attr "simd" "yes,yes,yes,no") (set_attr "type" "neon_shift_imm,neon_shift_reg,neon_shift_reg,shift_reg")] ) (define_split [(set (match_operand:DI 0 "aarch64_simd_register") (ashiftrt:DI (match_operand:DI 1 "aarch64_simd_register") (match_operand:QI 2 "aarch64_simd_register")))] "TARGET_SIMD && reload_completed" [(set (match_dup 3) (unspec:QI [(match_dup 2)] UNSPEC_SISD_NEG)) (set (match_dup 0) (unspec:DI [(match_dup 1) (match_dup 3)] UNSPEC_SISD_SSHL))] { operands[3] = gen_lowpart (QImode, operands[0]); } ) (define_split [(set (match_operand:SI 0 "aarch64_simd_register") (ashiftrt:SI (match_operand:SI 1 "aarch64_simd_register") (match_operand:QI 2 "aarch64_simd_register")))] "TARGET_SIMD && reload_completed" [(set (match_dup 3) (unspec:QI [(match_dup 2)] UNSPEC_SISD_NEG)) (set (match_dup 0) (unspec:SI [(match_dup 1) (match_dup 3)] UNSPEC_SSHL_2S))] { operands[3] = gen_lowpart (QImode, operands[0]); } ) (define_insn "*aarch64_sisd_ushl" [(set (match_operand:DI 0 "register_operand" "=w") (unspec:DI [(match_operand:DI 1 "register_operand" "w") (match_operand:QI 2 "register_operand" "w")] UNSPEC_SISD_USHL))] "TARGET_SIMD" "ushl\t%d0, %d1, %d2" [(set_attr "simd" "yes") (set_attr "type" "neon_shift_reg")] ) (define_insn "*aarch64_ushl_2s" [(set (match_operand:SI 0 "register_operand" "=w") (unspec:SI [(match_operand:SI 1 "register_operand" "w") (match_operand:QI 2 "register_operand" "w")] 
UNSPEC_USHL_2S))] "TARGET_SIMD" "ushl\t%0.2s, %1.2s, %2.2s" [(set_attr "simd" "yes") (set_attr "type" "neon_shift_reg")] ) (define_insn "*aarch64_sisd_sshl" [(set (match_operand:DI 0 "register_operand" "=w") (unspec:DI [(match_operand:DI 1 "register_operand" "w") (match_operand:QI 2 "register_operand" "w")] UNSPEC_SISD_SSHL))] "TARGET_SIMD" "sshl\t%d0, %d1, %d2" [(set_attr "simd" "yes") (set_attr "type" "neon_shift_reg")] ) (define_insn "*aarch64_sshl_2s" [(set (match_operand:SI 0 "register_operand" "=w") (unspec:SI [(match_operand:SI 1 "register_operand" "w") (match_operand:QI 2 "register_operand" "w")] UNSPEC_SSHL_2S))] "TARGET_SIMD" "sshl\t%0.2s, %1.2s, %2.2s" [(set_attr "simd" "yes") (set_attr "type" "neon_shift_reg")] ) (define_insn "*aarch64_sisd_neg_qi" [(set (match_operand:QI 0 "register_operand" "=w") (unspec:QI [(match_operand:QI 1 "register_operand" "w")] UNSPEC_SISD_NEG))] "TARGET_SIMD" "neg\t%d0, %d1" [(set_attr "simd" "yes") (set_attr "type" "neon_neg")] ) ;; Rotate right (define_insn "*ror3_insn" [(set (match_operand:GPI 0 "register_operand" "=r") (rotatert:GPI (match_operand:GPI 1 "register_operand" "r") (match_operand:QI 2 "aarch64_reg_or_shift_imm_" "rUs")))] "" "ror\\t%0, %1, %2" [(set_attr "type" "shift_reg")] ) ;; zero_extend version of above (define_insn "*si3_insn_uxtw" [(set (match_operand:DI 0 "register_operand" "=r") (zero_extend:DI (SHIFT:SI (match_operand:SI 1 "register_operand" "r") (match_operand:QI 2 "aarch64_reg_or_shift_imm_si" "rUss"))))] "" "\\t%w0, %w1, %w2" [(set_attr "type" "shift_reg")] ) (define_insn "*ashl3_insn" [(set (match_operand:SHORT 0 "register_operand" "=r") (ashift:SHORT (match_operand:SHORT 1 "register_operand" "r") (match_operand:QI 2 "aarch64_reg_or_shift_imm_si" "rUss")))] "" "lsl\\t%0, %1, %2" [(set_attr "type" "shift_reg")] ) (define_insn "*3_insn" [(set (match_operand:SHORT 0 "register_operand" "=r") (ASHIFT:SHORT (match_operand:SHORT 1 "register_operand" "r") (match_operand 2 "const_int_operand" "n")))] "UINTVAL (operands[2]) < GET_MODE_BITSIZE (mode)" { operands[3] = GEN_INT ( - UINTVAL (operands[2])); return "\t%w0, %w1, %2, %3"; } [(set_attr "type" "bfm")] ) (define_insn "*extr5_insn" [(set (match_operand:GPI 0 "register_operand" "=r") (ior:GPI (ashift:GPI (match_operand:GPI 1 "register_operand" "r") (match_operand 3 "const_int_operand" "n")) (lshiftrt:GPI (match_operand:GPI 2 "register_operand" "r") (match_operand 4 "const_int_operand" "n"))))] "UINTVAL (operands[3]) < GET_MODE_BITSIZE (mode) && (UINTVAL (operands[3]) + UINTVAL (operands[4]) == GET_MODE_BITSIZE (mode))" "extr\\t%0, %1, %2, %4" [(set_attr "type" "shift_imm")] ) ;; zero_extend version of the above (define_insn "*extrsi5_insn_uxtw" [(set (match_operand:DI 0 "register_operand" "=r") (zero_extend:DI (ior:SI (ashift:SI (match_operand:SI 1 "register_operand" "r") (match_operand 3 "const_int_operand" "n")) (lshiftrt:SI (match_operand:SI 2 "register_operand" "r") (match_operand 4 "const_int_operand" "n")))))] "UINTVAL (operands[3]) < 32 && (UINTVAL (operands[3]) + UINTVAL (operands[4]) == 32)" "extr\\t%w0, %w1, %w2, %4" [(set_attr "type" "shift_imm")] ) (define_insn "*ror3_insn" [(set (match_operand:GPI 0 "register_operand" "=r") (rotate:GPI (match_operand:GPI 1 "register_operand" "r") (match_operand 2 "const_int_operand" "n")))] "UINTVAL (operands[2]) < GET_MODE_BITSIZE (mode)" { operands[3] = GEN_INT ( - UINTVAL (operands[2])); return "ror\\t%0, %1, %3"; } [(set_attr "type" "shift_imm")] ) ;; zero_extend version of the above (define_insn "*rorsi3_insn_uxtw" [(set 
(match_operand:DI 0 "register_operand" "=r") (zero_extend:DI (rotate:SI (match_operand:SI 1 "register_operand" "r") (match_operand 2 "const_int_operand" "n"))))] "UINTVAL (operands[2]) < 32" { operands[3] = GEN_INT (32 - UINTVAL (operands[2])); return "ror\\t%w0, %w1, %3"; } [(set_attr "type" "shift_imm")] ) (define_insn "*_ashl" [(set (match_operand:GPI 0 "register_operand" "=r") (ANY_EXTEND:GPI (ashift:SHORT (match_operand:SHORT 1 "register_operand" "r") (match_operand 2 "const_int_operand" "n"))))] "UINTVAL (operands[2]) < GET_MODE_BITSIZE (mode)" { operands[3] = GEN_INT ( - UINTVAL (operands[2])); return "bfiz\t%0, %1, %2, %3"; } [(set_attr "type" "bfm")] ) (define_insn "*zero_extend_lshr" [(set (match_operand:GPI 0 "register_operand" "=r") (zero_extend:GPI (lshiftrt:SHORT (match_operand:SHORT 1 "register_operand" "r") (match_operand 2 "const_int_operand" "n"))))] "UINTVAL (operands[2]) < GET_MODE_BITSIZE (mode)" { operands[3] = GEN_INT ( - UINTVAL (operands[2])); return "ubfx\t%0, %1, %2, %3"; } [(set_attr "type" "bfm")] ) (define_insn "*extend_ashr" [(set (match_operand:GPI 0 "register_operand" "=r") (sign_extend:GPI (ashiftrt:SHORT (match_operand:SHORT 1 "register_operand" "r") (match_operand 2 "const_int_operand" "n"))))] "UINTVAL (operands[2]) < GET_MODE_BITSIZE (mode)" { operands[3] = GEN_INT ( - UINTVAL (operands[2])); return "sbfx\\t%0, %1, %2, %3"; } [(set_attr "type" "bfm")] ) ;; ------------------------------------------------------------------- ;; Bitfields ;; ------------------------------------------------------------------- (define_expand "" [(set (match_operand:DI 0 "register_operand" "=r") (ANY_EXTRACT:DI (match_operand:DI 1 "register_operand" "r") (match_operand 2 "const_int_operand" "n") (match_operand 3 "const_int_operand" "n")))] "" "" ) (define_insn "*" [(set (match_operand:GPI 0 "register_operand" "=r") (ANY_EXTRACT:GPI (match_operand:GPI 1 "register_operand" "r") (match_operand 2 "const_int_operand" "n") (match_operand 3 "const_int_operand" "n")))] "" "bfx\\t%0, %1, %3, %2" [(set_attr "type" "bfm")] ) ;; Bitfield Insert (insv) (define_expand "insv" [(set (zero_extract:GPI (match_operand:GPI 0 "register_operand") (match_operand 1 "const_int_operand") (match_operand 2 "const_int_operand")) (match_operand:GPI 3 "general_operand"))] "" { unsigned HOST_WIDE_INT width = UINTVAL (operands[1]); unsigned HOST_WIDE_INT pos = UINTVAL (operands[2]); rtx value = operands[3]; if (width == 0 || (pos + width) > GET_MODE_BITSIZE (mode)) FAIL; if (CONST_INT_P (value)) { unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT)1 << width) - 1; /* Prefer AND/OR for inserting all zeros or all ones. */ if ((UINTVAL (value) & mask) == 0 || (UINTVAL (value) & mask) == mask) FAIL; /* 16-bit aligned 16-bit wide insert is handled by insv_imm. 
*/ if (width == 16 && (pos % 16) == 0) DONE; } operands[3] = force_reg (mode, value); }) (define_insn "*insv_reg" [(set (zero_extract:GPI (match_operand:GPI 0 "register_operand" "+r") (match_operand 1 "const_int_operand" "n") (match_operand 2 "const_int_operand" "n")) (match_operand:GPI 3 "register_operand" "r"))] "!(UINTVAL (operands[1]) == 0 || (UINTVAL (operands[2]) + UINTVAL (operands[1]) > GET_MODE_BITSIZE (mode)))" "bfi\\t%0, %3, %2, %1" [(set_attr "type" "bfm")] ) (define_insn "*extr_insv_lower_reg" [(set (zero_extract:GPI (match_operand:GPI 0 "register_operand" "+r") (match_operand 1 "const_int_operand" "n") (const_int 0)) (zero_extract:GPI (match_operand:GPI 2 "register_operand" "+r") (match_dup 1) (match_operand 3 "const_int_operand" "n")))] "!(UINTVAL (operands[1]) == 0 || (UINTVAL (operands[3]) + UINTVAL (operands[1]) > GET_MODE_BITSIZE (mode)))" "bfxil\\t%0, %2, %3, %1" [(set_attr "type" "bfm")] ) (define_insn "*_shft_" [(set (match_operand:GPI 0 "register_operand" "=r") (ashift:GPI (ANY_EXTEND:GPI (match_operand:ALLX 1 "register_operand" "r")) (match_operand 2 "const_int_operand" "n")))] "UINTVAL (operands[2]) < " { operands[3] = ( <= ( - UINTVAL (operands[2]))) ? GEN_INT () : GEN_INT ( - UINTVAL (operands[2])); return "bfiz\t%0, %1, %2, %3"; } [(set_attr "type" "bfm")] ) ;; XXX We should match (any_extend (ashift)) here, like (and (ashift)) below (define_insn "*andim_ashift_bfiz" [(set (match_operand:GPI 0 "register_operand" "=r") (and:GPI (ashift:GPI (match_operand:GPI 1 "register_operand" "r") (match_operand 2 "const_int_operand" "n")) (match_operand 3 "const_int_operand" "n")))] "exact_log2 ((INTVAL (operands[3]) >> INTVAL (operands[2])) + 1) >= 0 && (INTVAL (operands[3]) & ((1 << INTVAL (operands[2])) - 1)) == 0" "ubfiz\\t%0, %1, %2, %P3" [(set_attr "type" "bfm")] ) (define_insn "bswap2" [(set (match_operand:GPI 0 "register_operand" "=r") (bswap:GPI (match_operand:GPI 1 "register_operand" "r")))] "" "rev\\t%0, %1" [(set_attr "type" "rev")] ) (define_insn "bswaphi2" [(set (match_operand:HI 0 "register_operand" "=r") (bswap:HI (match_operand:HI 1 "register_operand" "r")))] "" "rev16\\t%w0, %w1" [(set_attr "type" "rev")] ) ;; zero_extend version of above (define_insn "*bswapsi2_uxtw" [(set (match_operand:DI 0 "register_operand" "=r") (zero_extend:DI (bswap:SI (match_operand:SI 1 "register_operand" "r"))))] "" "rev\\t%w0, %w1" [(set_attr "type" "rev")] ) ;; ------------------------------------------------------------------- ;; Floating-point intrinsics ;; ------------------------------------------------------------------- ;; frint floating-point round to integral standard patterns. ;; Expands to btrunc, ceil, floor, nearbyint, rint, round. (define_insn "2" [(set (match_operand:GPF 0 "register_operand" "=w") (unspec:GPF [(match_operand:GPF 1 "register_operand" "w")] FRINT))] "TARGET_FLOAT" "frint\\t%0, %1" [(set_attr "type" "f_rint")] ) ;; frcvt floating-point round to integer and convert standard patterns. ;; Expands to lbtrunc, lceil, lfloor, lround. 
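;; For illustration only (this comment adds no patterns): a C fragment
;; along the lines of
;;
;;   long to_nearest (double x) { return __builtin_lround (x); }
;;
;; built with -fno-math-errno is expected to match the lround form of the
;; pattern below and compile to a single "fcvtas x0, d0"; the lfloor and
;; lceil forms correspond to FCVTMS and FCVTPS in the same way.  The
;; function name above is made up for the example.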
(define_insn "l2" [(set (match_operand:GPI 0 "register_operand" "=r") (FIXUORS:GPI (unspec:GPF [(match_operand:GPF 1 "register_operand" "w")] FCVT)))] "TARGET_FLOAT" "fcvt\\t%0, %1" [(set_attr "type" "f_cvtf2i")] ) ;; fma - no throw (define_insn "fma4" [(set (match_operand:GPF 0 "register_operand" "=w") (fma:GPF (match_operand:GPF 1 "register_operand" "w") (match_operand:GPF 2 "register_operand" "w") (match_operand:GPF 3 "register_operand" "w")))] "TARGET_FLOAT" "fmadd\\t%0, %1, %2, %3" [(set_attr "type" "fmac")] ) (define_insn "fnma4" [(set (match_operand:GPF 0 "register_operand" "=w") (fma:GPF (neg:GPF (match_operand:GPF 1 "register_operand" "w")) (match_operand:GPF 2 "register_operand" "w") (match_operand:GPF 3 "register_operand" "w")))] "TARGET_FLOAT" "fmsub\\t%0, %1, %2, %3" [(set_attr "type" "fmac")] ) (define_insn "fms4" [(set (match_operand:GPF 0 "register_operand" "=w") (fma:GPF (match_operand:GPF 1 "register_operand" "w") (match_operand:GPF 2 "register_operand" "w") (neg:GPF (match_operand:GPF 3 "register_operand" "w"))))] "TARGET_FLOAT" "fnmsub\\t%0, %1, %2, %3" [(set_attr "type" "fmac")] ) (define_insn "fnms4" [(set (match_operand:GPF 0 "register_operand" "=w") (fma:GPF (neg:GPF (match_operand:GPF 1 "register_operand" "w")) (match_operand:GPF 2 "register_operand" "w") (neg:GPF (match_operand:GPF 3 "register_operand" "w"))))] "TARGET_FLOAT" "fnmadd\\t%0, %1, %2, %3" [(set_attr "type" "fmac")] ) ;; If signed zeros are ignored, -(a * b + c) = -a * b - c. (define_insn "*fnmadd4" [(set (match_operand:GPF 0 "register_operand" "=w") (neg:GPF (fma:GPF (match_operand:GPF 1 "register_operand" "w") (match_operand:GPF 2 "register_operand" "w") (match_operand:GPF 3 "register_operand" "w"))))] "!HONOR_SIGNED_ZEROS (mode) && TARGET_FLOAT" "fnmadd\\t%0, %1, %2, %3" [(set_attr "type" "fmac")] ) ;; ------------------------------------------------------------------- ;; Floating-point conversions ;; ------------------------------------------------------------------- (define_insn "extendsfdf2" [(set (match_operand:DF 0 "register_operand" "=w") (float_extend:DF (match_operand:SF 1 "register_operand" "w")))] "TARGET_FLOAT" "fcvt\\t%d0, %s1" [(set_attr "type" "f_cvt")] ) (define_insn "truncdfsf2" [(set (match_operand:SF 0 "register_operand" "=w") (float_truncate:SF (match_operand:DF 1 "register_operand" "w")))] "TARGET_FLOAT" "fcvt\\t%s0, %d1" [(set_attr "type" "f_cvt")] ) (define_insn "fix_trunc2" [(set (match_operand:GPI 0 "register_operand" "=r") (fix:GPI (match_operand:GPF 1 "register_operand" "w")))] "TARGET_FLOAT" "fcvtzs\\t%0, %1" [(set_attr "type" "f_cvtf2i")] ) (define_insn "fixuns_trunc2" [(set (match_operand:GPI 0 "register_operand" "=r") (unsigned_fix:GPI (match_operand:GPF 1 "register_operand" "w")))] "TARGET_FLOAT" "fcvtzu\\t%0, %1" [(set_attr "type" "f_cvtf2i")] ) (define_insn "float2" [(set (match_operand:GPF 0 "register_operand" "=w") (float:GPF (match_operand:GPI 1 "register_operand" "r")))] "TARGET_FLOAT" "scvtf\\t%0, %1" [(set_attr "type" "f_cvti2f")] ) (define_insn "floatuns2" [(set (match_operand:GPF 0 "register_operand" "=w") (unsigned_float:GPF (match_operand:GPI 1 "register_operand" "r")))] "TARGET_FLOAT" "ucvtf\\t%0, %1" [(set_attr "type" "f_cvt")] ) ;; ------------------------------------------------------------------- ;; Floating-point arithmetic ;; ------------------------------------------------------------------- (define_insn "add3" [(set (match_operand:GPF 0 "register_operand" "=w") (plus:GPF (match_operand:GPF 1 "register_operand" "w") (match_operand:GPF 2 
"register_operand" "w")))] "TARGET_FLOAT" "fadd\\t%0, %1, %2" [(set_attr "type" "fadd")] ) (define_insn "sub3" [(set (match_operand:GPF 0 "register_operand" "=w") (minus:GPF (match_operand:GPF 1 "register_operand" "w") (match_operand:GPF 2 "register_operand" "w")))] "TARGET_FLOAT" "fsub\\t%0, %1, %2" [(set_attr "type" "fadd")] ) (define_insn "mul3" [(set (match_operand:GPF 0 "register_operand" "=w") (mult:GPF (match_operand:GPF 1 "register_operand" "w") (match_operand:GPF 2 "register_operand" "w")))] "TARGET_FLOAT" "fmul\\t%0, %1, %2" [(set_attr "type" "fmul")] ) (define_insn "*fnmul3" [(set (match_operand:GPF 0 "register_operand" "=w") (mult:GPF (neg:GPF (match_operand:GPF 1 "register_operand" "w")) (match_operand:GPF 2 "register_operand" "w")))] "TARGET_FLOAT" "fnmul\\t%0, %1, %2" [(set_attr "type" "fmul")] ) (define_insn "div3" [(set (match_operand:GPF 0 "register_operand" "=w") (div:GPF (match_operand:GPF 1 "register_operand" "w") (match_operand:GPF 2 "register_operand" "w")))] "TARGET_FLOAT" "fdiv\\t%0, %1, %2" [(set_attr "type" "fdiv")] ) (define_insn "neg2" [(set (match_operand:GPF 0 "register_operand" "=w") (neg:GPF (match_operand:GPF 1 "register_operand" "w")))] "TARGET_FLOAT" "fneg\\t%0, %1" [(set_attr "type" "ffarith")] ) (define_insn "sqrt2" [(set (match_operand:GPF 0 "register_operand" "=w") (sqrt:GPF (match_operand:GPF 1 "register_operand" "w")))] "TARGET_FLOAT" "fsqrt\\t%0, %1" [(set_attr "type" "fsqrt")] ) (define_insn "abs2" [(set (match_operand:GPF 0 "register_operand" "=w") (abs:GPF (match_operand:GPF 1 "register_operand" "w")))] "TARGET_FLOAT" "fabs\\t%0, %1" [(set_attr "type" "ffarith")] ) ;; Given that smax/smin do not specify the result when either input is NaN, ;; we could use either FMAXNM or FMAX for smax, and either FMINNM or FMIN ;; for smin. (define_insn "smax3" [(set (match_operand:GPF 0 "register_operand" "=w") (smax:GPF (match_operand:GPF 1 "register_operand" "w") (match_operand:GPF 2 "register_operand" "w")))] "TARGET_FLOAT" "fmaxnm\\t%0, %1, %2" [(set_attr "type" "f_minmax")] ) (define_insn "smin3" [(set (match_operand:GPF 0 "register_operand" "=w") (smin:GPF (match_operand:GPF 1 "register_operand" "w") (match_operand:GPF 2 "register_operand" "w")))] "TARGET_FLOAT" "fminnm\\t%0, %1, %2" [(set_attr "type" "f_minmax")] ) ;; ------------------------------------------------------------------- ;; Reload support ;; ------------------------------------------------------------------- (define_expand "aarch64_reload_mov" [(set (match_operand:TX 0 "register_operand" "=w") (match_operand:TX 1 "register_operand" "w")) (clobber (match_operand:DI 2 "register_operand" "=&r")) ] "" { rtx op0 = simplify_gen_subreg (TImode, operands[0], mode, 0); rtx op1 = simplify_gen_subreg (TImode, operands[1], mode, 0); gen_aarch64_movtilow_tilow (op0, op1); gen_aarch64_movdi_tihigh (operands[2], op1); gen_aarch64_movtihigh_di (op0, operands[2]); DONE; } ) ;; The following secondary reload helpers patterns are invoked ;; after or during reload as we don't want these patterns to start ;; kicking in during the combiner. 
(define_insn "aarch64_movdi_low" [(set (match_operand:DI 0 "register_operand" "=r") (truncate:DI (match_operand:TX 1 "register_operand" "w")))] "reload_completed || reload_in_progress" "fmov\\t%x0, %d1" [(set_attr "type" "f_mrc") (set_attr "length" "4") ]) (define_insn "aarch64_movdi_high" [(set (match_operand:DI 0 "register_operand" "=r") (truncate:DI (lshiftrt:TX (match_operand:TX 1 "register_operand" "w") (const_int 64))))] "reload_completed || reload_in_progress" "fmov\\t%x0, %1.d[1]" [(set_attr "type" "f_mrc") (set_attr "length" "4") ]) (define_insn "aarch64_movhigh_di" [(set (zero_extract:TX (match_operand:TX 0 "register_operand" "+w") (const_int 64) (const_int 64)) (zero_extend:TX (match_operand:DI 1 "register_operand" "r")))] "reload_completed || reload_in_progress" "fmov\\t%0.d[1], %x1" [(set_attr "type" "f_mcr") (set_attr "length" "4") ]) (define_insn "aarch64_movlow_di" [(set (match_operand:TX 0 "register_operand" "=w") (zero_extend:TX (match_operand:DI 1 "register_operand" "r")))] "reload_completed || reload_in_progress" "fmov\\t%d0, %x1" [(set_attr "type" "f_mcr") (set_attr "length" "4") ]) (define_insn "aarch64_movtilow_tilow" [(set (match_operand:TI 0 "register_operand" "=w") (zero_extend:TI (truncate:DI (match_operand:TI 1 "register_operand" "w"))))] "reload_completed || reload_in_progress" "fmov\\t%d0, %d1" [(set_attr "type" "f_mcr") (set_attr "length" "4") ]) ;; There is a deliberate reason why the parameters of high and lo_sum's ;; don't have modes for ADRP and ADD instructions. This is to allow high ;; and lo_sum's to be used with the labels defining the jump tables in ;; rodata section. (define_expand "add_losym" [(set (match_operand 0 "register_operand" "=r") (lo_sum (match_operand 1 "register_operand" "r") (match_operand 2 "aarch64_valid_symref" "S")))] "" { enum machine_mode mode = GET_MODE (operands[0]); emit_insn ((mode == DImode ? gen_add_losym_di : gen_add_losym_si) (operands[0], operands[1], operands[2])); DONE; }) (define_insn "add_losym_" [(set (match_operand:P 0 "register_operand" "=r") (lo_sum:P (match_operand:P 1 "register_operand" "r") (match_operand 2 "aarch64_valid_symref" "S")))] "" "add\\t%0, %1, :lo12:%a2" [(set_attr "type" "alu_reg")] ) (define_insn "ldr_got_small_" [(set (match_operand:PTR 0 "register_operand" "=r") (unspec:PTR [(mem:PTR (lo_sum:PTR (match_operand:PTR 1 "register_operand" "r") (match_operand:PTR 2 "aarch64_valid_symref" "S")))] UNSPEC_GOTSMALLPIC))] "" "ldr\\t%0, [%1, #:got_lo12:%a2]" [(set_attr "type" "load1")] ) (define_insn "ldr_got_small_sidi" [(set (match_operand:DI 0 "register_operand" "=r") (zero_extend:DI (unspec:SI [(mem:SI (lo_sum:DI (match_operand:DI 1 "register_operand" "r") (match_operand:DI 2 "aarch64_valid_symref" "S")))] UNSPEC_GOTSMALLPIC)))] "TARGET_ILP32" "ldr\\t%w0, [%1, #:got_lo12:%a2]" [(set_attr "type" "load1")] ) (define_insn "ldr_got_tiny" [(set (match_operand:DI 0 "register_operand" "=r") (unspec:DI [(match_operand:DI 1 "aarch64_valid_symref" "S")] UNSPEC_GOTTINYPIC))] "" "ldr\\t%0, %L1" [(set_attr "type" "load1")] ) (define_insn "aarch64_load_tp_hard" [(set (match_operand:DI 0 "register_operand" "=r") (unspec:DI [(const_int 0)] UNSPEC_TLS))] "" "mrs\\t%0, tpidr_el0" [(set_attr "type" "mrs")] ) ;; The TLS ABI specifically requires that the compiler does not schedule ;; instructions in the TLS stubs, in order to enable linker relaxation. ;; Therefore we treat the stubs as an atomic sequence. 
(define_expand "tlsgd_small" [(parallel [(set (match_operand 0 "register_operand" "") (call (mem:DI (match_dup 2)) (const_int 1))) (unspec:DI [(match_operand:DI 1 "aarch64_valid_symref" "")] UNSPEC_GOTSMALLTLS) (clobber (reg:DI LR_REGNUM))])] "" { operands[2] = aarch64_tls_get_addr (); }) (define_insn "*tlsgd_small" [(set (match_operand 0 "register_operand" "") (call (mem:DI (match_operand:DI 2 "" "")) (const_int 1))) (unspec:DI [(match_operand:DI 1 "aarch64_valid_symref" "S")] UNSPEC_GOTSMALLTLS) (clobber (reg:DI LR_REGNUM)) ] "" "adrp\\tx0, %A1\;add\\tx0, x0, %L1\;bl\\t%2\;nop" [(set_attr "type" "call") (set_attr "length" "16")]) (define_insn "tlsie_small" [(set (match_operand:DI 0 "register_operand" "=r") (unspec:DI [(match_operand:DI 1 "aarch64_tls_ie_symref" "S")] UNSPEC_GOTSMALLTLS))] "" "adrp\\t%0, %A1\;ldr\\t%0, [%0, #%L1]" [(set_attr "type" "load1") (set_attr "length" "8")] ) (define_insn "tlsle_small" [(set (match_operand:DI 0 "register_operand" "=r") (unspec:DI [(match_operand:DI 1 "register_operand" "r") (match_operand:DI 2 "aarch64_tls_le_symref" "S")] UNSPEC_GOTSMALLTLS))] "" "add\\t%0, %1, #%G2\;add\\t%0, %0, #%L2" [(set_attr "type" "alu_reg") (set_attr "length" "8")] ) (define_insn "tlsdesc_small" [(set (reg:DI R0_REGNUM) (unspec:DI [(match_operand:DI 0 "aarch64_valid_symref" "S")] UNSPEC_TLSDESC)) (clobber (reg:DI LR_REGNUM)) (clobber (reg:CC CC_REGNUM)) (clobber (match_scratch:DI 1 "=r"))] "TARGET_TLS_DESC" "adrp\\tx0, %A0\;ldr\\t%1, [x0, #%L0]\;add\\tx0, x0, %L0\;.tlsdesccall\\t%0\;blr\\t%1" [(set_attr "type" "call") (set_attr "length" "16")]) (define_insn "stack_tie" [(set (mem:BLK (scratch)) (unspec:BLK [(match_operand:DI 0 "register_operand" "rk") (match_operand:DI 1 "register_operand" "rk")] UNSPEC_PRLG_STK))] "" "" [(set_attr "length" "0")] ) ;; Named pattern for expanding thread pointer reference. (define_expand "get_thread_pointerdi" [(match_operand:DI 0 "register_operand" "=r")] "" { rtx tmp = aarch64_load_tp (operands[0]); if (tmp != operands[0]) emit_move_insn (operands[0], tmp); DONE; }) ;; AdvSIMD Stuff (include "aarch64-simd.md") ;; Atomic Operations (include "atomics.md")