aboutsummaryrefslogtreecommitdiffstats
path: root/gcc-4.9/gcc/config/mips
diff options
context:
space:
mode:
Diffstat (limited to 'gcc-4.9/gcc/config/mips')
-rw-r--r--gcc-4.9/gcc/config/mips/10000.md251
-rw-r--r--gcc-4.9/gcc/config/mips/20kc.md284
-rw-r--r--gcc-4.9/gcc/config/mips/24k.md545
-rw-r--r--gcc-4.9/gcc/config/mips/3000.md71
-rw-r--r--gcc-4.9/gcc/config/mips/4000.md32
-rw-r--r--gcc-4.9/gcc/config/mips/4100.md51
-rw-r--r--gcc-4.9/gcc/config/mips/4130.md153
-rw-r--r--gcc-4.9/gcc/config/mips/4300.md85
-rw-r--r--gcc-4.9/gcc/config/mips/4600.md130
-rw-r--r--gcc-4.9/gcc/config/mips/4k.md153
-rw-r--r--gcc-4.9/gcc/config/mips/5000.md80
-rw-r--r--gcc-4.9/gcc/config/mips/5400.md184
-rw-r--r--gcc-4.9/gcc/config/mips/5500.md227
-rw-r--r--gcc-4.9/gcc/config/mips/5k.md229
-rw-r--r--gcc-4.9/gcc/config/mips/6000.md56
-rw-r--r--gcc-4.9/gcc/config/mips/7000.md214
-rw-r--r--gcc-4.9/gcc/config/mips/74k.md501
-rw-r--r--gcc-4.9/gcc/config/mips/9000.md151
-rw-r--r--gcc-4.9/gcc/config/mips/constraints.md363
-rw-r--r--gcc-4.9/gcc/config/mips/driver-native.c89
-rw-r--r--gcc-4.9/gcc/config/mips/elf.h50
-rw-r--r--gcc-4.9/gcc/config/mips/elfoabi.h39
-rw-r--r--gcc-4.9/gcc/config/mips/elforion.h20
-rw-r--r--gcc-4.9/gcc/config/mips/generic.md121
-rwxr-xr-xgcc-4.9/gcc/config/mips/genopt.sh123
-rw-r--r--gcc-4.9/gcc/config/mips/gnu-user.h139
-rw-r--r--gcc-4.9/gcc/config/mips/gnu-user64.h52
-rw-r--r--gcc-4.9/gcc/config/mips/linux-common.h64
-rw-r--r--gcc-4.9/gcc/config/mips/linux.h25
-rw-r--r--gcc-4.9/gcc/config/mips/linux64.h44
-rw-r--r--gcc-4.9/gcc/config/mips/loongson.h690
-rw-r--r--gcc-4.9/gcc/config/mips/loongson.md939
-rw-r--r--gcc-4.9/gcc/config/mips/loongson2ef.md252
-rw-r--r--gcc-4.9/gcc/config/mips/loongson3a.md137
-rw-r--r--gcc-4.9/gcc/config/mips/micromips.md138
-rw-r--r--gcc-4.9/gcc/config/mips/mips-cpus.def154
-rw-r--r--gcc-4.9/gcc/config/mips/mips-dsp.md1205
-rw-r--r--gcc-4.9/gcc/config/mips/mips-dspr2.md632
-rw-r--r--gcc-4.9/gcc/config/mips/mips-fixed.md156
-rw-r--r--gcc-4.9/gcc/config/mips/mips-ftypes.def129
-rw-r--r--gcc-4.9/gcc/config/mips/mips-modes.def48
-rw-r--r--gcc-4.9/gcc/config/mips/mips-opts.h50
-rw-r--r--gcc-4.9/gcc/config/mips/mips-protos.h362
-rw-r--r--gcc-4.9/gcc/config/mips/mips-ps-3d.md764
-rw-r--r--gcc-4.9/gcc/config/mips/mips-tables.opt644
-rw-r--r--gcc-4.9/gcc/config/mips/mips.c19139
-rw-r--r--gcc-4.9/gcc/config/mips/mips.h3005
-rw-r--r--gcc-4.9/gcc/config/mips/mips.md7190
-rw-r--r--gcc-4.9/gcc/config/mips/mips.opt404
-rw-r--r--gcc-4.9/gcc/config/mips/mti-elf.h42
-rw-r--r--gcc-4.9/gcc/config/mips/mti-linux.h46
-rw-r--r--gcc-4.9/gcc/config/mips/n32-elf.h35
-rw-r--r--gcc-4.9/gcc/config/mips/netbsd.h179
-rw-r--r--gcc-4.9/gcc/config/mips/octeon.md136
-rw-r--r--gcc-4.9/gcc/config/mips/predicates.md494
-rw-r--r--gcc-4.9/gcc/config/mips/r3900.h39
-rw-r--r--gcc-4.9/gcc/config/mips/rtems.h34
-rw-r--r--gcc-4.9/gcc/config/mips/sb1.md573
-rw-r--r--gcc-4.9/gcc/config/mips/sde.h103
-rw-r--r--gcc-4.9/gcc/config/mips/sde.opt28
-rw-r--r--gcc-4.9/gcc/config/mips/sdemtk.h102
-rw-r--r--gcc-4.9/gcc/config/mips/sr71k.md337
-rw-r--r--gcc-4.9/gcc/config/mips/st.h30
-rw-r--r--gcc-4.9/gcc/config/mips/sync.md716
-rw-r--r--gcc-4.9/gcc/config/mips/t-elf23
-rw-r--r--gcc-4.9/gcc/config/mips/t-irix64
-rw-r--r--gcc-4.9/gcc/config/mips/t-isa326433
-rw-r--r--gcc-4.9/gcc/config/mips/t-linux6426
-rw-r--r--gcc-4.9/gcc/config/mips/t-mips22
-rw-r--r--gcc-4.9/gcc/config/mips/t-mti-elf50
-rw-r--r--gcc-4.9/gcc/config/mips/t-mti-linux50
-rw-r--r--gcc-4.9/gcc/config/mips/t-r390023
-rw-r--r--gcc-4.9/gcc/config/mips/t-rtems34
-rw-r--r--gcc-4.9/gcc/config/mips/t-sb162
-rw-r--r--gcc-4.9/gcc/config/mips/t-sde37
-rw-r--r--gcc-4.9/gcc/config/mips/t-sdemtk40
-rw-r--r--gcc-4.9/gcc/config/mips/t-sr71k21
-rw-r--r--gcc-4.9/gcc/config/mips/t-st30
-rw-r--r--gcc-4.9/gcc/config/mips/t-vr106
-rw-r--r--gcc-4.9/gcc/config/mips/t-vxworks35
-rw-r--r--gcc-4.9/gcc/config/mips/vr.h58
-rw-r--r--gcc-4.9/gcc/config/mips/vxworks.h78
-rw-r--r--gcc-4.9/gcc/config/mips/x-native3
-rw-r--r--gcc-4.9/gcc/config/mips/xlp.md213
-rw-r--r--gcc-4.9/gcc/config/mips/xlr.md94
85 files changed, 44500 insertions, 0 deletions
diff --git a/gcc-4.9/gcc/config/mips/10000.md b/gcc-4.9/gcc/config/mips/10000.md
new file mode 100644
index 000000000..74dd3eca0
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/10000.md
@@ -0,0 +1,251 @@
+;; DFA-based pipeline description for the VR1x000.
+;; Copyright (C) 2005-2014 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 3, or (at your
+;; option) any later version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+
+;; R12K/R14K/R16K are derivatives of R10K, thus copy its description
+;; until specific tuning for each is added.
+
+;; R10000 has an int queue, fp queue, address queue.
+;; The int queue feeds ALU1 and ALU2.
+;; The fp queue feeds the fp-adder and fp-multiplier.
+;; The addr queue feeds the Load/Store unit.
+;;
+;; However, we define the fp-adder and fp-multiplier as
+;; separate automatons, because the fp-multiplier is
+;; divided into fp-multiplier, fp-division, and
+;; fp-squareroot units, all of which share the same
+;; issue and completion logic, yet can operate in
+;; parallel.
+;;
+;; This is based on the model described in the R10K Manual
+;; and it helps to reduce the size of the automata.
+(define_automaton "r10k_a_int, r10k_a_fpadder, r10k_a_addr,
+ r10k_a_fpmpy, r10k_a_fpdiv, r10k_a_fpsqrt")
+
+(define_cpu_unit "r10k_alu1" "r10k_a_int")
+(define_cpu_unit "r10k_alu2" "r10k_a_int")
+(define_cpu_unit "r10k_fpadd" "r10k_a_fpadder")
+(define_cpu_unit "r10k_fpmpy" "r10k_a_fpmpy")
+(define_cpu_unit "r10k_fpdiv" "r10k_a_fpdiv")
+(define_cpu_unit "r10k_fpsqrt" "r10k_a_fpsqrt")
+(define_cpu_unit "r10k_loadstore" "r10k_a_addr")
+
+
+;; R10k Loads and Stores.
+(define_insn_reservation "r10k_load" 2
+ (and (eq_attr "cpu" "r10000")
+ (eq_attr "type" "load,prefetch,prefetchx"))
+ "r10k_loadstore")
+
+(define_insn_reservation "r10k_store" 0
+ (and (eq_attr "cpu" "r10000")
+ (eq_attr "type" "store,fpstore,fpidxstore"))
+ "r10k_loadstore")
+
+(define_insn_reservation "r10k_fpload" 3
+ (and (eq_attr "cpu" "r10000")
+ (eq_attr "type" "fpload,fpidxload"))
+ "r10k_loadstore")
+
+
+;; Integer add/sub + logic ops, and mt hi/lo can be done by alu1 or alu2.
+;; Miscellaneous arith goes here too (this is a guess).
+(define_insn_reservation "r10k_arith" 1
+ (and (eq_attr "cpu" "r10000")
+ (eq_attr "type" "arith,mthi,mtlo,slt,clz,const,nop,trap,logical"))
+ "r10k_alu1 | r10k_alu2")
+
+;; We treat mfhilo differently, because we need to know when
+;; it's HI and when it's LO.
+(define_insn_reservation "r10k_mfhi" 1
+ (and (eq_attr "cpu" "r10000")
+ (eq_attr "type" "mfhi"))
+ "r10k_alu1 | r10k_alu2")
+
+(define_insn_reservation "r10k_mflo" 1
+ (and (eq_attr "cpu" "r10000")
+ (eq_attr "type" "mflo"))
+ "r10k_alu1 | r10k_alu2")
+
+
+;; ALU1 handles shifts, branch eval, and condmove.
+;;
+;; Brancher is separate, but part of ALU1, but can only
+;; do one branch per cycle (is this even implementable?).
+;;
+;; Unsure if the brancher handles jumps and calls as well, but since
+;; they're related, we'll add them here for now.
+(define_insn_reservation "r10k_brancher" 1
+ (and (eq_attr "cpu" "r10000")
+ (eq_attr "type" "shift,branch,jump,call"))
+ "r10k_alu1")
+
+(define_insn_reservation "r10k_int_cmove" 1
+ (and (eq_attr "cpu" "r10000")
+ (and (eq_attr "type" "condmove")
+ (eq_attr "mode" "SI,DI")))
+ "r10k_alu1")
+
+
+;; Coprocessor Moves.
+;; mtc1/dmtc1 are handled by ALU1.
+;; mfc1/dmfc1 are handled by the fp-multiplier.
+(define_insn_reservation "r10k_mt_xfer" 3
+ (and (eq_attr "cpu" "r10000")
+ (eq_attr "type" "mtc"))
+ "r10k_alu1")
+
+(define_insn_reservation "r10k_mf_xfer" 2
+ (and (eq_attr "cpu" "r10000")
+ (eq_attr "type" "mfc"))
+ "r10k_fpmpy")
+
+
+;; Only ALU2 does int multiplications and divisions.
+;;
+;; According to the Vr10000 series user manual,
+;; integer mult and div insns can be issued one
+;; cycle earlier if using register Lo. We model
+;; this by using the Lo value by default, as it
+;; is the more common value, and use a bypass
+;; for the Hi value when needed.
+;;
+;; Also of note, there are different latencies
+;; for MULT/DMULT (Lo 5/Hi 6) and MULTU/DMULTU (Lo 6/Hi 7).
+;; However, gcc does not have separate types
+;; for these insns. Thus to strike a balance,
+;; we use the Hi latency value for imul
+;; operations until the imul type can be split.
+(define_insn_reservation "r10k_imul_single" 6
+ (and (eq_attr "cpu" "r10000")
+ (and (eq_attr "type" "imul,imul3")
+ (eq_attr "mode" "SI")))
+ "r10k_alu2 * 6")
+
+(define_insn_reservation "r10k_imul_double" 10
+ (and (eq_attr "cpu" "r10000")
+ (and (eq_attr "type" "imul,imul3")
+ (eq_attr "mode" "DI")))
+ "r10k_alu2 * 10")
+
+;; Divides keep ALU2 busy.
+(define_insn_reservation "r10k_idiv_single" 34
+ (and (eq_attr "cpu" "r10000")
+ (and (eq_attr "type" "idiv")
+ (eq_attr "mode" "SI")))
+ "r10k_alu2 * 35")
+
+(define_insn_reservation "r10k_idiv_double" 66
+ (and (eq_attr "cpu" "r10000")
+ (and (eq_attr "type" "idiv")
+ (eq_attr "mode" "DI")))
+ "r10k_alu2 * 67")
+
+(define_bypass 35 "r10k_idiv_single" "r10k_mfhi")
+(define_bypass 67 "r10k_idiv_double" "r10k_mfhi")
+
+
+;; Floating point add/sub, mul, abs value, neg, comp, & moves.
+(define_insn_reservation "r10k_fp_miscadd" 2
+ (and (eq_attr "cpu" "r10000")
+ (eq_attr "type" "fadd,fabs,fneg,fcmp"))
+ "r10k_fpadd")
+
+(define_insn_reservation "r10k_fp_miscmul" 2
+ (and (eq_attr "cpu" "r10000")
+ (eq_attr "type" "fmul,fmove"))
+ "r10k_fpmpy")
+
+(define_insn_reservation "r10k_fp_cmove" 2
+ (and (eq_attr "cpu" "r10000")
+ (and (eq_attr "type" "condmove")
+ (eq_attr "mode" "SF,DF")))
+ "r10k_fpmpy")
+
+
+;; The fcvt.s.[wl] insn has latency 4, repeat 2.
+;; All other fcvt insns have latency 2, repeat 1.
+(define_insn_reservation "r10k_fcvt_single" 4
+ (and (eq_attr "cpu" "r10000")
+ (and (eq_attr "type" "fcvt")
+ (eq_attr "cnv_mode" "I2S")))
+ "r10k_fpadd * 2")
+
+(define_insn_reservation "r10k_fcvt_other" 2
+ (and (eq_attr "cpu" "r10000")
+ (and (eq_attr "type" "fcvt")
+ (eq_attr "cnv_mode" "!I2S")))
+ "r10k_fpadd")
+
+
+;; Run the fmadd insn through fp-adder first, then fp-multiplier.
+;;
+;; The latency for fmadd is 2 cycles if the result is used
+;; by another fmadd instruction.
+(define_insn_reservation "r10k_fmadd" 4
+ (and (eq_attr "cpu" "r10000")
+ (eq_attr "type" "fmadd"))
+ "r10k_fpadd, r10k_fpmpy")
+
+(define_bypass 2 "r10k_fmadd" "r10k_fmadd")
+
+
+;; Floating point Divisions & square roots.
+(define_insn_reservation "r10k_fdiv_single" 12
+ (and (eq_attr "cpu" "r10000")
+ (and (eq_attr "type" "fdiv,frdiv")
+ (eq_attr "mode" "SF")))
+ "r10k_fpdiv * 14")
+
+(define_insn_reservation "r10k_fdiv_double" 19
+ (and (eq_attr "cpu" "r10000")
+ (and (eq_attr "type" "fdiv,frdiv")
+ (eq_attr "mode" "DF")))
+ "r10k_fpdiv * 21")
+
+(define_insn_reservation "r10k_fsqrt_single" 18
+ (and (eq_attr "cpu" "r10000")
+ (and (eq_attr "type" "fsqrt")
+ (eq_attr "mode" "SF")))
+ "r10k_fpsqrt * 20")
+
+(define_insn_reservation "r10k_fsqrt_double" 33
+ (and (eq_attr "cpu" "r10000")
+ (and (eq_attr "type" "fsqrt")
+ (eq_attr "mode" "DF")))
+ "r10k_fpsqrt * 35")
+
+(define_insn_reservation "r10k_frsqrt_single" 30
+ (and (eq_attr "cpu" "r10000")
+ (and (eq_attr "type" "frsqrt")
+ (eq_attr "mode" "SF")))
+ "r10k_fpsqrt * 20")
+
+(define_insn_reservation "r10k_frsqrt_double" 52
+ (and (eq_attr "cpu" "r10000")
+ (and (eq_attr "type" "frsqrt")
+ (eq_attr "mode" "DF")))
+ "r10k_fpsqrt * 35")
+
+
+;; Handle unknown/multi insns here (this is a guess).
+(define_insn_reservation "r10k_unknown" 1
+ (and (eq_attr "cpu" "r10000")
+ (eq_attr "type" "unknown,multi,atomic,syncloop"))
+ "r10k_alu1 + r10k_alu2")
diff --git a/gcc-4.9/gcc/config/mips/20kc.md b/gcc-4.9/gcc/config/mips/20kc.md
new file mode 100644
index 000000000..c0c3feb35
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/20kc.md
@@ -0,0 +1,284 @@
+;; Copyright (C) 2007-2014 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+;;
+;; .........................
+;;
+;; DFA-based pipeline description for MIPS64 model R20Kc.
+;; Contributed by Jason Eckhardt (jle@cygnus.com).
+;;
+;; The R20Kc is a dual-issue processor that can generally bundle
+;; instructions as follows:
+;; 1. integer with integer
+;; 2. integer with fp
+;; 3. fp with fpload/fpstore
+;;
+;; Of course, there are various restrictions.
+;; Reference:
+;; "Ruby (R20K) Technical Specification Rev. 1.2, December 28, 1999."
+;;
+;; .........................
+
+;; Use three automata to isolate long latency operations, reducing space.
+(define_automaton "r20kc_other, r20kc_fdiv, r20kc_idiv")
+
+;;
+;; Describe the resources.
+;;
+
+;; Global.
+(define_cpu_unit "r20kc_iss0, r20kc_iss1" "r20kc_other")
+
+;; Integer execution unit (pipeline A).
+(define_cpu_unit "r20kc_ixua_addsub_agen" "r20kc_other")
+(define_cpu_unit "r20kc_ixua_shift" "r20kc_other")
+
+(exclusion_set "r20kc_ixua_addsub_agen" "r20kc_ixua_shift")
+
+;; Integer execution unit (pipeline B).
+(define_cpu_unit "r20kc_ixub_addsub" "r20kc_other")
+(define_cpu_unit "r20kc_ixub_branch" "r20kc_other")
+(define_cpu_unit "r20kc_ixub_mpydiv" "r20kc_other")
+(define_cpu_unit "r20kc_ixub_mpydiv_iter" "r20kc_idiv")
+
+(exclusion_set "r20kc_ixub_addsub" "r20kc_ixub_branch, r20kc_ixub_mpydiv")
+(exclusion_set "r20kc_ixub_branch" "r20kc_ixub_mpydiv")
+
+;; Cache / memory interface.
+(define_cpu_unit "r20kc_cache" "r20kc_other")
+
+;; Floating-point unit.
+(define_cpu_unit "r20kc_fpu_add" "r20kc_other")
+(define_cpu_unit "r20kc_fpu_mpy" "r20kc_other")
+(define_cpu_unit "r20kc_fpu_mpy_iter" "r20kc_fdiv")
+(define_cpu_unit "r20kc_fpu_divsqrt" "r20kc_other")
+(define_cpu_unit "r20kc_fpu_divsqrt_iter" "r20kc_fdiv")
+
+(exclusion_set "r20kc_fpu_add" "r20kc_fpu_mpy, r20kc_fpu_divsqrt")
+(exclusion_set "r20kc_fpu_mpy" "r20kc_fpu_divsqrt")
+
+;; No insn can be issued in the cycle following a branch.
+(absence_set "r20kc_iss0,r20kc_iss1" "r20kc_ixub_branch")
+
+;;
+;; Define reservations for unit name mnemonics or combinations.
+;;
+
+(define_reservation "r20kc_iss"
+ "r20kc_iss0|r20kc_iss1")
+(define_reservation "r20kc_single_dispatch"
+ "r20kc_iss0+r20kc_iss1")
+(define_reservation "r20kc_iaddsub"
+ "r20kc_iss+(r20kc_ixua_addsub_agen|r20kc_ixub_addsub)")
+(define_reservation "r20kc_ishift"
+ "r20kc_iss+r20kc_ixua_shift")
+(define_reservation "r20kc_fpmove"
+ "r20kc_iss+r20kc_ixua_addsub_agen")
+(define_reservation "r20kc_imem"
+ "r20kc_iss+r20kc_ixua_addsub_agen+r20kc_cache")
+(define_reservation "r20kc_icache"
+ "r20kc_cache")
+(define_reservation "r20kc_impydiv"
+ "r20kc_iss+r20kc_ixub_mpydiv")
+(define_reservation "r20kc_impydiv_iter"
+ "r20kc_ixub_mpydiv_iter")
+(define_reservation "r20kc_ibranch"
+ "r20kc_iss+r20kc_ixub_branch")
+
+(define_reservation "r20kc_fpadd"
+ "r20kc_iss+r20kc_fpu_add")
+(define_reservation "r20kc_fpmpy"
+ "r20kc_iss+r20kc_fpu_mpy")
+(define_reservation "r20kc_fpmpy_iter"
+ "r20kc_fpu_mpy_iter")
+(define_reservation "r20kc_fpdivsqrt"
+ "r20kc_iss+r20kc_fpu_divsqrt")
+(define_reservation "r20kc_fpdivsqrt_iter"
+ "r20kc_fpu_divsqrt_iter")
+
+;;
+;; Describe instruction reservations for integer operations.
+;;
+
+;; Conditional moves always force single-dispatch.
+(define_insn_reservation "r20kc_cond_move_int" 1
+ (and (eq_attr "cpu" "20kc")
+ (and (eq_attr "type" "condmove")
+ (eq_attr "mode" "!SF,DF")))
+ "r20kc_single_dispatch")
+
+(define_insn_reservation "r20kc_cond_move_fp" 4
+ (and (eq_attr "cpu" "20kc")
+ (and (eq_attr "type" "condmove")
+ (eq_attr "mode" "SF,DF")))
+ "r20kc_single_dispatch")
+
+(define_insn_reservation "r20kc_int_other" 1
+ (and (eq_attr "cpu" "20kc")
+ (eq_attr "type" "move,arith,const,nop"))
+ "r20kc_iaddsub")
+
+;; Shifts can only execute on ixu pipeline A.
+(define_insn_reservation "r20kc_int_shift" 1
+ (and (eq_attr "cpu" "20kc")
+ (eq_attr "type" "shift"))
+ "r20kc_ishift")
+
+(define_insn_reservation "r20kc_ld" 2
+ (and (eq_attr "cpu" "20kc")
+ (eq_attr "type" "load,prefetch,prefetchx"))
+ "r20kc_imem")
+
+
+;; A load immediately following a store will stall, so
+;; say that a store uses the cache for an extra cycle.
+(define_insn_reservation "r20kc_st" 2
+ (and (eq_attr "cpu" "20kc")
+ (eq_attr "type" "store"))
+ "r20kc_imem,r20kc_icache")
+
+(define_insn_reservation "r20kc_fld" 3
+ (and (eq_attr "cpu" "20kc")
+ (eq_attr "type" "fpload"))
+ "r20kc_imem")
+
+(define_insn_reservation "r20kc_ffst" 3
+ (and (eq_attr "cpu" "20kc")
+ (eq_attr "type" "fpstore"))
+ "r20kc_imem,r20kc_icache*2")
+
+;; Integer divide latency is between 13 and 42 cycles for DIV[U] and between
+;; 13 and 72 cycles for DDIV[U]. This depends on the value of the inputs
+;; so we just choose the worst case latency.
+(define_insn_reservation "r20kc_idiv_si" 42
+ (and (eq_attr "cpu" "20kc")
+ (and (eq_attr "type" "idiv")
+ (eq_attr "mode" "SI")))
+ "r20kc_impydiv+(r20kc_impydiv_iter*42)")
+
+(define_insn_reservation "r20kc_idiv_di" 72
+ (and (eq_attr "cpu" "20kc")
+ (and (eq_attr "type" "idiv")
+ (eq_attr "mode" "DI")))
+ "r20kc_impydiv+(r20kc_impydiv_iter*72)")
+
+;; Integer multiply latency is 4 or 7 cycles for word and double-word
+;; respectively.
+(define_insn_reservation "r20kc_impy_si" 4
+ (and (eq_attr "cpu" "20kc")
+ (and (eq_attr "type" "imadd,imul,imul3")
+ (eq_attr "mode" "SI")))
+ "r20kc_impydiv+(r20kc_impydiv_iter*2)")
+
+(define_insn_reservation "r20kc_impy_di" 7
+ (and (eq_attr "cpu" "20kc")
+ (and (eq_attr "type" "imadd,imul,imul3")
+ (eq_attr "mode" "DI")))
+ "r20kc_impydiv+(r20kc_impydiv_iter*7)")
+
+;; Move to/from HI/LO.
+;; Moving to HI/LO has a 3-cycle latency, while moving from HI/LO has
+;; only a 1-cycle latency.  The repeat rate is 3 for both.
+(define_insn_reservation "r20kc_imthilo" 3
+ (and (eq_attr "cpu" "20kc")
+ (eq_attr "type" "mthi,mtlo"))
+ "r20kc_impydiv+(r20kc_impydiv_iter*3)")
+
+(define_insn_reservation "r20kc_imfhilo" 1
+ (and (eq_attr "cpu" "20kc")
+ (eq_attr "type" "mfhi,mflo"))
+ "r20kc_impydiv+(r20kc_impydiv_iter*3)")
+
+;; Move to fp coprocessor.
+(define_insn_reservation "r20kc_ixfer_mt" 3
+ (and (eq_attr "cpu" "20kc")
+ (eq_attr "type" "mtc"))
+ "r20kc_fpmove")
+
+;; Move from fp coprocessor.
+(define_insn_reservation "r20kc_ixfer_mf" 2
+ (and (eq_attr "cpu" "20kc")
+ (eq_attr "type" "mfc"))
+ "r20kc_fpmove")
+
+;; Assume branch predicted correctly.
+(define_insn_reservation "r20kc_ibr" 1
+ (and (eq_attr "cpu" "20kc")
+ (eq_attr "type" "branch,jump,call"))
+ "r20kc_ibranch")
+
+;;
+;; Describe instruction reservations for the floating-point operations.
+;;
+(define_insn_reservation "r20kc_fp_other" 4
+ (and (eq_attr "cpu" "20kc")
+ (eq_attr "type" "fmove,fadd,fabs,fneg,fcmp"))
+ "r20kc_fpadd")
+
+(define_insn_reservation "r20kc_fp_cvt_a" 4
+ (and (eq_attr "cpu" "20kc")
+ (and (eq_attr "type" "fcvt")
+ (eq_attr "cnv_mode" "I2S,I2D,S2D")))
+ "r20kc_fpadd")
+
+(define_insn_reservation "r20kc_fp_cvt_b" 5
+ (and (eq_attr "cpu" "20kc")
+ (and (eq_attr "type" "fcvt")
+ (eq_attr "cnv_mode" "D2S,S2I")))
+ "r20kc_fpadd")
+
+(define_insn_reservation "r20kc_fp_divsqrt_df" 32
+ (and (eq_attr "cpu" "20kc")
+ (and (eq_attr "type" "fdiv,fsqrt")
+ (eq_attr "mode" "DF")))
+ "r20kc_fpdivsqrt+(r20kc_fpdivsqrt_iter*32)")
+
+(define_insn_reservation "r20kc_fp_divsqrt_sf" 17
+ (and (eq_attr "cpu" "20kc")
+ (and (eq_attr "type" "fdiv,fsqrt")
+ (eq_attr "mode" "SF")))
+ "r20kc_fpdivsqrt+(r20kc_fpdivsqrt_iter*17)")
+
+(define_insn_reservation "r20kc_fp_rsqrt_df" 35
+ (and (eq_attr "cpu" "20kc")
+ (and (eq_attr "type" "frsqrt")
+ (eq_attr "mode" "DF")))
+ "r20kc_fpdivsqrt+(r20kc_fpdivsqrt_iter*35)")
+
+(define_insn_reservation "r20kc_fp_rsqrt_sf" 17
+ (and (eq_attr "cpu" "20kc")
+ (and (eq_attr "type" "frsqrt")
+ (eq_attr "mode" "SF")))
+ "r20kc_fpdivsqrt+(r20kc_fpdivsqrt_iter*17)")
+
+(define_insn_reservation "r20kc_fp_mpy_sf" 4
+ (and (eq_attr "cpu" "20kc")
+ (and (eq_attr "type" "fmul,fmadd")
+ (eq_attr "mode" "SF")))
+ "r20kc_fpmpy+r20kc_fpmpy_iter")
+
+(define_insn_reservation "r20kc_fp_mpy_df" 5
+ (and (eq_attr "cpu" "20kc")
+ (and (eq_attr "type" "fmul,fmadd")
+ (eq_attr "mode" "DF")))
+ "r20kc_fpmpy+(r20kc_fpmpy_iter*2)")
+
+;; Force single-dispatch for unknown or multi.
+(define_insn_reservation "r20kc_unknown" 1
+ (and (eq_attr "cpu" "20kc")
+ (eq_attr "type" "unknown,multi,atomic,syncloop"))
+ "r20kc_single_dispatch")
diff --git a/gcc-4.9/gcc/config/mips/24k.md b/gcc-4.9/gcc/config/mips/24k.md
new file mode 100644
index 000000000..9ed83dd96
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/24k.md
@@ -0,0 +1,545 @@
+;; DFA-based pipeline descriptions for MIPS Technologies 24K core.
+;; Contributed by Chao-ying Fu (fu@mips.com), Nigel Stephens (nigel@mips.com)
+;; and David Ung (davidu@mips.com)
+;;
+;; The 24kf2_1 is a single-issue processor with a half-clocked fpu.
+;; The 24kf1_1 is 24k with 1:1 clocked fpu.
+;;
+;; References:
+;; "MIPS32 24K Processor Core Family Software User's Manual, Rev 3.04."
+;;
+;; Copyright (C) 2005-2014 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 3, or (at your
+;; option) any later version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+(define_automaton "r24k_cpu, r24k_mdu, r24k_fpu")
+
+;; Integer execution unit.
+(define_cpu_unit "r24k_iss" "r24k_cpu")
+(define_cpu_unit "r24k_ixu_arith" "r24k_cpu")
+(define_cpu_unit "r24k_mul3a" "r24k_mdu")
+(define_cpu_unit "r24k_mul3b" "r24k_mdu")
+(define_cpu_unit "r24k_mul3c" "r24k_mdu")
+
+;; --------------------------------------------------------------
+;; Producers
+;; --------------------------------------------------------------
+
+;; 1. Loads: lb, lbu, lh, lhu, ll, lw, lwl, lwr, lwpc, lwxs
+(define_insn_reservation "r24k_int_load" 2
+ (and (eq_attr "cpu" "24kc,24kf2_1,24kf1_1")
+ (eq_attr "type" "load"))
+ "r24k_iss+r24k_ixu_arith")
+
+
+;; 2. Arithmetic: add, addi, addiu, addiupc, addu, and, andi, clo, clz,
+;; ext, ins, lui, movn, movz, nor, or, ori, rotr, rotrv, seb, seh, sll,
+;; sllv, slt, slti, sltiu, sltu, sra, srav, srl, srlv, sub, subu, wsbh,
+;; xor, xori
+;; (movn/movz is not matched, we'll need to split condmov to
+;; differentiate between integer/float moves)
+(define_insn_reservation "r24k_int_arith" 1
+ (and (eq_attr "cpu" "24kc,24kf2_1,24kf1_1")
+ (eq_attr "type" "arith,const,logical,move,nop,shift,signext,slt"))
+ "r24k_iss+r24k_ixu_arith")
+
+
+;; 3. Links: bgezal, bgezall, bltzal, bltzall, jal, jalr, jalx
+;; 3a. jr/jalr consumer
+(define_insn_reservation "r24k_int_jump" 1
+ (and (eq_attr "cpu" "24kc,24kf2_1,24kf1_1")
+ (eq_attr "type" "call,jump"))
+ "r24k_iss+r24k_ixu_arith")
+
+;; 3b. branch consumer
+(define_insn_reservation "r24k_int_branch" 1
+ (and (eq_attr "cpu" "24kc,24kf2_1,24kf1_1")
+ (eq_attr "type" "branch"))
+ "r24k_iss+r24k_ixu_arith")
+
+
+;; 4. MDU: fully pipelined multiplier
+;; mult - delivers result to hi/lo in 1 cycle (pipelined)
+(define_insn_reservation "r24k_int_mult" 1
+ (and (eq_attr "cpu" "24kc,24kf2_1,24kf1_1")
+ (eq_attr "type" "imul"))
+ "r24k_iss+(r24k_mul3a|r24k_mul3b|r24k_mul3c)")
+
+;; madd, msub - delivers result to hi/lo in 1 cycle (pipelined)
+(define_insn_reservation "r24k_int_madd" 1
+ (and (eq_attr "cpu" "24kc,24kf2_1,24kf1_1")
+ (eq_attr "type" "imadd"))
+ "r24k_iss+(r24k_mul3a|r24k_mul3b|r24k_mul3c)")
+
+;; mul - delivers result to gpr in 5 cycles
+(define_insn_reservation "r24k_int_mul3" 5
+ (and (eq_attr "cpu" "24kc,24kf2_1,24kf1_1")
+ (eq_attr "type" "imul3"))
+ "r24k_iss+(r24k_mul3a|r24k_mul3b|r24k_mul3c)*5")
+
+;; mfhi, mflo, mflhxu - deliver result to gpr in 5 cycles
+(define_insn_reservation "r24k_int_mfhilo" 5
+ (and (eq_attr "cpu" "24kc,24kf2_1,24kf1_1")
+ (eq_attr "type" "mfhi,mflo"))
+ "r24k_iss+(r24k_mul3a|r24k_mul3b|r24k_mul3c)")
+
+;; mthi, mtlo, mtlhx - deliver result to hi/lo, thence madd, handled as bypass
+(define_insn_reservation "r24k_int_mthilo" 1
+ (and (eq_attr "cpu" "24kc,24kf2_1,24kf1_1")
+ (eq_attr "type" "mthi,mtlo"))
+ "r24k_iss+(r24k_mul3a|r24k_mul3b|r24k_mul3c)")
+
+;; div - default to 36 cycles for 32bit operands. Faster for 24bit, 16bit and
+;; 8bit, but is tricky to identify.
+(define_insn_reservation "r24k_int_div" 36
+ (and (eq_attr "cpu" "24kc,24kf2_1,24kf1_1")
+ (eq_attr "type" "idiv"))
+ "r24k_iss+(r24k_mul3a+r24k_mul3b+r24k_mul3c)*36")
+
+
+;; 5. Cop: cfc1, di, ei, mfc0, mtc0
+;; (Disabled until we add proper cop0 support)
+;;(define_insn_reservation "r24k_int_cop" 3
+;; (and (eq_attr "cpu" "24kc,24kf2_1,24kf1_1")
+;; (eq_attr "type" "cop0"))
+;; "r24k_iss+r24k_ixu_arith")
+
+
+;; 6. Store
+(define_insn_reservation "r24k_int_store" 1
+ (and (eq_attr "cpu" "24kc,24kf2_1,24kf1_1")
+ (eq_attr "type" "store"))
+ "r24k_iss+r24k_ixu_arith")
+
+
+;; 7. Multiple instructions
+(define_insn_reservation "r24k_int_multi" 1
+ (and (eq_attr "cpu" "24kc,24kf2_1,24kf1_1")
+ (eq_attr "type" "multi"))
+ "r24k_iss+r24k_ixu_arith+r24k_fpu_arith+(r24k_mul3a+r24k_mul3b+r24k_mul3c)")
+
+
+;; 8. Unknowns - Currently these include blockage, consttable and alignment
+;; rtls. They do not really affect scheduling latency, (blockage affects
+;; scheduling via log links, but not used here).
+(define_insn_reservation "r24k_int_unknown" 0
+ (and (eq_attr "cpu" "24kc,24kf2_1,24kf1_1")
+ (eq_attr "type" "unknown,atomic,syncloop"))
+ "r24k_iss")
+
+
+;; 9. Prefetch
+(define_insn_reservation "r24k_int_prefetch" 1
+ (and (eq_attr "cpu" "24kc,24kf2_1,24kf1_1")
+ (eq_attr "type" "prefetch,prefetchx"))
+ "r24k_iss+r24k_ixu_arith")
+
+
+;; --------------------------------------------------------------
+;; Bypass to Consumer
+;; --------------------------------------------------------------
+
+;; load->next use : 2 cycles (Default)
+;; load->load base: 3 cycles
+;; load->store base: 3 cycles
+;; load->prefetch: 3 cycles
+(define_bypass 3 "r24k_int_load" "r24k_int_load")
+(define_bypass 3 "r24k_int_load" "r24k_int_store" "!mips_store_data_bypass_p")
+(define_bypass 3 "r24k_int_load" "r24k_int_prefetch")
+
+;; arith->next use : 1 cycles (Default)
+;; arith->load base: 2 cycles
+;; arith->store base: 2 cycles
+;; arith->prefetch: 2 cycles
+(define_bypass 2 "r24k_int_arith" "r24k_int_load")
+(define_bypass 2 "r24k_int_arith" "r24k_int_store" "!mips_store_data_bypass_p")
+(define_bypass 2 "r24k_int_arith" "r24k_int_prefetch")
+
+;; mul3->next use : 5 cycles (default)
+;; mul3->l/s base : 6 cycles
+;; mul3->prefetch : 6 cycles
+(define_bypass 6 "r24k_int_mul3" "r24k_int_load")
+(define_bypass 6 "r24k_int_mul3" "r24k_int_store" "!mips_store_data_bypass_p")
+(define_bypass 6 "r24k_int_mul3" "r24k_int_prefetch")
+
+;; mul3->madd/msub : 1 cycle
+(define_bypass 1 "r24k_int_mul3" "r24k_int_madd" "mips_linked_madd_p")
+
+;; mfhilo->next use : 5 cycles (default)
+;; mfhilo->l/s base : 6 cycles
+;; mfhilo->prefetch : 6 cycles
+;; mthilo->madd/msub : 2 cycle (only for mthi/lo not mfhi/lo)
+(define_bypass 6 "r24k_int_mfhilo" "r24k_int_load")
+(define_bypass 6 "r24k_int_mfhilo" "r24k_int_store"
+ "!mips_store_data_bypass_p")
+(define_bypass 6 "r24k_int_mfhilo" "r24k_int_prefetch")
+(define_bypass 2 "r24k_int_mthilo" "r24k_int_madd")
+
+;; cop->next use : 3 cycles (Default)
+;; cop->l/s base : 4 cycles
+;; (define_bypass 4 "r24k_int_cop" "r24k_int_load")
+;; (define_bypass 4 "r24k_int_cop" "r24k_int_store"
+;; "!mips_store_data_bypass_p")
+
+;; multi->next use : 1 cycles (Default)
+;; multi->l/s base : 2 cycles
+;; multi->prefetch : 2 cycles
+(define_bypass 2 "r24k_int_multi" "r24k_int_load")
+(define_bypass 2 "r24k_int_multi" "r24k_int_store" "!mips_store_data_bypass_p")
+(define_bypass 2 "r24k_int_multi" "r24k_int_prefetch")
+
+
+;; --------------------------------------------------------------
+;; DSP instructions
+;; --------------------------------------------------------------
+
+;; absq, addq, addsc, addu, addwc, bitrev, cmp, cmpgu, cmpu, insv, modsub,
+;; packrl, pick, preceq, preceu, precequ, precrq, precrqu, raddu, rddsp, repl,
+;; replv, shll, shllv, shra, shrav, shrl, shrlv, subq, subu, wrdsp
+(define_insn_reservation "r24k_dsp_alu" 2
+ (and (eq_attr "cpu" "24kc,24kf2_1,24kf1_1")
+ (eq_attr "type" "dspalu,dspalusat"))
+ "r24k_iss+r24k_ixu_arith")
+
+;; dpaq_s, dpau, dpsq_s, dpsu, maq_s, mulsaq
+(define_insn_reservation "r24k_dsp_mac" 1
+ (and (eq_attr "cpu" "24kc,24kf2_1,24kf1_1")
+ (eq_attr "type" "dspmac"))
+ "r24k_iss+(r24k_mul3a|r24k_mul3b|r24k_mul3c)")
+
+;; dpaq_sa, dpsq_sa, maq_sa
+(define_insn_reservation "r24k_dsp_mac_sat" 1
+ (and (eq_attr "cpu" "24kc,24kf2_1,24kf1_1")
+ (eq_attr "type" "dspmacsat"))
+ "r24k_iss+(r24k_mul3a|r24k_mul3b|r24k_mul3c)")
+
+;; extp, extpdp, extpdpv, extpv, extr, extrv
+(define_insn_reservation "r24k_dsp_acc_ext" 5
+ (and (eq_attr "cpu" "24kc,24kf2_1,24kf1_1")
+ (eq_attr "type" "accext"))
+ "r24k_iss+(r24k_mul3a|r24k_mul3b|r24k_mul3c)")
+
+;; mthlip, shilo, shilov
+(define_insn_reservation "r24k_dsp_acc_mod" 1
+ (and (eq_attr "cpu" "24kc,24kf2_1,24kf1_1")
+ (eq_attr "type" "accmod"))
+ "r24k_iss+(r24k_mul3a|r24k_mul3b|r24k_mul3c)")
+
+
+;; mult/madd->dsp_acc_ext : 4 cycles
+;; mult/madd->dsp_acc_mod : 4 cycles
+(define_bypass 4 "r24k_int_mult" "r24k_dsp_acc_ext")
+(define_bypass 4 "r24k_int_mult" "r24k_dsp_acc_mod")
+
+;; mthilo->dsp_acc_ext : 4 cycles
+;; mthilo->dsp_acc_ext : 4 cycles
+(define_bypass 4 "r24k_int_mthilo" "r24k_dsp_acc_ext")
+(define_bypass 4 "r24k_int_mthilo" "r24k_dsp_acc_mod")
+
+;; dsp_mac->next use : 1 cycles (default)
+;; dsp_mac->dsp_acc_ext : 4 cycles
+;; dsp_mac->dsp_acc_mod : 4 cycles
+(define_bypass 4 "r24k_dsp_mac" "r24k_dsp_acc_ext")
+(define_bypass 4 "r24k_dsp_mac" "r24k_dsp_acc_mod")
+
+;; dsp_mac_sat->next use : 1 cycles (default)
+;; dsp_mac_sat->mult/madd : 2 cycles
+;; dsp_mac_sat->dsp_mac : 2 cycles
+;; dsp_mac_sat->dsp_mac_sat : 2 cycles
+;; dsp_mac_sat->dsp_acc_ext : 4 cycles
+;; dsp_mac_sat->dsp_acc_mod : 4 cycles
+(define_bypass 2 "r24k_dsp_mac_sat" "r24k_int_mult")
+(define_bypass 2 "r24k_dsp_mac_sat" "r24k_dsp_mac")
+(define_bypass 2 "r24k_dsp_mac_sat" "r24k_dsp_mac_sat")
+(define_bypass 4 "r24k_dsp_mac_sat" "r24k_dsp_acc_ext")
+(define_bypass 4 "r24k_dsp_mac_sat" "r24k_dsp_acc_mod")
+
+;; dsp_acc_ext->next use : 5 cycles (default)
+;; dsp_acc_ext->l/s base : 6 cycles
+;; dsp_acc_ext->prefetch : 6 cycles
+(define_bypass 6 "r24k_dsp_acc_ext" "r24k_int_load")
+(define_bypass 6 "r24k_dsp_acc_ext" "r24k_int_store"
+ "!mips_store_data_bypass_p")
+(define_bypass 6 "r24k_dsp_acc_ext" "r24k_int_prefetch")
+
+;; dsp_acc_mod->next use : 1 cycles (default)
+;; dsp_acc_mod->mult/madd : 2 cycles
+;; dsp_acc_mod->dsp_mac : 2 cycles
+;; dsp_acc_mod->dsp_mac_sat : 2 cycles
+;; dsp_acc_mod->dsp_acc_ext : 4 cycles
+;; dsp_acc_mod->dsp_acc_mod : 4 cycles
+(define_bypass 2 "r24k_dsp_acc_mod" "r24k_int_mult")
+(define_bypass 2 "r24k_dsp_acc_mod" "r24k_dsp_mac")
+(define_bypass 2 "r24k_dsp_acc_mod" "r24k_dsp_mac_sat")
+(define_bypass 4 "r24k_dsp_acc_mod" "r24k_dsp_acc_ext")
+(define_bypass 4 "r24k_dsp_acc_mod" "r24k_dsp_acc_mod")
+
+;; dspalu->next use : 2 cycles (default)
+;; dspalu->l/s base : 3 cycles
+;; dspalu->prefetch : 3 cycles
+;; some pairs of dspalu (addsc/addwc, cmp/pick, wrdsp/insv) : 1 cycle
+(define_bypass 3 "r24k_dsp_alu" "r24k_int_load")
+(define_bypass 3 "r24k_dsp_alu" "r24k_int_store" "!mips_store_data_bypass_p")
+(define_bypass 3 "r24k_dsp_alu" "r24k_int_prefetch")
+(define_bypass 1 "r24k_dsp_alu" "r24k_dsp_alu" "mips_dspalu_bypass_p")
+
+
+;; --------------------------------------------------------------
+;; Floating Point Instructions
+;; --------------------------------------------------------------
+
+(define_cpu_unit "r24k_fpu_arith" "r24k_fpu")
+
+;; The 24k is a single issue cpu, and the fpu runs at half clock speed,
+;; so each fpu instruction ties up the shared instruction scheduler for
+;; 1 cycle, and the fpu scheduler for 2 cycles.
+;;
+;; These timings are therefore twice the values in the 24K manual,
+;; which are quoted in fpu clocks.
+;;
+;; The 24kf1_1 is a 24k configured with 1:1 cpu and fpu, so use
+;; the unscaled timings
+
+(define_reservation "r24kf2_1_fpu_iss" "r24k_iss+(r24k_fpu_arith*2)")
+
+;; fadd, fabs, fneg
+(define_insn_reservation "r24kf2_1_fadd" 8
+ (and (eq_attr "cpu" "24kf2_1")
+ (eq_attr "type" "fadd,fabs,fneg"))
+ "r24kf2_1_fpu_iss")
+
+;; fmove, fcmove
+(define_insn_reservation "r24kf2_1_fmove" 8
+ (and (eq_attr "cpu" "24kf2_1")
+ (eq_attr "type" "fmove,condmove"))
+ "r24kf2_1_fpu_iss")
+
+;; fload
+(define_insn_reservation "r24kf2_1_fload" 6
+ (and (eq_attr "cpu" "24kf2_1")
+ (eq_attr "type" "fpload,fpidxload"))
+ "r24kf2_1_fpu_iss")
+
+;; fstore
+(define_insn_reservation "r24kf2_1_fstore" 2
+ (and (eq_attr "cpu" "24kf2_1")
+ (eq_attr "type" "fpstore"))
+ "r24kf2_1_fpu_iss")
+
+;; fmul, fmadd
+(define_insn_reservation "r24kf2_1_fmul_sf" 8
+ (and (eq_attr "cpu" "24kf2_1")
+ (and (eq_attr "type" "fmul,fmadd")
+ (eq_attr "mode" "SF")))
+ "r24kf2_1_fpu_iss")
+
+(define_insn_reservation "r24kf2_1_fmul_df" 10
+ (and (eq_attr "cpu" "24kf2_1")
+ (and (eq_attr "type" "fmul,fmadd")
+ (eq_attr "mode" "DF")))
+ "r24kf2_1_fpu_iss,(r24k_fpu_arith*2)")
+
+
+;; fdiv, fsqrt, frsqrt
+(define_insn_reservation "r24kf2_1_fdiv_sf" 34
+ (and (eq_attr "cpu" "24kf2_1")
+ (and (eq_attr "type" "fdiv,fsqrt,frsqrt")
+ (eq_attr "mode" "SF")))
+ "r24kf2_1_fpu_iss,(r24k_fpu_arith*26)")
+
+(define_insn_reservation "r24kf2_1_fdiv_df" 64
+ (and (eq_attr "cpu" "24kf2_1")
+ (and (eq_attr "type" "fdiv,fsqrt")
+ (eq_attr "mode" "DF")))
+ "r24kf2_1_fpu_iss,(r24k_fpu_arith*56)")
+
+;; frsqrt
+(define_insn_reservation "r24kf2_1_frsqrt_df" 70
+ (and (eq_attr "cpu" "24kf2_1")
+ (and (eq_attr "type" "frsqrt")
+ (eq_attr "mode" "DF")))
+ "r24kf2_1_fpu_iss,(r24k_fpu_arith*60)")
+
+;; fcmp
+(define_insn_reservation "r24kf2_1_fcmp" 4
+ (and (eq_attr "cpu" "24kf2_1")
+ (eq_attr "type" "fcmp"))
+ "r24kf2_1_fpu_iss")
+
+;; fcmp -> movf.fmt & movt.fmt bypass (dependency must be on the condition)
+(define_bypass 2 "r24kf2_1_fcmp" "r24kf2_1_fmove")
+
+;; fcvt (cvt.d.s, cvt.[sd].[wl])
+(define_insn_reservation "r24kf2_1_fcvt_i2f_s2d" 8
+ (and (eq_attr "cpu" "24kf2_1")
+ (and (eq_attr "type" "fcvt")
+ (eq_attr "cnv_mode" "I2S,I2D,S2D")))
+ "r24kf2_1_fpu_iss")
+
+;; fcvt (cvt.s.d)
+(define_insn_reservation "r24kf2_1_fcvt_s2d" 12
+ (and (eq_attr "cpu" "24kf2_1")
+ (and (eq_attr "type" "fcvt")
+ (eq_attr "cnv_mode" "D2S")))
+ "r24kf2_1_fpu_iss")
+
+;; fcvt (cvt.[wl].[sd], etc)
+(define_insn_reservation "r24kf2_1_fcvt_f2i" 10
+ (and (eq_attr "cpu" "24kf2_1")
+ (and (eq_attr "type" "fcvt")
+ (eq_attr "cnv_mode" "S2I,D2I")))
+ "r24kf2_1_fpu_iss")
+
+;; fxfer (mfc1, mfhc1, mtc1, mthc1)
+(define_insn_reservation "r24kf2_1_fxfer" 4
+ (and (eq_attr "cpu" "24kf2_1")
+ (eq_attr "type" "mfc,mtc"))
+ "r24kf2_1_fpu_iss")
+
+;; --------------------------------------------------------------
+;; Bypass to Consumer
+;; --------------------------------------------------------------
+;; r24kf2_1_fcvt_f2i->l/s base : 11 cycles
+;; r24kf2_1_fcvt_f2i->prefetch : 11 cycles
+(define_bypass 11 "r24kf2_1_fcvt_f2i" "r24k_int_load")
+(define_bypass 11 "r24kf2_1_fcvt_f2i" "r24k_int_store"
+ "!mips_store_data_bypass_p")
+(define_bypass 11 "r24kf2_1_fcvt_f2i" "r24k_int_prefetch")
+
+;; r24kf2_1_fxfer->l/s base : 5 cycles
+;; r24kf2_1_fxfer->prefetch : 5 cycles
+(define_bypass 5 "r24kf2_1_fxfer" "r24k_int_load")
+(define_bypass 5 "r24kf2_1_fxfer" "r24k_int_store" "!mips_store_data_bypass_p")
+(define_bypass 5 "r24kf2_1_fxfer" "r24k_int_prefetch")
+
+;; --------------------------------------------------------------
+;; The 24kf1_1 is a 24k configured with 1:1 cpu and fpu, so use
+;; the unscaled timings
+;; --------------------------------------------------------------
+
+(define_reservation "r24kf1_1_fpu_iss" "r24k_iss+r24k_fpu_arith")
+
+;; fadd, fabs, fneg
+(define_insn_reservation "r24kf1_1_fadd" 4
+ (and (eq_attr "cpu" "24kf1_1")
+ (eq_attr "type" "fadd,fabs,fneg"))
+ "r24kf1_1_fpu_iss")
+
+;; fmove, fcmove
+(define_insn_reservation "r24kf1_1_fmove" 4
+ (and (eq_attr "cpu" "24kf1_1")
+ (eq_attr "type" "fmove,condmove"))
+ "r24kf1_1_fpu_iss")
+
+;; fload
+(define_insn_reservation "r24kf1_1_fload" 3
+ (and (eq_attr "cpu" "24kf1_1")
+ (eq_attr "type" "fpload,fpidxload"))
+ "r24kf1_1_fpu_iss")
+
+;; fstore
+(define_insn_reservation "r24kf1_1_fstore" 1
+ (and (eq_attr "cpu" "24kf1_1")
+ (eq_attr "type" "fpstore"))
+ "r24kf1_1_fpu_iss")
+
+;; fmul, fmadd
+(define_insn_reservation "r24kf1_1_fmul_sf" 4
+ (and (eq_attr "cpu" "24kf1_1")
+ (and (eq_attr "type" "fmul,fmadd")
+ (eq_attr "mode" "SF")))
+ "r24kf1_1_fpu_iss")
+
+(define_insn_reservation "r24kf1_1_fmul_df" 5
+ (and (eq_attr "cpu" "24kf1_1")
+ (and (eq_attr "type" "fmul,fmadd")
+ (eq_attr "mode" "DF")))
+ "r24kf1_1_fpu_iss,r24k_fpu_arith")
+
+
+;; fdiv, fsqrt, frsqrt
+(define_insn_reservation "r24kf1_1_fdiv_sf" 17
+ (and (eq_attr "cpu" "24kf1_1")
+ (and (eq_attr "type" "fdiv,fsqrt,frsqrt")
+ (eq_attr "mode" "SF")))
+ "r24kf1_1_fpu_iss,(r24k_fpu_arith*13)")
+
+(define_insn_reservation "r24kf1_1_fdiv_df" 32
+ (and (eq_attr "cpu" "24kf1_1")
+ (and (eq_attr "type" "fdiv,fsqrt")
+ (eq_attr "mode" "DF")))
+ "r24kf1_1_fpu_iss,(r24k_fpu_arith*28)")
+
+;; frsqrt
+(define_insn_reservation "r24kf1_1_frsqrt_df" 35
+ (and (eq_attr "cpu" "24kf1_1")
+ (and (eq_attr "type" "frsqrt")
+ (eq_attr "mode" "DF")))
+ "r24kf1_1_fpu_iss,(r24k_fpu_arith*30)")
+
+;; fcmp
+(define_insn_reservation "r24kf1_1_fcmp" 2
+ (and (eq_attr "cpu" "24kf1_1")
+ (eq_attr "type" "fcmp"))
+ "r24kf1_1_fpu_iss")
+
+;; fcmp -> movf.fmt & movt.fmt bypass (dependency must be on the condition)
+(define_bypass 1 "r24kf1_1_fcmp" "r24kf1_1_fmove")
+
+;; fcvt (cvt.d.s, cvt.[sd].[wl])
+(define_insn_reservation "r24kf1_1_fcvt_i2f_s2d" 4
+ (and (eq_attr "cpu" "24kf1_1")
+ (and (eq_attr "type" "fcvt")
+ (eq_attr "cnv_mode" "I2S,I2D,S2D")))
+ "r24kf1_1_fpu_iss")
+
+;; fcvt (cvt.s.d)
+(define_insn_reservation "r24kf1_1_fcvt_s2d" 6
+ (and (eq_attr "cpu" "24kf1_1")
+ (and (eq_attr "type" "fcvt")
+ (eq_attr "cnv_mode" "D2S")))
+ "r24kf1_1_fpu_iss")
+
+;; fcvt (cvt.[wl].[sd], etc)
+(define_insn_reservation "r24kf1_1_fcvt_f2i" 5
+ (and (eq_attr "cpu" "24kf1_1")
+ (and (eq_attr "type" "fcvt")
+ (eq_attr "cnv_mode" "S2I,D2I")))
+ "r24kf1_1_fpu_iss")
+
+;; fxfer (mfc1, mfhc1, mtc1, mthc1)
+(define_insn_reservation "r24kf1_1_fxfer" 2
+ (and (eq_attr "cpu" "24kf1_1")
+ (eq_attr "type" "mfc,mtc"))
+ "r24kf1_1_fpu_iss")
+
+;; --------------------------------------------------------------
+;; Bypass to Consumer
+;; --------------------------------------------------------------
+;; r24kf1_1_fcvt_f2i->l/s base : 6 cycles
+;; r24kf1_1_fcvt_f2i->prefetch : 6 cycles
+(define_bypass 6 "r24kf1_1_fcvt_f2i" "r24k_int_load")
+(define_bypass 6 "r24kf1_1_fcvt_f2i" "r24k_int_store"
+ "!mips_store_data_bypass_p")
+(define_bypass 6 "r24kf1_1_fcvt_f2i" "r24k_int_prefetch")
+
+;; r24kf1_1_fxfer->l/s base : 3 cycles
+;; r24kf1_1_fxfer->prefetch : 3 cycles
+(define_bypass 3 "r24kf1_1_fxfer" "r24k_int_load")
+(define_bypass 3 "r24kf1_1_fxfer" "r24k_int_store" "!mips_store_data_bypass_p")
+(define_bypass 3 "r24kf1_1_fxfer" "r24k_int_prefetch")
+
diff --git a/gcc-4.9/gcc/config/mips/3000.md b/gcc-4.9/gcc/config/mips/3000.md
new file mode 100644
index 000000000..2d4250385
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/3000.md
@@ -0,0 +1,71 @@
+;; R3000 and TX39 pipeline description.
+;; Copyright (C) 2004-2014 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 3, or (at your
+;; option) any later version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+
+;; This file overrides parts of generic.md. It is derived from the
+;; old define_function_unit description.
+
+(define_insn_reservation "r3k_load" 2
+ (and (eq_attr "cpu" "r3000,r3900")
+ (eq_attr "type" "load,fpload,fpidxload"))
+ "alu")
+
+(define_insn_reservation "r3k_imul" 12
+ (and (eq_attr "cpu" "r3000,r3900")
+ (eq_attr "type" "imul,imul3,imadd"))
+ "imuldiv*12")
+
+(define_insn_reservation "r3k_idiv" 35
+ (and (eq_attr "cpu" "r3000,r3900")
+ (eq_attr "type" "idiv"))
+ "imuldiv*35")
+
+(define_insn_reservation "r3k_fmove" 1
+ (and (eq_attr "cpu" "r3000,r3900")
+ (eq_attr "type" "fabs,fneg,fmove"))
+ "alu")
+
+(define_insn_reservation "r3k_fadd" 2
+ (and (eq_attr "cpu" "r3000,r3900")
+ (eq_attr "type" "fcmp,fadd"))
+ "alu")
+
+(define_insn_reservation "r3k_fmul_single" 4
+ (and (eq_attr "cpu" "r3000,r3900")
+ (and (eq_attr "type" "fmul,fmadd")
+ (eq_attr "mode" "SF")))
+ "alu")
+
+(define_insn_reservation "r3k_fmul_double" 5
+ (and (eq_attr "cpu" "r3000,r3900")
+ (and (eq_attr "type" "fmul,fmadd")
+ (eq_attr "mode" "DF")))
+ "alu")
+
+(define_insn_reservation "r3k_fdiv_single" 12
+ (and (eq_attr "cpu" "r3000,r3900")
+ (and (eq_attr "type" "fdiv,frdiv")
+ (eq_attr "mode" "SF")))
+ "alu")
+
+(define_insn_reservation "r3k_fdiv_double" 19
+ (and (eq_attr "cpu" "r3000,r3900")
+ (and (eq_attr "type" "fdiv,frdiv")
+ (eq_attr "mode" "DF")))
+ "alu")
diff --git a/gcc-4.9/gcc/config/mips/4000.md b/gcc-4.9/gcc/config/mips/4000.md
new file mode 100644
index 000000000..1851d831b
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/4000.md
@@ -0,0 +1,32 @@
+;; R4000 pipeline description.
+;; Copyright (C) 2004-2014 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 3, or (at your
+;; option) any later version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+
+;; This file overrides parts of generic.md. It is derived from the
+;; old define_function_unit description.
+
+(define_insn_reservation "r4k_imul" 10
+ (and (eq_attr "cpu" "r4000")
+ (eq_attr "type" "imul,imul3,imadd"))
+ "imuldiv*10")
+
+(define_insn_reservation "r4k_idiv" 69
+ (and (eq_attr "cpu" "r4000")
+ (eq_attr "type" "idiv"))
+ "imuldiv*69")
diff --git a/gcc-4.9/gcc/config/mips/4100.md b/gcc-4.9/gcc/config/mips/4100.md
new file mode 100644
index 000000000..c816ef086
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/4100.md
@@ -0,0 +1,51 @@
+;; VR4100 and VR4120 pipeline description.
+;; Copyright (C) 2004-2014 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 3, or (at your
+;; option) any later version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+
+;; This file overrides parts of generic.md. It is derived from the
+;; old define_function_unit description.
+
+(define_insn_reservation "r4100_load" 2
+ (and (eq_attr "cpu" "r4100,r4120")
+ (eq_attr "type" "load,fpload,fpidxload,mfc,mtc"))
+ "alu")
+
+(define_insn_reservation "r4100_imul_si" 1
+ (and (eq_attr "cpu" "r4100,r4120")
+ (and (eq_attr "type" "imul,imul3,imadd")
+ (eq_attr "mode" "SI")))
+ "imuldiv")
+
+(define_insn_reservation "r4100_imul_di" 4
+ (and (eq_attr "cpu" "r4100,r4120")
+ (and (eq_attr "type" "imul,imul3,imadd")
+ (eq_attr "mode" "DI")))
+ "imuldiv*4")
+
+(define_insn_reservation "r4100_idiv_si" 35
+ (and (eq_attr "cpu" "r4100,r4120")
+ (and (eq_attr "type" "idiv")
+ (eq_attr "mode" "SI")))
+ "imuldiv*35")
+
+(define_insn_reservation "r4100_idiv_di" 67
+ (and (eq_attr "cpu" "r4100,r4120")
+ (and (eq_attr "type" "idiv")
+ (eq_attr "mode" "DI")))
+ "imuldiv*67")
diff --git a/gcc-4.9/gcc/config/mips/4130.md b/gcc-4.9/gcc/config/mips/4130.md
new file mode 100644
index 000000000..14197acf5
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/4130.md
@@ -0,0 +1,153 @@
+;; Copyright (C) 2004-2014 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+;;
+;; Pipeline description for the VR4130 family.
+;;
+;; The processor issues each 8-byte aligned pair of instructions together,
+;; stalling the second instruction if it depends on the first. Thus, if we
+;; want two instructions to issue in parallel, we need to make sure that the
+;; first one is 8-byte aligned.
+;;
+;; For the purposes of this pipeline description, we treat the processor
+;; like a standard two-way superscalar architecture. If scheduling were
+;; the last pass to run, we could use the scheduler hooks to vary the
+;; issue rate depending on whether an instruction is at an aligned or
+;; unaligned address. Unfortunately, delayed branch scheduling and
+;; hazard avoidance are done after the final scheduling pass, and they
+;; can change the addresses of many instructions.
+;;
+;; We get around this in two ways:
+;;
+;; (1) By running an extra pass at the end of compilation. This pass goes
+;; through the function looking for pairs of instructions that could
+;; execute in parallel. It makes sure that the first instruction in
+;; each pair is suitably aligned, inserting nops if necessary. Doing
+;; this gives the same kind of pipeline behavior we would see on a
+;; normal superscalar target.
+;;
+;; This pass is generally a speed improvement, but the extra nops will
+;; obviously make the program bigger. It is therefore unsuitable for
+;; -Os (at the very least).
+;;
+;; (2) By modifying the scheduler hooks so that, where possible:
+;;
+;; (a) dependent instructions are separated by a non-dependent
+;; instruction;
+;;
+;; (b) instructions that use the multiplication unit are separated
+;; by non-multiplication instructions; and
+;;
+;; (c) memory access instructions are separated by non-memory
+;; instructions.
+;;
+;; The idea is to keep conflicting instructions apart wherever possible
+;; and thus make the schedule less dependent on alignment.
+
+(define_automaton "vr4130_main, vr4130_muldiv, vr4130_mulpre")
+
+(define_cpu_unit "vr4130_alu1, vr4130_alu2, vr4130_dcache" "vr4130_main")
+(define_cpu_unit "vr4130_muldiv" "vr4130_muldiv")
+
+;; This is a fake unit for pre-reload scheduling of multiplications.
+;; It enforces the true post-reload repeat rate.
+(define_cpu_unit "vr4130_mulpre" "vr4130_mulpre")
+
+;; The scheduling hooks use this attribute for (b) above.
+(define_attr "vr4130_class" "mul,mem,alu"
+ (cond [(eq_attr "type" "load,store")
+ (const_string "mem")
+
+ (eq_attr "type" "mfhi,mflo,mthi,mtlo,imul,imul3,imadd,idiv")
+ (const_string "mul")]
+ (const_string "alu")))
+
+(define_insn_reservation "vr4130_multi" 1
+ (and (eq_attr "cpu" "r4130")
+ (eq_attr "type" "multi,unknown,atomic,syncloop"))
+ "vr4130_alu1 + vr4130_alu2 + vr4130_dcache + vr4130_muldiv")
+
+(define_insn_reservation "vr4130_int" 1
+ (and (eq_attr "cpu" "r4130")
+ (eq_attr "type" "arith,const,logical,move,nop,shift,signext,slt"))
+ "vr4130_alu1 | vr4130_alu2")
+
+(define_insn_reservation "vr4130_load" 3
+ (and (eq_attr "cpu" "r4130")
+ (eq_attr "type" "load"))
+ "vr4130_dcache")
+
+(define_insn_reservation "vr4130_store" 1
+ (and (eq_attr "cpu" "r4130")
+ (eq_attr "type" "store"))
+ "vr4130_dcache")
+
+(define_insn_reservation "vr4130_mfhilo" 3
+ (and (eq_attr "cpu" "r4130")
+ (eq_attr "type" "mfhi,mflo"))
+ "vr4130_muldiv")
+
+(define_insn_reservation "vr4130_mthilo" 1
+ (and (eq_attr "cpu" "r4130")
+ (eq_attr "type" "mthi,mtlo"))
+ "vr4130_muldiv")
+
+;; The product is available in LO & HI after one cycle. Moving the result
+;; into an integer register will take an additional three cycles, see mflo
+;; & mfhi above. Note that the same latencies and repeat rates apply if we
+;; use "mtlo; macc" instead of "mult; mflo".
+(define_insn_reservation "vr4130_mulsi" 4
+ (and (eq_attr "cpu" "r4130")
+ (and (eq_attr "type" "imul,imul3")
+ (eq_attr "mode" "SI")))
+ "vr4130_muldiv + (vr4130_mulpre * 2)")
+
+;; As for vr4130_mulsi, but the product is available in LO and HI
+;; after 3 cycles.
+(define_insn_reservation "vr4130_muldi" 6
+ (and (eq_attr "cpu" "r4130")
+ (and (eq_attr "type" "imul,imul3")
+ (eq_attr "mode" "DI")))
+ "(vr4130_muldiv * 3) + (vr4130_mulpre * 4)")
+
+;; maccs can execute in consecutive cycles without stalling, but it
+;; is 3 cycles before the integer destination can be read.
+(define_insn_reservation "vr4130_macc" 3
+ (and (eq_attr "cpu" "r4130")
+ (eq_attr "type" "imadd"))
+ "vr4130_muldiv")
+
+(define_bypass 1 "vr4130_mulsi,vr4130_macc" "vr4130_macc" "mips_linked_madd_p")
+(define_bypass 1 "vr4130_mulsi,vr4130_macc" "vr4130_mfhilo")
+(define_bypass 3 "vr4130_muldi" "vr4130_mfhilo")
+
+(define_insn_reservation "vr4130_divsi" 36
+ (and (eq_attr "cpu" "r4130")
+ (and (eq_attr "type" "idiv")
+ (eq_attr "mode" "SI")))
+ "vr4130_muldiv * 36")
+
+(define_insn_reservation "vr4130_divdi" 72
+ (and (eq_attr "cpu" "r4130")
+ (and (eq_attr "type" "idiv")
+ (eq_attr "mode" "DI")))
+ "vr4130_muldiv * 72")
+
+(define_insn_reservation "vr4130_branch" 0
+ (and (eq_attr "cpu" "r4130")
+ (eq_attr "type" "branch,jump,call"))
+ "vr4130_alu1 | vr4130_alu2")
diff --git a/gcc-4.9/gcc/config/mips/4300.md b/gcc-4.9/gcc/config/mips/4300.md
new file mode 100644
index 000000000..04a25c360
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/4300.md
@@ -0,0 +1,85 @@
+;; VR4300 pipeline description.
+;; Copyright (C) 2004-2014 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 3, or (at your
+;; option) any later version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+
+;; This file overrides parts of generic.md. It is derived from the
+;; old define_function_unit description.
+
+(define_insn_reservation "r4300_load" 2
+ (and (eq_attr "cpu" "r4300")
+ (eq_attr "type" "load,fpload,fpidxload,mfc,mtc"))
+ "alu")
+
+(define_insn_reservation "r4300_imul_si" 5
+ (and (eq_attr "cpu" "r4300")
+ (and (eq_attr "type" "imul,imul3,imadd")
+ (eq_attr "mode" "SI")))
+ "imuldiv*5")
+
+(define_insn_reservation "r4300_imul_di" 8
+ (and (eq_attr "cpu" "r4300")
+ (and (eq_attr "type" "imul,imul3,imadd")
+ (eq_attr "mode" "DI")))
+ "imuldiv*8")
+
+(define_insn_reservation "r4300_idiv_si" 37
+ (and (eq_attr "cpu" "r4300")
+ (and (eq_attr "type" "idiv")
+ (eq_attr "mode" "SI")))
+ "imuldiv*37")
+
+(define_insn_reservation "r4300_idiv_di" 69
+ (and (eq_attr "cpu" "r4300")
+ (and (eq_attr "type" "idiv")
+ (eq_attr "mode" "DI")))
+ "imuldiv*69")
+
+(define_insn_reservation "r4300_fmove" 1
+ (and (eq_attr "cpu" "r4300")
+ (eq_attr "type" "fcmp,fabs,fneg,fmove"))
+ "imuldiv")
+
+(define_insn_reservation "r4300_fadd" 3
+ (and (eq_attr "cpu" "r4300")
+ (eq_attr "type" "fadd"))
+ "imuldiv*3")
+
+(define_insn_reservation "r4300_fmul_single" 5
+ (and (eq_attr "cpu" "r4300")
+ (and (eq_attr "type" "fmul,fmadd")
+ (eq_attr "mode" "SF")))
+ "imuldiv*5")
+
+(define_insn_reservation "r4300_fmul_double" 8
+ (and (eq_attr "cpu" "r4300")
+ (and (eq_attr "type" "fmul,fmadd")
+ (eq_attr "mode" "DF")))
+ "imuldiv*8")
+
+(define_insn_reservation "r4300_fdiv_single" 29
+ (and (eq_attr "cpu" "r4300")
+ (and (eq_attr "type" "fdiv,frdiv,fsqrt,frsqrt")
+ (eq_attr "mode" "SF")))
+ "imuldiv*29")
+
+(define_insn_reservation "r4300_fdiv_double" 58
+ (and (eq_attr "cpu" "r4300")
+ (and (eq_attr "type" "fdiv,frdiv,fsqrt,frsqrt")
+ (eq_attr "mode" "DF")))
+ "imuldiv*58")
diff --git a/gcc-4.9/gcc/config/mips/4600.md b/gcc-4.9/gcc/config/mips/4600.md
new file mode 100644
index 000000000..29ec0eacd
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/4600.md
@@ -0,0 +1,130 @@
+;; R4600, R4650, and R4700 pipeline description.
+;; Copyright (C) 2004-2014 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 3, or (at your
+;; option) any later version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+
+;; This file overrides parts of generic.md. It is derived from the
+;; old define_function_unit description.
+;;
+;; We handle the R4600, R4650, and R4700 in much the same way. The only
+;; differences between R4600 and R4650 are the integer multiplication and
+;; division costs. The only differences between R4600 and R4700 are the
+;; integer and floating-point multiplication costs.
+
+(define_insn_reservation "r4600_imul_si" 10
+ (and (eq_attr "cpu" "r4600")
+ (eq_attr "type" "imul,imul3,imadd")
+ (eq_attr "mode" "SI"))
+ "imuldiv*10")
+
+(define_insn_reservation "r4600_imul_di" 12
+ (and (eq_attr "cpu" "r4600")
+ (eq_attr "type" "imul,imul3,imadd")
+ (eq_attr "mode" "DI"))
+ "imuldiv*12")
+
+(define_insn_reservation "r4600_idiv_si" 42
+ (and (eq_attr "cpu" "r4600,r4700")
+ (eq_attr "type" "idiv")
+ (eq_attr "mode" "SI"))
+ "imuldiv*42")
+
+(define_insn_reservation "r4600_idiv_di" 74
+ (and (eq_attr "cpu" "r4600,r4700")
+ (eq_attr "type" "idiv")
+ (eq_attr "mode" "DI"))
+ "imuldiv*74")
+
+
+(define_insn_reservation "r4650_imul" 4
+ (and (eq_attr "cpu" "r4650")
+ (eq_attr "type" "imul,imul3,imadd"))
+ "imuldiv*4")
+
+(define_insn_reservation "r4650_idiv" 36
+ (and (eq_attr "cpu" "r4650")
+ (eq_attr "type" "idiv"))
+ "imuldiv*36")
+
+
+(define_insn_reservation "r4700_imul_si" 8
+ (and (eq_attr "cpu" "r4700")
+ (eq_attr "type" "imul,imul3,imadd")
+ (eq_attr "mode" "SI"))
+ "imuldiv*8")
+
+(define_insn_reservation "r4700_imul_di" 10
+ (and (eq_attr "cpu" "r4700")
+ (eq_attr "type" "imul,imul3,imadd")
+ (eq_attr "mode" "DI"))
+ "imuldiv*10")
+
+
+(define_insn_reservation "r4600_load" 2
+ (and (eq_attr "cpu" "r4600,r4650,r4700")
+ (eq_attr "type" "load,fpload,fpidxload"))
+ "alu")
+
+(define_insn_reservation "r4600_fmove" 1
+ (and (eq_attr "cpu" "r4600,r4650,r4700")
+ (eq_attr "type" "fabs,fneg,fmove"))
+ "alu")
+
+(define_insn_reservation "r4600_fmul_single" 8
+ (and (eq_attr "cpu" "r4600,r4650")
+ (and (eq_attr "type" "fmul,fmadd")
+ (eq_attr "mode" "SF")))
+ "alu")
+
+
+(define_insn_reservation "r4700_fmul_single" 4
+ (and (eq_attr "cpu" "r4700")
+ (and (eq_attr "type" "fmul,fmadd")
+ (eq_attr "mode" "SF")))
+ "alu")
+
+(define_insn_reservation "r4700_fmul_double" 5
+ (and (eq_attr "cpu" "r4700")
+ (and (eq_attr "type" "fmul,fmadd")
+ (eq_attr "mode" "DF")))
+ "alu")
+
+
+(define_insn_reservation "r4600_fdiv_single" 32
+ (and (eq_attr "cpu" "r4600,r4650,r4700")
+ (and (eq_attr "type" "fdiv,frdiv")
+ (eq_attr "mode" "SF")))
+ "alu")
+
+(define_insn_reservation "r4600_fdiv_double" 61
+ (and (eq_attr "cpu" "r4600,r4650,r4700")
+ (and (eq_attr "type" "fdiv,frdiv")
+ (eq_attr "mode" "DF")))
+ "alu")
+
+(define_insn_reservation "r4600_fsqrt_single" 31
+ (and (eq_attr "cpu" "r4600,r4650,r4700")
+ (and (eq_attr "type" "fsqrt,frsqrt")
+ (eq_attr "mode" "SF")))
+ "alu")
+
+(define_insn_reservation "r4600_fsqrt_double" 60
+ (and (eq_attr "cpu" "r4600,r4650,r4700")
+ (and (eq_attr "type" "fsqrt,frsqrt")
+ (eq_attr "mode" "DF")))
+ "alu")
diff --git a/gcc-4.9/gcc/config/mips/4k.md b/gcc-4.9/gcc/config/mips/4k.md
new file mode 100644
index 000000000..9d4304b18
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/4k.md
@@ -0,0 +1,153 @@
+;; DFA-based pipeline descriptions for MIPS32 4K processor family
+;; Contributed by Nigel Stephens (nigel@mips.com)
+;; and David Ung (davidu@mips.com)
+;;
+;; References:
+;; "MIPS32 4K Processor Core Family Software User's Manual,
+;; Doc no: MD00016, Rev 1.18, Nov 15, 2004."
+;;
+;; 4Kc - pipelined multiplier and translation lookaside buffer (TLB)
+;; 4km - pipelined multiplier and block address translator (BAT)
+;; 4kp - non-pipelined multiplier and block address translator (BAT)
+;;
+;; Copyright (C) 2005-2014 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 3, or (at your
+;; option) any later version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+(define_automaton "r4k_cpu, r4k_mdu")
+
+;; Integer execution unit.
+(define_cpu_unit "r4k_ixu_arith" "r4k_cpu")
+(define_cpu_unit "r4k_ixu_mpydiv" "r4k_mdu")
+
+(define_insn_reservation "r4k_int_load" 2
+ (and (eq_attr "cpu" "4kc,4kp")
+ (eq_attr "type" "load"))
+ "r4k_ixu_arith")
+
+(define_insn_reservation "r4k_int_prefetch" 1
+ (and (eq_attr "cpu" "4kc,4kp")
+ (eq_attr "type" "prefetch"))
+ "r4k_ixu_arith")
+
+(define_insn_reservation "r4k_int_store" 1
+ (and (eq_attr "cpu" "4kc,4kp")
+ (eq_attr "type" "store"))
+ "r4k_ixu_arith")
+
+;; 4Kc/4Km
+;; unsigned divide - 8/16/24/32-bit operand have latencies 9/17/25/33
+;; signed divide - 8/16/24/32-bit operand have latencies 10/18/26/34
+(define_insn_reservation "r4k_idiv_4kc" 34
+ (and (eq_attr "cpu" "4kc")
+ (and (eq_attr "type" "idiv")
+ (eq_attr "mode" "!DI")))
+ "r4k_ixu_arith+(r4k_ixu_mpydiv*34)")
+
+;; 4Kp
+;; unsigned divide - 33
+;; signed divide - 33-35
+(define_insn_reservation "r4k_idiv_4kp" 35
+ (and (eq_attr "cpu" "4kp")
+ (and (eq_attr "type" "idiv")
+ (eq_attr "mode" "!DI")))
+ "r4k_ixu_arith+(r4k_ixu_mpydiv*35)")
+
+;; 4Kc/4Km fast 32x32 multiply
+;; 16x32 is faster, but there's no way to detect this
+(define_insn_reservation "r4k_mult_4kc" 2
+ (and (eq_attr "cpu" "4kc")
+ (and (eq_attr "type" "imul,imadd")
+ (eq_attr "mode" "SI")))
+ "r4k_ixu_arith+(r4k_ixu_mpydiv*2)")
+
+;; 4Kc/4Km MUL has 2 cycle latency, but has the special property that it will
+;; stall the integer unit pipeline. MUL 16x16 or 32x16 forces 1 cycle stall,
+;; while MUL 32x32 forces 2 cycle stall. If the next insn uses the result, an
+;; additional stall is forced.
+(define_insn_reservation "r4k_mul_4kc" 4
+ (and (eq_attr "cpu" "4kc")
+ (and (eq_attr "type" "imul3")
+ (eq_attr "mode" "SI")))
+ "(r4k_ixu_arith+r4k_ixu_mpydiv)*3")
+
+;; 4Kp slow iterative 2-op MULT
+;; Latency of 32 if next insn is MADD/MSUB,MFHI/MFLO.
+;; Repeat rate of 33 cycles.
+(define_insn_reservation "r4k_mult_4kp" 32
+ (and (eq_attr "cpu" "4kp")
+ (and (eq_attr "type" "imul")
+ (eq_attr "mode" "SI")))
+ "r4k_ixu_arith+(r4k_ixu_mpydiv*32)")
+
+;; 4Kp slow iterative 3-op MUL
+;; Latency of 32 cycles, but stalls the whole pipeline until complete.
+(define_insn_reservation "r4k_mul_4kp" 32
+ (and (eq_attr "cpu" "4kp")
+ (and (eq_attr "type" "imul3")
+ (eq_attr "mode" "SI")))
+ "(r4k_ixu_arith+r4k_ixu_mpydiv)*32")
+
+;; 4Kp slow iterative MADD
+;; Latency of 34 if next use insn is MADD/MSUB,MFHI/MFLO.
+;; Repeat rate of 35 cycles.
+(define_insn_reservation "r4k_madd_4kp" 34
+ (and (eq_attr "cpu" "4kp")
+ (and (eq_attr "type" "imadd")
+ (eq_attr "mode" "SI")))
+ "r4k_ixu_arith+(r4k_ixu_mpydiv*34)")
+
+;; Move to HI/LO -> MADD/MSUB,MFHI/MFLO has a 1 cycle latency.
+(define_insn_reservation "r4k_int_mthilo" 1
+ (and (eq_attr "cpu" "4kc,4kp")
+ (eq_attr "type" "mthi,mtlo"))
+ "r4k_ixu_arith+r4k_ixu_mpydiv")
+
+;; Move from HI/LO -> integer operation has a 2 cycle latency.
+(define_insn_reservation "r4k_int_mfhilo" 2
+ (and (eq_attr "cpu" "4kc,4kp")
+ (eq_attr "type" "mfhi,mflo"))
+ "r4k_ixu_arith+r4k_ixu_mpydiv")
+
+;; All other integer insns.
+(define_insn_reservation "r4k_int_alu" 1
+ (and (eq_attr "cpu" "4kc,4kp")
+ (eq_attr "type" "arith,condmove,const,logical,move,nop,shift,signext,slt"))
+ "r4k_ixu_arith")
+
+(define_insn_reservation "r4k_int_branch" 1
+ (and (eq_attr "cpu" "4kc,4kp")
+ (eq_attr "type" "branch"))
+ "r4k_ixu_arith")
+
+(define_insn_reservation "r4k_int_jump_4k" 1
+ (and (eq_attr "cpu" "4kc,4kp")
+ (eq_attr "type" "jump,call"))
+ "r4k_ixu_arith")
+
+;; mfcx/mtcx - non FPU
+;; (Disabled until we add cop0 support)
+;; (define_insn_reservation "r4k_int_cop" 2
+;; (and (eq_attr "cpu" "4kc,4kp")
+;; (eq_attr "type" "cop0"))
+;; "r4k_ixu_arith")
+
+;; Unknown or multi - single issue
+(define_insn_reservation "r4k_unknown" 1
+ (and (eq_attr "cpu" "4kc,4kp")
+ (eq_attr "type" "unknown,multi,atomic,syncloop"))
+ "r4k_ixu_arith+r4k_ixu_mpydiv")
diff --git a/gcc-4.9/gcc/config/mips/5000.md b/gcc-4.9/gcc/config/mips/5000.md
new file mode 100644
index 000000000..611749620
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/5000.md
@@ -0,0 +1,80 @@
+;; VR5000 pipeline description.
+;; Copyright (C) 2004-2014 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 3, or (at your
+;; option) any later version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+
+;; This file overrides parts of generic.md. It is derived from the
+;; old define_function_unit description.
+
+;; Loads (integer and FP) and coprocessor transfers: 2-cycle latency.
+(define_insn_reservation "r5k_load" 2
+ (and (eq_attr "cpu" "r5000")
+ (eq_attr "type" "load,fpload,fpidxload,mfc,mtc"))
+ "alu")
+
+;; 32-bit integer multiply/multiply-accumulate: 5 cycles, ties up the
+;; multiply/divide unit for the duration.
+(define_insn_reservation "r5k_imul_si" 5
+ (and (eq_attr "cpu" "r5000")
+ (and (eq_attr "type" "imul,imul3,imadd")
+ (eq_attr "mode" "SI")))
+ "imuldiv*5")
+
+;; 64-bit integer multiply/multiply-accumulate: 9 cycles.
+(define_insn_reservation "r5k_imul_di" 9
+ (and (eq_attr "cpu" "r5000")
+ (and (eq_attr "type" "imul,imul3,imadd")
+ (eq_attr "mode" "DI")))
+ "imuldiv*9")
+
+;; 32-bit integer divide: 36 cycles, non-pipelined.
+(define_insn_reservation "r5k_idiv_si" 36
+ (and (eq_attr "cpu" "r5000")
+ (and (eq_attr "type" "idiv")
+ (eq_attr "mode" "SI")))
+ "imuldiv*36")
+
+;; 64-bit integer divide: 68 cycles, non-pipelined.
+(define_insn_reservation "r5k_idiv_di" 68
+ (and (eq_attr "cpu" "r5000")
+ (and (eq_attr "type" "idiv")
+ (eq_attr "mode" "DI")))
+ "imuldiv*68")
+
+;; Simple FP operations (compare, abs, negate, move): 1 cycle.
+(define_insn_reservation "r5k_fmove" 1
+ (and (eq_attr "cpu" "r5000")
+ (eq_attr "type" "fcmp,fabs,fneg,fmove"))
+ "alu")
+
+;; Single-precision FP multiply / multiply-add: 4 cycles.
+(define_insn_reservation "r5k_fmul_single" 4
+ (and (eq_attr "cpu" "r5000")
+ (and (eq_attr "type" "fmul,fmadd")
+ (eq_attr "mode" "SF")))
+ "alu")
+
+;; Double-precision FP multiply / multiply-add: 5 cycles.
+(define_insn_reservation "r5k_fmul_double" 5
+ (and (eq_attr "cpu" "r5000")
+ (and (eq_attr "type" "fmul,fmadd")
+ (eq_attr "mode" "DF")))
+ "alu")
+
+;; Single-precision divide/sqrt family: 21 cycles.
+(define_insn_reservation "r5k_fdiv_single" 21
+ (and (eq_attr "cpu" "r5000")
+ (and (eq_attr "type" "fdiv,frdiv,fsqrt,frsqrt")
+ (eq_attr "mode" "SF")))
+ "alu")
+
+;; Double-precision sqrt/rsqrt: 36 cycles. NOTE(review): no DF entry
+;; for fdiv/frdiv here — presumably it falls back to generic.md; confirm.
+(define_insn_reservation "r5k_fsqrt_double" 36
+ (and (eq_attr "cpu" "r5000")
+ (and (eq_attr "type" "fsqrt,frsqrt")
+ (eq_attr "mode" "DF")))
+ "alu")
diff --git a/gcc-4.9/gcc/config/mips/5400.md b/gcc-4.9/gcc/config/mips/5400.md
new file mode 100644
index 000000000..f54bacef2
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/5400.md
@@ -0,0 +1,184 @@
+;; Copyright (C) 2002-2014 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+;;
+;; DFA-based pipeline description for 5400
+(define_automaton "vr54")
+;; Two dispatch/execution slots; most reservations below use
+;; "vr54_dp0|vr54_dp1" to model dual issue.
+(define_cpu_unit "vr54_dp0" "vr54")
+(define_cpu_unit "vr54_dp1" "vr54")
+;; Load/store (memory) unit.
+(define_cpu_unit "vr54_mem" "vr54")
+;; Multiply-accumulate unit.
+(define_cpu_unit "vr54_mac" "vr54")
+
+;;
+;; The ordering of the instruction-execution-path/resource-usage
+;; descriptions (also known as reservation RTL) is roughly ordered
+;; based on the define attribute RTL for the "type" classification.
+;; When modifying, remember that the first test that matches is the
+;; reservation used!
+;;
+
+(define_insn_reservation "ir_vr54_unknown" 1
+ (and (eq_attr "cpu" "r5400")
+ (eq_attr "type" "unknown,atomic,syncloop"))
+ "vr54_dp0+vr54_dp1+vr54_mem+vr54_mac")
+
+;; Assume prediction fails.
+(define_insn_reservation "ir_vr54_branch" 3
+ (and (eq_attr "cpu" "r5400")
+ (eq_attr "type" "branch,jump,call"))
+ "vr54_dp0|vr54_dp1")
+
+(define_insn_reservation "ir_vr54_load" 2
+ (and (eq_attr "cpu" "r5400")
+ (eq_attr "type" "load,fpload,fpidxload"))
+ "vr54_mem")
+
+(define_insn_reservation "ir_vr54_store" 1
+ (and (eq_attr "cpu" "r5400")
+ (eq_attr "type" "store"))
+ "vr54_mem")
+
+(define_insn_reservation "ir_vr54_fstore" 1
+ (and (eq_attr "cpu" "r5400")
+ (eq_attr "type" "fpstore,fpidxstore"))
+ "vr54_mem")
+
+
+;; This reservation is for conditional move based on integer
+;; or floating point CC.
+(define_insn_reservation "ir_vr54_condmove" 4
+ (and (eq_attr "cpu" "r5400")
+ (eq_attr "type" "condmove"))
+ "vr54_dp0|vr54_dp1")
+
+;; Move to/from FPU registers
+(define_insn_reservation "ir_vr54_xfer" 2
+ (and (eq_attr "cpu" "r5400")
+ (eq_attr "type" "mfc,mtc"))
+ "vr54_dp0|vr54_dp1")
+
+(define_insn_reservation "ir_vr54_hilo" 1
+ (and (eq_attr "cpu" "r5400")
+ (eq_attr "type" "mthi,mtlo,mfhi,mflo"))
+ "vr54_dp0|vr54_dp1")
+
+(define_insn_reservation "ir_vr54_arith" 1
+ (and (eq_attr "cpu" "r5400")
+ (eq_attr "type" "arith,shift,signext,slt,clz,const,logical,move,nop,trap"))
+ "vr54_dp0|vr54_dp1")
+
+(define_insn_reservation "ir_vr54_imul_si" 3
+ (and (eq_attr "cpu" "r5400")
+ (and (eq_attr "type" "imul,imul3")
+ (eq_attr "mode" "SI")))
+ "vr54_dp0|vr54_dp1")
+
+(define_insn_reservation "ir_vr54_imul_di" 4
+ (and (eq_attr "cpu" "r5400")
+ (and (eq_attr "type" "imul,imul3")
+ (eq_attr "mode" "DI")))
+ "vr54_dp0|vr54_dp1")
+
+;; Integer multiply-accumulate (madd/msub): 3-cycle latency, uses the
+;; MAC unit.  Must match type "imadd" — matching "imul,imul3" here
+;; would duplicate ir_vr54_imul_si above and leave imadd insns with no
+;; reservation on the r5400.
+(define_insn_reservation "ir_vr54_imadd_si" 3
+ (and (eq_attr "cpu" "r5400")
+ (eq_attr "type" "imadd"))
+ "vr54_mac")
+
+(define_insn_reservation "ir_vr54_idiv_si" 42
+ (and (eq_attr "cpu" "r5400")
+ (and (eq_attr "type" "idiv")
+ (eq_attr "mode" "SI")))
+ "vr54_dp0|vr54_dp1")
+
+(define_insn_reservation "ir_vr54_idiv_di" 74
+ (and (eq_attr "cpu" "r5400")
+ (and (eq_attr "type" "idiv")
+ (eq_attr "mode" "DI")))
+ "vr54_dp0|vr54_dp1")
+
+(define_insn_reservation "ir_vr54_fadd" 4
+ (and (eq_attr "cpu" "r5400")
+ (eq_attr "type" "fadd"))
+ "vr54_dp0|vr54_dp1")
+
+(define_insn_reservation "ir_vr54_fmul_sf" 5
+ (and (eq_attr "cpu" "r5400")
+ (and (eq_attr "type" "fmul")
+ (eq_attr "mode" "SF")))
+ "vr54_dp0|vr54_dp1")
+
+(define_insn_reservation "ir_vr54_fmul_df" 6
+ (and (eq_attr "cpu" "r5400")
+ (and (eq_attr "type" "fmul")
+ (eq_attr "mode" "DF")))
+ "vr54_dp0|vr54_dp1")
+
+(define_insn_reservation "ir_vr54_fmadd_sf" 9
+ (and (eq_attr "cpu" "r5400")
+ (and (eq_attr "type" "fmadd")
+ (eq_attr "mode" "SF")))
+ "vr54_dp0|vr54_dp1")
+
+(define_insn_reservation "ir_vr54_fmadd_df" 10
+ (and (eq_attr "cpu" "r5400")
+ (and (eq_attr "type" "fmadd")
+ (eq_attr "mode" "DF")))
+ "vr54_dp0|vr54_dp1")
+
+(define_insn_reservation "ir_vr54_fdiv_sf" 42
+ (and (eq_attr "cpu" "r5400")
+ (and (eq_attr "type" "fdiv,frdiv,fsqrt")
+ (eq_attr "mode" "SF")))
+ "vr54_dp0|vr54_dp1")
+
+(define_insn_reservation "ir_vr54_fdiv_df" 72
+ (and (eq_attr "cpu" "r5400")
+ (and (eq_attr "type" "fdiv,frdiv,fsqrt")
+ (eq_attr "mode" "DF")))
+ "vr54_dp0|vr54_dp1")
+
+(define_insn_reservation "ir_vr54_fabs" 2
+ (and (eq_attr "cpu" "r5400")
+ (eq_attr "type" "fabs,fneg,fmove"))
+ "vr54_dp0|vr54_dp1")
+
+(define_insn_reservation "ir_vr54_fcmp" 2
+ (and (eq_attr "cpu" "r5400")
+ (eq_attr "type" "fcmp"))
+ "vr54_dp0|vr54_dp1")
+
+(define_insn_reservation "ir_vr54_fcvt" 6
+ (and (eq_attr "cpu" "r5400")
+ (eq_attr "type" "fcvt"))
+ "vr54_dp0|vr54_dp1")
+
+(define_insn_reservation "ir_vr54_frsqrt_sf" 61
+ (and (eq_attr "cpu" "r5400")
+ (and (eq_attr "type" "frsqrt")
+ (eq_attr "mode" "SF")))
+ "vr54_dp0|vr54_dp1")
+
+(define_insn_reservation "ir_vr54_frsqrt_df" 121
+ (and (eq_attr "cpu" "r5400")
+ (and (eq_attr "type" "frsqrt")
+ (eq_attr "mode" "DF")))
+ "vr54_dp0|vr54_dp1")
+
+(define_insn_reservation "ir_vr54_multi" 1
+ (and (eq_attr "cpu" "r5400")
+ (eq_attr "type" "multi"))
+ "vr54_dp0+vr54_dp1+vr54_mem+vr54_mac")
diff --git a/gcc-4.9/gcc/config/mips/5500.md b/gcc-4.9/gcc/config/mips/5500.md
new file mode 100644
index 000000000..23cab21a5
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/5500.md
@@ -0,0 +1,227 @@
+;; Copyright (C) 2002-2014 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+;;
+;; DFA-based pipeline description for 5500
+(define_automaton "vr55")
+;; Two dispatch/execution slots ("vr55_dp0|vr55_dp1" below = dual issue).
+(define_cpu_unit "vr55_dp0" "vr55")
+(define_cpu_unit "vr55_dp1" "vr55")
+;; Load/store (memory) unit.
+(define_cpu_unit "vr55_mem" "vr55")
+;; Multiply-accumulate unit (also used by divides and FP mul below).
+(define_cpu_unit "vr55_mac" "vr55")
+;; Floating-point unit.
+(define_cpu_unit "vr55_fp" "vr55")
+;; Branch unit.
+(define_cpu_unit "vr55_bru" "vr55")
+
+;;
+;; The ordering of the instruction-execution-path/resource-usage
+;; descriptions (also known as reservation RTL) is roughly ordered
+;; based on the define attribute RTL for the "type" classification.
+;; When modifying, remember that the first test that matches is the
+;; reservation used!
+;;
+
+(define_insn_reservation "ir_vr55_unknown" 1
+ (and (eq_attr "cpu" "r5500")
+ (eq_attr "type" "unknown,atomic,syncloop"))
+ "vr55_dp0+vr55_dp1+vr55_mem+vr55_mac+vr55_fp+vr55_bru")
+
+;; Assume prediction fails.
+(define_insn_reservation "ir_vr55_branch" 2
+ (and (eq_attr "cpu" "r5500")
+ (eq_attr "type" "branch,jump,call"))
+ "vr55_bru")
+
+(define_insn_reservation "ir_vr55_load" 3
+ (and (eq_attr "cpu" "r5500")
+ (eq_attr "type" "load,fpload,fpidxload"))
+ "vr55_mem")
+
+(define_bypass 4
+ "ir_vr55_load"
+ "ir_vr55_mthilo,ir_vr55_imul_si,ir_vr55_imul_di,ir_vr55_imadd,
+ ir_vr55_idiv_si,ir_vr55_idiv_di")
+
+(define_insn_reservation "ir_vr55_store" 0
+ (and (eq_attr "cpu" "r5500")
+ (eq_attr "type" "store,fpstore,fpidxstore"))
+ "vr55_mem")
+
+;; This reservation is for conditional move based on integer
+;; or floating point CC.
+(define_insn_reservation "ir_vr55_condmove" 2
+ (and (eq_attr "cpu" "r5500")
+ (eq_attr "type" "condmove"))
+ "vr55_dp0|vr55_dp1")
+
+;; Move to/from FPU registers
+(define_insn_reservation "ir_vr55_xfer" 2
+ (and (eq_attr "cpu" "r5500")
+ (eq_attr "type" "mfc,mtc"))
+ "vr55_dp0|vr55_dp1")
+
+(define_insn_reservation "ir_vr55_arith" 1
+ (and (eq_attr "cpu" "r5500")
+ (eq_attr "type" "arith,shift,signext,slt,clz,const,logical,move,nop,trap"))
+ "vr55_dp0|vr55_dp1")
+
+(define_bypass 2
+ "ir_vr55_arith"
+ "ir_vr55_mthilo,ir_vr55_imul_si,ir_vr55_imul_di,ir_vr55_imadd,
+ ir_vr55_idiv_si,ir_vr55_idiv_di")
+
+(define_insn_reservation "ir_vr55_mthilo" 1
+ (and (eq_attr "cpu" "r5500")
+ (eq_attr "type" "mthi,mtlo"))
+ "vr55_mac")
+
+(define_insn_reservation "ir_vr55_mfhilo" 5
+ (and (eq_attr "cpu" "r5500")
+ (eq_attr "type" "mfhi,mflo"))
+ "vr55_mac")
+
+;; The default latency is for the GPR result of a mul. Bypasses handle the
+;; latency of {mul,mult}->{mfhi,mflo}.
+(define_insn_reservation "ir_vr55_imul_si" 5
+ (and (eq_attr "cpu" "r5500")
+ (and (eq_attr "type" "imul,imul3")
+ (eq_attr "mode" "SI")))
+ "vr55_mac")
+
+;; The default latency is for pre-reload scheduling and handles the case
+;; where a pseudo destination will be stored in a GPR (as it usually is).
+;; The delay includes the latency of the dmult itself and the anticipated
+;; mflo or mfhi.
+;;
+;; Once the mflo or mfhi has been created, bypasses handle the latency
+;; between it and the dmult.
+(define_insn_reservation "ir_vr55_imul_di" 9
+ (and (eq_attr "cpu" "r5500")
+ (and (eq_attr "type" "imul,imul3")
+ (eq_attr "mode" "DI")))
+ "vr55_mac*4")
+
+;; The default latency is as for ir_vr55_imul_si.
+(define_insn_reservation "ir_vr55_imadd" 5
+ (and (eq_attr "cpu" "r5500")
+ (eq_attr "type" "imadd"))
+ "vr55_mac")
+
+(define_bypass 1
+ "ir_vr55_imul_si,ir_vr55_imadd"
+ "ir_vr55_imadd"
+ "mips_linked_madd_p")
+
+(define_bypass 2
+ "ir_vr55_imul_si,ir_vr55_imadd"
+ "ir_vr55_mfhilo")
+
+(define_bypass 4
+ "ir_vr55_imul_di"
+ "ir_vr55_mfhilo")
+
+;; Divide algorithm is early out with best latency of 7 pcycles.
+;; Use worst case for scheduling purposes.
+(define_insn_reservation "ir_vr55_idiv_si" 42
+ (and (eq_attr "cpu" "r5500")
+ (and (eq_attr "type" "idiv")
+ (eq_attr "mode" "SI")))
+ "vr55_mac")
+
+(define_insn_reservation "ir_vr55_idiv_di" 74
+ (and (eq_attr "cpu" "r5500")
+ (and (eq_attr "type" "idiv")
+ (eq_attr "mode" "DI")))
+ "vr55_mac")
+
+(define_insn_reservation "ir_vr55_fadd" 4
+ (and (eq_attr "cpu" "r5500")
+ (eq_attr "type" "fadd"))
+ "vr55_fp")
+
+(define_insn_reservation "ir_vr55_fmul_sf" 5
+ (and (eq_attr "cpu" "r5500")
+ (and (eq_attr "type" "fmul")
+ (eq_attr "mode" "SF")))
+ "vr55_mac")
+
+(define_insn_reservation "ir_vr55_fmul_df" 6
+ (and (eq_attr "cpu" "r5500")
+ (and (eq_attr "type" "fmul")
+ (eq_attr "mode" "DF")))
+ "vr55_mac")
+
+(define_insn_reservation "ir_vr55_fmadd_sf" 9
+ (and (eq_attr "cpu" "r5500")
+ (and (eq_attr "type" "fmadd")
+ (eq_attr "mode" "SF")))
+ "vr55_mac")
+
+(define_insn_reservation "ir_vr55_fmadd_df" 10
+ (and (eq_attr "cpu" "r5500")
+ (and (eq_attr "type" "fmadd")
+ (eq_attr "mode" "DF")))
+ "vr55_mac")
+
+(define_insn_reservation "ir_vr55_fdiv_sf" 30
+ (and (eq_attr "cpu" "r5500")
+ (and (eq_attr "type" "fdiv,frdiv,fsqrt")
+ (eq_attr "mode" "SF")))
+ "vr55_mac")
+
+(define_insn_reservation "ir_vr55_fdiv_df" 59
+ (and (eq_attr "cpu" "r5500")
+ (and (eq_attr "type" "fdiv,frdiv,fsqrt")
+ (eq_attr "mode" "DF")))
+ "vr55_mac")
+
+(define_insn_reservation "ir_vr55_fabs" 2
+ (and (eq_attr "cpu" "r5500")
+ (eq_attr "type" "fabs,fneg,fmove"))
+ "vr55_fp")
+
+(define_insn_reservation "ir_vr55_fcmp" 2
+ (and (eq_attr "cpu" "r5500")
+ (eq_attr "type" "fcmp"))
+ "vr55_fp")
+
+(define_insn_reservation "ir_vr55_fcvt_sf" 4
+ (and (eq_attr "cpu" "r5500")
+ (and (eq_attr "type" "fcvt")
+ (eq_attr "mode" "SF")))
+ "vr55_fp")
+
+(define_insn_reservation "ir_vr55_fcvt_df" 6
+ (and (eq_attr "cpu" "r5500")
+ (and (eq_attr "type" "fcvt")
+ (eq_attr "mode" "DF")))
+ "vr55_fp")
+
+(define_insn_reservation "ir_vr55_frsqrt_sf" 60
+ (and (eq_attr "cpu" "r5500")
+ (and (eq_attr "type" "frsqrt")
+ (eq_attr "mode" "SF")))
+ "vr55_mac")
+
+(define_insn_reservation "ir_vr55_frsqrt_df" 118
+ (and (eq_attr "cpu" "r5500")
+ (and (eq_attr "type" "frsqrt")
+ (eq_attr "mode" "DF")))
+ "vr55_mac")
+
+(define_insn_reservation "ir_vr55_multi" 1
+ (and (eq_attr "cpu" "r5500")
+ (eq_attr "type" "multi"))
+ "vr55_dp0+vr55_dp1+vr55_mem+vr55_mac+vr55_fp+vr55_bru")
diff --git a/gcc-4.9/gcc/config/mips/5k.md b/gcc-4.9/gcc/config/mips/5k.md
new file mode 100644
index 000000000..397c74c93
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/5k.md
@@ -0,0 +1,229 @@
+;; DFA-based pipeline descriptions for MIPS32 5K processor family
+;; Contributed by David Ung (davidu@mips.com)
+;; and Nigel Stephens (nigel@mips.com)
+;;
+;; References:
+;; "MIPS64 5K Processor Core Family Software User's Manual,
+;; Doc no: MD00012, Rev 2.09, Jan 28, 2005."
+;;
+;; 5Kc - Single issue with no floating point unit.
+;; 5kf - Separate floating point pipe which can dual-issue with the
+;; integer pipe.
+;;
+;; Copyright (C) 2005-2014 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 3, or (at your
+;; option) any later version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; Separate automata for the integer pipe, the multiply/divide unit,
+;; and the 5Kf floating-point pipe.
+(define_automaton "r5k_cpu, r5k_mdu, r5k_fpu")
+
+;; Integer execution unit.
+(define_cpu_unit "r5k_ixu_arith" "r5k_cpu")
+;; Multiply/divide unit.
+(define_cpu_unit "r5k_ixu_mpydiv" "r5k_mdu")
+;; FPU arithmetic unit — present only on the 5Kf (the 5Kc has no FPU).
+(define_cpu_unit "r5kf_fpu_arith" "r5k_fpu")
+
+(define_insn_reservation "r5k_int_load" 2
+ (and (eq_attr "cpu" "5kc,5kf")
+ (eq_attr "type" "load"))
+ "r5k_ixu_arith")
+
+(define_insn_reservation "r5k_int_prefetch" 1
+ (and (eq_attr "cpu" "5kc,5kf")
+ (eq_attr "type" "prefetch,prefetchx"))
+ "r5k_ixu_arith")
+
+(define_insn_reservation "r5k_int_store" 1
+ (and (eq_attr "cpu" "5kc,5kf")
+ (eq_attr "type" "store"))
+ "r5k_ixu_arith")
+
+;; Divides
+(define_insn_reservation "r5k_int_divsi" 34
+ (and (eq_attr "cpu" "5kc,5kf")
+ (and (eq_attr "type" "idiv")
+ (eq_attr "mode" "!DI")))
+ "r5k_ixu_arith+(r5k_ixu_mpydiv*34)")
+
+(define_insn_reservation "r5k_int_divdi" 66
+ (and (eq_attr "cpu" "5kc,5kf")
+ (and (eq_attr "type" "idiv")
+ (eq_attr "mode" "DI")))
+ "r5k_ixu_arith+(r5k_ixu_mpydiv*66)")
+
+;; 32x32 multiply
+;; 32x16 is faster, but there's no way to detect this
+(define_insn_reservation "r5k_int_mult" 2
+ (and (eq_attr "cpu" "5kc,5kf")
+ (and (eq_attr "type" "imul,imadd")
+ (eq_attr "mode" "SI")))
+ "r5k_ixu_arith+(r5k_ixu_mpydiv*2)")
+
+;; 64x64 multiply
+(define_insn_reservation "r5k_int_mult_64" 9
+ (and (eq_attr "cpu" "5kc,5kf")
+ (and (eq_attr "type" "imul,imadd")
+ (eq_attr "mode" "DI")))
+ "r5k_ixu_arith+(r5k_ixu_mpydiv*2)")
+
+;; 3 operand MUL 32x32
+(define_insn_reservation "r5k_int_mul" 4
+ (and (eq_attr "cpu" "5kc,5kf")
+ (and (eq_attr "type" "imul3")
+ (eq_attr "mode" "SI")))
+ "r5k_ixu_arith+(r5k_ixu_mpydiv*2)")
+
+;; Move to HI/LO -> MADD/MSUB,MFHI/MFLO has a 1 cycle latency.
+(define_insn_reservation "r5k_int_mthilo" 1
+ (and (eq_attr "cpu" "5kc,5kf")
+ (eq_attr "type" "mthi,mtlo"))
+ "r5k_ixu_arith+r5k_ixu_mpydiv")
+
+;; Move from HI/LO -> integer operation has a 2 cycle latency.
+(define_insn_reservation "r5k_int_mfhilo" 2
+ (and (eq_attr "cpu" "5kc,5kf")
+ (eq_attr "type" "mfhi,mflo"))
+ "r5k_ixu_arith+r5k_ixu_mpydiv")
+
+;; All other integer insns.
+(define_insn_reservation "r5k_int_alu" 1
+ (and (eq_attr "cpu" "5kc,5kf")
+ (eq_attr "type" "arith,condmove,const,logical,move,nop,shift,signext,slt"))
+ "r5k_ixu_arith")
+
+(define_insn_reservation "r5k_int_branch" 1
+ (and (eq_attr "cpu" "5kc,5kf")
+ (eq_attr "type" "branch"))
+ "r5k_ixu_arith")
+
+;; JR/JALR always cause one pipeline bubble because of interlock.
+(define_insn_reservation "r5k_int_jump" 2
+ (and (eq_attr "cpu" "5kc,5kf")
+ (eq_attr "type" "jump,call"))
+ "r5k_ixu_arith")
+
+;; Any -> JR/JALR (without dependency) : 1 clock issue delay
+;; Any -> JR/JALR (with dependency) : 2 clock issue delay
+;; load -> JR/JALR (with dependency) : 3 clock issue delay
+;; mfhilo -> JR/JALR (with dependency) : 3 clock issue delay
+;; mul -> JR/JALR (with dependency) : 3 clock issue delay
+(define_bypass 2 "r5k_int_alu" "r5k_int_jump")
+(define_bypass 3 "r5k_int_load" "r5k_int_jump")
+(define_bypass 3 "r5k_int_mfhilo" "r5k_int_jump")
+(define_bypass 3 "r5k_int_mul" "r5k_int_jump")
+
+;; Unknown or multi - single issue
+(define_insn_reservation "r5k_int_unknown" 1
+ (and (eq_attr "cpu" "5kc,5kf")
+ (eq_attr "type" "unknown,multi,atomic,syncloop"))
+ "r5k_ixu_arith+r5k_ixu_mpydiv")
+
+
+;; Floating Point Instructions
+;; The 5Kf is a partial dual-issue cpu which can dual issue an integer
+;; and floating-point instruction in the same cycle.
+
+;; fadd, fabs, fneg
+(define_insn_reservation "r5kf_fadd" 4
+ (and (eq_attr "cpu" "5kf")
+ (eq_attr "type" "fadd,fabs,fneg"))
+ "r5kf_fpu_arith")
+
+;; fmove, fcmove
+(define_insn_reservation "r5kf_fmove" 4
+ (and (eq_attr "cpu" "5kf")
+ (eq_attr "type" "fmove"))
+ "r5kf_fpu_arith")
+
+;; fload
+(define_insn_reservation "r5kf_fload" 3
+ (and (eq_attr "cpu" "5kf")
+ (eq_attr "type" "fpload,fpidxload"))
+ "r5kf_fpu_arith")
+
+;; fstore
+(define_insn_reservation "r5kf_fstore" 1
+ (and (eq_attr "cpu" "5kf")
+ (eq_attr "type" "fpstore"))
+ "r5kf_fpu_arith")
+
+;; fmul, fmadd
+(define_insn_reservation "r5kf_fmul_sf" 4
+ (and (eq_attr "cpu" "5kf")
+ (and (eq_attr "type" "fmul,fmadd")
+ (eq_attr "mode" "SF")))
+ "r5kf_fpu_arith")
+
+(define_insn_reservation "r5kf_fmul_df" 5
+ (and (eq_attr "cpu" "5kf")
+ (and (eq_attr "type" "fmul,fmadd")
+ (eq_attr "mode" "DF")))
+ "r5kf_fpu_arith*2")
+
+;; fdiv, fsqrt, frsqrt
+(define_insn_reservation "r5kf_fdiv_sf" 17
+ (and (eq_attr "cpu" "5kf")
+ (and (eq_attr "type" "fdiv,fsqrt,frsqrt")
+ (eq_attr "mode" "SF")))
+ "r5kf_fpu_arith*14")
+
+(define_insn_reservation "r5kf_fdiv_df" 32
+ (and (eq_attr "cpu" "5kf")
+ (and (eq_attr "type" "fdiv,fsqrt")
+ (eq_attr "mode" "DF")))
+ "r5kf_fpu_arith*29")
+
+;; frsqrt
+(define_insn_reservation "r5kf_frsqrt_df" 35
+ (and (eq_attr "cpu" "5kf")
+ (and (eq_attr "type" "frsqrt")
+ (eq_attr "mode" "DF")))
+ "r5kf_fpu_arith*31")
+
+;; fcmp
+(define_insn_reservation "r5kf_fcmp" 2
+ (and (eq_attr "cpu" "5kf")
+ (eq_attr "type" "fcmp"))
+ "r5kf_fpu_arith")
+
+;; fcmp -> movf.fmt & movt.fmt bypass (dependency must be on condition)
+(define_bypass 1 "r5kf_fcmp" "r5kf_fmove")
+
+;; fcvt (cvt.d.s, cvt.[sd].[wl])
+(define_insn_reservation "r5kf_fcvt_d2s" 4
+ (and (eq_attr "cpu" "5kf")
+ (and (eq_attr "type" "fcvt")
+ (eq_attr "cnv_mode" "I2S,I2D,S2D")))
+ "r5kf_fpu_arith")
+
+;; fcvt (cvt.s.d): double -> single conversion, 6-cycle latency.
+;; Applies to the 5Kf only — the 5Kc has no FPU, so testing cpu "5kc"
+;; here would leave D2S conversions on the 5Kf without a reservation.
+(define_insn_reservation "r5kf_fcvt_s2d" 6
+ (and (eq_attr "cpu" "5kf")
+ (and (eq_attr "type" "fcvt")
+ (eq_attr "cnv_mode" "D2S")))
+ "r5kf_fpu_arith")
+
+;; fcvt (cvt.[wl].[sd], etc)
+(define_insn_reservation "r5kf_fcvt_f2i" 5
+ (and (eq_attr "cpu" "5kf")
+ (and (eq_attr "type" "fcvt")
+ (eq_attr "cnv_mode" "S2I,D2I")))
+ "r5kf_fpu_arith")
+
+;; fxfer (mfc1, mfhc1, mtc1, mthc1) - single issue
+(define_insn_reservation "r5kf_fxfer" 2
+ (and (eq_attr "cpu" "5kf")
+ (eq_attr "type" "mfc,mtc"))
+ "r5k_ixu_arith+r5kf_fpu_arith")
diff --git a/gcc-4.9/gcc/config/mips/6000.md b/gcc-4.9/gcc/config/mips/6000.md
new file mode 100644
index 000000000..32bc31601
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/6000.md
@@ -0,0 +1,56 @@
+;; R6000 pipeline description.
+;; Copyright (C) 2004-2014 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 3, or (at your
+;; option) any later version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+
+;; This file overrides parts of generic.md. It is derived from the
+;; old define_function_unit description.
+
+;; FP compare: 2-cycle latency.
+(define_insn_reservation "r6k_fcmp" 2
+ (and (eq_attr "cpu" "r6000")
+ (eq_attr "type" "fcmp"))
+ "alu")
+
+;; FP add/subtract: 3 cycles.
+(define_insn_reservation "r6k_fadd" 3
+ (and (eq_attr "cpu" "r6000")
+ (eq_attr "type" "fadd"))
+ "alu")
+
+;; Single-precision FP multiply / multiply-add: 5 cycles.
+(define_insn_reservation "r6k_fmul_single" 5
+ (and (eq_attr "cpu" "r6000")
+ (and (eq_attr "type" "fmul,fmadd")
+ (eq_attr "mode" "SF")))
+ "alu")
+
+;; Double-precision FP multiply / multiply-add: 6 cycles.
+(define_insn_reservation "r6k_fmul_double" 6
+ (and (eq_attr "cpu" "r6000")
+ (and (eq_attr "type" "fmul,fmadd")
+ (eq_attr "mode" "DF")))
+ "alu")
+
+;; Single-precision FP divide: 15 cycles.
+(define_insn_reservation "r6k_fdiv_single" 15
+ (and (eq_attr "cpu" "r6000")
+ (and (eq_attr "type" "fdiv,frdiv")
+ (eq_attr "mode" "SF")))
+ "alu")
+
+;; Double-precision FP divide: 16 cycles.
+(define_insn_reservation "r6k_fdiv_double" 16
+ (and (eq_attr "cpu" "r6000")
+ (and (eq_attr "type" "fdiv,frdiv")
+ (eq_attr "mode" "DF")))
+ "alu")
diff --git a/gcc-4.9/gcc/config/mips/7000.md b/gcc-4.9/gcc/config/mips/7000.md
new file mode 100644
index 000000000..6f020c58a
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/7000.md
@@ -0,0 +1,214 @@
+;; DFA-based pipeline description for the RM7000.
+;; Copyright (C) 2003-2014 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 3, or (at your
+;; option) any later version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; .........................
+;;
+;; The RM7000 is a dual-issue processor that can bundle instructions as:
+;; {arith|load|store}{arith|imul|idiv|branch|float}
+;;
+;; Reference:
+;; "RM7000 Family User Manual, PMC-2002296"
+;;
+;; .........................
+
+;; Use three automata to isolate long latency operations, reducing space.
+(define_automaton "rm7000_other, rm7000_fdiv, rm7000_idiv")
+
+;;
+;; Describe the resources.
+;;
+
+;; Global
+(define_cpu_unit "rm7_iss0,rm7_iss1" "rm7000_other")
+
+;; Integer execution unit (M-Pipe).
+(define_cpu_unit "ixum_addsub_agen" "rm7000_other")
+
+;; Integer execution unit (F-Pipe).
+(define_cpu_unit "ixuf_addsub" "rm7000_other")
+(define_cpu_unit "ixuf_branch" "rm7000_other")
+(define_cpu_unit "ixuf_mpydiv" "rm7000_other")
+(define_cpu_unit "ixuf_mpydiv_iter" "rm7000_idiv")
+;; Floating-point unit (F-Pipe).
+(define_cpu_unit "fxuf_add" "rm7000_other")
+(define_cpu_unit "fxuf_mpy" "rm7000_other")
+(define_cpu_unit "fxuf_mpy_iter" "rm7000_fdiv")
+(define_cpu_unit "fxuf_divsqrt" "rm7000_other")
+(define_cpu_unit "fxuf_divsqrt_iter" "rm7000_fdiv")
+
+(exclusion_set "ixuf_addsub"
+ "ixuf_branch,ixuf_mpydiv,fxuf_add,fxuf_mpy,fxuf_divsqrt")
+(exclusion_set "ixuf_branch" "ixuf_mpydiv,fxuf_add,fxuf_mpy,fxuf_divsqrt")
+(exclusion_set "ixuf_mpydiv" "fxuf_add,fxuf_mpy,fxuf_divsqrt")
+(exclusion_set "fxuf_add" "fxuf_mpy,fxuf_divsqrt")
+(exclusion_set "fxuf_mpy" "fxuf_divsqrt")
+
+;; After branch any insn cannot be issued.
+(absence_set "rm7_iss0,rm7_iss1" "ixuf_branch")
+
+;;
+;; Define reservations for unit name mnemonics or combinations.
+;;
+
+(define_reservation "rm7_iss" "rm7_iss0|rm7_iss1")
+(define_reservation "rm7_single_dispatch" "rm7_iss0+rm7_iss1")
+
+(define_reservation "rm7_iaddsub" "rm7_iss+(ixum_addsub_agen|ixuf_addsub)")
+(define_reservation "rm7_imem" "rm7_iss+ixum_addsub_agen")
+(define_reservation "rm7_impydiv" "rm7_iss+ixuf_mpydiv")
+(define_reservation "rm7_impydiv_iter" "ixuf_mpydiv_iter")
+(define_reservation "rm7_branch" "rm7_iss+ixuf_branch")
+
+(define_reservation "rm7_fpadd" "rm7_iss+fxuf_add")
+(define_reservation "rm7_fpmpy" "rm7_iss+fxuf_mpy")
+(define_reservation "rm7_fpmpy_iter" "fxuf_mpy_iter")
+(define_reservation "rm7_fpdivsqr" "rm7_iss+fxuf_divsqrt")
+(define_reservation "rm7_fpdivsqr_iter" "fxuf_divsqrt_iter")
+
+;;
+;; Describe instruction reservations for integer operations.
+;;
+
+(define_insn_reservation "rm7_int_other" 1
+ (and (eq_attr "cpu" "r7000")
+ (eq_attr "type" "arith,shift,signext,slt,clz,const,condmove,logical,move,nop,trap"))
+ "rm7_iaddsub")
+
+(define_insn_reservation "rm7_ld" 2
+ (and (eq_attr "cpu" "r7000")
+ (eq_attr "type" "load,fpload,fpidxload"))
+ "rm7_imem")
+
+(define_insn_reservation "rm7_st" 1
+ (and (eq_attr "cpu" "r7000")
+ (eq_attr "type" "store,fpstore,fpidxstore"))
+ "rm7_imem")
+
+(define_insn_reservation "rm7_idiv_si" 36
+ (and (eq_attr "cpu" "r7000")
+ (and (eq_attr "type" "idiv")
+ (eq_attr "mode" "SI")))
+ "rm7_impydiv+(rm7_impydiv_iter*36)")
+
+(define_insn_reservation "rm7_idiv_di" 68
+ (and (eq_attr "cpu" "r7000")
+ (and (eq_attr "type" "idiv")
+ (eq_attr "mode" "DI")))
+ "rm7_impydiv+(rm7_impydiv_iter*68)")
+
+(define_insn_reservation "rm7_impy_si_mult" 5
+ (and (eq_attr "cpu" "r7000")
+ (and (eq_attr "type" "imul,imadd")
+ (eq_attr "mode" "SI")))
+ "rm7_impydiv+(rm7_impydiv_iter*3)")
+
+;; There are an additional 2 stall cycles.
+(define_insn_reservation "rm7_impy_si_mul" 2
+ (and (eq_attr "cpu" "r7000")
+ (and (eq_attr "type" "imul3")
+ (eq_attr "mode" "SI")))
+ "rm7_impydiv")
+
+(define_insn_reservation "rm7_impy_di" 9
+ (and (eq_attr "cpu" "r7000")
+ (and (eq_attr "type" "imul,imul3")
+ (eq_attr "mode" "DI")))
+ "rm7_impydiv+(rm7_impydiv_iter*8)")
+
+;; Move to/from HI/LO.
+(define_insn_reservation "rm7_mthilo" 3
+ (and (eq_attr "cpu" "r7000")
+ (eq_attr "type" "mthi,mtlo"))
+ "rm7_impydiv")
+
+(define_insn_reservation "rm7_mfhilo" 1
+ (and (eq_attr "cpu" "r7000")
+ (eq_attr "type" "mfhi,mflo"))
+ "rm7_impydiv")
+
+;; Move to/from fp coprocessor.
+(define_insn_reservation "rm7_ixfer" 2
+ (and (eq_attr "cpu" "r7000")
+ (eq_attr "type" "mfc,mtc"))
+ "rm7_iaddsub")
+
+(define_insn_reservation "rm7_ibr" 3
+ (and (eq_attr "cpu" "r7000")
+ (eq_attr "type" "branch,jump,call"))
+ "rm7_branch")
+
+;;
+;; Describe instruction reservations for the floating-point operations.
+;;
+(define_insn_reservation "rm7_fp_quick" 4
+ (and (eq_attr "cpu" "r7000")
+ (eq_attr "type" "fneg,fcmp,fabs,fmove"))
+ "rm7_fpadd")
+
+(define_insn_reservation "rm7_fp_other" 4
+ (and (eq_attr "cpu" "r7000")
+ (eq_attr "type" "fadd"))
+ "rm7_fpadd")
+
+(define_insn_reservation "rm7_fp_cvt" 4
+ (and (eq_attr "cpu" "r7000")
+ (eq_attr "type" "fcvt"))
+ "rm7_fpadd")
+
+(define_insn_reservation "rm7_fp_divsqrt_df" 36
+ (and (eq_attr "cpu" "r7000")
+ (and (eq_attr "type" "fdiv,frdiv,fsqrt")
+ (eq_attr "mode" "DF")))
+ "rm7_fpdivsqr+(rm7_fpdivsqr_iter*36)")
+
+(define_insn_reservation "rm7_fp_divsqrt_sf" 21
+ (and (eq_attr "cpu" "r7000")
+ (and (eq_attr "type" "fdiv,frdiv,fsqrt")
+ (eq_attr "mode" "SF")))
+ "rm7_fpdivsqr+(rm7_fpdivsqr_iter*21)")
+
+(define_insn_reservation "rm7_fp_rsqrt_df" 68
+ (and (eq_attr "cpu" "r7000")
+ (and (eq_attr "type" "frsqrt")
+ (eq_attr "mode" "DF")))
+ "rm7_fpdivsqr+(rm7_fpdivsqr_iter*68)")
+
+(define_insn_reservation "rm7_fp_rsqrt_sf" 38
+ (and (eq_attr "cpu" "r7000")
+ (and (eq_attr "type" "frsqrt")
+ (eq_attr "mode" "SF")))
+ "rm7_fpdivsqr+(rm7_fpdivsqr_iter*38)")
+
+(define_insn_reservation "rm7_fp_mpy_sf" 4
+ (and (eq_attr "cpu" "r7000")
+ (and (eq_attr "type" "fmul,fmadd")
+ (eq_attr "mode" "SF")))
+ "rm7_fpmpy+rm7_fpmpy_iter")
+
+(define_insn_reservation "rm7_fp_mpy_df" 5
+ (and (eq_attr "cpu" "r7000")
+ (and (eq_attr "type" "fmul,fmadd")
+ (eq_attr "mode" "DF")))
+ "rm7_fpmpy+(rm7_fpmpy_iter*2)")
+
+;; Force single-dispatch for unknown or multi.
+(define_insn_reservation "rm7_unknown" 1
+ (and (eq_attr "cpu" "r7000")
+ (eq_attr "type" "unknown,multi,atomic,syncloop"))
+ "rm7_single_dispatch")
diff --git a/gcc-4.9/gcc/config/mips/74k.md b/gcc-4.9/gcc/config/mips/74k.md
new file mode 100644
index 000000000..e07c190db
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/74k.md
@@ -0,0 +1,501 @@
+;; DFA-based pipeline description for MIPS32 model 74k.
+;; Contributed by MIPS Technologies and CodeSourcery.
+;;
+;; Reference:
+;; "MIPS32 74K Microarchitecture Specification Rev. 01.02 Jun 15, 2006"
+;; "MIPS32 74Kf Processor Core Datasheet Jun 2, 2006"
+;;
+;; Copyright (C) 2007-2014 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 3, or (at your
+;; option) any later version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+(define_automaton "r74k_mdu_pipe, r74k_alu_pipe, r74k_agen_pipe, r74k_fpu")
+(define_cpu_unit "r74k_mul" "r74k_mdu_pipe")
+(define_cpu_unit "r74k_alu" "r74k_alu_pipe")
+(define_cpu_unit "r74k_agen" "r74k_agen_pipe")
+(define_cpu_unit "r74k_fpu_arith" "r74k_fpu")
+(define_cpu_unit "r74k_fpu_ldst" "r74k_fpu")
+
+;; --------------------------------------------------------------
+;; Producers
+;; --------------------------------------------------------------
+
+;; ALU: Logicals/Arithmetics
+;; - Logicals, move (addu/addiu with rt = 0), Set less than,
+;; sign extend - 1 cycle
+(define_insn_reservation "r74k_int_logical" 1
+ (and (eq_attr "cpu" "74kc,74kf2_1,74kf1_1,74kf3_2")
+ (eq_attr "type" "logical,move,signext,slt"))
+ "r74k_alu")
+
+;; - Arithmetics - 2 cycles
+(define_insn_reservation "r74k_int_arith" 2
+ (and (eq_attr "cpu" "74kc,74kf2_1,74kf1_1,74kf3_2")
+ (eq_attr "type" "arith,const,shift,clz"))
+ "r74k_alu")
+
+(define_insn_reservation "r74k_int_nop" 0
+ (and (eq_attr "cpu" "74kc,74kf2_1,74kf1_1,74kf3_2")
+ (eq_attr "type" "nop"))
+ "nothing")
+
+(define_insn_reservation "r74k_int_cmove" 4
+ (and (eq_attr "cpu" "74kc,74kf2_1,74kf1_1,74kf3_2")
+ (eq_attr "type" "condmove"))
+ "r74k_agen*2")
+
+;; MDU: fully pipelined multiplier
+;; mult - delivers result to hi/lo in 4 cycles (pipelined)
+(define_insn_reservation "r74k_int_mult" 4
+ (and (eq_attr "cpu" "74kc,74kf2_1,74kf1_1,74kf3_2")
+ (eq_attr "type" "imul"))
+ "r74k_alu+r74k_mul")
+
+;; madd, msub - delivers result to hi/lo in 4 cycles (pipelined)
+(define_insn_reservation "r74k_int_madd" 4
+ (and (eq_attr "cpu" "74kc,74kf2_1,74kf1_1,74kf3_2")
+ (eq_attr "type" "imadd"))
+ "r74k_alu+r74k_mul")
+
+;; mul - delivers result to general register in 7 cycles
+(define_insn_reservation "r74k_int_mul3" 7
+ (and (eq_attr "cpu" "74kc,74kf2_1,74kf1_1,74kf3_2")
+ (eq_attr "type" "imul3"))
+ "r74k_alu+r74k_mul")
+
+;; mfhi, mflo, mflhxu - deliver result to gpr in 7 cycles
+(define_insn_reservation "r74k_int_mfhilo" 7
+ (and (eq_attr "cpu" "74kc,74kf2_1,74kf1_1,74kf3_2")
+ (eq_attr "type" "mfhi,mflo"))
+ "r74k_alu+r74k_mul")
+
+;; mthi, mtlo, mtlhx - deliver result to hi/lo, thence madd, handled as bypass
+(define_insn_reservation "r74k_int_mthilo" 7
+ (and (eq_attr "cpu" "74kc,74kf2_1,74kf1_1,74kf3_2")
+ (eq_attr "type" "mthi,mtlo"))
+ "r74k_alu+r74k_mul")
+
+;; div - default to 50 cycles for 32bit operands. Faster for 8 bit,
+;; but is tricky to identify.
+(define_insn_reservation "r74k_int_div" 50
+ (and (eq_attr "cpu" "74kc,74kf2_1,74kf1_1,74kf3_2")
+ (eq_attr "type" "idiv"))
+ "r74k_alu+r74k_mul*50")
+
+;; call
+(define_insn_reservation "r74k_int_call" 1
+ (and (eq_attr "cpu" "74kc,74kf2_1,74kf1_1,74kf3_2")
+ (eq_attr "type" "call"))
+ "r74k_agen")
+
+;; branch/jump
+(define_insn_reservation "r74k_int_jump" 1
+ (and (eq_attr "cpu" "74kc,74kf2_1,74kf1_1,74kf3_2")
+ (eq_attr "type" "branch,jump"))
+ "r74k_agen")
+
+;; loads: lb, lbu, lh, lhu, ll, lw, lwl, lwr, lwpc, lwxs
+;; prefetch: prefetch, prefetchx
+(define_insn_reservation "r74k_int_load" 3
+ (and (eq_attr "cpu" "74kc,74kf2_1,74kf1_1,74kf3_2")
+ (eq_attr "type" "load,prefetch,prefetchx"))
+ "r74k_agen")
+
+;; stores
+(define_insn_reservation "r74k_int_store" 1
+ (and (eq_attr "cpu" "74kc,74kf2_1,74kf1_1,74kf3_2")
+ (eq_attr "type" "store"))
+ "r74k_agen")
+
+
+;; Unknowns - Currently these include blockage, consttable and alignment
+;; rtls. They do not really affect scheduling latency, (blockage
+;; affects scheduling via log links, but not used here).
+;;
+(define_insn_reservation "r74k_unknown" 1
+ (and (eq_attr "cpu" "74kc,74kf2_1,74kf1_1,74kf3_2")
+ (eq_attr "type" "unknown,atomic,syncloop"))
+ "r74k_alu")
+
+(define_insn_reservation "r74k_multi" 10
+ (and (eq_attr "cpu" "74kc,74kf2_1,74kf1_1,74kf3_2")
+ (eq_attr "type" "multi"))
+ "(r74k_alu+r74k_agen)*10")
+
+;; --------------------------------------------------------------
+;; Bypass to Consumer
+;; --------------------------------------------------------------
+
+;; load->next use : 3 cycles (Default)
+;; load->load base: 4 cycles
+;; load->store base: 4 cycles
+(define_bypass 4 "r74k_int_load" "r74k_int_load")
+(define_bypass 4 "r74k_int_load" "r74k_int_store" "!mips_store_data_bypass_p")
+
+;; logical/move/slt/signext->next use : 1 cycle (Default)
+;; logical/move/slt/signext->load base: 2 cycles
+;; logical/move/slt/signext->store base: 2 cycles
+(define_bypass 2 "r74k_int_logical" "r74k_int_load")
+(define_bypass 2 "r74k_int_logical" "r74k_int_store"
+ "!mips_store_data_bypass_p")
+
+;; arith->next use : 2 cycles (Default)
+;; arith->load base: 3 cycles
+;; arith->store base: 3 cycles
+(define_bypass 3 "r74k_int_arith" "r74k_int_load")
+(define_bypass 3 "r74k_int_arith" "r74k_int_store" "!mips_store_data_bypass_p")
+
+;; cmove->next use : 4 cycles (Default)
+;; cmove->load base: 5 cycles
+;; cmove->store base: 5 cycles
+(define_bypass 5 "r74k_int_cmove" "r74k_int_load")
+(define_bypass 5 "r74k_int_cmove" "r74k_int_store"
+ "!mips_store_data_bypass_p")
+
+;; mult/madd/msub->int_mfhilo : 4 cycles (default)
+;; mult->madd/msub : 1 cycle
+;; madd/msub->madd/msub : 1 cycle
+(define_bypass 1 "r74k_int_mult,r74k_int_mul3" "r74k_int_madd"
+ "mips_linked_madd_p")
+(define_bypass 1 "r74k_int_madd" "r74k_int_madd"
+ "mips_linked_madd_p")
+
+
+;; --------------------------------------------------------------
+;; DSP instructions
+;; --------------------------------------------------------------
+
+;; Non-saturating insns have the same latency as normal ALU operations.
+(define_insn_reservation "r74k_dsp_alu" 2
+ (and (eq_attr "cpu" "74kc,74kf2_1,74kf1_1,74kf3_2")
+ (eq_attr "type" "dspalu"))
+ "r74k_alu")
+
+;; Saturating insn takes an extra cycle.
+(define_insn_reservation "r74k_dsp_alu_sat" 3
+ (and (eq_attr "cpu" "74kc,74kf2_1,74kf1_1,74kf3_2")
+ (eq_attr "type" "dspalusat"))
+ "r74k_alu")
+
+;; dpaq_s, dpau, dpsq_s, dpsu, maq_s, mulsaq
+;; - delivers result to hi/lo in 6 cycles (bypass at M4)
+(define_insn_reservation "r74k_dsp_mac" 6
+ (and (eq_attr "cpu" "74kc,74kf2_1,74kf1_1,74kf3_2")
+ (eq_attr "type" "dspmac"))
+ "r74k_alu+r74k_mul")
+
+;; dpaq_sa, dpsq_sa, maq_sa
+;; - delivers result to hi/lo in 7 cycles (bypass at WB)
+(define_insn_reservation "r74k_dsp_mac_sat" 7
+ (and (eq_attr "cpu" "74kc,74kf2_1,74kf1_1,74kf3_2")
+ (eq_attr "type" "dspmacsat"))
+ "r74k_alu+r74k_mul")
+
+;; extp, extpdp, extpdpv, extpv, extr, extrv
+;; - same latency as "mul"
+(define_insn_reservation "r74k_dsp_acc_ext" 7
+ (and (eq_attr "cpu" "74kc,74kf2_1,74kf1_1,74kf3_2")
+ (eq_attr "type" "accext"))
+ "r74k_alu+r74k_mul")
+
+;; mthlip, shilo, shilov
+;; - same latency as "mul"
+(define_insn_reservation "r74k_dsp_acc_mod" 7
+ (and (eq_attr "cpu" "74kc,74kf2_1,74kf1_1,74kf3_2")
+ (eq_attr "type" "accmod"))
+ "r74k_alu+r74k_mul")
+
+;; dspalu ->load/store base
+;; dspalusat->load/store base
+;; - we should never see these in real life.
+
+;; dsp_mac->dsp_mac : 1 cycle (repeat rate of 1)
+;; dsp_mac->dsp_mac_sat : 1 cycle (repeat rate of 1)
+(define_bypass 1 "r74k_dsp_mac" "r74k_dsp_mac")
+(define_bypass 1 "r74k_dsp_mac" "r74k_dsp_mac_sat")
+
+;; dsp_mac_sat->dsp_mac_sat : 2 cycles (repeat rate of 2)
+;; dsp_mac_sat->dsp_mac : 2 cycles (repeat rate of 2)
+(define_bypass 2 "r74k_dsp_mac_sat" "r74k_dsp_mac_sat")
+(define_bypass 2 "r74k_dsp_mac_sat" "r74k_dsp_mac")
+
+(define_bypass 1 "r74k_int_mult" "r74k_dsp_mac")
+(define_bypass 1 "r74k_int_mult" "r74k_dsp_mac_sat")
+
+(define_bypass 1 "r74k_int_mul3" "r74k_dsp_mac" "mips_linked_madd_p")
+(define_bypass 1 "r74k_int_mul3" "r74k_dsp_mac_sat" "mips_linked_madd_p")
+
+;; Assuming the following is true (bypass at M4)
+;; AP AF AM MB M1 M2 M3 M4 WB GR GC
+;; AP AF AM MB M1 M2 M3 M4 WB GR GC
+;; dsp_mac->dsp_acc_ext : 4 cycles
+;; dsp_mac->dsp_acc_mod : 4 cycles
+(define_bypass 4 "r74k_dsp_mac" "r74k_dsp_acc_ext")
+(define_bypass 4 "r74k_dsp_mac" "r74k_dsp_acc_mod")
+
+;; Assuming the following is true (bypass at WB)
+;; AP AF AM MB M1 M2 M3 M4 WB GR GC
+;; AP AF AM MB M1 M2 M3 M4 WB GR GC
+;; dsp_mac_sat->dsp_acc_ext : 5 cycles
+;; dsp_mac_sat->dsp_acc_mod : 5 cycles
+(define_bypass 5 "r74k_dsp_mac_sat" "r74k_dsp_acc_ext")
+(define_bypass 5 "r74k_dsp_mac_sat" "r74k_dsp_acc_mod")
+
+
+;; --------------------------------------------------------------
+;; Floating Point Instructions
+;; --------------------------------------------------------------
+
+;; 74Kf FPU runs at 1:1 or 2:1 core/FPU clock ratio.
+
+;; fadd, fabs, fneg,
+(define_insn_reservation "r74kf1_1_fadd" 4
+ (and (eq_attr "cpu" "74kf1_1")
+ (eq_attr "type" "fadd,fabs,fneg"))
+ "r74k_fpu_arith")
+
+(define_insn_reservation "r74kf2_1_fadd" 8
+ (and (eq_attr "cpu" "74kf2_1")
+ (eq_attr "type" "fadd,fabs,fneg"))
+ "r74k_fpu_arith*2")
+
+(define_insn_reservation "r74kf3_2_fadd" 6
+ (and (eq_attr "cpu" "74kf3_2")
+ (eq_attr "type" "fadd,fabs,fneg"))
+ "r74k_fpu_arith")
+
+;; fmove, fcmove
+(define_insn_reservation "r74kf1_1_fmove" 4
+ (and (eq_attr "cpu" "74kf1_1")
+ (eq_attr "type" "fmove"))
+ "r74k_fpu_arith")
+
+(define_insn_reservation "r74kf2_1_fmove" 8
+ (and (eq_attr "cpu" "74kf2_1")
+ (eq_attr "type" "fmove"))
+ "r74k_fpu_arith*2")
+
+(define_insn_reservation "r74kf3_2_fmove" 6
+ (and (eq_attr "cpu" "74kf3_2")
+ (eq_attr "type" "fmove"))
+ "r74k_fpu_arith")
+
+;; fload
+(define_insn_reservation "r74kf1_1_fload" 4
+ (and (eq_attr "cpu" "74kf1_1")
+ (eq_attr "type" "fpload,fpidxload"))
+ "r74k_agen+r74k_fpu_ldst")
+
+(define_insn_reservation "r74kf2_1_fload" 8
+ (and (eq_attr "cpu" "74kf2_1")
+ (eq_attr "type" "fpload,fpidxload"))
+ "r74k_agen+(r74k_fpu_ldst*2)")
+
+(define_insn_reservation "r74kf3_2_fload" 6
+ (and (eq_attr "cpu" "74kf3_2")
+ (eq_attr "type" "fpload,fpidxload"))
+ "r74k_agen+r74k_fpu_ldst")
+
+;; fstore
+(define_insn_reservation "r74kf1_1_fstore" 1
+ (and (eq_attr "cpu" "74kf1_1")
+ (eq_attr "type" "fpstore,fpidxstore"))
+ "r74k_agen+r74k_fpu_ldst")
+
+(define_insn_reservation "r74kf2_1_fstore" 2
+ (and (eq_attr "cpu" "74kf2_1")
+ (eq_attr "type" "fpstore,fpidxstore"))
+ "r74k_agen+(r74k_fpu_ldst*2)")
+
+(define_insn_reservation "r74kf3_2_fstore" 1
+ (and (eq_attr "cpu" "74kf3_2")
+ (eq_attr "type" "fpstore,fpidxstore"))
+ "r74k_agen+r74k_fpu_ldst")
+
+;; fmul, fmadd
+(define_insn_reservation "r74kf1_1_fmul_sf" 4
+ (and (eq_attr "cpu" "74kf1_1")
+ (and (eq_attr "type" "fmul,fmadd")
+ (eq_attr "mode" "SF")))
+ "r74k_fpu_arith")
+
+(define_insn_reservation "r74kf2_1_fmul_sf" 8
+ (and (eq_attr "cpu" "74kf2_1")
+ (and (eq_attr "type" "fmul,fmadd")
+ (eq_attr "mode" "SF")))
+ "r74k_fpu_arith*2")
+
+(define_insn_reservation "r74kf3_2_fmul_sf" 6
+ (and (eq_attr "cpu" "74kf3_2")
+ (and (eq_attr "type" "fmul,fmadd")
+ (eq_attr "mode" "SF")))
+ "r74k_fpu_arith")
+
+(define_insn_reservation "r74kf1_1_fmul_df" 5
+ (and (eq_attr "cpu" "74kf1_1")
+ (and (eq_attr "type" "fmul,fmadd")
+ (eq_attr "mode" "DF")))
+ "r74k_fpu_arith*2")
+
+(define_insn_reservation "r74kf2_1_fmul_df" 10
+ (and (eq_attr "cpu" "74kf2_1")
+ (and (eq_attr "type" "fmul,fmadd")
+ (eq_attr "mode" "DF")))
+ "r74k_fpu_arith*4")
+
+(define_insn_reservation "r74kf3_2_fmul_df" 7
+ (and (eq_attr "cpu" "74kf3_2")
+ (and (eq_attr "type" "fmul,fmadd")
+ (eq_attr "mode" "DF")))
+ "r74k_fpu_arith*2")
+
+;; fdiv, fsqrt
+(define_insn_reservation "r74kf1_1_fdiv_sf" 17
+ (and (eq_attr "cpu" "74kf1_1")
+ (and (eq_attr "type" "fdiv,fsqrt")
+ (eq_attr "mode" "SF")))
+ "r74k_fpu_arith*14")
+
+(define_insn_reservation "r74kf2_1_fdiv_sf" 34
+ (and (eq_attr "cpu" "74kf2_1")
+ (and (eq_attr "type" "fdiv,fsqrt")
+ (eq_attr "mode" "SF")))
+ "r74k_fpu_arith*28")
+
+(define_insn_reservation "r74kf3_2_fdiv_sf" 25
+ (and (eq_attr "cpu" "74kf3_2")
+ (and (eq_attr "type" "fdiv,fsqrt")
+ (eq_attr "mode" "SF")))
+ "r74k_fpu_arith*14")
+
+(define_insn_reservation "r74kf1_1_fdiv_df" 32
+ (and (eq_attr "cpu" "74kf1_1")
+ (and (eq_attr "type" "fdiv,fsqrt")
+ (eq_attr "mode" "DF")))
+ "r74k_fpu_arith*29")
+
+(define_insn_reservation "r74kf2_1_fdiv_df" 64
+ (and (eq_attr "cpu" "74kf2_1")
+ (and (eq_attr "type" "fdiv,fsqrt")
+ (eq_attr "mode" "DF")))
+ "r74k_fpu_arith*58")
+
+(define_insn_reservation "r74kf3_2_fdiv_df" 48
+ (and (eq_attr "cpu" "74kf3_2")
+ (and (eq_attr "type" "fdiv,fsqrt")
+ (eq_attr "mode" "DF")))
+ "r74k_fpu_arith*29")
+
+;; frsqrt
+(define_insn_reservation "r74kf1_1_frsqrt_sf" 17
+ (and (eq_attr "cpu" "74kf1_1")
+ (and (eq_attr "type" "frsqrt")
+ (eq_attr "mode" "SF")))
+ "r74k_fpu_arith*14")
+
+(define_insn_reservation "r74kf2_1_frsqrt_sf" 34
+ (and (eq_attr "cpu" "74kf2_1")
+ (and (eq_attr "type" "frsqrt")
+ (eq_attr "mode" "SF")))
+ "r74k_fpu_arith*28")
+
+(define_insn_reservation "r74kf3_2_frsqrt_sf" 25
+ (and (eq_attr "cpu" "74kf3_2")
+ (and (eq_attr "type" "frsqrt")
+ (eq_attr "mode" "SF")))
+ "r74k_fpu_arith*14")
+
+(define_insn_reservation "r74kf1_1_frsqrt_df" 36
+ (and (eq_attr "cpu" "74kf1_1")
+ (and (eq_attr "type" "frsqrt")
+ (eq_attr "mode" "DF")))
+ "r74k_fpu_arith*31")
+
+(define_insn_reservation "r74kf2_1_frsqrt_df" 72
+ (and (eq_attr "cpu" "74kf2_1")
+ (and (eq_attr "type" "frsqrt")
+ (eq_attr "mode" "DF")))
+ "r74k_fpu_arith*62")
+
+(define_insn_reservation "r74kf3_2_frsqrt_df" 54
+ (and (eq_attr "cpu" "74kf3_2")
+ (and (eq_attr "type" "frsqrt")
+ (eq_attr "mode" "DF")))
+ "r74k_fpu_arith*31")
+
+;; fcmp
+(define_insn_reservation "r74kf1_1_fcmp" 4
+ (and (eq_attr "cpu" "74kf1_1")
+ (eq_attr "type" "fcmp"))
+ "r74k_fpu_arith")
+
+(define_insn_reservation "r74kf2_1_fcmp" 8
+ (and (eq_attr "cpu" "74kf2_1")
+ (eq_attr "type" "fcmp"))
+ "r74k_fpu_arith*2")
+
+(define_insn_reservation "r74kf3_2_fcmp" 6
+ (and (eq_attr "cpu" "74kf3_2")
+ (eq_attr "type" "fcmp"))
+ "r74k_fpu_arith")
+
+;; fcvt
+(define_insn_reservation "r74kf1_1_fcvt" 4
+ (and (eq_attr "cpu" "74kf1_1")
+ (eq_attr "type" "fcvt"))
+ "r74k_fpu_arith")
+
+(define_insn_reservation "r74kf2_1_fcvt" 8
+ (and (eq_attr "cpu" "74kf2_1")
+ (eq_attr "type" "fcvt"))
+ "r74k_fpu_arith*2")
+
+(define_insn_reservation "r74kf3_2_fcvt" 6
+ (and (eq_attr "cpu" "74kf3_2")
+ (eq_attr "type" "fcvt"))
+ "r74k_fpu_arith")
+
+;; fxfer (MTC1, DMTC1: latency is 4) (MFC1, DMFC1: latency is 1)
+(define_insn_reservation "r74kf1_1_fxfer_to_c1" 4
+ (and (eq_attr "cpu" "74kf1_1")
+ (eq_attr "type" "mtc"))
+ "r74k_fpu_arith")
+
+(define_insn_reservation "r74kf2_1_fxfer_to_c1" 8
+ (and (eq_attr "cpu" "74kf2_1")
+ (eq_attr "type" "mtc"))
+ "r74k_fpu_arith*2")
+
+(define_insn_reservation "r74kf3_2_fxfer_to_c1" 6
+ (and (eq_attr "cpu" "74kf3_2")
+ (eq_attr "type" "mtc"))
+ "r74k_fpu_arith")
+
+(define_insn_reservation "r74kf1_1_fxfer_from_c1" 1
+ (and (eq_attr "cpu" "74kf1_1")
+ (eq_attr "type" "mfc"))
+ "r74k_fpu_arith")
+
+(define_insn_reservation "r74kf2_1_fxfer_from_c1" 2
+ (and (eq_attr "cpu" "74kf2_1")
+ (eq_attr "type" "mfc"))
+ "r74k_fpu_arith*2")
+
+(define_insn_reservation "r74kf3_2_fxfer_from_c1" 1
+ (and (eq_attr "cpu" "74kf3_2")
+ (eq_attr "type" "mfc"))
+ "r74k_fpu_arith")
diff --git a/gcc-4.9/gcc/config/mips/9000.md b/gcc-4.9/gcc/config/mips/9000.md
new file mode 100644
index 000000000..efc646f5a
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/9000.md
@@ -0,0 +1,151 @@
+;; DFA-based pipeline description for the RM9000.
+;; Copyright (C) 2003-2014 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 3, or (at your
+;; option) any later version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+(define_automaton "rm9k_main, rm9k_imul, rm9k_fdiv")
+
+;; These units are for insns that can issue in either pipe. We don't
+;; want to use constructs like "rm9k_m | rm9k_f_int" since that would
+;; needlessly make an insn prefer the M pipe.
+(define_cpu_unit "rm9k_any1" "rm9k_main")
+(define_cpu_unit "rm9k_any2" "rm9k_main")
+
+;; F and M pipe units, for instructions that must be issued by a
+;; particular pipe. Split the F pipe into two units so that integer
+;; instructions can issue while the FPU is busy. We don't need to
+;; split M because it is only ever reserved for a single cycle.
+(define_cpu_unit "rm9k_m" "rm9k_main")
+(define_cpu_unit "rm9k_f_int" "rm9k_main")
+(define_cpu_unit "rm9k_f_float" "rm9k_main")
+
+(exclusion_set "rm9k_f_int" "rm9k_f_float")
+
+;; Multiply/divide units.
+(define_cpu_unit "rm9k_imul" "rm9k_imul")
+(define_cpu_unit "rm9k_fdiv" "rm9k_fdiv")
+
+(define_insn_reservation "rm9k_load" 3
+ (and (eq_attr "cpu" "r9000")
+ (eq_attr "type" "load,fpload,fpidxload"))
+ "rm9k_m")
+
+(define_insn_reservation "rm9k_store" 1
+ (and (eq_attr "cpu" "r9000")
+ (eq_attr "type" "store,fpstore,fpidxstore"))
+ "rm9k_m")
+
+(define_insn_reservation "rm9k_int" 1
+ (and (eq_attr "cpu" "r9000")
+ (eq_attr "type" "arith,shift,signext,slt,clz,const,logical,move,nop,trap"))
+ "rm9k_any1 | rm9k_any2")
+
+(define_insn_reservation "rm9k_int_cmove" 2
+ (and (eq_attr "cpu" "r9000")
+ (and (eq_attr "type" "condmove")
+ (eq_attr "mode" "SI,DI")))
+ "rm9k_any1 | rm9k_any2")
+
+;; This applies to both 'mul' and 'mult'.
+(define_insn_reservation "rm9k_mulsi" 3
+ (and (eq_attr "cpu" "r9000")
+ (and (eq_attr "type" "imul,imul3,imadd")
+ (eq_attr "mode" "!DI")))
+ "rm9k_f_int")
+
+(define_insn_reservation "rm9k_muldi" 7
+ (and (eq_attr "cpu" "r9000")
+ (and (eq_attr "type" "imul,imul3,imadd")
+ (eq_attr "mode" "DI")))
+ "rm9k_f_int + rm9k_imul * 7")
+
+(define_insn_reservation "rm9k_divsi" 38
+ (and (eq_attr "cpu" "r9000")
+ (and (eq_attr "type" "idiv")
+ (eq_attr "mode" "!DI")))
+ "rm9k_f_int + rm9k_imul * 38")
+
+(define_insn_reservation "rm9k_divdi" 70
+ (and (eq_attr "cpu" "r9000")
+ (and (eq_attr "type" "idiv")
+ (eq_attr "mode" "DI")))
+ "rm9k_f_int + rm9k_imul * 70")
+
+(define_insn_reservation "rm9k_mfhilo" 1
+ (and (eq_attr "cpu" "r9000")
+ (eq_attr "type" "mfhi,mflo"))
+ "rm9k_f_int")
+
+(define_insn_reservation "rm9k_mthilo" 5
+ (and (eq_attr "cpu" "r9000")
+ (eq_attr "type" "mthi,mtlo"))
+ "rm9k_f_int")
+
+(define_insn_reservation "rm9k_xfer" 2
+ (and (eq_attr "cpu" "r9000")
+ (eq_attr "type" "mfc,mtc"))
+ "rm9k_m")
+
+(define_insn_reservation "rm9k_fquick" 2
+ (and (eq_attr "cpu" "r9000")
+ (eq_attr "type" "fabs,fneg,fcmp,fmove"))
+ "rm9k_f_float")
+
+(define_insn_reservation "rm9k_fcmove" 2
+ (and (eq_attr "cpu" "r9000")
+ (and (eq_attr "type" "condmove")
+ (eq_attr "mode" "SF,DF")))
+ "rm9k_m")
+
+(define_insn_reservation "rm9k_fadd" 6
+ (and (eq_attr "cpu" "r9000")
+ (eq_attr "type" "fadd,fcvt"))
+ "rm9k_f_float")
+
+(define_insn_reservation "rm9k_fmuls" 6
+ (and (eq_attr "cpu" "r9000")
+ (and (eq_attr "type" "fmul,fmadd")
+ (eq_attr "mode" "SF")))
+ "rm9k_f_float")
+
+(define_insn_reservation "rm9k_fmuld" 9
+ (and (eq_attr "cpu" "r9000")
+ (and (eq_attr "type" "fmul,fmadd")
+ (eq_attr "mode" "DF")))
+ "rm9k_f_float * 3")
+
+(define_insn_reservation "rm9k_fdivs" 22
+ (and (eq_attr "cpu" "r9000")
+ (and (eq_attr "type" "fdiv,frdiv,fsqrt,frsqrt")
+ (eq_attr "mode" "SF")))
+ "rm9k_f_float + rm9k_fdiv * 22")
+
+(define_insn_reservation "rm9k_fdivd" 37
+ (and (eq_attr "cpu" "r9000")
+ (and (eq_attr "type" "fdiv,frdiv,fsqrt,frsqrt")
+ (eq_attr "mode" "DF")))
+ "rm9k_f_float + rm9k_fdiv * 37")
+
+(define_insn_reservation "rm9k_branch" 2
+ (and (eq_attr "cpu" "r9000")
+ (eq_attr "type" "branch,jump,call"))
+ "rm9k_any1 | rm9k_any2")
+
+(define_insn_reservation "rm9k_unknown" 1
+ (and (eq_attr "cpu" "r9000")
+ (eq_attr "type" "unknown,multi,atomic,syncloop"))
+ "rm9k_m + rm9k_f_int + rm9k_any1 + rm9k_any2")
diff --git a/gcc-4.9/gcc/config/mips/constraints.md b/gcc-4.9/gcc/config/mips/constraints.md
new file mode 100644
index 000000000..49e48954f
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/constraints.md
@@ -0,0 +1,363 @@
+;; Constraint definitions for MIPS.
+;; Copyright (C) 2006-2014 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; Register constraints
+
+(define_register_constraint "d" "BASE_REG_CLASS"
+ "An address register. This is equivalent to @code{r} unless
+ generating MIPS16 code.")
+
+(define_register_constraint "t" "T_REG"
+ "@internal")
+
+(define_register_constraint "f" "TARGET_HARD_FLOAT ? FP_REGS : NO_REGS"
+ "A floating-point register (if available).")
+
+(define_register_constraint "h" "NO_REGS"
+ "Formerly the @code{hi} register. This constraint is no longer supported.")
+
+(define_register_constraint "l" "TARGET_BIG_ENDIAN ? MD1_REG : MD0_REG"
+ "The @code{lo} register. Use this register to store values that are
+ no bigger than a word.")
+
+(define_register_constraint "x" "MD_REGS"
+ "The concatenated @code{hi} and @code{lo} registers. Use this register
+ to store doubleword values.")
+
+(define_register_constraint "b" "ALL_REGS"
+ "@internal")
+
+(define_register_constraint "u" "M16_REGS"
+ "@internal")
+
+;; MIPS16 code always calls through a MIPS16 register; see mips_emit_call_insn
+;; for details.
+(define_register_constraint "c" "TARGET_MIPS16 ? M16_REGS
+ : TARGET_USE_PIC_FN_ADDR_REG ? PIC_FN_ADDR_REG
+ : GR_REGS"
+ "A register suitable for use in an indirect jump. This will always be
+ @code{$25} for @option{-mabicalls}.")
+
+(define_register_constraint "e" "LEA_REGS"
+ "@internal")
+
+(define_register_constraint "j" "PIC_FN_ADDR_REG"
+ "@internal")
+
+;; Don't use this constraint in gcc code! It runs the risk of
+;; introducing a spill failure; see tls_get_tp_<mode>.
+(define_register_constraint "v" "V1_REG"
+ "Register @code{$3}. Do not use this constraint in new code;
+ it is retained only for compatibility with glibc.")
+
+(define_register_constraint "y" "GR_REGS"
+ "Equivalent to @code{r}; retained for backwards compatibility.")
+
+(define_register_constraint "z" "ST_REGS"
+ "A floating-point condition code register.")
+
+(define_register_constraint "A" "DSP_ACC_REGS"
+ "@internal")
+
+(define_register_constraint "a" "ACC_REGS"
+ "@internal")
+
+(define_register_constraint "B" "COP0_REGS"
+ "@internal")
+
+(define_register_constraint "C" "COP2_REGS"
+ "@internal")
+
+(define_register_constraint "D" "COP3_REGS"
+ "@internal")
+
+;; Registers that can be used as the target of multiply-accumulate
+;; instructions. The core MIPS32 ISA provides a hi/lo madd,
+;; but the DSP version allows any accumulator target.
+(define_register_constraint "ka" "ISA_HAS_DSP_MULT ? ACC_REGS : MD_REGS")
+
+(define_constraint "kf"
+ "@internal"
+ (match_operand 0 "force_to_mem_operand"))
+
+;; This is a normal rather than a register constraint because we can
+;; never use the stack pointer as a reload register.
+(define_constraint "ks"
+ "@internal"
+ (and (match_code "reg")
+ (match_test "REGNO (op) == STACK_POINTER_REGNUM")))
+
+;; Integer constraints
+
+(define_constraint "I"
+ "A signed 16-bit constant (for arithmetic instructions)."
+ (and (match_code "const_int")
+ (match_test "SMALL_OPERAND (ival)")))
+
+(define_constraint "J"
+ "Integer zero."
+ (and (match_code "const_int")
+ (match_test "ival == 0")))
+
+(define_constraint "K"
+ "An unsigned 16-bit constant (for logic instructions)."
+ (and (match_code "const_int")
+ (match_test "SMALL_OPERAND_UNSIGNED (ival)")))
+
+(define_constraint "L"
+ "A signed 32-bit constant in which the lower 16 bits are zero.
+ Such constants can be loaded using @code{lui}."
+ (and (match_code "const_int")
+ (match_test "LUI_OPERAND (ival)")))
+
+(define_constraint "M"
+ "A constant that cannot be loaded using @code{lui}, @code{addiu}
+ or @code{ori}."
+ (and (match_code "const_int")
+ (not (match_test "SMALL_OPERAND (ival)"))
+ (not (match_test "SMALL_OPERAND_UNSIGNED (ival)"))
+ (not (match_test "LUI_OPERAND (ival)"))))
+
+(define_constraint "N"
+ "A constant in the range -65535 to -1 (inclusive)."
+ (and (match_code "const_int")
+ (match_test "ival >= -0xffff && ival < 0")))
+
+(define_constraint "O"
+ "A signed 15-bit constant."
+ (and (match_code "const_int")
+ (match_test "ival >= -0x4000 && ival < 0x4000")))
+
+(define_constraint "P"
+ "A constant in the range 1 to 65535 (inclusive)."
+ (and (match_code "const_int")
+ (match_test "ival > 0 && ival < 0x10000")))
+
+;; Floating-point constraints
+
+(define_constraint "G"
+ "Floating-point zero."
+ (and (match_code "const_double")
+ (match_test "op == CONST0_RTX (mode)")))
+
+;; General constraints
+
+(define_constraint "Q"
+ "@internal"
+ (match_operand 0 "const_arith_operand"))
+
+(define_memory_constraint "R"
+ "An address that can be used in a non-macro load or store."
+ (and (match_code "mem")
+ (match_test "mips_address_insns (XEXP (op, 0), mode, false) == 1")))
+
+(define_constraint "S"
+ "@internal
+ A constant call address."
+ (and (match_operand 0 "call_insn_operand")
+ (match_test "CONSTANT_P (op)")))
+
+(define_constraint "Udb7"
+ "@internal
+ A decremented unsigned constant of 7 bits."
+ (match_operand 0 "db7_operand"))
+
+(define_constraint "Udb8"
+ "@internal
+ A decremented unsigned constant of 8 bits."
+ (match_operand 0 "db8_operand"))
+
+(define_constraint "Uead"
+ "@internal
+ A microMIPS encoded ADDIUR2 immediate operand."
+ (match_operand 0 "addiur2_operand"))
+
+(define_constraint "Uean"
+ "@internal
+ A microMIPS encoded ANDI operand."
+ (match_operand 0 "andi16_operand"))
+
+(define_constraint "Uesp"
+ "@internal
+ A microMIPS encoded ADDIUSP operand."
+ (match_operand 0 "addiusp_operand"))
+
+(define_constraint "Uib3"
+ "@internal
+ An unsigned, incremented constant of 3 bits."
+ (match_operand 0 "ib3_operand"))
+
+(define_constraint "Usb4"
+ "@internal
+ A signed constant of 4 bits."
+ (match_operand 0 "sb4_operand"))
+
+(define_constraint "Usb5"
+ "@internal
+ A signed constant of 5 bits."
+ (match_operand 0 "sb5_operand"))
+
+(define_constraint "Usb8"
+ "@internal
+ A signed constant of 8 bits."
+ (match_operand 0 "sb8_operand"))
+
+(define_constraint "Usd8"
+ "@internal
+ A signed constant of 8 bits, shifted left three places."
+ (match_operand 0 "sd8_operand"))
+
+(define_constraint "Uub8"
+ "@internal
+ An unsigned constant of 8 bits."
+ (match_operand 0 "ub8_operand"))
+
+(define_constraint "Uuw5"
+ "@internal
+ An unsigned constant of 5 bits, shifted left two places."
+ (match_operand 0 "uw5_operand"))
+
+(define_constraint "Uuw6"
+ "@internal
+ An unsigned constant of 6 bits, shifted left two places."
+ (match_operand 0 "uw6_operand"))
+
+(define_constraint "Uuw8"
+ "@internal
+ An unsigned constant of 8 bits, shifted left two places."
+ (match_operand 0 "uw8_operand"))
+
+(define_memory_constraint "W"
+ "@internal
+ A memory address based on a member of @code{BASE_REG_CLASS}. This is
+ true for all non-mips16 references (although it can sometimes be implicit
+ if @samp{!TARGET_EXPLICIT_RELOCS}). For MIPS16, it excludes stack and
+ constant-pool references."
+ (and (match_code "mem")
+ (match_operand 0 "memory_operand")
+ (ior (not (match_test "TARGET_MIPS16"))
+ (and (not (match_operand 0 "stack_operand"))
+ (not (match_test "CONSTANT_P (XEXP (op, 0))"))))))
+
+(define_constraint "YG"
+ "@internal
+ A vector zero."
+ (and (match_code "const_vector")
+ (match_test "op == CONST0_RTX (mode)")))
+
+(define_constraint "YA"
+ "@internal
+ An unsigned 6-bit constant."
+ (and (match_code "const_int")
+ (match_test "UIMM6_OPERAND (ival)")))
+
+(define_constraint "YB"
+ "@internal
+ A signed 10-bit constant."
+ (and (match_code "const_int")
+ (match_test "IMM10_OPERAND (ival)")))
+
+(define_constraint "Yb"
+ "@internal"
+ (match_operand 0 "qi_mask_operand"))
+
+(define_constraint "Yd"
+ "@internal
+ A constant @code{move_operand} that can be safely loaded into @code{$25}
+ using @code{la}."
+ (and (match_operand 0 "move_operand")
+ (match_test "CONSTANT_P (op)")
+ (not (match_test "mips_dangerous_for_la25_p (op)"))))
+
+(define_constraint "Yf"
+ "@internal
+ A constant @code{move_operand} that cannot be safely loaded into @code{$25}
+ using @code{la}."
+ (and (match_operand 0 "move_operand")
+ (match_test "CONSTANT_P (op)")
+ (match_test "mips_dangerous_for_la25_p (op)")))
+
+(define_constraint "Yh"
+ "@internal"
+ (match_operand 0 "hi_mask_operand"))
+
+(define_constraint "Yw"
+ "@internal"
+ (match_operand 0 "si_mask_operand"))
+
+(define_constraint "Yx"
+ "@internal"
+ (match_operand 0 "low_bitmask_operand"))
+
+(define_memory_constraint "ZC"
+ "When compiling microMIPS code, this constraint matches a memory operand
+ whose address is formed from a base register and a 12-bit offset. These
+ operands can be used for microMIPS instructions such as @code{ll} and
+ @code{sc}. When not compiling for microMIPS code, @code{ZC} is
+ equivalent to @code{R}."
+ (and (match_code "mem")
+ (if_then_else
+ (match_test "TARGET_MICROMIPS")
+ (match_test "umips_12bit_offset_address_p (XEXP (op, 0), mode)")
+ (match_test "mips_address_insns (XEXP (op, 0), mode, false)"))))
+
+(define_address_constraint "ZD"
+ "When compiling microMIPS code, this constraint matches an address operand
+ that is formed from a base register and a 12-bit offset. These operands
+ can be used for microMIPS instructions such as @code{prefetch}. When
+ not compiling for microMIPS code, @code{ZD} is equivalent to @code{p}."
+ (if_then_else (match_test "TARGET_MICROMIPS")
+ (match_test "umips_12bit_offset_address_p (op, mode)")
+ (match_test "mips_address_insns (op, mode, false)")))
+
+(define_memory_constraint "ZR"
+ "@internal
+ An address valid for loading/storing register exclusive.
+ (match_operand 0 "mem_noofs_operand"))
+
+(define_memory_constraint "ZS"
+ "@internal
+ A microMIPS memory operand for use with the LWSP/SWSP insns."
+ (and (match_code "mem")
+ (match_operand 0 "lwsp_swsp_operand")))
+
+(define_memory_constraint "ZT"
+ "@internal
+ A microMIPS memory operand for use with the LW16/SW16 insns."
+ (and (match_code "mem")
+ (match_operand 0 "lw16_sw16_operand")))
+
+(define_memory_constraint "ZU"
+ "@internal
+ A microMIPS memory operand for use with the LHU16/SH16 insns."
+ (and (match_code "mem")
+ (match_operand 0 "lhu16_sh16_operand")))
+
+(define_memory_constraint "ZV"
+ "@internal
+ A microMIPS memory operand for use with the SB16 insn."
+ (and (match_code "mem")
+ (match_operand 0 "sb16_operand")))
+
+(define_memory_constraint "ZW"
+ "@internal
+ A microMIPS memory operand for use with the LBU16 insn."
+ (and (match_code "mem")
+ (match_operand 0 "lbu16_operand")))
+
diff --git a/gcc-4.9/gcc/config/mips/driver-native.c b/gcc-4.9/gcc/config/mips/driver-native.c
new file mode 100644
index 000000000..4c2a658c7
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/driver-native.c
@@ -0,0 +1,89 @@
+/* Subroutines for the gcc driver.
+ Copyright (C) 2008-2014 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+
+/* This will be called by the spec parser in gcc.c when it sees
+ a %:local_cpu_detect(args) construct. Currently it will be called
+ with either "arch" or "tune" as argument depending on if -march=native
+ or -mtune=native is to be substituted.
+
+ It returns a string containing new command line parameters to be
+ put at the place of the above two options, depending on what CPU
+ this is executed. E.g. "-march=loongson2f" on a Loongson 2F for
+ -march=native. If the routine can't detect a known processor,
+ the -march or -mtune option is discarded.
+
+ ARGC and ARGV are set depending on the actual arguments given
+ in the spec. */
+const char *
+host_detect_local_cpu (int argc, const char **argv)
+{
+ const char *cpu = NULL;
+ char buf[128];
+ FILE *f;
+ bool arch;
+
+ if (argc < 1)
+ return NULL;
+
+ arch = strcmp (argv[0], "arch") == 0;
+ if (!arch && strcmp (argv[0], "tune"))
+ return NULL;
+
+ f = fopen ("/proc/cpuinfo", "r");
+ if (f == NULL)
+ return NULL;
+
+ while (fgets (buf, sizeof (buf), f) != NULL)
+ if (strncmp (buf, "cpu model", sizeof ("cpu model") - 1) == 0)
+ {
+ if (strstr (buf, "Godson2 V0.2") != NULL
+ || strstr (buf, "Loongson-2 V0.2") != NULL
+ || strstr (buf, "Loongson-2E") != NULL)
+ cpu = "loongson2e";
+ else if (strstr (buf, "Godson2 V0.3") != NULL
+ || strstr (buf, "Loongson-2 V0.3") != NULL
+ || strstr (buf, "Loongson-2F") != NULL)
+ cpu = "loongson2f";
+ else if (strstr (buf, "Godson3 V0.5") != NULL
+ || strstr (buf, "Loongson-3 V0.5") != NULL
+ || strstr (buf, "Loongson-3A") != NULL)
+ cpu = "loongson3a";
+ else if (strstr (buf, "SiByte SB1") != NULL)
+ cpu = "sb1";
+ else if (strstr (buf, "R5000") != NULL)
+ cpu = "r5000";
+ else if (strstr (buf, "Octeon II") != NULL)
+ cpu = "octeon2";
+ else if (strstr (buf, "Octeon") != NULL)
+ cpu = "octeon";
+ break;
+ }
+
+ fclose (f);
+
+ if (cpu == NULL)
+ return NULL;
+
+ return concat ("-m", argv[0], "=", cpu, NULL);
+}
diff --git a/gcc-4.9/gcc/config/mips/elf.h b/gcc-4.9/gcc/config/mips/elf.h
new file mode 100644
index 000000000..95f107d5e
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/elf.h
@@ -0,0 +1,50 @@
+/* Target macros for mips*-elf targets.
+ Copyright (C) 1994-2014 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* MIPS assemblers don't have the usual .set foo,bar construct;
+ .set is used for assembler options instead. */
+#undef SET_ASM_OP
+#define ASM_OUTPUT_DEF(FILE, LABEL1, LABEL2) \
+ do \
+ { \
+ fputc ('\t', FILE); \
+ assemble_name (FILE, LABEL1); \
+ fputs (" = ", FILE); \
+ assemble_name (FILE, LABEL2); \
+ fputc ('\n', FILE); \
+ } \
+ while (0)
+
+#undef ASM_DECLARE_OBJECT_NAME
+#define ASM_DECLARE_OBJECT_NAME mips_declare_object_name
+
+#undef ASM_FINISH_DECLARE_OBJECT
+#define ASM_FINISH_DECLARE_OBJECT mips_finish_declare_object
+
+/* Leave the linker script to choose the appropriate libraries. */
+#undef LIB_SPEC
+#define LIB_SPEC ""
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC "crti%O%s crtbegin%O%s"
+
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC "crtend%O%s crtn%O%s"
+
+#define NO_IMPLICIT_EXTERN_C 1
diff --git a/gcc-4.9/gcc/config/mips/elfoabi.h b/gcc-4.9/gcc/config/mips/elfoabi.h
new file mode 100644
index 000000000..d88a79c0b
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/elfoabi.h
@@ -0,0 +1,39 @@
+/* Target macros for mips*-elf targets that selected between o32 and o64
+ based on the target architecture.
+ Copyright (C) 1994-2014 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#undef DRIVER_SELF_SPECS
+#define DRIVER_SELF_SPECS \
+ /* Make sure a -mips option is present. This helps us to pick \
+ the right multilib, and also makes the later specs easier \
+ to write. */ \
+ MIPS_ISA_LEVEL_SPEC, \
+ \
+ /* If no ABI option is specified, infer one from the ISA level \
+ or -mgp setting. */ \
+ "%{!mabi=*: %{" MIPS_32BIT_OPTION_SPEC ": -mabi=32;: -mabi=o64}}", \
+ \
+ /* Remove a redundant -mfp64 for -mabi=o64; we want the !mfp64 \
+ multilibs. There's no need to check whether the architecture \
+ is 64-bit; cc1 will complain if it isn't. */ \
+ "%{mabi=o64: %<mfp64}", \
+ \
+ /* Configuration-independent MIPS rules.*/ \
+ BASE_DRIVER_SELF_SPECS
+
diff --git a/gcc-4.9/gcc/config/mips/elforion.h b/gcc-4.9/gcc/config/mips/elforion.h
new file mode 100644
index 000000000..4ba39af6e
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/elforion.h
@@ -0,0 +1,20 @@
+/* Definitions of target machine for GNU compiler. MIPS ORION version.
+ Copyright (C) 1994-2014 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#define MIPS_CPU_STRING_DEFAULT "orion"
diff --git a/gcc-4.9/gcc/config/mips/generic.md b/gcc-4.9/gcc/config/mips/generic.md
new file mode 100644
index 000000000..b5e2840e2
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/generic.md
@@ -0,0 +1,121 @@
+;; Generic DFA-based pipeline description for MIPS targets
+;; Copyright (C) 2004-2014 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 3, or (at your
+;; option) any later version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+
+;; This file is derived from the old define_function_unit description.
+;; Each reservation can be overridden on a processor-by-processor basis.
+
+(define_insn_reservation "generic_alu" 1
+ (eq_attr "type" "unknown,prefetch,prefetchx,condmove,const,arith,
+ shift,slt,clz,trap,multi,nop,logical,signext,move")
+ "alu")
+
+(define_insn_reservation "generic_load" 3
+ (eq_attr "type" "load,fpload,fpidxload")
+ "alu")
+
+(define_insn_reservation "generic_store" 1
+ (eq_attr "type" "store,fpstore,fpidxstore")
+ "alu")
+
+(define_insn_reservation "generic_xfer" 2
+ (eq_attr "type" "mfc,mtc")
+ "alu")
+
+(define_insn_reservation "generic_branch" 1
+ (eq_attr "type" "branch,jump,call")
+ "alu")
+
+(define_insn_reservation "generic_hilo" 1
+ (eq_attr "type" "mfhi,mflo,mthi,mtlo")
+ "imuldiv*3")
+
+(define_insn_reservation "generic_imul" 17
+ (eq_attr "type" "imul,imul3,imadd")
+ "imuldiv*17")
+
+(define_insn_reservation "generic_idiv" 38
+ (eq_attr "type" "idiv")
+ "imuldiv*38")
+
+(define_insn_reservation "generic_fcvt" 1
+ (eq_attr "type" "fcvt")
+ "alu")
+
+(define_insn_reservation "generic_fmove" 2
+ (eq_attr "type" "fabs,fneg,fmove")
+ "alu")
+
+(define_insn_reservation "generic_fcmp" 3
+ (eq_attr "type" "fcmp")
+ "alu")
+
+(define_insn_reservation "generic_fadd" 4
+ (eq_attr "type" "fadd")
+ "alu")
+
+(define_insn_reservation "generic_fmul_single" 7
+ (and (eq_attr "type" "fmul,fmadd")
+ (eq_attr "mode" "SF"))
+ "alu")
+
+(define_insn_reservation "generic_fmul_double" 8
+ (and (eq_attr "type" "fmul,fmadd")
+ (eq_attr "mode" "DF"))
+ "alu")
+
+(define_insn_reservation "generic_fdiv_single" 23
+ (and (eq_attr "type" "fdiv,frdiv")
+ (eq_attr "mode" "SF"))
+ "alu")
+
+(define_insn_reservation "generic_fdiv_double" 36
+ (and (eq_attr "type" "fdiv,frdiv")
+ (eq_attr "mode" "DF"))
+ "alu")
+
+(define_insn_reservation "generic_fsqrt_single" 54
+ (and (eq_attr "type" "fsqrt,frsqrt")
+ (eq_attr "mode" "SF"))
+ "alu")
+
+(define_insn_reservation "generic_fsqrt_double" 112
+ (and (eq_attr "type" "fsqrt,frsqrt")
+ (eq_attr "mode" "DF"))
+ "alu")
+
+(define_insn_reservation "generic_frecip_fsqrt_step" 5
+ (eq_attr "type" "frdiv1,frdiv2,frsqrt1,frsqrt2")
+ "alu")
+
+(define_insn_reservation "generic_atomic" 10
+ (eq_attr "type" "atomic")
+ "alu")
+
+;; Sync loop consists of (in order)
+;; (1) optional sync,
+;; (2) LL instruction,
+;; (3) branch and 1-2 ALU instructions,
+;; (4) SC instruction,
+;; (5) branch and ALU instruction.
+;; The net result of this reservation is a big delay with a flush of
+;; ALU pipeline.
+(define_insn_reservation "generic_sync_loop" 40
+ (eq_attr "type" "syncloop")
+ "alu*39")
diff --git a/gcc-4.9/gcc/config/mips/genopt.sh b/gcc-4.9/gcc/config/mips/genopt.sh
new file mode 100755
index 000000000..457d5bb82
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/genopt.sh
@@ -0,0 +1,123 @@
+#!/bin/sh
+# Generate mips-tables.opt from the list of CPUs in mips-cpus.def.
+# Copyright (C) 2011-2014 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+cat <<EOF
+; -*- buffer-read-only: t -*-
+; Generated automatically by genopt.sh from mips-cpus.def.
+
+; Copyright (C) 2011-2014 Free Software Foundation, Inc.
+;
+; This file is part of GCC.
+;
+; GCC is free software; you can redistribute it and/or modify it under
+; the terms of the GNU General Public License as published by the Free
+; Software Foundation; either version 3, or (at your option) any later
+; version.
+;
+; GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+; WARRANTY; without even the implied warranty of MERCHANTABILITY or
+; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+; for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with GCC; see the file COPYING3. If not see
+; <http://www.gnu.org/licenses/>.
+
+Enum
+Name(mips_arch_opt_value) Type(int)
+Known MIPS CPUs (for use with the -march= and -mtune= options):
+
+Enum
+Name(mips_mips_opt_value) Type(int)
+Known MIPS ISA levels (for use with the -mips option):
+
+EnumValue
+Enum(mips_arch_opt_value) String(from-abi) Value(MIPS_ARCH_OPTION_FROM_ABI)
+
+EnumValue
+Enum(mips_arch_opt_value) String(native) Value(MIPS_ARCH_OPTION_NATIVE) DriverOnly
+
+EOF
+
+awk -F'[(, ]+' '
+BEGIN {
+ value = 0
+}
+
+# Write an entry for a single string accepted as a -march= argument.
+
+function write_one_arch_value(name, value, flags)
+{
+ print "EnumValue"
+ print "Enum(mips_arch_opt_value) String(" name ") Value(" value ")" flags
+ print ""
+ if (name ~ "^mips") {
+ sub("^mips", "", name)
+ print "EnumValue"
+ print "Enum(mips_mips_opt_value) String(" name ") Value(" value ")"
+ print ""
+ }
+}
+
+# The logic for matching CPU name variants should be the same as in GAS.
+
+# Write an entry for a single string accepted as a -march= argument,
+# plus any variant with a final "000" replaced by "k".
+
+function write_arch_value_maybe_k(name, value, flags)
+{
+ write_one_arch_value(name, value, flags)
+ if (name ~ "000$") {
+ sub("000$", "k", name)
+ write_one_arch_value(name, value, "")
+ }
+}
+
+# Write all the entries for a -march= argument. In addition to
+# replacement of a final "000" with "k", an argument starting with
+# "vr", "rm" or "r" followed by a number, or just a plain number,
+# matches a plain number or "r" followed by a plain number.
+
+function write_all_arch_values(name, value)
+{
+ write_arch_value_maybe_k(name, value, " Canonical")
+ cname = name
+ if (cname ~ "^vr") {
+ sub("^vr", "", cname)
+ } else if (cname ~ "^rm") {
+ sub("^rm", "", cname)
+ } else if (cname ~ "^r") {
+ sub("^r", "", cname)
+ }
+ if (cname ~ "^[0-9]") {
+ if (cname != name)
+ write_arch_value_maybe_k(cname, value, "")
+ rname = "r" cname
+ if (rname != name)
+ write_arch_value_maybe_k(rname, value, "")
+ }
+}
+
+/^MIPS_CPU/ {
+ name = $2
+ gsub("\"", "", name)
+ write_all_arch_values(name, value)
+ value++
+}' $1/mips-cpus.def
diff --git a/gcc-4.9/gcc/config/mips/gnu-user.h b/gcc-4.9/gcc/config/mips/gnu-user.h
new file mode 100644
index 000000000..638d7f0f5
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/gnu-user.h
@@ -0,0 +1,139 @@
+/* Definitions for MIPS systems using GNU userspace.
+ Copyright (C) 1998-2014 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "int"
+
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE 32
+
+#undef ASM_DECLARE_OBJECT_NAME
+#define ASM_DECLARE_OBJECT_NAME mips_declare_object_name
+
+/* If we don't set MASK_ABICALLS, we can't default to PIC. */
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT MASK_ABICALLS
+
+#define TARGET_OS_CPP_BUILTINS() \
+ do { \
+ GNU_USER_TARGET_OS_CPP_BUILTINS(); \
+ /* The GNU C++ standard library requires this. */ \
+ if (c_dialect_cxx ()) \
+ builtin_define ("_GNU_SOURCE"); \
+ } while (0)
+
+#undef SUBTARGET_CPP_SPEC
+#define SUBTARGET_CPP_SPEC "%{posix:-D_POSIX_SOURCE} %{pthread:-D_REENTRANT}"
+
+/* A standard GNU/Linux mapping. On most targets, it is included in
+ CC1_SPEC itself by config/linux.h, but mips.h overrides CC1_SPEC
+ and provides this hook instead. */
+#undef SUBTARGET_CC1_SPEC
+#define SUBTARGET_CC1_SPEC GNU_USER_TARGET_CC1_SPEC
+
+/* -G is incompatible with -KPIC which is the default, so only allow objects
+ in the small data section if the user explicitly asks for it. */
+#undef MIPS_DEFAULT_GVALUE
+#define MIPS_DEFAULT_GVALUE 0
+
+/* Borrowed from sparc/linux.h */
+#undef GNU_USER_TARGET_LINK_SPEC
+#define GNU_USER_TARGET_LINK_SPEC \
+ "%(endian_spec) \
+ %{shared:-shared} \
+ %{!shared: \
+ %{!static: \
+ %{rdynamic:-export-dynamic} \
+ -dynamic-linker " GNU_USER_DYNAMIC_LINKER "} \
+ %{static:-static}}"
+#undef LINK_SPEC
+#define LINK_SPEC GNU_USER_TARGET_LINK_SPEC
+
+#undef SUBTARGET_ASM_SPEC
+#define SUBTARGET_ASM_SPEC \
+ "%{!mno-abicalls:%{mplt:-call_nonpic;:-KPIC}}"
+
+/* The MIPS assembler has different syntax for .set. We set it to
+ .dummy to trap any errors. */
+#undef SET_ASM_OP
+#define SET_ASM_OP "\t.dummy\t"
+
+#undef ASM_OUTPUT_DEF
+#define ASM_OUTPUT_DEF(FILE,LABEL1,LABEL2) \
+ do { \
+ fputc ( '\t', FILE); \
+ assemble_name (FILE, LABEL1); \
+ fputs ( " = ", FILE); \
+ assemble_name (FILE, LABEL2); \
+ fputc ( '\n', FILE); \
+ } while (0)
+
+/* The glibc _mcount stub will save $v0 for us. Don't mess with saving
+ it, since ASM_OUTPUT_REG_PUSH/ASM_OUTPUT_REG_POP do not work in the
+ presence of $gp-relative calls. */
+#undef ASM_OUTPUT_REG_PUSH
+#undef ASM_OUTPUT_REG_POP
+
+#undef LIB_SPEC
+#define LIB_SPEC GNU_USER_TARGET_LIB_SPEC
+
+#ifdef HAVE_AS_NO_SHARED
+/* Default to -mno-shared for non-PIC. */
+# define NO_SHARED_SPECS \
+ " %{mshared|mno-shared|fpic|fPIC|fpie|fPIE:;:-mno-shared}"
+#else
+# define NO_SHARED_SPECS ""
+#endif
+
+/* -march=native handling only makes sense with compiler running on
+ a MIPS chip. */
+#if defined(__mips__)
+extern const char *host_detect_local_cpu (int argc, const char **argv);
+# define EXTRA_SPEC_FUNCTIONS \
+ { "local_cpu_detect", host_detect_local_cpu },
+
+# define MARCH_MTUNE_NATIVE_SPECS \
+ " %{march=native:%<march=native %:local_cpu_detect(arch)}" \
+ " %{mtune=native:%<mtune=native %:local_cpu_detect(tune)}"
+#else
+# define MARCH_MTUNE_NATIVE_SPECS ""
+#endif
+
+#define LINUX_DRIVER_SELF_SPECS \
+ NO_SHARED_SPECS \
+ MARCH_MTUNE_NATIVE_SPECS, \
+ /* -mplt has no effect without -mno-shared. Simplify later \
+ specs handling by removing a redundant option. */ \
+ "%{!mno-shared:%<mplt}", \
+ /* -mplt likewise has no effect for -mabi=64 without -msym32. */ \
+ "%{mabi=64:%{!msym32:%<mplt}}"
+
+#undef DRIVER_SELF_SPECS
+#define DRIVER_SELF_SPECS \
+ BASE_DRIVER_SELF_SPECS, \
+ LINUX_DRIVER_SELF_SPECS
+
+/* Similar to standard Linux, but adding -ffast-math support. */
+#undef GNU_USER_TARGET_MATHFILE_SPEC
+#define GNU_USER_TARGET_MATHFILE_SPEC \
+ "%{Ofast|ffast-math|funsafe-math-optimizations:crtfastmath.o%s}"
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC \
+ GNU_USER_TARGET_MATHFILE_SPEC " " \
+ GNU_USER_TARGET_ENDFILE_SPEC
diff --git a/gcc-4.9/gcc/config/mips/gnu-user64.h b/gcc-4.9/gcc/config/mips/gnu-user64.h
new file mode 100644
index 000000000..b97b4a768
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/gnu-user64.h
@@ -0,0 +1,52 @@
+/* Definitions for MIPS systems using GNU userspace and n32/64 abi.
+ Copyright (C) 2002-2014 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* Force the default endianness and ABI flags onto the command line
+ in order to make the other specs easier to write. */
+
+#define LINUX64_DRIVER_SELF_SPECS \
+ LINUX_DRIVER_SELF_SPECS \
+ " %{!EB:%{!EL:%(endian_spec)}}" \
+ " %{!mabi=*: -" MULTILIB_ABI_DEFAULT "}"
+
+#undef DRIVER_SELF_SPECS
+#define DRIVER_SELF_SPECS \
+ BASE_DRIVER_SELF_SPECS, \
+ LINUX64_DRIVER_SELF_SPECS
+
+#undef GNU_USER_TARGET_LINK_SPEC
+#define GNU_USER_TARGET_LINK_SPEC "\
+%{G*} %{EB} %{EL} %{mips1} %{mips2} %{mips3} %{mips4} \
+%{shared} \
+ %(endian_spec) \
+ %{!shared: \
+ %{!static: \
+ %{rdynamic:-export-dynamic} \
+ %{mabi=n32: -dynamic-linker " GNU_USER_DYNAMIC_LINKERN32 "} \
+ %{mabi=64: -dynamic-linker " GNU_USER_DYNAMIC_LINKER64 "} \
+ %{mabi=32: -dynamic-linker " GNU_USER_DYNAMIC_LINKER32 "}} \
+ %{static:-static}} \
+%{mabi=n32:-m" GNU_USER_LINK_EMULATIONN32 "} \
+%{mabi=64:-m" GNU_USER_LINK_EMULATION64 "} \
+%{mabi=32:-m" GNU_USER_LINK_EMULATION32 "}"
+#undef LINK_SPEC
+#define LINK_SPEC GNU_USER_TARGET_LINK_SPEC
+
+#undef LOCAL_LABEL_PREFIX
+#define LOCAL_LABEL_PREFIX (TARGET_OLDABI ? "$" : ".")
diff --git a/gcc-4.9/gcc/config/mips/linux-common.h b/gcc-4.9/gcc/config/mips/linux-common.h
new file mode 100644
index 000000000..e1e977eb9
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/linux-common.h
@@ -0,0 +1,64 @@
+/* Definitions for MIPS running Linux-based GNU systems with ELF format.
+ Copyright (C) 2012-2014 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#undef TARGET_OS_CPP_BUILTINS
+#define TARGET_OS_CPP_BUILTINS() \
+ do { \
+ GNU_USER_TARGET_OS_CPP_BUILTINS(); \
+ /* The GNU C++ standard library requires this. */ \
+ if (c_dialect_cxx ()) \
+ builtin_define ("_GNU_SOURCE"); \
+ ANDROID_TARGET_OS_CPP_BUILTINS(); \
+ } while (0)
+
+#undef LINK_SPEC
+#define LINK_SPEC \
+ LINUX_OR_ANDROID_LD (GNU_USER_TARGET_LINK_SPEC, \
+ GNU_USER_TARGET_LINK_SPEC " " ANDROID_LINK_SPEC)
+
+#undef SUBTARGET_CC1_SPEC
+#define SUBTARGET_CC1_SPEC \
+ LINUX_OR_ANDROID_CC (GNU_USER_TARGET_CC1_SPEC, \
+ GNU_USER_TARGET_CC1_SPEC " " ANDROID_CC1_SPEC)
+
+#undef CC1PLUS_SPEC
+#define CC1PLUS_SPEC \
+ LINUX_OR_ANDROID_CC ("", ANDROID_CC1PLUS_SPEC)
+
+#undef LIB_SPEC
+#define LIB_SPEC \
+ LINUX_OR_ANDROID_LD (GNU_USER_TARGET_LIB_SPEC, \
+ GNU_USER_TARGET_NO_PTHREADS_LIB_SPEC " " ANDROID_LIB_SPEC)
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC \
+ LINUX_OR_ANDROID_LD (GNU_USER_TARGET_STARTFILE_SPEC, ANDROID_STARTFILE_SPEC)
+
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC \
+ LINUX_OR_ANDROID_LD (GNU_USER_TARGET_MATHFILE_SPEC " " \
+ GNU_USER_TARGET_ENDFILE_SPEC, \
+ GNU_USER_TARGET_MATHFILE_SPEC " " \
+ ANDROID_ENDFILE_SPEC)
+
+/* Define this to be nonzero if static stack checking is supported. */
+#define STACK_CHECK_STATIC_BUILTIN 1
+
+/* The default value isn't sufficient in 64-bit mode. */
+#define STACK_CHECK_PROTECT (TARGET_64BIT ? 16 * 1024 : 12 * 1024)
diff --git a/gcc-4.9/gcc/config/mips/linux.h b/gcc-4.9/gcc/config/mips/linux.h
new file mode 100644
index 000000000..e539422d4
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/linux.h
@@ -0,0 +1,25 @@
+/* Definitions for MIPS running Linux-based GNU systems with ELF format.
+ Copyright (C) 1998-2014 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#define GLIBC_DYNAMIC_LINKER \
+ "%{mnan=2008:/lib/ld-linux-mipsn8.so.1;:/lib/ld.so.1}"
+
+#undef UCLIBC_DYNAMIC_LINKER
+#define UCLIBC_DYNAMIC_LINKER \
+ "%{mnan=2008:/lib/ld-uClibc-mipsn8.so.0;:/lib/ld-uClibc.so.0}"
diff --git a/gcc-4.9/gcc/config/mips/linux64.h b/gcc-4.9/gcc/config/mips/linux64.h
new file mode 100644
index 000000000..7ad3b2af2
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/linux64.h
@@ -0,0 +1,44 @@
+/* Definitions for MIPS running Linux-based GNU systems with ELF format
+ using n32/64 abi.
+ Copyright (C) 2002-2014 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#define GNU_USER_LINK_EMULATION32 "elf32%{EB:b}%{EL:l}tsmip"
+#define GNU_USER_LINK_EMULATION64 "elf64%{EB:b}%{EL:l}tsmip"
+#define GNU_USER_LINK_EMULATIONN32 "elf32%{EB:b}%{EL:l}tsmipn32"
+
+#define GLIBC_DYNAMIC_LINKER32 \
+ "%{mnan=2008:/lib/ld-linux-mipsn8.so.1;:/lib/ld.so.1}"
+#define GLIBC_DYNAMIC_LINKER64 \
+ "%{mnan=2008:/lib64/ld-linux-mipsn8.so.1;:/lib64/ld.so.1}"
+#define GLIBC_DYNAMIC_LINKERN32 \
+ "%{mnan=2008:/lib32/ld-linux-mipsn8.so.1;:/lib32/ld.so.1}"
+
+#undef UCLIBC_DYNAMIC_LINKER32
+#define UCLIBC_DYNAMIC_LINKER32 \
+ "%{mnan=2008:/lib/ld-uClibc-mipsn8.so.0;:/lib/ld-uClibc.so.0}"
+#undef UCLIBC_DYNAMIC_LINKER64
+#define UCLIBC_DYNAMIC_LINKER64 \
+ "%{mnan=2008:/lib/ld64-uClibc-mipsn8.so.0;:/lib/ld64-uClibc.so.0}"
+#define UCLIBC_DYNAMIC_LINKERN32 \
+ "%{mnan=2008:/lib32/ld-uClibc-mipsn8.so.0;:/lib32/ld-uClibc.so.0}"
+
+#define BIONIC_DYNAMIC_LINKERN32 "/system/bin/linker32"
+#define GNU_USER_DYNAMIC_LINKERN32 \
+ CHOOSE_DYNAMIC_LINKER (GLIBC_DYNAMIC_LINKERN32, UCLIBC_DYNAMIC_LINKERN32, \
+ BIONIC_DYNAMIC_LINKERN32)
diff --git a/gcc-4.9/gcc/config/mips/loongson.h b/gcc-4.9/gcc/config/mips/loongson.h
new file mode 100644
index 000000000..6e3de0d1d
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/loongson.h
@@ -0,0 +1,690 @@
+/* Intrinsics for ST Microelectronics Loongson-2E/2F SIMD operations.
+
+ Copyright (C) 2008-2014 Free Software Foundation, Inc.
+ Contributed by CodeSourcery.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef _GCC_LOONGSON_H
+#define _GCC_LOONGSON_H
+
+#if !defined(__mips_loongson_vector_rev)
+# error "You must select -march=loongson2e or -march=loongson2f to use loongson.h"
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+/* Vectors of unsigned bytes, halfwords and words: 64-bit types holding 8x8-bit, 4x16-bit and 2x32-bit unsigned lanes.  */
+typedef uint8_t uint8x8_t __attribute__((vector_size (8)));
+typedef uint16_t uint16x4_t __attribute__((vector_size (8)));
+typedef uint32_t uint32x2_t __attribute__((vector_size (8)));
+
+/* Vectors of signed bytes, halfwords and words: the signed counterparts of the types above, same 64-bit layout.  */
+typedef int8_t int8x8_t __attribute__((vector_size (8)));
+typedef int16_t int16x4_t __attribute__((vector_size (8)));
+typedef int32_t int32x2_t __attribute__((vector_size (8)));
+
+/* SIMD intrinsics.
+ Unless otherwise noted, calls to the functions below will expand into
+ precisely one machine instruction, modulo any moves required to
+ satisfy register allocation constraints. */
+
+/* Pack with signed saturation: narrow each lane of the two sources to the next smaller signed element type, clamping values that overflow.  */
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+packsswh (int32x2_t s, int32x2_t t)
+{
+ return __builtin_loongson_packsswh (s, t);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+packsshb (int16x4_t s, int16x4_t t)
+{
+ return __builtin_loongson_packsshb (s, t);
+}
+
+/* Pack with unsigned saturation: as above, but clamping to the unsigned range of the narrower element type.  */
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+packushb (uint16x4_t s, uint16x4_t t)
+{
+ return __builtin_loongson_packushb (s, t);
+}
+
+/* Vector addition, treating overflow by wraparound (modular add in each lane).  */
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+paddw_u (uint32x2_t s, uint32x2_t t)
+{
+ return __builtin_loongson_paddw_u (s, t);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+paddh_u (uint16x4_t s, uint16x4_t t)
+{
+ return __builtin_loongson_paddh_u (s, t);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+paddb_u (uint8x8_t s, uint8x8_t t)
+{
+ return __builtin_loongson_paddb_u (s, t);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+paddw_s (int32x2_t s, int32x2_t t)
+{
+ return __builtin_loongson_paddw_s (s, t);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+paddh_s (int16x4_t s, int16x4_t t)
+{
+ return __builtin_loongson_paddh_s (s, t);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+paddb_s (int8x8_t s, int8x8_t t)
+{
+ return __builtin_loongson_paddb_s (s, t);
+}
+
+/* Addition of doubleword integers, treating overflow by wraparound; operates on whole 64-bit scalars (held in FP registers per loongson.md).  */
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+paddd_u (uint64_t s, uint64_t t)
+{
+ return __builtin_loongson_paddd_u (s, t);
+}
+
+__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+paddd_s (int64_t s, int64_t t)
+{
+ return __builtin_loongson_paddd_s (s, t);
+}
+
+/* Vector addition, treating overflow by signed saturation (results clamp to the signed range of the lane type).  */
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+paddsh (int16x4_t s, int16x4_t t)
+{
+ return __builtin_loongson_paddsh (s, t);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+paddsb (int8x8_t s, int8x8_t t)
+{
+ return __builtin_loongson_paddsb (s, t);
+}
+
+/* Vector addition, treating overflow by unsigned saturation (results clamp to the unsigned range of the lane type).  */
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+paddush (uint16x4_t s, uint16x4_t t)
+{
+ return __builtin_loongson_paddush (s, t);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+paddusb (uint8x8_t s, uint8x8_t t)
+{
+ return __builtin_loongson_paddusb (s, t);
+}
+
+/* Logical AND NOT: each variant computes (~s) & t, matching the pandn pattern in loongson.md.  */
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+pandn_ud (uint64_t s, uint64_t t)
+{
+ return __builtin_loongson_pandn_ud (s, t);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+pandn_uw (uint32x2_t s, uint32x2_t t)
+{
+ return __builtin_loongson_pandn_uw (s, t);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+pandn_uh (uint16x4_t s, uint16x4_t t)
+{
+ return __builtin_loongson_pandn_uh (s, t);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+pandn_ub (uint8x8_t s, uint8x8_t t)
+{
+ return __builtin_loongson_pandn_ub (s, t);
+}
+
+__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+pandn_sd (int64_t s, int64_t t)
+{
+ return __builtin_loongson_pandn_sd (s, t);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+pandn_sw (int32x2_t s, int32x2_t t)
+{
+ return __builtin_loongson_pandn_sw (s, t);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+pandn_sh (int16x4_t s, int16x4_t t)
+{
+ return __builtin_loongson_pandn_sh (s, t);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+pandn_sb (int8x8_t s, int8x8_t t)
+{
+ return __builtin_loongson_pandn_sb (s, t);
+}
+
+/* Average of corresponding unsigned lanes -- NOTE(review): rounding behaviour (round-half-up vs truncate) follows the pavg instruction; confirm in the ISA manual.  */
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+pavgh (uint16x4_t s, uint16x4_t t)
+{
+ return __builtin_loongson_pavgh (s, t);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+pavgb (uint8x8_t s, uint8x8_t t)
+{
+ return __builtin_loongson_pavgb (s, t);
+}
+
+/* Equality test, per lane -- NOTE(review): assumed to yield an all-ones lane where equal and all-zeros otherwise (MMX pcmpeq convention); confirm.  */
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+pcmpeqw_u (uint32x2_t s, uint32x2_t t)
+{
+ return __builtin_loongson_pcmpeqw_u (s, t);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+pcmpeqh_u (uint16x4_t s, uint16x4_t t)
+{
+ return __builtin_loongson_pcmpeqh_u (s, t);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+pcmpeqb_u (uint8x8_t s, uint8x8_t t)
+{
+ return __builtin_loongson_pcmpeqb_u (s, t);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+pcmpeqw_s (int32x2_t s, int32x2_t t)
+{
+ return __builtin_loongson_pcmpeqw_s (s, t);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+pcmpeqh_s (int16x4_t s, int16x4_t t)
+{
+ return __builtin_loongson_pcmpeqh_s (s, t);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+pcmpeqb_s (int8x8_t s, int8x8_t t)
+{
+ return __builtin_loongson_pcmpeqb_s (s, t);
+}
+
+/* Greater-than test, per lane (s > t) -- NOTE(review): same assumed all-ones/all-zeros mask convention as pcmpeq; confirm.  */
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+pcmpgtw_u (uint32x2_t s, uint32x2_t t)
+{
+ return __builtin_loongson_pcmpgtw_u (s, t);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+pcmpgth_u (uint16x4_t s, uint16x4_t t)
+{
+ return __builtin_loongson_pcmpgth_u (s, t);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+pcmpgtb_u (uint8x8_t s, uint8x8_t t)
+{
+ return __builtin_loongson_pcmpgtb_u (s, t);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+pcmpgtw_s (int32x2_t s, int32x2_t t)
+{
+ return __builtin_loongson_pcmpgtw_s (s, t);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+pcmpgth_s (int16x4_t s, int16x4_t t)
+{
+ return __builtin_loongson_pcmpgth_s (s, t);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+pcmpgtb_s (int8x8_t s, int8x8_t t)
+{
+ return __builtin_loongson_pcmpgtb_s (s, t);
+}
+
+/* Extract the halfword of s selected by 'field' (0--3); the result is delivered in a vector, as produced by the pextrh instruction.  */
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+pextrh_u (uint16x4_t s, int field /* 0--3 */)
+{
+ return __builtin_loongson_pextrh_u (s, field);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+pextrh_s (int16x4_t s, int field /* 0--3 */)
+{
+ return __builtin_loongson_pextrh_s (s, field);
+}
+
+/* Insert halfword: pinsrh_N_* returns s with halfword N replaced by halfword 0 of t (per the pinsrh_N vec_select patterns in loongson.md).  */
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+pinsrh_0_u (uint16x4_t s, uint16x4_t t)
+{
+ return __builtin_loongson_pinsrh_0_u (s, t);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+pinsrh_1_u (uint16x4_t s, uint16x4_t t)
+{
+ return __builtin_loongson_pinsrh_1_u (s, t);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+pinsrh_2_u (uint16x4_t s, uint16x4_t t)
+{
+ return __builtin_loongson_pinsrh_2_u (s, t);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+pinsrh_3_u (uint16x4_t s, uint16x4_t t)
+{
+ return __builtin_loongson_pinsrh_3_u (s, t);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+pinsrh_0_s (int16x4_t s, int16x4_t t)
+{
+ return __builtin_loongson_pinsrh_0_s (s, t);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+pinsrh_1_s (int16x4_t s, int16x4_t t)
+{
+ return __builtin_loongson_pinsrh_1_s (s, t);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+pinsrh_2_s (int16x4_t s, int16x4_t t)
+{
+ return __builtin_loongson_pinsrh_2_s (s, t);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+pinsrh_3_s (int16x4_t s, int16x4_t t)
+{
+ return __builtin_loongson_pinsrh_3_s (s, t);
+}
+
+/* Multiply and add: signed halfword products combined into word lanes -- NOTE(review): assumed pmaddwd-style pairwise sums of adjacent products; confirm.  */
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+pmaddhw (int16x4_t s, int16x4_t t)
+{
+ return __builtin_loongson_pmaddhw (s, t);
+}
+
+/* Maximum of signed halfwords, per lane.  */
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+pmaxsh (int16x4_t s, int16x4_t t)
+{
+ return __builtin_loongson_pmaxsh (s, t);
+}
+
+/* Maximum of unsigned bytes, per lane.  */
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+pmaxub (uint8x8_t s, uint8x8_t t)
+{
+ return __builtin_loongson_pmaxub (s, t);
+}
+
+/* Minimum of signed halfwords, per lane.  */
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+pminsh (int16x4_t s, int16x4_t t)
+{
+ return __builtin_loongson_pminsh (s, t);
+}
+
+/* Minimum of unsigned bytes, per lane.  */
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+pminub (uint8x8_t s, uint8x8_t t)
+{
+ return __builtin_loongson_pminub (s, t);
+}
+
+/* Move byte mask: gathers the most significant bit of each byte of s -- NOTE(review): placement of the resulting 8-bit mask inside the returned vector follows the builtin; confirm.  */
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+pmovmskb_u (uint8x8_t s)
+{
+ return __builtin_loongson_pmovmskb_u (s);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+pmovmskb_s (int8x8_t s)
+{
+ return __builtin_loongson_pmovmskb_s (s);
+}
+
+/* Multiply unsigned integers and store the high half of each product.  */
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+pmulhuh (uint16x4_t s, uint16x4_t t)
+{
+ return __builtin_loongson_pmulhuh (s, t);
+}
+
+/* Multiply signed integers and store the high half of each product.  */
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+pmulhh (int16x4_t s, int16x4_t t)
+{
+ return __builtin_loongson_pmulhh (s, t);
+}
+
+/* Multiply signed integers and store the low half of each product.  */
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+pmullh (int16x4_t s, int16x4_t t)
+{
+ return __builtin_loongson_pmullh (s, t);
+}
+
+/* Multiply unsigned word integers into a full 64-bit product -- NOTE(review): which word lanes are multiplied follows the pmuluw instruction; confirm.  */
+__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+pmuluw (uint32x2_t s, uint32x2_t t)
+{
+ return __builtin_loongson_pmuluw (s, t);
+}
+
+/* Absolute difference of unsigned bytes, per lane.  */
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+pasubub (uint8x8_t s, uint8x8_t t)
+{
+ return __builtin_loongson_pasubub (s, t);
+}
+
+/* Sum of unsigned byte integers (horizontal reduction of s).  */
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+biadd (uint8x8_t s)
+{
+ return __builtin_loongson_biadd (s);
+}
+
+/* Sum of absolute differences.
+ Note that this intrinsic expands into two machine instructions:
+ PASUBUB followed by BIADD. */
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+psadbh (uint8x8_t s, uint8x8_t t)
+{
+ return __builtin_loongson_psadbh (s, t);
+}
+
+/* Shuffle the halfwords of s per the 2-bit fields of 'order'.  NOTE(review): the 'dest' argument is ignored -- the builtin receives only (s, order); it appears to be kept for source compatibility (later GCC dropped it).  Confirm before relying on it.  */
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+pshufh_u (uint16x4_t dest, uint16x4_t s, uint8_t order)
+{
+ return __builtin_loongson_pshufh_u (s, order);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+pshufh_s (int16x4_t dest, int16x4_t s, uint8_t order)
+{
+ return __builtin_loongson_pshufh_s (s, order);
+}
+
+/* Shift left logical: every lane is shifted by the same 'amount'.  */
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+psllh_u (uint16x4_t s, uint8_t amount)
+{
+ return __builtin_loongson_psllh_u (s, amount);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+psllh_s (int16x4_t s, uint8_t amount)
+{
+ return __builtin_loongson_psllh_s (s, amount);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+psllw_u (uint32x2_t s, uint8_t amount)
+{
+ return __builtin_loongson_psllw_u (s, amount);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+psllw_s (int32x2_t s, uint8_t amount)
+{
+ return __builtin_loongson_psllw_s (s, amount);
+}
+
+/* Shift right logical (zero-filling): every lane is shifted by 'amount'.  */
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+psrlh_u (uint16x4_t s, uint8_t amount)
+{
+ return __builtin_loongson_psrlh_u (s, amount);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+psrlh_s (int16x4_t s, uint8_t amount)
+{
+ return __builtin_loongson_psrlh_s (s, amount);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+psrlw_u (uint32x2_t s, uint8_t amount)
+{
+ return __builtin_loongson_psrlw_u (s, amount);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+psrlw_s (int32x2_t s, uint8_t amount)
+{
+ return __builtin_loongson_psrlw_s (s, amount);
+}
+
+/* Shift right arithmetic (sign-filling).  Note the _u variants take unsigned vectors yet still perform an arithmetic shift, mirroring the builtins.  */
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+psrah_u (uint16x4_t s, uint8_t amount)
+{
+ return __builtin_loongson_psrah_u (s, amount);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+psrah_s (int16x4_t s, uint8_t amount)
+{
+ return __builtin_loongson_psrah_s (s, amount);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+psraw_u (uint32x2_t s, uint8_t amount)
+{
+ return __builtin_loongson_psraw_u (s, amount);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+psraw_s (int32x2_t s, uint8_t amount)
+{
+ return __builtin_loongson_psraw_s (s, amount);
+}
+
+/* Vector subtraction, treating overflow by wraparound (modular subtract in each lane).  */
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+psubw_u (uint32x2_t s, uint32x2_t t)
+{
+ return __builtin_loongson_psubw_u (s, t);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+psubh_u (uint16x4_t s, uint16x4_t t)
+{
+ return __builtin_loongson_psubh_u (s, t);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+psubb_u (uint8x8_t s, uint8x8_t t)
+{
+ return __builtin_loongson_psubb_u (s, t);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+psubw_s (int32x2_t s, int32x2_t t)
+{
+ return __builtin_loongson_psubw_s (s, t);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+psubh_s (int16x4_t s, int16x4_t t)
+{
+ return __builtin_loongson_psubh_s (s, t);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+psubb_s (int8x8_t s, int8x8_t t)
+{
+ return __builtin_loongson_psubb_s (s, t);
+}
+
+/* Subtraction of doubleword integers, treating overflow by wraparound; operates on whole 64-bit scalars.  */
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+psubd_u (uint64_t s, uint64_t t)
+{
+ return __builtin_loongson_psubd_u (s, t);
+}
+
+__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+psubd_s (int64_t s, int64_t t)
+{
+ return __builtin_loongson_psubd_s (s, t);
+}
+
+/* Vector subtraction, treating overflow by signed saturation (results clamp to the signed range of the lane type).  */
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+psubsh (int16x4_t s, int16x4_t t)
+{
+ return __builtin_loongson_psubsh (s, t);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+psubsb (int8x8_t s, int8x8_t t)
+{
+ return __builtin_loongson_psubsb (s, t);
+}
+
+/* Vector subtraction, treating overflow by unsigned saturation (results clamp to the unsigned range of the lane type).  */
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+psubush (uint16x4_t s, uint16x4_t t)
+{
+ return __builtin_loongson_psubush (s, t);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+psubusb (uint8x8_t s, uint8x8_t t)
+{
+ return __builtin_loongson_psubusb (s, t);
+}
+
+/* Unpack high data: interleave lanes from the high halves of s and t -- NOTE(review): exact lane ordering follows the punpckh<suffix> instructions; confirm.  */
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+punpckhwd_u (uint32x2_t s, uint32x2_t t)
+{
+ return __builtin_loongson_punpckhwd_u (s, t);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+punpckhhw_u (uint16x4_t s, uint16x4_t t)
+{
+ return __builtin_loongson_punpckhhw_u (s, t);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+punpckhbh_u (uint8x8_t s, uint8x8_t t)
+{
+ return __builtin_loongson_punpckhbh_u (s, t);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+punpckhwd_s (int32x2_t s, int32x2_t t)
+{
+ return __builtin_loongson_punpckhwd_s (s, t);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+punpckhhw_s (int16x4_t s, int16x4_t t)
+{
+ return __builtin_loongson_punpckhhw_s (s, t);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+punpckhbh_s (int8x8_t s, int8x8_t t)
+{
+ return __builtin_loongson_punpckhbh_s (s, t);
+}
+
+/* Unpack low data: interleave lanes from the low halves of s and t -- NOTE(review): exact lane ordering follows the punpckl<suffix> instructions; confirm.  */
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+punpcklwd_u (uint32x2_t s, uint32x2_t t)
+{
+ return __builtin_loongson_punpcklwd_u (s, t);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+punpcklhw_u (uint16x4_t s, uint16x4_t t)
+{
+ return __builtin_loongson_punpcklhw_u (s, t);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+punpcklbh_u (uint8x8_t s, uint8x8_t t)
+{
+ return __builtin_loongson_punpcklbh_u (s, t);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+punpcklwd_s (int32x2_t s, int32x2_t t)
+{
+ return __builtin_loongson_punpcklwd_s (s, t);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+punpcklhw_s (int16x4_t s, int16x4_t t)
+{
+ return __builtin_loongson_punpcklhw_s (s, t);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+punpcklbh_s (int8x8_t s, int8x8_t t)
+{
+ return __builtin_loongson_punpcklbh_s (s, t);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/gcc-4.9/gcc/config/mips/loongson.md b/gcc-4.9/gcc/config/mips/loongson.md
new file mode 100644
index 000000000..474033d1e
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/loongson.md
@@ -0,0 +1,939 @@
+;; Machine description for Loongson-specific patterns, such as
+;; ST Microelectronics Loongson-2E/2F etc.
+;; Copyright (C) 2008-2014 Free Software Foundation, Inc.
+;; Contributed by CodeSourcery.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+(define_c_enum "unspec" [
+ UNSPEC_LOONGSON_PAVG
+ UNSPEC_LOONGSON_PCMPEQ
+ UNSPEC_LOONGSON_PCMPGT
+ UNSPEC_LOONGSON_PEXTR
+ UNSPEC_LOONGSON_PINSRH
+ UNSPEC_LOONGSON_VINIT
+ UNSPEC_LOONGSON_PMADD
+ UNSPEC_LOONGSON_PMOVMSK
+ UNSPEC_LOONGSON_PMULHU
+ UNSPEC_LOONGSON_PMULH
+ UNSPEC_LOONGSON_PMULU
+ UNSPEC_LOONGSON_PASUBUB
+ UNSPEC_LOONGSON_BIADD
+ UNSPEC_LOONGSON_PSADBH
+ UNSPEC_LOONGSON_PSHUFH
+ UNSPEC_LOONGSON_PUNPCKH
+ UNSPEC_LOONGSON_PUNPCKL
+ UNSPEC_LOONGSON_PADDD
+ UNSPEC_LOONGSON_PSUBD
+ UNSPEC_LOONGSON_DSLL
+ UNSPEC_LOONGSON_DSRL
+])
+
+;; Mode iterators and attributes.
+
+;; 64-bit vectors of bytes.
+(define_mode_iterator VB [V8QI])
+
+;; 64-bit vectors of halfwords.
+(define_mode_iterator VH [V4HI])
+
+;; 64-bit vectors of words.
+(define_mode_iterator VW [V2SI])
+
+;; 64-bit vectors of halfwords and bytes.
+(define_mode_iterator VHB [V4HI V8QI])
+
+;; 64-bit vectors of words and halfwords.
+(define_mode_iterator VWH [V2SI V4HI])
+
+;; 64-bit vectors of words and bytes
+(define_mode_iterator VWB [V2SI V8QI])
+
+;; 64-bit vectors of words, halfwords and bytes.
+(define_mode_iterator VWHB [V2SI V4HI V8QI])
+
+;; 64-bit vectors of words, halfwords and bytes; and DImode.
+(define_mode_iterator VWHBDI [V2SI V4HI V8QI DI])
+
+;; The Loongson instruction suffixes corresponding to the modes in the
+;; VWHBDI iterator.
+(define_mode_attr V_suffix [(V2SI "w") (V4HI "h") (V8QI "b") (DI "d")])
+
+;; Given a vector type T, the mode of a vector half the size of T
+;; and with the same number of elements.
+(define_mode_attr V_squash [(V2SI "V2HI") (V4HI "V4QI")])
+
+;; Given a vector type T, the mode of a vector the same size as T
+;; but with half as many elements.
+(define_mode_attr V_stretch_half [(V2SI "DI") (V4HI "V2SI") (V8QI "V4HI")])
+
+;; The Loongson instruction suffixes corresponding to the transformation
+;; expressed by V_stretch_half.
+(define_mode_attr V_stretch_half_suffix [(V2SI "wd") (V4HI "hw") (V8QI "bh")])
+
+;; Given a vector type T, the mode of a vector the same size as T
+;; but with twice as many elements.
+(define_mode_attr V_squash_double [(V2SI "V4HI") (V4HI "V8QI")])
+
+;; Given a vector type T, the inner mode.
+(define_mode_attr V_inner [(V8QI "QI") (V4HI "HI") (V2SI "SI")])
+
+;; The Loongson instruction suffixes corresponding to the conversions
+;; specified by V_squash_double.
+(define_mode_attr V_squash_double_suffix [(V2SI "wh") (V4HI "hb")])
+
+;; Move patterns.
+
+;; Expander to legitimize moves involving values of vector modes.
+(define_expand "mov<mode>"
+ [(set (match_operand:VWHB 0)
+ (match_operand:VWHB 1))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+{
+ if (mips_legitimize_move (<MODE>mode, operands[0], operands[1]))
+ DONE;
+})
+
+;; Handle legitimized moves between values of vector modes.
+(define_insn "mov<mode>_internal"
+ [(set (match_operand:VWHB 0 "nonimmediate_operand" "=m,f,d,f, d, m, d")
+ (match_operand:VWHB 1 "move_operand" "f,m,f,dYG,dYG,dYG,m"))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+ { return mips_output_move (operands[0], operands[1]); }
+ [(set_attr "move_type" "fpstore,fpload,mfc,mtc,move,store,load")
+ (set_attr "mode" "DI")])
+
+;; Initialization of a vector.
+
+(define_expand "vec_init<mode>"
+ [(set (match_operand:VWHB 0 "register_operand")
+ (match_operand 1 ""))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+{
+ mips_expand_vector_init (operands[0], operands[1]);
+ DONE;
+})
+
+;; Helper for vec_init. Initialize element 0 of the output from the input.
+;; All other elements are undefined.
+(define_insn "loongson_vec_init1_<mode>"
+ [(set (match_operand:VHB 0 "register_operand" "=f")
+ (unspec:VHB [(truncate:<V_inner>
+ (match_operand:DI 1 "reg_or_0_operand" "Jd"))]
+ UNSPEC_LOONGSON_VINIT))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+ "dmtc1\t%z1,%0"
+ [(set_attr "move_type" "mtc")
+ (set_attr "mode" "DI")])
+
+;; Helper for vec_initv2si: concatenate two SImode FPR values into a V2SI using punpcklwd.
+(define_insn "*vec_concatv2si"
+ [(set (match_operand:V2SI 0 "register_operand" "=f")
+ (vec_concat:V2SI
+ (match_operand:SI 1 "register_operand" "f")
+ (match_operand:SI 2 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+ "punpcklwd\t%0,%1,%2"
+ [(set_attr "type" "fcvt")])
+
+;; Instruction patterns for SIMD instructions.
+
+;; Pack with signed saturation.
+(define_insn "vec_pack_ssat_<mode>"
+ [(set (match_operand:<V_squash_double> 0 "register_operand" "=f")
+ (vec_concat:<V_squash_double>
+ (ss_truncate:<V_squash>
+ (match_operand:VWH 1 "register_operand" "f"))
+ (ss_truncate:<V_squash>
+ (match_operand:VWH 2 "register_operand" "f"))))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+ "packss<V_squash_double_suffix>\t%0,%1,%2"
+ [(set_attr "type" "fmul")])
+
+;; Pack with unsigned saturation.
+(define_insn "vec_pack_usat_<mode>"
+ [(set (match_operand:<V_squash_double> 0 "register_operand" "=f")
+ (vec_concat:<V_squash_double>
+ (us_truncate:<V_squash>
+ (match_operand:VH 1 "register_operand" "f"))
+ (us_truncate:<V_squash>
+ (match_operand:VH 2 "register_operand" "f"))))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+ "packus<V_squash_double_suffix>\t%0,%1,%2"
+ [(set_attr "type" "fmul")])
+
+;; Addition, treating overflow by wraparound.
+(define_insn "add<mode>3"
+ [(set (match_operand:VWHB 0 "register_operand" "=f")
+ (plus:VWHB (match_operand:VWHB 1 "register_operand" "f")
+ (match_operand:VWHB 2 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+ "padd<V_suffix>\t%0,%1,%2"
+ [(set_attr "type" "fadd")])
+
+;; Addition of doubleword integers stored in FP registers.
+;; Overflow is treated by wraparound.
+;; We use 'unspec' instead of 'plus' here to avoid clash with
+;; mips.md::add<mode>3. If 'plus' was used, then such instruction
+;; would be recognized as adddi3 and reload would make it use
+;; GPRs instead of FPRs.
+(define_insn "loongson_paddd"
+ [(set (match_operand:DI 0 "register_operand" "=f")
+ (unspec:DI [(match_operand:DI 1 "register_operand" "f")
+ (match_operand:DI 2 "register_operand" "f")]
+ UNSPEC_LOONGSON_PADDD))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+ "paddd\t%0,%1,%2"
+ [(set_attr "type" "fadd")])
+
+;; Addition, treating overflow by signed saturation.
+(define_insn "ssadd<mode>3"
+ [(set (match_operand:VHB 0 "register_operand" "=f")
+ (ss_plus:VHB (match_operand:VHB 1 "register_operand" "f")
+ (match_operand:VHB 2 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+ "padds<V_suffix>\t%0,%1,%2"
+ [(set_attr "type" "fadd")])
+
+;; Addition, treating overflow by unsigned saturation.
+(define_insn "usadd<mode>3"
+ [(set (match_operand:VHB 0 "register_operand" "=f")
+ (us_plus:VHB (match_operand:VHB 1 "register_operand" "f")
+ (match_operand:VHB 2 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+ "paddus<V_suffix>\t%0,%1,%2"
+ [(set_attr "type" "fadd")])
+
+;; Logical AND NOT.
+(define_insn "loongson_pandn_<V_suffix>"
+ [(set (match_operand:VWHBDI 0 "register_operand" "=f")
+ (and:VWHBDI
+ (not:VWHBDI (match_operand:VWHBDI 1 "register_operand" "f"))
+ (match_operand:VWHBDI 2 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+ "pandn\t%0,%1,%2"
+ [(set_attr "type" "fmul")])
+
+;; Logical AND.
+(define_insn "and<mode>3"
+ [(set (match_operand:VWHB 0 "register_operand" "=f")
+ (and:VWHB (match_operand:VWHB 1 "register_operand" "f")
+ (match_operand:VWHB 2 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+ "and\t%0,%1,%2"
+ [(set_attr "type" "fmul")])
+
+;; Logical OR.
+(define_insn "ior<mode>3"
+ [(set (match_operand:VWHB 0 "register_operand" "=f")
+ (ior:VWHB (match_operand:VWHB 1 "register_operand" "f")
+ (match_operand:VWHB 2 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+ "or\t%0,%1,%2"
+ [(set_attr "type" "fcvt")])
+
+;; Logical XOR.
+(define_insn "xor<mode>3"
+ [(set (match_operand:VWHB 0 "register_operand" "=f")
+ (xor:VWHB (match_operand:VWHB 1 "register_operand" "f")
+ (match_operand:VWHB 2 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+ "xor\t%0,%1,%2"
+ [(set_attr "type" "fmul")])
+
+;; Logical NOR.
+(define_insn "*loongson_nor"
+ [(set (match_operand:VWHB 0 "register_operand" "=f")
+ (and:VWHB
+ (not:VWHB (match_operand:VWHB 1 "register_operand" "f"))
+ (not:VWHB (match_operand:VWHB 2 "register_operand" "f"))))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+ "nor\t%0,%1,%2"
+ [(set_attr "type" "fmul")])
+
+;; Logical NOT.
+(define_insn "one_cmpl<mode>2"
+ [(set (match_operand:VWHB 0 "register_operand" "=f")
+ (not:VWHB (match_operand:VWHB 1 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+ "nor\t%0,%1,%1"
+ [(set_attr "type" "fmul")])
+
+;; Average.
+(define_insn "loongson_pavg<V_suffix>"
+ [(set (match_operand:VHB 0 "register_operand" "=f")
+ (unspec:VHB [(match_operand:VHB 1 "register_operand" "f")
+ (match_operand:VHB 2 "register_operand" "f")]
+ UNSPEC_LOONGSON_PAVG))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+ "pavg<V_suffix>\t%0,%1,%2"
+ [(set_attr "type" "fadd")])
+
+;; Equality test.
+(define_insn "loongson_pcmpeq<V_suffix>"
+ [(set (match_operand:VWHB 0 "register_operand" "=f")
+ (unspec:VWHB [(match_operand:VWHB 1 "register_operand" "f")
+ (match_operand:VWHB 2 "register_operand" "f")]
+ UNSPEC_LOONGSON_PCMPEQ))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+ "pcmpeq<V_suffix>\t%0,%1,%2"
+ [(set_attr "type" "fadd")])
+
+;; Greater-than test.
+(define_insn "loongson_pcmpgt<V_suffix>"
+ [(set (match_operand:VWHB 0 "register_operand" "=f")
+ (unspec:VWHB [(match_operand:VWHB 1 "register_operand" "f")
+ (match_operand:VWHB 2 "register_operand" "f")]
+ UNSPEC_LOONGSON_PCMPGT))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+ "pcmpgt<V_suffix>\t%0,%1,%2"
+ [(set_attr "type" "fadd")])
+
+;; Extract halfword.
+(define_insn "loongson_pextrh"
+ [(set (match_operand:V4HI 0 "register_operand" "=f")
+ (unspec:V4HI [(match_operand:V4HI 1 "register_operand" "f")
+ (match_operand:SI 2 "register_operand" "f")]
+ UNSPEC_LOONGSON_PEXTR))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+ "pextrh\t%0,%1,%2"
+ [(set_attr "type" "fcvt")])
+
+;; Insert halfword.  pinsrh_N replaces element N of operand 1 with the
+;; low halfword of operand 2.  Each variant is modeled exactly as a
+;; vec_select over the concatenation of the two inputs: element N of
+;; the result comes from index 4 (the first element of operand 2), the
+;; other three elements are passed through from operand 1 (indices
+;; 0..3).  Writing it this way lets the vec_perm machinery recognize
+;; these instructions.
+(define_insn "loongson_pinsrh_0"
+ [(set (match_operand:V4HI 0 "register_operand" "=f")
+ (vec_select:V4HI
+ (vec_concat:V8HI
+ (match_operand:V4HI 1 "register_operand" "f")
+ (match_operand:V4HI 2 "register_operand" "f"))
+ (parallel [(const_int 4) (const_int 1)
+ (const_int 2) (const_int 3)])))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+ "pinsrh_0\t%0,%1,%2"
+ [(set_attr "type" "fdiv")])
+
+;; As pinsrh_0, but element 1 is replaced.
+;; NOTE(review): type "fdiv" here presumably just picks a pipeline
+;; class for scheduling -- confirm.
+(define_insn "loongson_pinsrh_1"
+ [(set (match_operand:V4HI 0 "register_operand" "=f")
+ (vec_select:V4HI
+ (vec_concat:V8HI
+ (match_operand:V4HI 1 "register_operand" "f")
+ (match_operand:V4HI 2 "register_operand" "f"))
+ (parallel [(const_int 0) (const_int 4)
+ (const_int 2) (const_int 3)])))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+ "pinsrh_1\t%0,%1,%2"
+ [(set_attr "type" "fdiv")])
+
+;; As pinsrh_0, but element 2 is replaced.
+(define_insn "loongson_pinsrh_2"
+ [(set (match_operand:V4HI 0 "register_operand" "=f")
+ (vec_select:V4HI
+ (vec_concat:V8HI
+ (match_operand:V4HI 1 "register_operand" "f")
+ (match_operand:V4HI 2 "register_operand" "f"))
+ (parallel [(const_int 0) (const_int 1)
+ (const_int 4) (const_int 3)])))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+ "pinsrh_2\t%0,%1,%2"
+ [(set_attr "type" "fdiv")])
+
+;; As pinsrh_0, but element 3 is replaced.
+(define_insn "loongson_pinsrh_3"
+ [(set (match_operand:V4HI 0 "register_operand" "=f")
+ (vec_select:V4HI
+ (vec_concat:V8HI
+ (match_operand:V4HI 1 "register_operand" "f")
+ (match_operand:V4HI 2 "register_operand" "f"))
+ (parallel [(const_int 0) (const_int 1)
+ (const_int 2) (const_int 4)])))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+ "pinsrh_3\t%0,%1,%2"
+ [(set_attr "type" "fdiv")])
+
+;; Variable-index insert: %3 is a compile-time constant 0..3, so the
+;; output template selects one of pinsrh_0..pinsrh_3.  Operand 2 (the
+;; new element value) must be an SImode FP register; the vec_setv4hi
+;; expander below performs the HImode-to-SImode widening.
+(define_insn "*vec_setv4hi"
+ [(set (match_operand:V4HI 0 "register_operand" "=f")
+ (unspec:V4HI [(match_operand:V4HI 1 "register_operand" "f")
+ (match_operand:SI 2 "register_operand" "f")
+ (match_operand:SI 3 "const_0_to_3_operand" "")]
+ UNSPEC_LOONGSON_PINSRH))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+ "pinsrh_%3\t%0,%1,%2"
+ [(set_attr "type" "fdiv")])
+
+;; vec_set optab entry point.  The optab hands us the new element as a
+;; HImode register (operand 2), but the *vec_setv4hi insn above
+;; requires it in SImode.  Widen operand 2 before the pattern is
+;; emitted.  Note it must be operand 2 that is converted: substituting
+;; an SImode register for operand 1 would fail the V4HI
+;; register_operand predicate of the insn.
+(define_expand "vec_setv4hi"
+ [(set (match_operand:V4HI 0 "register_operand" "=f")
+ (unspec:V4HI [(match_operand:V4HI 1 "register_operand" "f")
+ (match_operand:HI 2 "register_operand" "f")
+ (match_operand:SI 3 "const_0_to_3_operand" "")]
+ UNSPEC_LOONGSON_PINSRH))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+{
+ /* Widen the HImode element to the SImode register the insn needs;
+ gen_lowpart yields a (paradoxical) lowpart subreg of operand 2.  */
+ rtx ext = gen_reg_rtx (SImode);
+ emit_move_insn (ext, gen_lowpart (SImode, operands[2]));
+ operands[2] = ext;
+})
+
+;; Multiply and add packed integers.  The mode change (V4HI inputs,
+;; V2SI result) reflects that halfword products are accumulated
+;; pairwise into word results; the exact pairing is hidden in the
+;; unspec.
+(define_insn "loongson_pmaddhw"
+ [(set (match_operand:V2SI 0 "register_operand" "=f")
+ (unspec:V2SI [(match_operand:V4HI 1 "register_operand" "f")
+ (match_operand:V4HI 2 "register_operand" "f")]
+ UNSPEC_LOONGSON_PMADD))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+ "pmaddhw\t%0,%1,%2"
+ [(set_attr "type" "fmul")])
+
+;; Signed dot-product optab: multiply-accumulate the element pairs
+;; with pmaddhw, then add the V2SI accumulator (operand 3).
+(define_expand "sdot_prodv4hi"
+ [(match_operand:V2SI 0 "register_operand" "")
+ (match_operand:V4HI 1 "register_operand" "")
+ (match_operand:V4HI 2 "register_operand" "")
+ (match_operand:V2SI 3 "register_operand" "")]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+{
+ rtx t = gen_reg_rtx (V2SImode);
+ emit_insn (gen_loongson_pmaddhw (t, operands[1], operands[2]));
+ emit_insn (gen_addv2si3 (operands[0], t, operands[3]));
+ DONE;
+})
+
+;; Min/max family.  The hardware only provides signed halfword
+;; (pmaxsh/pminsh) and unsigned byte (pmaxub/pminub) forms directly;
+;; the smax/smin expanders below synthesize the remaining signed modes
+;; (VWB: words and bytes) from pcmpgt via mips_expand_vec_minmax.
+
+;; Maximum of signed halfwords.
+(define_insn "smaxv4hi3"
+ [(set (match_operand:V4HI 0 "register_operand" "=f")
+ (smax:V4HI (match_operand:V4HI 1 "register_operand" "f")
+ (match_operand:V4HI 2 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+ "pmaxsh\t%0,%1,%2"
+ [(set_attr "type" "fadd")])
+
+;; Signed max for word/byte vectors, via compare-and-select.
+(define_expand "smax<mode>3"
+ [(match_operand:VWB 0 "register_operand" "")
+ (match_operand:VWB 1 "register_operand" "")
+ (match_operand:VWB 2 "register_operand" "")]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+{
+ mips_expand_vec_minmax (operands[0], operands[1], operands[2],
+ gen_loongson_pcmpgt<V_suffix>, false);
+ DONE;
+})
+
+;; Maximum of unsigned bytes.
+(define_insn "umaxv8qi3"
+ [(set (match_operand:V8QI 0 "register_operand" "=f")
+ (umax:V8QI (match_operand:V8QI 1 "register_operand" "f")
+ (match_operand:V8QI 2 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+ "pmaxub\t%0,%1,%2"
+ [(set_attr "type" "fadd")])
+
+;; Minimum of signed halfwords.
+(define_insn "sminv4hi3"
+ [(set (match_operand:V4HI 0 "register_operand" "=f")
+ (smin:V4HI (match_operand:V4HI 1 "register_operand" "f")
+ (match_operand:V4HI 2 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+ "pminsh\t%0,%1,%2"
+ [(set_attr "type" "fadd")])
+
+;; Signed min for word/byte vectors; same helper as smax, with the
+;; final argument requesting min instead of max.
+(define_expand "smin<mode>3"
+ [(match_operand:VWB 0 "register_operand" "")
+ (match_operand:VWB 1 "register_operand" "")
+ (match_operand:VWB 2 "register_operand" "")]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+{
+ mips_expand_vec_minmax (operands[0], operands[1], operands[2],
+ gen_loongson_pcmpgt<V_suffix>, true);
+ DONE;
+})
+
+;; Minimum of unsigned bytes.
+(define_insn "uminv8qi3"
+ [(set (match_operand:V8QI 0 "register_operand" "=f")
+ (umin:V8QI (match_operand:V8QI 1 "register_operand" "f")
+ (match_operand:V8QI 2 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+ "pminub\t%0,%1,%2"
+ [(set_attr "type" "fadd")])
+
+;; Move byte mask.
+(define_insn "loongson_pmovmsk<V_suffix>"
+ [(set (match_operand:VB 0 "register_operand" "=f")
+ (unspec:VB [(match_operand:VB 1 "register_operand" "f")]
+ UNSPEC_LOONGSON_PMOVMSK))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+ "pmovmsk<V_suffix>\t%0,%1"
+ [(set_attr "type" "fabs")])
+
+;; Multiply unsigned integers and store high result.  Unspec rather
+;; than a truncated widening mult; applies to halfword vectors (VH).
+(define_insn "umul<mode>3_highpart"
+ [(set (match_operand:VH 0 "register_operand" "=f")
+ (unspec:VH [(match_operand:VH 1 "register_operand" "f")
+ (match_operand:VH 2 "register_operand" "f")]
+ UNSPEC_LOONGSON_PMULHU))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+ "pmulhu<V_suffix>\t%0,%1,%2"
+ [(set_attr "type" "fmul")])
+
+;; Multiply signed integers and store high result.
+(define_insn "smul<mode>3_highpart"
+ [(set (match_operand:VH 0 "register_operand" "=f")
+ (unspec:VH [(match_operand:VH 1 "register_operand" "f")
+ (match_operand:VH 2 "register_operand" "f")]
+ UNSPEC_LOONGSON_PMULH))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+ "pmulh<V_suffix>\t%0,%1,%2"
+ [(set_attr "type" "fmul")])
+
+;; Multiply signed integers and store low result.  The low half of the
+;; product is sign-independent, so plain "mult" RTL is usable here.
+(define_insn "mul<mode>3"
+ [(set (match_operand:VH 0 "register_operand" "=f")
+ (mult:VH (match_operand:VH 1 "register_operand" "f")
+ (match_operand:VH 2 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+ "pmull<V_suffix>\t%0,%1,%2"
+ [(set_attr "type" "fmul")])
+
+;; Multiply unsigned word integers.  Produces a single DImode result
+;; from the VW (word-vector) inputs; which elements participate is
+;; hidden behind the unspec.
+(define_insn "loongson_pmulu<V_suffix>"
+ [(set (match_operand:DI 0 "register_operand" "=f")
+ (unspec:DI [(match_operand:VW 1 "register_operand" "f")
+ (match_operand:VW 2 "register_operand" "f")]
+ UNSPEC_LOONGSON_PMULU))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+ "pmulu<V_suffix>\t%0,%1,%2"
+ [(set_attr "type" "fmul")])
+
+;; Absolute difference.
+(define_insn "loongson_pasubub"
+ [(set (match_operand:VB 0 "register_operand" "=f")
+ (unspec:VB [(match_operand:VB 1 "register_operand" "f")
+ (match_operand:VB 2 "register_operand" "f")]
+ UNSPEC_LOONGSON_PASUBUB))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+ "pasubub\t%0,%1,%2"
+ [(set_attr "type" "fadd")])
+
+;; Sum of unsigned byte integers.  Result mode is <V_stretch_half>
+;; (elements twice as wide, half as many) to hold the widened sum.
+(define_insn "loongson_biadd"
+ [(set (match_operand:<V_stretch_half> 0 "register_operand" "=f")
+ (unspec:<V_stretch_half> [(match_operand:VB 1 "register_operand" "f")]
+ UNSPEC_LOONGSON_BIADD))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+ "biadd\t%0,%1"
+ [(set_attr "type" "fabs")])
+
+;; Same biadd instruction, exposed with a V8QI result mode for the
+;; (old-style) reduc_uplus optab, which returns a full vector.
+(define_insn "reduc_uplus_v8qi"
+ [(set (match_operand:V8QI 0 "register_operand" "=f")
+ (unspec:V8QI [(match_operand:V8QI 1 "register_operand" "f")]
+ UNSPEC_LOONGSON_BIADD))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+ "biadd\t%0,%1"
+ [(set_attr "type" "fabs")])
+
+;; Sum of absolute differences.  Emitted as a two-instruction sequence
+;; (pasubub then biadd), with the destination register %0 used as the
+;; intermediate -- note the output template contains both insns.
+(define_insn "loongson_psadbh"
+ [(set (match_operand:<V_stretch_half> 0 "register_operand" "=f")
+ (unspec:<V_stretch_half> [(match_operand:VB 1 "register_operand" "f")
+ (match_operand:VB 2 "register_operand" "f")]
+ UNSPEC_LOONGSON_PSADBH))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+ "pasubub\t%0,%1,%2;biadd\t%0,%0"
+ [(set_attr "type" "fadd")])
+
+;; Shuffle halfwords.  Operand 2 is the shuffle-control value, held in
+;; an SImode FP register.
+(define_insn "loongson_pshufh"
+ [(set (match_operand:VH 0 "register_operand" "=f")
+ (unspec:VH [(match_operand:VH 1 "register_operand" "f")
+ (match_operand:SI 2 "register_operand" "f")]
+ UNSPEC_LOONGSON_PSHUFH))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+ "pshufh\t%0,%1,%2"
+ [(set_attr "type" "fmul")])
+
+;; Element-wise shifts (VWH: word and halfword vectors).  All elements
+;; are shifted by the same SImode count, which must be in an FP
+;; register.
+
+;; Shift left logical.
+(define_insn "ashl<mode>3"
+ [(set (match_operand:VWH 0 "register_operand" "=f")
+ (ashift:VWH (match_operand:VWH 1 "register_operand" "f")
+ (match_operand:SI 2 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+ "psll<V_suffix>\t%0,%1,%2"
+ [(set_attr "type" "fcvt")])
+
+;; Shift right arithmetic.
+(define_insn "ashr<mode>3"
+ [(set (match_operand:VWH 0 "register_operand" "=f")
+ (ashiftrt:VWH (match_operand:VWH 1 "register_operand" "f")
+ (match_operand:SI 2 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+ "psra<V_suffix>\t%0,%1,%2"
+ [(set_attr "type" "fcvt")])
+
+;; Shift right logical.
+(define_insn "lshr<mode>3"
+ [(set (match_operand:VWH 0 "register_operand" "=f")
+ (lshiftrt:VWH (match_operand:VWH 1 "register_operand" "f")
+ (match_operand:SI 2 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+ "psrl<V_suffix>\t%0,%1,%2"
+ [(set_attr "type" "fcvt")])
+
+;; Subtraction, treating overflow by wraparound.
+(define_insn "sub<mode>3"
+ [(set (match_operand:VWHB 0 "register_operand" "=f")
+ (minus:VWHB (match_operand:VWHB 1 "register_operand" "f")
+ (match_operand:VWHB 2 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+ "psub<V_suffix>\t%0,%1,%2"
+ [(set_attr "type" "fadd")])
+
+;; Subtraction of doubleword integers stored in FP registers.
+;; Overflow is treated by wraparound.
+;; See loongson_paddd for the reason we use 'unspec' rather than
+;; 'minus' here.
+(define_insn "loongson_psubd"
+ [(set (match_operand:DI 0 "register_operand" "=f")
+ (unspec:DI [(match_operand:DI 1 "register_operand" "f")
+ (match_operand:DI 2 "register_operand" "f")]
+ UNSPEC_LOONGSON_PSUBD))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+ "psubd\t%0,%1,%2"
+ [(set_attr "type" "fadd")])
+
+;; Subtraction, treating overflow by signed saturation.
+;; Standard sssub optab name, mapped onto psubs.
+(define_insn "sssub<mode>3"
+ [(set (match_operand:VHB 0 "register_operand" "=f")
+ (ss_minus:VHB (match_operand:VHB 1 "register_operand" "f")
+ (match_operand:VHB 2 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+ "psubs<V_suffix>\t%0,%1,%2"
+ [(set_attr "type" "fadd")])
+
+;; Subtraction, treating overflow by unsigned saturation.
+(define_insn "ussub<mode>3"
+ [(set (match_operand:VHB 0 "register_operand" "=f")
+ (us_minus:VHB (match_operand:VHB 1 "register_operand" "f")
+ (match_operand:VHB 2 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+ "psubus<V_suffix>\t%0,%1,%2"
+ [(set_attr "type" "fadd")])
+
+;; Unpack high data.  Recall that Loongson only runs in little-endian.
+;; Each pattern interleaves the high halves of the two inputs,
+;; expressed as a vec_select over their concatenation.  The _qi/_hi
+;; variants describe the same machine instruction at other vector
+;; modes -- presumably so mips_expand_vec_perm_const can match them;
+;; confirm against mips.c.
+(define_insn "loongson_punpckhbh"
+ [(set (match_operand:V8QI 0 "register_operand" "=f")
+ (vec_select:V8QI
+ (vec_concat:V16QI
+ (match_operand:V8QI 1 "register_operand" "f")
+ (match_operand:V8QI 2 "register_operand" "f"))
+ (parallel [(const_int 4) (const_int 12)
+ (const_int 5) (const_int 13)
+ (const_int 6) (const_int 14)
+ (const_int 7) (const_int 15)])))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+ "punpckhbh\t%0,%1,%2"
+ [(set_attr "type" "fdiv")])
+
+(define_insn "loongson_punpckhhw"
+ [(set (match_operand:V4HI 0 "register_operand" "=f")
+ (vec_select:V4HI
+ (vec_concat:V8HI
+ (match_operand:V4HI 1 "register_operand" "f")
+ (match_operand:V4HI 2 "register_operand" "f"))
+ (parallel [(const_int 2) (const_int 6)
+ (const_int 3) (const_int 7)])))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+ "punpckhhw\t%0,%1,%2"
+ [(set_attr "type" "fdiv")])
+
+;; punpckhhw viewed as a byte shuffle: halfword index pairs become
+;; adjacent byte-index pairs.
+(define_insn "loongson_punpckhhw_qi"
+ [(set (match_operand:V8QI 0 "register_operand" "=f")
+ (vec_select:V8QI
+ (vec_concat:V16QI
+ (match_operand:V8QI 1 "register_operand" "f")
+ (match_operand:V8QI 2 "register_operand" "f"))
+ (parallel [(const_int 4) (const_int 5)
+ (const_int 12) (const_int 13)
+ (const_int 6) (const_int 7)
+ (const_int 14) (const_int 15)])))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+ "punpckhhw\t%0,%1,%2"
+ [(set_attr "type" "fdiv")])
+
+(define_insn "loongson_punpckhwd"
+ [(set (match_operand:V2SI 0 "register_operand" "=f")
+ (vec_select:V2SI
+ (vec_concat:V4SI
+ (match_operand:V2SI 1 "register_operand" "f")
+ (match_operand:V2SI 2 "register_operand" "f"))
+ (parallel [(const_int 1) (const_int 3)])))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+ "punpckhwd\t%0,%1,%2"
+ [(set_attr "type" "fcvt")])
+
+;; punpckhwd viewed as a byte shuffle.
+(define_insn "loongson_punpckhwd_qi"
+ [(set (match_operand:V8QI 0 "register_operand" "=f")
+ (vec_select:V8QI
+ (vec_concat:V16QI
+ (match_operand:V8QI 1 "register_operand" "f")
+ (match_operand:V8QI 2 "register_operand" "f"))
+ (parallel [(const_int 4) (const_int 5)
+ (const_int 6) (const_int 7)
+ (const_int 12) (const_int 13)
+ (const_int 14) (const_int 15)])))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+ "punpckhwd\t%0,%1,%2"
+ [(set_attr "type" "fcvt")])
+
+;; punpckhwd viewed as a halfword shuffle.
+(define_insn "loongson_punpckhwd_hi"
+ [(set (match_operand:V4HI 0 "register_operand" "=f")
+ (vec_select:V4HI
+ (vec_concat:V8HI
+ (match_operand:V4HI 1 "register_operand" "f")
+ (match_operand:V4HI 2 "register_operand" "f"))
+ (parallel [(const_int 2) (const_int 3)
+ (const_int 6) (const_int 7)])))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+ "punpckhwd\t%0,%1,%2"
+ [(set_attr "type" "fcvt")])
+
+;; Unpack low data.  Mirror of the punpckh patterns above, selecting
+;; the low halves of the two inputs.  The starred (_qi/_hi) variants
+;; exist only for combine/vec_perm matching and define no gen_*
+;; function.
+(define_insn "loongson_punpcklbh"
+ [(set (match_operand:V8QI 0 "register_operand" "=f")
+ (vec_select:V8QI
+ (vec_concat:V16QI
+ (match_operand:V8QI 1 "register_operand" "f")
+ (match_operand:V8QI 2 "register_operand" "f"))
+ (parallel [(const_int 0) (const_int 8)
+ (const_int 1) (const_int 9)
+ (const_int 2) (const_int 10)
+ (const_int 3) (const_int 11)])))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+ "punpcklbh\t%0,%1,%2"
+ [(set_attr "type" "fdiv")])
+
+(define_insn "loongson_punpcklhw"
+ [(set (match_operand:V4HI 0 "register_operand" "=f")
+ (vec_select:V4HI
+ (vec_concat:V8HI
+ (match_operand:V4HI 1 "register_operand" "f")
+ (match_operand:V4HI 2 "register_operand" "f"))
+ (parallel [(const_int 0) (const_int 4)
+ (const_int 1) (const_int 5)])))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+ "punpcklhw\t%0,%1,%2"
+ [(set_attr "type" "fdiv")])
+
+;; punpcklhw viewed as a byte shuffle.
+(define_insn "*loongson_punpcklhw_qi"
+ [(set (match_operand:V8QI 0 "register_operand" "=f")
+ (vec_select:V8QI
+ (vec_concat:V16QI
+ (match_operand:V8QI 1 "register_operand" "f")
+ (match_operand:V8QI 2 "register_operand" "f"))
+ (parallel [(const_int 0) (const_int 1)
+ (const_int 8) (const_int 9)
+ (const_int 2) (const_int 3)
+ (const_int 10) (const_int 11)])))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+ "punpcklhw\t%0,%1,%2"
+ [(set_attr "type" "fdiv")])
+
+(define_insn "loongson_punpcklwd"
+ [(set (match_operand:V2SI 0 "register_operand" "=f")
+ (vec_select:V2SI
+ (vec_concat:V4SI
+ (match_operand:V2SI 1 "register_operand" "f")
+ (match_operand:V2SI 2 "register_operand" "f"))
+ (parallel [(const_int 0) (const_int 2)])))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+ "punpcklwd\t%0,%1,%2"
+ [(set_attr "type" "fcvt")])
+
+;; punpcklwd viewed as a byte shuffle.
+(define_insn "*loongson_punpcklwd_qi"
+ [(set (match_operand:V8QI 0 "register_operand" "=f")
+ (vec_select:V8QI
+ (vec_concat:V16QI
+ (match_operand:V8QI 1 "register_operand" "f")
+ (match_operand:V8QI 2 "register_operand" "f"))
+ (parallel [(const_int 0) (const_int 1)
+ (const_int 2) (const_int 3)
+ (const_int 8) (const_int 9)
+ (const_int 10) (const_int 11)])))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+ "punpcklwd\t%0,%1,%2"
+ [(set_attr "type" "fcvt")])
+
+;; punpcklwd viewed as a halfword shuffle.
+(define_insn "*loongson_punpcklwd_hi"
+ [(set (match_operand:V4HI 0 "register_operand" "=f")
+ (vec_select:V4HI
+ (vec_concat:V8HI
+ (match_operand:V4HI 1 "register_operand" "f")
+ (match_operand:V4HI 2 "register_operand" "f"))
+ (parallel [(const_int 0) (const_int 1)
+ (const_int 4) (const_int 5)])))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+ "punpcklwd\t%0,%1,%2"
+ [(set_attr "type" "fcvt")])
+
+;; Constant permutation optab.  Operand 3 is the (constant) selector;
+;; mips_expand_vec_perm_const tries to express it with the pinsrh /
+;; punpck patterns above and reports success or failure.
+(define_expand "vec_perm_const<mode>"
+ [(match_operand:VWHB 0 "register_operand" "")
+ (match_operand:VWHB 1 "register_operand" "")
+ (match_operand:VWHB 2 "register_operand" "")
+ (match_operand:VWHB 3 "" "")]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+{
+ if (mips_expand_vec_perm_const (operands))
+ DONE;
+ else
+ FAIL;
+})
+
+;; Widening unpack expanders.  All four delegate to
+;; mips_expand_vec_unpack (operands, unsigned_p, high_p): the result
+;; mode <V_stretch_half> has elements twice as wide, covering half the
+;; input vector.
+(define_expand "vec_unpacks_lo_<mode>"
+ [(match_operand:<V_stretch_half> 0 "register_operand" "")
+ (match_operand:VHB 1 "register_operand" "")]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+{
+ mips_expand_vec_unpack (operands, false, false);
+ DONE;
+})
+
+(define_expand "vec_unpacks_hi_<mode>"
+ [(match_operand:<V_stretch_half> 0 "register_operand" "")
+ (match_operand:VHB 1 "register_operand" "")]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+{
+ mips_expand_vec_unpack (operands, false, true);
+ DONE;
+})
+
+(define_expand "vec_unpacku_lo_<mode>"
+ [(match_operand:<V_stretch_half> 0 "register_operand" "")
+ (match_operand:VHB 1 "register_operand" "")]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+{
+ mips_expand_vec_unpack (operands, true, false);
+ DONE;
+})
+
+(define_expand "vec_unpacku_hi_<mode>"
+ [(match_operand:<V_stretch_half> 0 "register_operand" "")
+ (match_operand:VHB 1 "register_operand" "")]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+{
+ mips_expand_vec_unpack (operands, true, true);
+ DONE;
+})
+
+;; Whole vector shifts, used for reduction epilogues.  The 64-bit
+;; vector is shifted as a single unit with dsll/dsrl; operand 2 is the
+;; shift count in an FP register.  VWHBDI also admits plain DImode.
+(define_insn "vec_shl_<mode>"
+ [(set (match_operand:VWHBDI 0 "register_operand" "=f")
+ (unspec:VWHBDI [(match_operand:VWHBDI 1 "register_operand" "f")
+ (match_operand:SI 2 "register_operand" "f")]
+ UNSPEC_LOONGSON_DSLL))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+ "dsll\t%0,%1,%2"
+ [(set_attr "type" "fcvt")])
+
+(define_insn "vec_shr_<mode>"
+ [(set (match_operand:VWHBDI 0 "register_operand" "=f")
+ (unspec:VWHBDI [(match_operand:VWHBDI 1 "register_operand" "f")
+ (match_operand:SI 2 "register_operand" "f")]
+ UNSPEC_LOONGSON_DSRL))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+ "dsrl\t%0,%1,%2"
+ [(set_attr "type" "fcvt")])
+
+;; Reduction expanders.  Each hands mips_expand_vec_reduc the
+;; generator for the corresponding binary operation; the helper builds
+;; the log2(nelts) shift-and-combine sequence.
+(define_expand "reduc_uplus_<mode>"
+ [(match_operand:VWH 0 "register_operand" "")
+ (match_operand:VWH 1 "register_operand" "")]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+{
+ mips_expand_vec_reduc (operands[0], operands[1], gen_add<mode>3);
+ DONE;
+})
+
+; ??? Given that we're not describing a widening reduction, we should
+; not have separate optabs for signed and unsigned.
+(define_expand "reduc_splus_<mode>"
+ [(match_operand:VWHB 0 "register_operand" "")
+ (match_operand:VWHB 1 "register_operand" "")]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+{
+ emit_insn (gen_reduc_uplus_<mode>(operands[0], operands[1]));
+ DONE;
+})
+
+(define_expand "reduc_smax_<mode>"
+ [(match_operand:VWHB 0 "register_operand" "")
+ (match_operand:VWHB 1 "register_operand" "")]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+{
+ mips_expand_vec_reduc (operands[0], operands[1], gen_smax<mode>3);
+ DONE;
+})
+
+(define_expand "reduc_smin_<mode>"
+ [(match_operand:VWHB 0 "register_operand" "")
+ (match_operand:VWHB 1 "register_operand" "")]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+{
+ mips_expand_vec_reduc (operands[0], operands[1], gen_smin<mode>3);
+ DONE;
+})
+
+;; Unsigned min/max reductions exist only for byte vectors (VB), since
+;; only pmaxub/pminub are available above.
+(define_expand "reduc_umax_<mode>"
+ [(match_operand:VB 0 "register_operand" "")
+ (match_operand:VB 1 "register_operand" "")]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+{
+ mips_expand_vec_reduc (operands[0], operands[1], gen_umax<mode>3);
+ DONE;
+})
+
+(define_expand "reduc_umin_<mode>"
+ [(match_operand:VB 0 "register_operand" "")
+ (match_operand:VB 1 "register_operand" "")]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
+{
+ mips_expand_vec_reduc (operands[0], operands[1], gen_umin<mode>3);
+ DONE;
+})
+
+;; Integer division and modulus.  For integer multiplication, see mips.md.
+;; These use the Loongson-specific GPR divide instructions rather than
+;; HI/LO: div.g/mod.g on Loongson 2E/2F, gsdiv/gsmod on 3A.  <d> picks
+;; the doubleword form for DImode, <u> the unsigned form.  The
+;; earlyclobber "=&d" keeps the output distinct from both inputs.
+;; mips_output_division wraps the template with any needed
+;; divide-by-zero checking.
+
+(define_insn "<u>div<mode>3"
+ [(set (match_operand:GPR 0 "register_operand" "=&d")
+ (any_div:GPR (match_operand:GPR 1 "register_operand" "d")
+ (match_operand:GPR 2 "register_operand" "d")))]
+ "TARGET_LOONGSON_2EF || TARGET_LOONGSON_3A"
+ {
+ if (TARGET_LOONGSON_2EF)
+ return mips_output_division ("<d>div<u>.g\t%0,%1,%2", operands);
+ else
+ return mips_output_division ("gs<d>div<u>\t%0,%1,%2", operands);
+ }
+ [(set_attr "type" "idiv3")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "<u>mod<mode>3"
+ [(set (match_operand:GPR 0 "register_operand" "=&d")
+ (any_mod:GPR (match_operand:GPR 1 "register_operand" "d")
+ (match_operand:GPR 2 "register_operand" "d")))]
+ "TARGET_LOONGSON_2EF || TARGET_LOONGSON_3A"
+ {
+ if (TARGET_LOONGSON_2EF)
+ return mips_output_division ("<d>mod<u>.g\t%0,%1,%2", operands);
+ else
+ return mips_output_division ("gs<d>mod<u>\t%0,%1,%2", operands);
+ }
+ [(set_attr "type" "idiv3")
+ (set_attr "mode" "<MODE>")])
diff --git a/gcc-4.9/gcc/config/mips/loongson2ef.md b/gcc-4.9/gcc/config/mips/loongson2ef.md
new file mode 100644
index 000000000..0f71d0bb6
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/loongson2ef.md
@@ -0,0 +1,252 @@
+;; Pipeline model for ST Microelectronics Loongson-2E/2F cores.
+
+;; Copyright (C) 2008-2014 Free Software Foundation, Inc.
+;; Contributed by CodeSourcery.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; Unspec numbers for the four dummy insns below that subscribe the
+;; round-robin "turn" units; see sched_ls2_dfa_post_advance_cycle in
+;; mips.c.
+(define_c_enum "unspec" [
+ UNSPEC_LOONGSON_ALU1_TURN_ENABLED_INSN
+ UNSPEC_LOONGSON_ALU2_TURN_ENABLED_INSN
+ UNSPEC_LOONGSON_FALU1_TURN_ENABLED_INSN
+ UNSPEC_LOONGSON_FALU2_TURN_ENABLED_INSN
+])
+
+;; Automaton for integer instructions.
+(define_automaton "ls2_alu")
+
+;; ALU1 and ALU2.
+;; We need to query these units to adjust round-robin counter.
+(define_query_cpu_unit "ls2_alu1_core,ls2_alu2_core" "ls2_alu")
+
+;; Pseudo units to help modeling of ALU1/2 round-robin dispatch strategy.
+(define_cpu_unit "ls2_alu1_turn,ls2_alu2_turn" "ls2_alu")
+
+;; Pseudo units to enable/disable ls2_alu[12]_turn units.
+;; ls2_alu[12]_turn unit can be subscribed only after ls2_alu[12]_turn_enabled
+;; unit is subscribed.
+(define_cpu_unit "ls2_alu1_turn_enabled,ls2_alu2_turn_enabled" "ls2_alu")
+(presence_set "ls2_alu1_turn" "ls2_alu1_turn_enabled")
+(presence_set "ls2_alu2_turn" "ls2_alu2_turn_enabled")
+
+;; Reservations for ALU1 (ALU2) instructions.
+;; Instruction goes to ALU1 (ALU2) and makes next ALU1/2 instruction to
+;; be dispatched to ALU2 (ALU1).
+;; The second alternative (core alone) covers the case where the other
+;; unit's turn-enabled pseudo is already busy this cycle.
+(define_reservation "ls2_alu1"
+ "(ls2_alu1_core+ls2_alu2_turn_enabled)|ls2_alu1_core")
+(define_reservation "ls2_alu2"
+ "(ls2_alu2_core+ls2_alu1_turn_enabled)|ls2_alu2_core")
+
+;; Reservation for ALU1/2 instructions.
+;; Instruction will go to ALU1 iff ls2_alu1_turn_enabled is subscribed and
+;; switch the turn to ALU2 by subscribing ls2_alu2_turn_enabled.
+;; Or to ALU2 otherwise.
+(define_reservation "ls2_alu"
+ "(ls2_alu1_core+ls2_alu1_turn+ls2_alu2_turn_enabled)
+ |(ls2_alu1_core+ls2_alu1_turn)
+ |(ls2_alu2_core+ls2_alu2_turn+ls2_alu1_turn_enabled)
+ |(ls2_alu2_core+ls2_alu2_turn)")
+
+;; Automaton for floating-point instructions.
+;; Mirrors the integer ALU automaton above, one-for-one.
+(define_automaton "ls2_falu")
+
+;; FALU1 and FALU2.
+;; We need to query these units to adjust round-robin counter.
+(define_query_cpu_unit "ls2_falu1_core,ls2_falu2_core" "ls2_falu")
+
+;; Pseudo units to help modeling of FALU1/2 round-robin dispatch strategy.
+(define_cpu_unit "ls2_falu1_turn,ls2_falu2_turn" "ls2_falu")
+
+;; Pseudo units to enable/disable ls2_falu[12]_turn units.
+;; ls2_falu[12]_turn unit can be subscribed only after
+;; ls2_falu[12]_turn_enabled unit is subscribed.
+(define_cpu_unit "ls2_falu1_turn_enabled,ls2_falu2_turn_enabled" "ls2_falu")
+(presence_set "ls2_falu1_turn" "ls2_falu1_turn_enabled")
+(presence_set "ls2_falu2_turn" "ls2_falu2_turn_enabled")
+
+;; Reservations for FALU1 (FALU2) instructions.
+;; Instruction goes to FALU1 (FALU2) and makes next FALU1/2 instruction to
+;; be dispatched to FALU2 (FALU1).
+(define_reservation "ls2_falu1"
+ "(ls2_falu1_core+ls2_falu2_turn_enabled)|ls2_falu1_core")
+(define_reservation "ls2_falu2"
+ "(ls2_falu2_core+ls2_falu1_turn_enabled)|ls2_falu2_core")
+
+;; Reservation for FALU1/2 instructions.
+;; Instruction will go to FALU1 iff ls2_falu1_turn_enabled is subscribed and
+;; switch the turn to FALU2 by subscribing ls2_falu2_turn_enabled.
+;; Or to FALU2 otherwise.
+;; NOTE(review): unlike ls2_alu, the alternatives here use the ls2_falu1/2
+;; *reservations* (not the _core units directly) -- presumably
+;; intentional; confirm against the ls2_alu definition above.
+(define_reservation "ls2_falu"
+ "(ls2_falu1+ls2_falu1_turn+ls2_falu2_turn_enabled)
+ |(ls2_falu1+ls2_falu1_turn)
+ |(ls2_falu2+ls2_falu2_turn+ls2_falu1_turn_enabled)
+ |(ls2_falu2+ls2_falu2_turn)")
+
+;; The following 4 instructions each subscribe one of
+;; ls2_[f]alu{1,2}_turn_enabled units according to this attribute.
+;; These instructions are used in mips.c: sched_ls2_dfa_post_advance_cycle.
+;; They are scheduler-only placeholders: the gcc_unreachable () output
+;; asserts they must never reach final assembly output.
+
+(define_attr "ls2_turn_type" "alu1,alu2,falu1,falu2,unknown,atomic,syncloop"
+ (const_string "unknown"))
+
+;; Subscribe ls2_alu1_turn_enabled.
+(define_insn "ls2_alu1_turn_enabled_insn"
+ [(unspec [(const_int 0)] UNSPEC_LOONGSON_ALU1_TURN_ENABLED_INSN)]
+ "TUNE_LOONGSON_2EF"
+ { gcc_unreachable (); }
+ [(set_attr "ls2_turn_type" "alu1")])
+
+(define_insn_reservation "ls2_alu1_turn_enabled" 0
+ (eq_attr "ls2_turn_type" "alu1")
+ "ls2_alu1_turn_enabled")
+
+;; Subscribe ls2_alu2_turn_enabled.
+(define_insn "ls2_alu2_turn_enabled_insn"
+ [(unspec [(const_int 0)] UNSPEC_LOONGSON_ALU2_TURN_ENABLED_INSN)]
+ "TUNE_LOONGSON_2EF"
+ { gcc_unreachable (); }
+ [(set_attr "ls2_turn_type" "alu2")])
+
+(define_insn_reservation "ls2_alu2_turn_enabled" 0
+ (eq_attr "ls2_turn_type" "alu2")
+ "ls2_alu2_turn_enabled")
+
+;; Subscribe ls2_falu1_turn_enabled.
+(define_insn "ls2_falu1_turn_enabled_insn"
+ [(unspec [(const_int 0)] UNSPEC_LOONGSON_FALU1_TURN_ENABLED_INSN)]
+ "TUNE_LOONGSON_2EF"
+ { gcc_unreachable (); }
+ [(set_attr "ls2_turn_type" "falu1")])
+
+(define_insn_reservation "ls2_falu1_turn_enabled" 0
+ (eq_attr "ls2_turn_type" "falu1")
+ "ls2_falu1_turn_enabled")
+
+;; Subscribe ls2_falu2_turn_enabled.
+(define_insn "ls2_falu2_turn_enabled_insn"
+ [(unspec [(const_int 0)] UNSPEC_LOONGSON_FALU2_TURN_ENABLED_INSN)]
+ "TUNE_LOONGSON_2EF"
+ { gcc_unreachable (); }
+ [(set_attr "ls2_turn_type" "falu2")])
+
+(define_insn_reservation "ls2_falu2_turn_enabled" 0
+ (eq_attr "ls2_turn_type" "falu2")
+ "ls2_falu2_turn_enabled")
+
+;; Automaton for memory operations.
+(define_automaton "ls2_mem")
+
+;; Memory unit.
+(define_query_cpu_unit "ls2_mem" "ls2_mem")
+
+;; Reservation for integer instructions.  Latency 2; dispatched to
+;; either ALU via the round-robin ls2_alu reservation.
+(define_insn_reservation "ls2_alu" 2
+ (and (eq_attr "cpu" "loongson_2e,loongson_2f")
+ (eq_attr "type" "arith,condmove,const,logical,mfhi,mflo,move,
+ mthi,mtlo,nop,shift,signext,slt"))
+ "ls2_alu")
+
+;; Reservation for branch instructions.  Branches go to ALU1 only.
+(define_insn_reservation "ls2_branch" 2
+ (and (eq_attr "cpu" "loongson_2e,loongson_2f")
+ (eq_attr "type" "branch,jump,call,trap"))
+ "ls2_alu1")
+
+;; Reservation for integer multiplication instructions.
+;; ALU2 only; the core unit stays busy one extra cycle.
+(define_insn_reservation "ls2_imult" 5
+ (and (eq_attr "cpu" "loongson_2e,loongson_2f")
+ (eq_attr "type" "imul,imul3nc"))
+ "ls2_alu2,ls2_alu2_core")
+
+;; Reservation for integer division / remainder instructions.
+;; These instructions use the SRT algorithm and hence take 2-38 cycles.
+;; 20 is used as a representative latency; ALU2's core is blocked for
+;; 18 further cycles.
+(define_insn_reservation "ls2_idiv" 20
+ (and (eq_attr "cpu" "loongson_2e,loongson_2f")
+ (eq_attr "type" "idiv,idiv3"))
+ "ls2_alu2,ls2_alu2_core*18")
+
+;; Reservation for memory load instructions.
+(define_insn_reservation "ls2_load" 5
+ (and (eq_attr "cpu" "loongson_2e,loongson_2f")
+ (eq_attr "type" "load,fpload,mfc,mtc"))
+ "ls2_mem")
+
+;; Prefetches occupy the memory unit but have no result latency.
+(define_insn_reservation "ls2_prefetch" 0
+ (and (eq_attr "cpu" "loongson_2e,loongson_2f")
+ (eq_attr "type" "prefetch,prefetchx"))
+ "ls2_mem")
+
+;; Reservation for memory store instructions.
+;; With stores we assume they don't alias with dependent loads.
+;; Therefore we set the latency to zero.
+(define_insn_reservation "ls2_store" 0
+ (and (eq_attr "cpu" "loongson_2e,loongson_2f")
+ (eq_attr "type" "store,fpstore"))
+ "ls2_mem")
+
+;; Reservation for floating-point instructions of latency 3.
+;; These are pinned to FALU1.
+(define_insn_reservation "ls2_fp3" 3
+ (and (eq_attr "cpu" "loongson_2e,loongson_2f")
+ (eq_attr "type" "fabs,fneg,fcmp,fmove"))
+ "ls2_falu1")
+
+;; Reservation for floating-point instructions of latency 5.
+(define_insn_reservation "ls2_fp5" 5
+ (and (eq_attr "cpu" "loongson_2e,loongson_2f")
+ (eq_attr "type" "fcvt"))
+ "ls2_falu1")
+
+;; Reservation for floating-point instructions that can go
+;; to either of FALU1/2 units.
+(define_insn_reservation "ls2_falu" 7
+ (and (eq_attr "cpu" "loongson_2e,loongson_2f")
+ (eq_attr "type" "fadd,fmul,fmadd"))
+ "ls2_falu")
+
+;; Reservation for floating-point division / remainder instructions.
+;; These instructions use the SRT algorithm and hence take a variable amount
+;; of cycles:
+;; div.s takes 5-11 cycles
+;; div.d takes 5-18 cycles
+;; 9 is used as a representative latency; FALU2's core is blocked for
+;; 7 further cycles.
+(define_insn_reservation "ls2_fdiv" 9
+ (and (eq_attr "cpu" "loongson_2e,loongson_2f")
+ (eq_attr "type" "fdiv"))
+ "ls2_falu2,ls2_falu2_core*7")
+
+;; Reservation for floating-point sqrt instructions.
+;; These instructions use the SRT algorithm and hence take a variable amount
+;; of cycles:
+;; sqrt.s takes 5-17 cycles
+;; sqrt.d takes 5-32 cycles
+(define_insn_reservation "ls2_fsqrt" 15
+ (and (eq_attr "cpu" "loongson_2e,loongson_2f")
+ (eq_attr "type" "fsqrt"))
+ "ls2_falu2,ls2_falu2_core*13")
+
+;; Two consecutive ALU instructions.  Either dispatch order is allowed.
+(define_insn_reservation "ls2_multi" 4
+ (and (eq_attr "cpu" "loongson_2e,loongson_2f")
+ (eq_attr "type" "multi"))
+ "(ls2_alu1,ls2_alu2_core)|(ls2_alu2,ls2_alu1_core)")
+
+;; Reservation for everything else. Normally, this reservation
+;; will only be used to handle cases like compiling for non-loongson
+;; CPUs with -mtune=loongson2?.
+;;
+;; This reservation depends upon the fact that DFA will check
+;; reservations in the same order as they appear in the file.
+(define_insn_reservation "ls2_unknown" 1
+ (eq_attr "cpu" "loongson_2e,loongson_2f")
+ "ls2_alu1_core+ls2_alu2_core+ls2_falu1_core+ls2_falu2_core+ls2_mem")
diff --git a/gcc-4.9/gcc/config/mips/loongson3a.md b/gcc-4.9/gcc/config/mips/loongson3a.md
new file mode 100644
index 000000000..6fc3d3c34
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/loongson3a.md
@@ -0,0 +1,137 @@
+;; Pipeline model for Loongson-3A cores.
+
+;; Copyright (C) 2011-2014 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 3, or (at your
+;; option) any later version.
+;;
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; Uncomment the following line to output automata for debugging.
+;; (automata_option "v")
+
+;; Automaton for integer instructions.
+(define_automaton "ls3a_a_alu")
+
+;; Automaton for floating-point instructions.
+(define_automaton "ls3a_a_falu")
+
+;; Automaton for memory operations.
+(define_automaton "ls3a_a_mem")
+
+;; Describe the resources.
+
+(define_cpu_unit "ls3a_alu1" "ls3a_a_alu")
+(define_cpu_unit "ls3a_alu2" "ls3a_a_alu")
+(define_cpu_unit "ls3a_mem" "ls3a_a_mem")
+(define_cpu_unit "ls3a_falu1" "ls3a_a_falu")
+(define_cpu_unit "ls3a_falu2" "ls3a_a_falu")
+
+;; Describe instruction reservations.
+
+(define_insn_reservation "ls3a_arith" 1
+ (and (eq_attr "cpu" "loongson_3a")
+ (eq_attr "type" "arith,clz,const,logical,
+ move,nop,shift,signext,slt"))
+ "ls3a_alu1 | ls3a_alu2")
+
+(define_insn_reservation "ls3a_branch" 1
+ (and (eq_attr "cpu" "loongson_3a")
+ (eq_attr "type" "branch,jump,call,condmove,trap"))
+ "ls3a_alu1")
+
+(define_insn_reservation "ls3a_mfhilo" 1
+ (and (eq_attr "cpu" "loongson_3a")
+ (eq_attr "type" "mfhi,mflo,mthi,mtlo"))
+ "ls3a_alu2")
+
+;; Operation imul3nc is fully pipelined.
+(define_insn_reservation "ls3a_imul3nc" 5
+ (and (eq_attr "cpu" "loongson_3a")
+ (eq_attr "type" "imul3nc"))
+ "ls3a_alu2")
+
+(define_insn_reservation "ls3a_imul" 7
+ (and (eq_attr "cpu" "loongson_3a")
+ (eq_attr "type" "imul,imadd"))
+ "ls3a_alu2 * 7")
+
+(define_insn_reservation "ls3a_idiv_si" 12
+ (and (eq_attr "cpu" "loongson_3a")
+ (and (eq_attr "type" "idiv")
+ (eq_attr "mode" "SI")))
+ "ls3a_alu2 * 12")
+
+(define_insn_reservation "ls3a_idiv_di" 25
+ (and (eq_attr "cpu" "loongson_3a")
+ (and (eq_attr "type" "idiv")
+ (eq_attr "mode" "DI")))
+ "ls3a_alu2 * 25")
+
+(define_insn_reservation "ls3a_load" 3
+ (and (eq_attr "cpu" "loongson_3a")
+ (eq_attr "type" "load"))
+ "ls3a_mem")
+
+(define_insn_reservation "ls3a_fpload" 4
+ (and (eq_attr "cpu" "loongson_3a")
+ (eq_attr "type" "fpload,mfc,mtc"))
+ "ls3a_mem")
+
+(define_insn_reservation "ls3a_prefetch" 0
+ (and (eq_attr "cpu" "loongson_3a")
+ (eq_attr "type" "prefetch,prefetchx"))
+ "ls3a_mem")
+
+(define_insn_reservation "ls3a_store" 0
+ (and (eq_attr "cpu" "loongson_3a")
+ (eq_attr "type" "store,fpstore,fpidxstore"))
+ "ls3a_mem")
+
+;; All the fp operations can be executed in FALU1. Only fp add,
+;; sub, mul, madd can be executed in FALU2. Try FALU2 firstly.
+(define_insn_reservation "ls3a_fadd" 6
+ (and (eq_attr "cpu" "loongson_3a")
+ (eq_attr "type" "fadd,fmul,fmadd"))
+ "ls3a_falu2 | ls3a_falu1")
+
+(define_insn_reservation "ls3a_fcmp" 2
+ (and (eq_attr "cpu" "loongson_3a")
+ (eq_attr "type" "fabs,fcmp,fmove,fneg"))
+ "ls3a_falu1")
+
+(define_insn_reservation "ls3a_fcvt" 4
+ (and (eq_attr "cpu" "loongson_3a")
+ (eq_attr "type" "fcvt"))
+ "ls3a_falu1")
+
+(define_insn_reservation "ls3a_fdiv_sf" 12
+ (and (eq_attr "cpu" "loongson_3a")
+ (and (eq_attr "type" "fdiv,frdiv,fsqrt,frsqrt")
+ (eq_attr "mode" "SF")))
+ "ls3a_falu1 * 12")
+
+(define_insn_reservation "ls3a_fdiv_df" 19
+ (and (eq_attr "cpu" "loongson_3a")
+ (and (eq_attr "type" "fdiv,frdiv,fsqrt,frsqrt")
+ (eq_attr "mode" "DF")))
+ "ls3a_falu1 * 19")
+
+;; Force single-dispatch for unknown or multi.
+(define_insn_reservation "ls3a_unknown" 1
+ (and (eq_attr "cpu" "loongson_3a")
+ (eq_attr "type" "unknown,multi,atomic,syncloop"))
+ "ls3a_alu1 + ls3a_alu2 + ls3a_falu1 + ls3a_falu2 + ls3a_mem")
+
+;; End of DFA-based pipeline description for loongson_3a
diff --git a/gcc-4.9/gcc/config/mips/micromips.md b/gcc-4.9/gcc/config/mips/micromips.md
new file mode 100644
index 000000000..3504044f9
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/micromips.md
@@ -0,0 +1,138 @@
+;; Copyright (C) 2013-2014 Free Software Foundation, Inc.
+;;
+;; micromips.md Machine Description for the microMIPS instruction set
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 3, or (at your
+;; option) any later version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+(define_insn "*store_word_multiple"
+ [(match_parallel 0 ""
+ [(set (match_operand:SI 1 "memory_operand")
+ (match_operand:SI 2 "register_operand"))])]
+ "TARGET_MICROMIPS
+ && umips_save_restore_pattern_p (true, operands[0])"
+ { return umips_output_save_restore (true, operands[0]); }
+ [(set_attr "type" "multimem")
+ (set_attr "mode" "SI")
+ (set_attr "can_delay" "no")])
+
+(define_insn "*load_word_multiple"
+ [(match_parallel 0 ""
+ [(set (match_operand:SI 1 "register_operand")
+ (match_operand:SI 2 "memory_operand"))])]
+ "TARGET_MICROMIPS
+ && umips_save_restore_pattern_p (false, operands[0])"
+ { return umips_output_save_restore (false, operands[0]); }
+ [(set_attr "type" "multimem")
+ (set_attr "mode" "SI")
+ (set_attr "can_delay" "no")])
+
+;; For LWP.
+(define_peephole2
+ [(set (match_operand:SI 0 "d_operand" "")
+ (match_operand:SI 1 "non_volatile_mem_operand" ""))
+ (set (match_operand:SI 2 "d_operand" "")
+ (match_operand:SI 3 "non_volatile_mem_operand" ""))]
+ "TARGET_MICROMIPS
+ && umips_load_store_pair_p (true, operands)"
+ [(parallel [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 2) (match_dup 3))])])
+
+;; The behavior of the LWP insn is undefined if placed in a delay slot.
+(define_insn "*lwp"
+ [(parallel [(set (match_operand:SI 0 "d_operand")
+ (match_operand:SI 1 "non_volatile_mem_operand"))
+ (set (match_operand:SI 2 "d_operand")
+ (match_operand:SI 3 "non_volatile_mem_operand"))])]
+
+ "TARGET_MICROMIPS
+ && umips_load_store_pair_p (true, operands)"
+{
+ umips_output_load_store_pair (true, operands);
+ return "";
+}
+ [(set_attr "type" "load")
+ (set_attr "mode" "SI")
+ (set_attr "can_delay" "no")])
+
+;; For SWP.
+(define_peephole2
+ [(set (match_operand:SI 0 "non_volatile_mem_operand" "")
+ (match_operand:SI 1 "d_operand" ""))
+ (set (match_operand:SI 2 "non_volatile_mem_operand" "")
+ (match_operand:SI 3 "d_operand" ""))]
+ "TARGET_MICROMIPS
+ && umips_load_store_pair_p (false, operands)"
+ [(parallel [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 2) (match_dup 3))])])
+
+;; The behavior of the SWP insn is undefined if placed in a delay slot.
+(define_insn "*swp"
+ [(parallel [(set (match_operand:SI 0 "non_volatile_mem_operand")
+ (match_operand:SI 1 "d_operand"))
+ (set (match_operand:SI 2 "non_volatile_mem_operand")
+ (match_operand:SI 3 "d_operand"))])]
+
+ "TARGET_MICROMIPS
+ && umips_load_store_pair_p (false, operands)"
+{
+ umips_output_load_store_pair (false, operands);
+ return "";
+}
+ [(set_attr "type" "store")
+ (set_attr "mode" "SI")
+ (set_attr "can_delay" "no")])
+
+;; For JRADDIUSP.
+(define_insn "jraddiusp"
+ [(parallel [(return)
+ (use (reg:SI 31))
+ (set (reg:SI 29)
+ (plus:SI (reg:SI 29)
+ (match_operand 0 "uw5_operand")))])]
+ "TARGET_MICROMIPS"
+ "jraddiusp\t%0"
+ [(set_attr "type" "trap")
+ (set_attr "can_delay" "no")
+ (set_attr "mode" "SI")])
+
+;; For MOVEP.
+(define_peephole2
+ [(set (match_operand:MOVEP1 0 "register_operand" "")
+ (match_operand:MOVEP1 1 "movep_src_operand" ""))
+ (set (match_operand:MOVEP2 2 "register_operand" "")
+ (match_operand:MOVEP2 3 "movep_src_operand" ""))]
+ "TARGET_MICROMIPS
+ && umips_movep_target_p (operands[0], operands[2])"
+ [(parallel [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 2) (match_dup 3))])])
+
+;; The behavior of the MOVEP insn is undefined if placed in a delay slot.
+(define_insn "*movep<MOVEP1:mode><MOVEP2:mode>"
+ [(parallel [(set (match_operand:MOVEP1 0 "register_operand")
+ (match_operand:MOVEP1 1 "movep_src_operand"))
+ (set (match_operand:MOVEP2 2 "register_operand")
+ (match_operand:MOVEP2 3 "movep_src_operand"))])]
+ "TARGET_MICROMIPS
+ && umips_movep_target_p (operands[0], operands[2])"
+{
+ if (REGNO (operands[0]) < REGNO (operands[2]))
+ return "movep\t%0,%2,%z1,%z3";
+ else
+ return "movep\t%2,%0,%z3,%z1";
+}
+ [(set_attr "type" "move")
+ (set_attr "mode" "<MODE>")
+ (set_attr "can_delay" "no")])
diff --git a/gcc-4.9/gcc/config/mips/mips-cpus.def b/gcc-4.9/gcc/config/mips/mips-cpus.def
new file mode 100644
index 000000000..07fbf9c7e
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/mips-cpus.def
@@ -0,0 +1,154 @@
+/* MIPS CPU names.
+ Copyright (C) 1989-2014 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* A table describing all the processors GCC knows about. The first
+ mention of an ISA level is taken as the canonical name for that
+ ISA.
+
+ To ease comparison, please keep this table in the same order
+ as GAS's mips_cpu_info_table. Please also make sure that
+ MIPS_ISA_LEVEL_SPEC and MIPS_ARCH_FLOAT_SPEC handle all -march
+ options correctly.
+
+ Before including this file, define a macro:
+
+ MIPS_CPU (NAME, CPU, ISA, FLAGS)
+
+ where the arguments are the fields of struct mips_cpu_info. */
+
+/* Entries for generic ISAs. */
+MIPS_CPU ("mips1", PROCESSOR_R3000, 1, 0)
+MIPS_CPU ("mips2", PROCESSOR_R6000, 2, 0)
+MIPS_CPU ("mips3", PROCESSOR_R4000, 3, 0)
+MIPS_CPU ("mips4", PROCESSOR_R8000, 4, 0)
+/* Prefer not to use branch-likely instructions for generic MIPS32rX
+ and MIPS64rX code. The instructions were officially deprecated
+ in revisions 2 and earlier, but revision 3 is likely to downgrade
+ that to a recommendation to avoid the instructions in code that
+ isn't tuned to a specific processor. */
+MIPS_CPU ("mips32", PROCESSOR_4KC, 32, PTF_AVOID_BRANCHLIKELY)
+MIPS_CPU ("mips32r2", PROCESSOR_74KF2_1, 33, PTF_AVOID_BRANCHLIKELY)
+MIPS_CPU ("mips64", PROCESSOR_5KC, 64, PTF_AVOID_BRANCHLIKELY)
+/* ??? For now just tune the generic MIPS64r2 for 5KC as well. */
+MIPS_CPU ("mips64r2", PROCESSOR_5KC, 65, PTF_AVOID_BRANCHLIKELY)
+
+/* MIPS I processors. */
+MIPS_CPU ("r3000", PROCESSOR_R3000, 1, 0)
+MIPS_CPU ("r2000", PROCESSOR_R3000, 1, 0)
+MIPS_CPU ("r3900", PROCESSOR_R3900, 1, 0)
+
+/* MIPS II processors. */
+MIPS_CPU ("r6000", PROCESSOR_R6000, 2, 0)
+
+/* MIPS III processors. */
+MIPS_CPU ("r4000", PROCESSOR_R4000, 3, 0)
+MIPS_CPU ("vr4100", PROCESSOR_R4100, 3, 0)
+MIPS_CPU ("vr4111", PROCESSOR_R4111, 3, 0)
+MIPS_CPU ("vr4120", PROCESSOR_R4120, 3, 0)
+MIPS_CPU ("vr4130", PROCESSOR_R4130, 3, 0)
+MIPS_CPU ("vr4300", PROCESSOR_R4300, 3, 0)
+MIPS_CPU ("r4400", PROCESSOR_R4000, 3, 0)
+MIPS_CPU ("r4600", PROCESSOR_R4600, 3, 0)
+MIPS_CPU ("orion", PROCESSOR_R4600, 3, 0)
+MIPS_CPU ("r4650", PROCESSOR_R4650, 3, 0)
+MIPS_CPU ("r4700", PROCESSOR_R4700, 3, 0)
+MIPS_CPU ("r5900", PROCESSOR_R5900, 3, 0)
+/* ST Loongson 2E/2F processors. */
+MIPS_CPU ("loongson2e", PROCESSOR_LOONGSON_2E, 3, PTF_AVOID_BRANCHLIKELY)
+MIPS_CPU ("loongson2f", PROCESSOR_LOONGSON_2F, 3, PTF_AVOID_BRANCHLIKELY)
+
+/* MIPS IV processors. */
+MIPS_CPU ("r8000", PROCESSOR_R8000, 4, 0)
+MIPS_CPU ("r10000", PROCESSOR_R10000, 4, 0)
+MIPS_CPU ("r12000", PROCESSOR_R10000, 4, 0)
+MIPS_CPU ("r14000", PROCESSOR_R10000, 4, 0)
+MIPS_CPU ("r16000", PROCESSOR_R10000, 4, 0)
+MIPS_CPU ("vr5000", PROCESSOR_R5000, 4, 0)
+MIPS_CPU ("vr5400", PROCESSOR_R5400, 4, 0)
+MIPS_CPU ("vr5500", PROCESSOR_R5500, 4, PTF_AVOID_BRANCHLIKELY)
+MIPS_CPU ("rm7000", PROCESSOR_R7000, 4, 0)
+MIPS_CPU ("rm9000", PROCESSOR_R9000, 4, 0)
+
+/* MIPS32 processors. */
+MIPS_CPU ("4kc", PROCESSOR_4KC, 32, 0)
+MIPS_CPU ("4km", PROCESSOR_4KC, 32, 0)
+MIPS_CPU ("4kp", PROCESSOR_4KP, 32, 0)
+MIPS_CPU ("4ksc", PROCESSOR_4KC, 32, 0)
+
+/* MIPS32 Release 2 processors. */
+MIPS_CPU ("m4k", PROCESSOR_M4K, 33, 0)
+MIPS_CPU ("m14kc", PROCESSOR_M4K, 33, 0)
+MIPS_CPU ("m14k", PROCESSOR_M4K, 33, 0)
+MIPS_CPU ("m14ke", PROCESSOR_M4K, 33, 0)
+MIPS_CPU ("m14kec", PROCESSOR_M4K, 33, 0)
+MIPS_CPU ("4kec", PROCESSOR_4KC, 33, 0)
+MIPS_CPU ("4kem", PROCESSOR_4KC, 33, 0)
+MIPS_CPU ("4kep", PROCESSOR_4KP, 33, 0)
+MIPS_CPU ("4ksd", PROCESSOR_4KC, 33, 0)
+
+MIPS_CPU ("24kc", PROCESSOR_24KC, 33, 0)
+MIPS_CPU ("24kf2_1", PROCESSOR_24KF2_1, 33, 0)
+MIPS_CPU ("24kf", PROCESSOR_24KF2_1, 33, 0)
+MIPS_CPU ("24kf1_1", PROCESSOR_24KF1_1, 33, 0)
+MIPS_CPU ("24kfx", PROCESSOR_24KF1_1, 33, 0)
+MIPS_CPU ("24kx", PROCESSOR_24KF1_1, 33, 0)
+
+MIPS_CPU ("24kec", PROCESSOR_24KC, 33, 0) /* 24K with DSP. */
+MIPS_CPU ("24kef2_1", PROCESSOR_24KF2_1, 33, 0)
+MIPS_CPU ("24kef", PROCESSOR_24KF2_1, 33, 0)
+MIPS_CPU ("24kef1_1", PROCESSOR_24KF1_1, 33, 0)
+MIPS_CPU ("24kefx", PROCESSOR_24KF1_1, 33, 0)
+MIPS_CPU ("24kex", PROCESSOR_24KF1_1, 33, 0)
+
+MIPS_CPU ("34kc", PROCESSOR_24KC, 33, 0) /* 34K with MT/DSP. */
+MIPS_CPU ("34kf2_1", PROCESSOR_24KF2_1, 33, 0)
+MIPS_CPU ("34kf", PROCESSOR_24KF2_1, 33, 0)
+MIPS_CPU ("34kf1_1", PROCESSOR_24KF1_1, 33, 0)
+MIPS_CPU ("34kfx", PROCESSOR_24KF1_1, 33, 0)
+MIPS_CPU ("34kx", PROCESSOR_24KF1_1, 33, 0)
+MIPS_CPU ("34kn", PROCESSOR_24KC, 33, 0) /* 34K with MT but no DSP. */
+
+MIPS_CPU ("74kc", PROCESSOR_74KC, 33, PTF_AVOID_IMADD) /* 74K with DSPr2. */
+MIPS_CPU ("74kf2_1", PROCESSOR_74KF2_1, 33, PTF_AVOID_IMADD)
+MIPS_CPU ("74kf", PROCESSOR_74KF2_1, 33, PTF_AVOID_IMADD)
+MIPS_CPU ("74kf1_1", PROCESSOR_74KF1_1, 33, PTF_AVOID_IMADD)
+MIPS_CPU ("74kfx", PROCESSOR_74KF1_1, 33, PTF_AVOID_IMADD)
+MIPS_CPU ("74kx", PROCESSOR_74KF1_1, 33, PTF_AVOID_IMADD)
+MIPS_CPU ("74kf3_2", PROCESSOR_74KF3_2, 33, PTF_AVOID_IMADD)
+
+MIPS_CPU ("1004kc", PROCESSOR_24KC, 33, 0) /* 1004K with MT/DSP. */
+MIPS_CPU ("1004kf2_1", PROCESSOR_24KF2_1, 33, 0)
+MIPS_CPU ("1004kf", PROCESSOR_24KF2_1, 33, 0)
+MIPS_CPU ("1004kf1_1", PROCESSOR_24KF1_1, 33, 0)
+
+/* MIPS64 processors. */
+MIPS_CPU ("5kc", PROCESSOR_5KC, 64, 0)
+MIPS_CPU ("5kf", PROCESSOR_5KF, 64, 0)
+MIPS_CPU ("20kc", PROCESSOR_20KC, 64, PTF_AVOID_BRANCHLIKELY)
+MIPS_CPU ("sb1", PROCESSOR_SB1, 64, PTF_AVOID_BRANCHLIKELY)
+MIPS_CPU ("sb1a", PROCESSOR_SB1A, 64, PTF_AVOID_BRANCHLIKELY)
+MIPS_CPU ("sr71000", PROCESSOR_SR71000, 64, PTF_AVOID_BRANCHLIKELY)
+MIPS_CPU ("xlr", PROCESSOR_XLR, 64, PTF_AVOID_BRANCHLIKELY)
+
+/* MIPS64 Release 2 processors. */
+MIPS_CPU ("loongson3a", PROCESSOR_LOONGSON_3A, 65, PTF_AVOID_BRANCHLIKELY)
+MIPS_CPU ("octeon", PROCESSOR_OCTEON, 65, PTF_AVOID_BRANCHLIKELY)
+MIPS_CPU ("octeon+", PROCESSOR_OCTEON, 65, PTF_AVOID_BRANCHLIKELY)
+MIPS_CPU ("octeon2", PROCESSOR_OCTEON2, 65, PTF_AVOID_BRANCHLIKELY)
+MIPS_CPU ("xlp", PROCESSOR_XLP, 65, PTF_AVOID_BRANCHLIKELY)
diff --git a/gcc-4.9/gcc/config/mips/mips-dsp.md b/gcc-4.9/gcc/config/mips/mips-dsp.md
new file mode 100644
index 000000000..58c11fe9a
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/mips-dsp.md
@@ -0,0 +1,1205 @@
+;; Copyright (C) 2005-2014 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; MIPS DSP ASE Revision 0.98 3/24/2005
+(define_c_enum "unspec" [
+ UNSPEC_ADDQ
+ UNSPEC_ADDQ_S
+ UNSPEC_SUBQ
+ UNSPEC_SUBQ_S
+ UNSPEC_ADDSC
+ UNSPEC_ADDWC
+ UNSPEC_MODSUB
+ UNSPEC_RADDU_W_QB
+ UNSPEC_ABSQ_S
+ UNSPEC_PRECRQ_QB_PH
+ UNSPEC_PRECRQ_PH_W
+ UNSPEC_PRECRQ_RS_PH_W
+ UNSPEC_PRECRQU_S_QB_PH
+ UNSPEC_PRECEQ_W_PHL
+ UNSPEC_PRECEQ_W_PHR
+ UNSPEC_PRECEQU_PH_QBL
+ UNSPEC_PRECEQU_PH_QBR
+ UNSPEC_PRECEQU_PH_QBLA
+ UNSPEC_PRECEQU_PH_QBRA
+ UNSPEC_PRECEU_PH_QBL
+ UNSPEC_PRECEU_PH_QBR
+ UNSPEC_PRECEU_PH_QBLA
+ UNSPEC_PRECEU_PH_QBRA
+ UNSPEC_SHLL
+ UNSPEC_SHLL_S
+ UNSPEC_SHRL_QB
+ UNSPEC_SHRA_PH
+ UNSPEC_SHRA_R
+ UNSPEC_MULEU_S_PH_QBL
+ UNSPEC_MULEU_S_PH_QBR
+ UNSPEC_MULQ_RS_PH
+ UNSPEC_MULEQ_S_W_PHL
+ UNSPEC_MULEQ_S_W_PHR
+ UNSPEC_DPAU_H_QBL
+ UNSPEC_DPAU_H_QBR
+ UNSPEC_DPSU_H_QBL
+ UNSPEC_DPSU_H_QBR
+ UNSPEC_DPAQ_S_W_PH
+ UNSPEC_DPSQ_S_W_PH
+ UNSPEC_MULSAQ_S_W_PH
+ UNSPEC_DPAQ_SA_L_W
+ UNSPEC_DPSQ_SA_L_W
+ UNSPEC_MAQ_S_W_PHL
+ UNSPEC_MAQ_S_W_PHR
+ UNSPEC_MAQ_SA_W_PHL
+ UNSPEC_MAQ_SA_W_PHR
+ UNSPEC_BITREV
+ UNSPEC_INSV
+ UNSPEC_REPL_QB
+ UNSPEC_REPL_PH
+ UNSPEC_CMP_EQ
+ UNSPEC_CMP_LT
+ UNSPEC_CMP_LE
+ UNSPEC_CMPGU_EQ_QB
+ UNSPEC_CMPGU_LT_QB
+ UNSPEC_CMPGU_LE_QB
+ UNSPEC_PICK
+ UNSPEC_PACKRL_PH
+ UNSPEC_EXTR_W
+ UNSPEC_EXTR_R_W
+ UNSPEC_EXTR_RS_W
+ UNSPEC_EXTR_S_H
+ UNSPEC_EXTP
+ UNSPEC_EXTPDP
+ UNSPEC_SHILO
+ UNSPEC_MTHLIP
+ UNSPEC_WRDSP
+ UNSPEC_RDDSP
+])
+
+(define_constants
+ [(CCDSP_PO_REGNUM 182)
+ (CCDSP_SC_REGNUM 183)
+ (CCDSP_CA_REGNUM 184)
+ (CCDSP_OU_REGNUM 185)
+ (CCDSP_CC_REGNUM 186)
+ (CCDSP_EF_REGNUM 187)])
+
+;; This mode iterator allows si, v2hi, v4qi for all possible modes in DSP ASE.
+(define_mode_iterator DSP [(SI "ISA_HAS_DSP")
+ (V2HI "ISA_HAS_DSP")
+ (V4QI "ISA_HAS_DSP")])
+
+;; This mode iterator allows v2hi, v4qi for vector/SIMD data.
+(define_mode_iterator DSPV [(V2HI "ISA_HAS_DSP")
+ (V4QI "ISA_HAS_DSP")])
+
+;; This mode iterator allows si, v2hi for Q31 and V2Q15 fixed-point data.
+(define_mode_iterator DSPQ [(SI "ISA_HAS_DSP")
+ (V2HI "ISA_HAS_DSP")])
+
+;; DSP instructions use q for fixed-point data, and u for integer in the infix.
+(define_mode_attr dspfmt1 [(SI "q") (V2HI "q") (V4QI "u")])
+
+;; DSP instructions use nothing for fixed-point data, and u for integer in
+;; the infix.
+(define_mode_attr dspfmt1_1 [(SI "") (V2HI "") (V4QI "u")])
+
+;; DSP instructions use w, ph, qb in the postfix.
+(define_mode_attr dspfmt2 [(SI "w") (V2HI "ph") (V4QI "qb")])
+
+;; DSP shift masks for SI, V2HI, V4QI.
+(define_mode_attr dspshift_mask [(SI "0x1f") (V2HI "0xf") (V4QI "0x7")])
+
+;; MIPS DSP ASE Revision 0.98 3/24/2005
+;; Table 2-1. MIPS DSP ASE Instructions: Arithmetic
+;; ADDQ*
+(define_insn "add<DSPV:mode>3"
+ [(parallel
+ [(set (match_operand:DSPV 0 "register_operand" "=d")
+ (plus:DSPV (match_operand:DSPV 1 "register_operand" "d")
+ (match_operand:DSPV 2 "register_operand" "d")))
+ (set (reg:CCDSP CCDSP_OU_REGNUM)
+ (unspec:CCDSP [(match_dup 1) (match_dup 2)] UNSPEC_ADDQ))])]
+ "ISA_HAS_DSP"
+ "add<DSPV:dspfmt1>.<DSPV:dspfmt2>\t%0,%1,%2"
+ [(set_attr "type" "dspalu")
+ (set_attr "mode" "SI")])
+
+(define_insn "mips_add<DSP:dspfmt1>_s_<DSP:dspfmt2>"
+ [(parallel
+ [(set (match_operand:DSP 0 "register_operand" "=d")
+ (unspec:DSP [(match_operand:DSP 1 "register_operand" "d")
+ (match_operand:DSP 2 "register_operand" "d")]
+ UNSPEC_ADDQ_S))
+ (set (reg:CCDSP CCDSP_OU_REGNUM)
+ (unspec:CCDSP [(match_dup 1) (match_dup 2)] UNSPEC_ADDQ_S))])]
+ "ISA_HAS_DSP"
+ "add<DSP:dspfmt1>_s.<DSP:dspfmt2>\t%0,%1,%2"
+ [(set_attr "type" "dspalusat")
+ (set_attr "mode" "SI")])
+
+;; SUBQ*
+(define_insn "sub<DSPV:mode>3"
+ [(parallel
+ [(set (match_operand:DSPV 0 "register_operand" "=d")
+ (minus:DSPV (match_operand:DSPV 1 "register_operand" "d")
+ (match_operand:DSPV 2 "register_operand" "d")))
+ (set (reg:CCDSP CCDSP_OU_REGNUM)
+ (unspec:CCDSP [(match_dup 1) (match_dup 2)] UNSPEC_SUBQ))])]
+ "ISA_HAS_DSP"
+ "sub<DSPV:dspfmt1>.<DSPV:dspfmt2>\t%0,%1,%2"
+ [(set_attr "type" "dspalu")
+ (set_attr "mode" "SI")])
+
+(define_insn "mips_sub<DSP:dspfmt1>_s_<DSP:dspfmt2>"
+ [(parallel
+ [(set (match_operand:DSP 0 "register_operand" "=d")
+ (unspec:DSP [(match_operand:DSP 1 "register_operand" "d")
+ (match_operand:DSP 2 "register_operand" "d")]
+ UNSPEC_SUBQ_S))
+ (set (reg:CCDSP CCDSP_OU_REGNUM)
+ (unspec:CCDSP [(match_dup 1) (match_dup 2)] UNSPEC_SUBQ_S))])]
+ "ISA_HAS_DSP"
+ "sub<DSP:dspfmt1>_s.<DSP:dspfmt2>\t%0,%1,%2"
+ [(set_attr "type" "dspalusat")
+ (set_attr "mode" "SI")])
+
+;; ADDSC
+(define_insn "mips_addsc"
+ [(parallel
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (unspec:SI [(match_operand:SI 1 "register_operand" "d")
+ (match_operand:SI 2 "register_operand" "d")]
+ UNSPEC_ADDSC))
+ (set (reg:CCDSP CCDSP_CA_REGNUM)
+ (unspec:CCDSP [(match_dup 1) (match_dup 2)] UNSPEC_ADDSC))])]
+ "ISA_HAS_DSP"
+ "addsc\t%0,%1,%2"
+ [(set_attr "type" "dspalu")
+ (set_attr "mode" "SI")])
+
+;; ADDWC
+(define_insn "mips_addwc"
+ [(parallel
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (unspec:SI [(match_operand:SI 1 "register_operand" "d")
+ (match_operand:SI 2 "register_operand" "d")
+ (reg:CCDSP CCDSP_CA_REGNUM)]
+ UNSPEC_ADDWC))
+ (set (reg:CCDSP CCDSP_OU_REGNUM)
+ (unspec:CCDSP [(match_dup 1) (match_dup 2)] UNSPEC_ADDWC))])]
+ "ISA_HAS_DSP"
+ "addwc\t%0,%1,%2"
+ [(set_attr "type" "dspalu")
+ (set_attr "mode" "SI")])
+
+;; MODSUB
+(define_insn "mips_modsub"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (unspec:SI [(match_operand:SI 1 "register_operand" "d")
+ (match_operand:SI 2 "register_operand" "d")]
+ UNSPEC_MODSUB))]
+ "ISA_HAS_DSP"
+ "modsub\t%0,%1,%2"
+ [(set_attr "type" "dspalu")
+ (set_attr "mode" "SI")])
+
+;; RADDU*
+(define_insn "mips_raddu_w_qb"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (unspec:SI [(match_operand:V4QI 1 "register_operand" "d")]
+ UNSPEC_RADDU_W_QB))]
+ "ISA_HAS_DSP"
+ "raddu.w.qb\t%0,%1"
+ [(set_attr "type" "dspalu")
+ (set_attr "mode" "SI")])
+
+;; ABSQ*
+(define_insn "mips_absq_s_<DSPQ:dspfmt2>"
+ [(parallel
+ [(set (match_operand:DSPQ 0 "register_operand" "=d")
+ (unspec:DSPQ [(match_operand:DSPQ 1 "register_operand" "d")]
+ UNSPEC_ABSQ_S))
+ (set (reg:CCDSP CCDSP_OU_REGNUM)
+ (unspec:CCDSP [(match_dup 1)] UNSPEC_ABSQ_S))])]
+ "ISA_HAS_DSP"
+ "absq_s.<DSPQ:dspfmt2>\t%0,%1"
+ [(set_attr "type" "dspalusat")
+ (set_attr "mode" "SI")])
+
+;; PRECRQ*
+(define_insn "mips_precrq_qb_ph"
+ [(set (match_operand:V4QI 0 "register_operand" "=d")
+ (unspec:V4QI [(match_operand:V2HI 1 "register_operand" "d")
+ (match_operand:V2HI 2 "register_operand" "d")]
+ UNSPEC_PRECRQ_QB_PH))]
+ "ISA_HAS_DSP"
+ "precrq.qb.ph\t%0,%1,%2"
+ [(set_attr "type" "dspalu")
+ (set_attr "mode" "SI")])
+
+(define_insn "mips_precrq_ph_w"
+ [(set (match_operand:V2HI 0 "register_operand" "=d")
+ (unspec:V2HI [(match_operand:SI 1 "register_operand" "d")
+ (match_operand:SI 2 "register_operand" "d")]
+ UNSPEC_PRECRQ_PH_W))]
+ "ISA_HAS_DSP"
+ "precrq.ph.w\t%0,%1,%2"
+ [(set_attr "type" "dspalu")
+ (set_attr "mode" "SI")])
+
+(define_insn "mips_precrq_rs_ph_w"
+ [(parallel
+ [(set (match_operand:V2HI 0 "register_operand" "=d")
+ (unspec:V2HI [(match_operand:SI 1 "register_operand" "d")
+ (match_operand:SI 2 "register_operand" "d")]
+ UNSPEC_PRECRQ_RS_PH_W))
+ (set (reg:CCDSP CCDSP_OU_REGNUM)
+ (unspec:CCDSP [(match_dup 1) (match_dup 2)]
+ UNSPEC_PRECRQ_RS_PH_W))])]
+ "ISA_HAS_DSP"
+ "precrq_rs.ph.w\t%0,%1,%2"
+ [(set_attr "type" "dspalu")
+ (set_attr "mode" "SI")])
+
+;; PRECRQU*
+(define_insn "mips_precrqu_s_qb_ph"
+ [(parallel
+ [(set (match_operand:V4QI 0 "register_operand" "=d")
+ (unspec:V4QI [(match_operand:V2HI 1 "register_operand" "d")
+ (match_operand:V2HI 2 "register_operand" "d")]
+ UNSPEC_PRECRQU_S_QB_PH))
+ (set (reg:CCDSP CCDSP_OU_REGNUM)
+ (unspec:CCDSP [(match_dup 1) (match_dup 2)]
+ UNSPEC_PRECRQU_S_QB_PH))])]
+ "ISA_HAS_DSP"
+ "precrqu_s.qb.ph\t%0,%1,%2"
+ [(set_attr "type" "dspalusat")
+ (set_attr "mode" "SI")])
+
+;; PRECEQ*
+(define_insn "mips_preceq_w_phl"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (unspec:SI [(match_operand:V2HI 1 "register_operand" "d")]
+ UNSPEC_PRECEQ_W_PHL))]
+ "ISA_HAS_DSP"
+ "preceq.w.phl\t%0,%1"
+ [(set_attr "type" "dspalu")
+ (set_attr "mode" "SI")])
+
+(define_insn "mips_preceq_w_phr"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (unspec:SI [(match_operand:V2HI 1 "register_operand" "d")]
+ UNSPEC_PRECEQ_W_PHR))]
+ "ISA_HAS_DSP"
+ "preceq.w.phr\t%0,%1"
+ [(set_attr "type" "dspalu")
+ (set_attr "mode" "SI")])
+
+;; PRECEQU*
+(define_insn "mips_precequ_ph_qbl"
+ [(set (match_operand:V2HI 0 "register_operand" "=d")
+ (unspec:V2HI [(match_operand:V4QI 1 "register_operand" "d")]
+ UNSPEC_PRECEQU_PH_QBL))]
+ "ISA_HAS_DSP"
+ "precequ.ph.qbl\t%0,%1"
+ [(set_attr "type" "dspalu")
+ (set_attr "mode" "SI")])
+
+(define_insn "mips_precequ_ph_qbr"
+ [(set (match_operand:V2HI 0 "register_operand" "=d")
+ (unspec:V2HI [(match_operand:V4QI 1 "register_operand" "d")]
+ UNSPEC_PRECEQU_PH_QBR))]
+ "ISA_HAS_DSP"
+ "precequ.ph.qbr\t%0,%1"
+ [(set_attr "type" "dspalu")
+ (set_attr "mode" "SI")])
+
+(define_insn "mips_precequ_ph_qbla"
+ [(set (match_operand:V2HI 0 "register_operand" "=d")
+ (unspec:V2HI [(match_operand:V4QI 1 "register_operand" "d")]
+ UNSPEC_PRECEQU_PH_QBLA))]
+ "ISA_HAS_DSP"
+ "precequ.ph.qbla\t%0,%1"
+ [(set_attr "type" "dspalu")
+ (set_attr "mode" "SI")])
+
+(define_insn "mips_precequ_ph_qbra"
+ [(set (match_operand:V2HI 0 "register_operand" "=d")
+ (unspec:V2HI [(match_operand:V4QI 1 "register_operand" "d")]
+ UNSPEC_PRECEQU_PH_QBRA))]
+ "ISA_HAS_DSP"
+ "precequ.ph.qbra\t%0,%1"
+ [(set_attr "type" "dspalu")
+ (set_attr "mode" "SI")])
+
+;; PRECEU*
+(define_insn "mips_preceu_ph_qbl"
+ [(set (match_operand:V2HI 0 "register_operand" "=d")
+ (unspec:V2HI [(match_operand:V4QI 1 "register_operand" "d")]
+ UNSPEC_PRECEU_PH_QBL))]
+ "ISA_HAS_DSP"
+ "preceu.ph.qbl\t%0,%1"
+ [(set_attr "type" "dspalu")
+ (set_attr "mode" "SI")])
+
+(define_insn "mips_preceu_ph_qbr"
+ [(set (match_operand:V2HI 0 "register_operand" "=d")
+ (unspec:V2HI [(match_operand:V4QI 1 "register_operand" "d")]
+ UNSPEC_PRECEU_PH_QBR))]
+ "ISA_HAS_DSP"
+ "preceu.ph.qbr\t%0,%1"
+ [(set_attr "type" "dspalu")
+ (set_attr "mode" "SI")])
+
+(define_insn "mips_preceu_ph_qbla"
+ [(set (match_operand:V2HI 0 "register_operand" "=d")
+ (unspec:V2HI [(match_operand:V4QI 1 "register_operand" "d")]
+ UNSPEC_PRECEU_PH_QBLA))]
+ "ISA_HAS_DSP"
+ "preceu.ph.qbla\t%0,%1"
+ [(set_attr "type" "dspalu")
+ (set_attr "mode" "SI")])
+
+(define_insn "mips_preceu_ph_qbra"
+ [(set (match_operand:V2HI 0 "register_operand" "=d")
+ (unspec:V2HI [(match_operand:V4QI 1 "register_operand" "d")]
+ UNSPEC_PRECEU_PH_QBRA))]
+ "ISA_HAS_DSP"
+ "preceu.ph.qbra\t%0,%1"
+ [(set_attr "type" "dspalu")
+ (set_attr "mode" "SI")])
+
+;; Table 2-2. MIPS DSP ASE Instructions: Shift
+;; SHLL*
+(define_insn "mips_shll_<DSPV:dspfmt2>"
+ [(parallel
+ [(set (match_operand:DSPV 0 "register_operand" "=d,d")
+ (unspec:DSPV [(match_operand:DSPV 1 "register_operand" "d,d")
+ (match_operand:SI 2 "arith_operand" "I,d")]
+ UNSPEC_SHLL))
+ (set (reg:CCDSP CCDSP_OU_REGNUM)
+ (unspec:CCDSP [(match_dup 1) (match_dup 2)] UNSPEC_SHLL))])]
+ "ISA_HAS_DSP"
+{
+ if (which_alternative == 0)
+ {
+ if (INTVAL (operands[2])
+ & ~(unsigned HOST_WIDE_INT) <DSPV:dspshift_mask>)
+ operands[2] = GEN_INT (INTVAL (operands[2]) & <DSPV:dspshift_mask>);
+ return "shll.<DSPV:dspfmt2>\t%0,%1,%2";
+ }
+ return "shllv.<DSPV:dspfmt2>\t%0,%1,%2";
+}
+ [(set_attr "type" "dspalu")
+ (set_attr "mode" "SI")])
+
+(define_insn "mips_shll_s_<DSPQ:dspfmt2>"
+ [(parallel
+ [(set (match_operand:DSPQ 0 "register_operand" "=d,d")
+ (unspec:DSPQ [(match_operand:DSPQ 1 "register_operand" "d,d")
+ (match_operand:SI 2 "arith_operand" "I,d")]
+ UNSPEC_SHLL_S))
+ (set (reg:CCDSP CCDSP_OU_REGNUM)
+ (unspec:CCDSP [(match_dup 1) (match_dup 2)] UNSPEC_SHLL_S))])]
+ "ISA_HAS_DSP"
+{
+ if (which_alternative == 0)
+ {
+ if (INTVAL (operands[2])
+ & ~(unsigned HOST_WIDE_INT) <DSPQ:dspshift_mask>)
+ operands[2] = GEN_INT (INTVAL (operands[2]) & <DSPQ:dspshift_mask>);
+ return "shll_s.<DSPQ:dspfmt2>\t%0,%1,%2";
+ }
+ return "shllv_s.<DSPQ:dspfmt2>\t%0,%1,%2";
+}
+ [(set_attr "type" "dspalusat")
+ (set_attr "mode" "SI")])
+
+;; SHRL*
+(define_insn "mips_shrl_qb"
+ [(set (match_operand:V4QI 0 "register_operand" "=d,d")
+ (unspec:V4QI [(match_operand:V4QI 1 "register_operand" "d,d")
+ (match_operand:SI 2 "arith_operand" "I,d")]
+ UNSPEC_SHRL_QB))]
+ "ISA_HAS_DSP"
+{
+ if (which_alternative == 0)
+ {
+ if (INTVAL (operands[2]) & ~(unsigned HOST_WIDE_INT) 0x7)
+ operands[2] = GEN_INT (INTVAL (operands[2]) & 0x7);
+ return "shrl.qb\t%0,%1,%2";
+ }
+ return "shrlv.qb\t%0,%1,%2";
+}
+ [(set_attr "type" "dspalu")
+ (set_attr "mode" "SI")])
+
+;; SHRA*
+(define_insn "mips_shra_ph"
+ [(set (match_operand:V2HI 0 "register_operand" "=d,d")
+ (unspec:V2HI [(match_operand:V2HI 1 "register_operand" "d,d")
+ (match_operand:SI 2 "arith_operand" "I,d")]
+ UNSPEC_SHRA_PH))]
+ "ISA_HAS_DSP"
+{
+ if (which_alternative == 0)
+ {
+ if (INTVAL (operands[2]) & ~(unsigned HOST_WIDE_INT) 0xf)
+ operands[2] = GEN_INT (INTVAL (operands[2]) & 0xf);
+ return "shra.ph\t%0,%1,%2";
+ }
+ return "shrav.ph\t%0,%1,%2";
+}
+ [(set_attr "type" "dspalu")
+ (set_attr "mode" "SI")])
+
+(define_insn "mips_shra_r_<DSPQ:dspfmt2>"
+ [(set (match_operand:DSPQ 0 "register_operand" "=d,d")
+ (unspec:DSPQ [(match_operand:DSPQ 1 "register_operand" "d,d")
+ (match_operand:SI 2 "arith_operand" "I,d")]
+ UNSPEC_SHRA_R))]
+ "ISA_HAS_DSP"
+{
+ if (which_alternative == 0)
+ {
+ if (INTVAL (operands[2])
+ & ~(unsigned HOST_WIDE_INT) <DSPQ:dspshift_mask>)
+ operands[2] = GEN_INT (INTVAL (operands[2]) & <DSPQ:dspshift_mask>);
+ return "shra_r.<DSPQ:dspfmt2>\t%0,%1,%2";
+ }
+ return "shrav_r.<DSPQ:dspfmt2>\t%0,%1,%2";
+}
+ [(set_attr "type" "dspalu")
+ (set_attr "mode" "SI")])
+
+;; Table 2-3. MIPS DSP ASE Instructions: Multiply
+;; MULEU*
+(define_insn "mips_muleu_s_ph_qbl"
+ [(parallel
+ [(set (match_operand:V2HI 0 "register_operand" "=d")
+ (unspec:V2HI [(match_operand:V4QI 1 "register_operand" "d")
+ (match_operand:V2HI 2 "register_operand" "d")]
+ UNSPEC_MULEU_S_PH_QBL))
+ (set (reg:CCDSP CCDSP_OU_REGNUM)
+ (unspec:CCDSP [(match_dup 1) (match_dup 2)] UNSPEC_MULEU_S_PH_QBL))
+ (clobber (match_scratch:DI 3 "=x"))])]
+ "ISA_HAS_DSP"
+ "muleu_s.ph.qbl\t%0,%1,%2"
+ [(set_attr "type" "imul3")
+ (set_attr "mode" "SI")])
+
+(define_insn "mips_muleu_s_ph_qbr"
+ [(parallel
+ [(set (match_operand:V2HI 0 "register_operand" "=d")
+ (unspec:V2HI [(match_operand:V4QI 1 "register_operand" "d")
+ (match_operand:V2HI 2 "register_operand" "d")]
+ UNSPEC_MULEU_S_PH_QBR))
+ (set (reg:CCDSP CCDSP_OU_REGNUM)
+ (unspec:CCDSP [(match_dup 1) (match_dup 2)] UNSPEC_MULEU_S_PH_QBR))
+ (clobber (match_scratch:DI 3 "=x"))])]
+ "ISA_HAS_DSP"
+ "muleu_s.ph.qbr\t%0,%1,%2"
+ [(set_attr "type" "imul3")
+ (set_attr "mode" "SI")])
+
+;; MULQ*
+(define_insn "mips_mulq_rs_ph"
+ [(parallel
+ [(set (match_operand:V2HI 0 "register_operand" "=d")
+ (unspec:V2HI [(match_operand:V2HI 1 "register_operand" "d")
+ (match_operand:V2HI 2 "register_operand" "d")]
+ UNSPEC_MULQ_RS_PH))
+ (set (reg:CCDSP CCDSP_OU_REGNUM)
+ (unspec:CCDSP [(match_dup 1) (match_dup 2)] UNSPEC_MULQ_RS_PH))
+ (clobber (match_scratch:DI 3 "=x"))])]
+ "ISA_HAS_DSP"
+ "mulq_rs.ph\t%0,%1,%2"
+ [(set_attr "type" "imul3")
+ (set_attr "mode" "SI")])
+
+;; MULEQ*
+(define_insn "mips_muleq_s_w_phl"
+ [(parallel
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (unspec:SI [(match_operand:V2HI 1 "register_operand" "d")
+ (match_operand:V2HI 2 "register_operand" "d")]
+ UNSPEC_MULEQ_S_W_PHL))
+ (set (reg:CCDSP CCDSP_OU_REGNUM)
+ (unspec:CCDSP [(match_dup 1) (match_dup 2)] UNSPEC_MULEQ_S_W_PHL))
+ (clobber (match_scratch:DI 3 "=x"))])]
+ "ISA_HAS_DSP"
+ "muleq_s.w.phl\t%0,%1,%2"
+ [(set_attr "type" "imul3")
+ (set_attr "mode" "SI")])
+
+(define_insn "mips_muleq_s_w_phr"
+ [(parallel
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (unspec:SI [(match_operand:V2HI 1 "register_operand" "d")
+ (match_operand:V2HI 2 "register_operand" "d")]
+ UNSPEC_MULEQ_S_W_PHR))
+ (set (reg:CCDSP CCDSP_OU_REGNUM)
+ (unspec:CCDSP [(match_dup 1) (match_dup 2)] UNSPEC_MULEQ_S_W_PHR))
+ (clobber (match_scratch:DI 3 "=x"))])]
+ "ISA_HAS_DSP"
+ "muleq_s.w.phr\t%0,%1,%2"
+ [(set_attr "type" "imul3")
+ (set_attr "mode" "SI")])
+
+;; DPAU*
+(define_insn "mips_dpau_h_qbl"
+ [(set (match_operand:DI 0 "register_operand" "=a")
+ (unspec:DI [(match_operand:DI 1 "register_operand" "0")
+ (match_operand:V4QI 2 "register_operand" "d")
+ (match_operand:V4QI 3 "register_operand" "d")]
+ UNSPEC_DPAU_H_QBL))]
+ "ISA_HAS_DSP && !TARGET_64BIT"
+ "dpau.h.qbl\t%q0,%2,%3"
+ [(set_attr "type" "dspmac")
+ (set_attr "accum_in" "1")
+ (set_attr "mode" "SI")])
+
+(define_insn "mips_dpau_h_qbr"
+ [(set (match_operand:DI 0 "register_operand" "=a")
+ (unspec:DI [(match_operand:DI 1 "register_operand" "0")
+ (match_operand:V4QI 2 "register_operand" "d")
+ (match_operand:V4QI 3 "register_operand" "d")]
+ UNSPEC_DPAU_H_QBR))]
+ "ISA_HAS_DSP && !TARGET_64BIT"
+ "dpau.h.qbr\t%q0,%2,%3"
+ [(set_attr "type" "dspmac")
+ (set_attr "accum_in" "1")
+ (set_attr "mode" "SI")])
+
+;; DPSU*
+(define_insn "mips_dpsu_h_qbl"
+ [(set (match_operand:DI 0 "register_operand" "=a")
+ (unspec:DI [(match_operand:DI 1 "register_operand" "0")
+ (match_operand:V4QI 2 "register_operand" "d")
+ (match_operand:V4QI 3 "register_operand" "d")]
+ UNSPEC_DPSU_H_QBL))]
+ "ISA_HAS_DSP && !TARGET_64BIT"
+ "dpsu.h.qbl\t%q0,%2,%3"
+ [(set_attr "type" "dspmac")
+ (set_attr "accum_in" "1")
+ (set_attr "mode" "SI")])
+
+(define_insn "mips_dpsu_h_qbr"
+ [(set (match_operand:DI 0 "register_operand" "=a")
+ (unspec:DI [(match_operand:DI 1 "register_operand" "0")
+ (match_operand:V4QI 2 "register_operand" "d")
+ (match_operand:V4QI 3 "register_operand" "d")]
+ UNSPEC_DPSU_H_QBR))]
+ "ISA_HAS_DSP && !TARGET_64BIT"
+ "dpsu.h.qbr\t%q0,%2,%3"
+ [(set_attr "type" "dspmac")
+ (set_attr "accum_in" "1")
+ (set_attr "mode" "SI")])
+
+;; DPAQ*
+(define_insn "mips_dpaq_s_w_ph"
+ [(parallel
+ [(set (match_operand:DI 0 "register_operand" "=a")
+ (unspec:DI [(match_operand:DI 1 "register_operand" "0")
+ (match_operand:V2HI 2 "register_operand" "d")
+ (match_operand:V2HI 3 "register_operand" "d")]
+ UNSPEC_DPAQ_S_W_PH))
+ (set (reg:CCDSP CCDSP_OU_REGNUM)
+ (unspec:CCDSP [(match_dup 1) (match_dup 2) (match_dup 3)]
+ UNSPEC_DPAQ_S_W_PH))])]
+ "ISA_HAS_DSP && !TARGET_64BIT"
+ "dpaq_s.w.ph\t%q0,%2,%3"
+ [(set_attr "type" "dspmac")
+ (set_attr "accum_in" "1")
+ (set_attr "mode" "SI")])
+
+;; DPSQ*
+(define_insn "mips_dpsq_s_w_ph"
+ [(parallel
+ [(set (match_operand:DI 0 "register_operand" "=a")
+ (unspec:DI [(match_operand:DI 1 "register_operand" "0")
+ (match_operand:V2HI 2 "register_operand" "d")
+ (match_operand:V2HI 3 "register_operand" "d")]
+ UNSPEC_DPSQ_S_W_PH))
+ (set (reg:CCDSP CCDSP_OU_REGNUM)
+ (unspec:CCDSP [(match_dup 1) (match_dup 2) (match_dup 3)]
+ UNSPEC_DPSQ_S_W_PH))])]
+ "ISA_HAS_DSP && !TARGET_64BIT"
+ "dpsq_s.w.ph\t%q0,%2,%3"
+ [(set_attr "type" "dspmac")
+ (set_attr "accum_in" "1")
+ (set_attr "mode" "SI")])
+
+;; MULSAQ*
+(define_insn "mips_mulsaq_s_w_ph"
+ [(parallel
+ [(set (match_operand:DI 0 "register_operand" "=a")
+ (unspec:DI [(match_operand:DI 1 "register_operand" "0")
+ (match_operand:V2HI 2 "register_operand" "d")
+ (match_operand:V2HI 3 "register_operand" "d")]
+ UNSPEC_MULSAQ_S_W_PH))
+ (set (reg:CCDSP CCDSP_OU_REGNUM)
+ (unspec:CCDSP [(match_dup 1) (match_dup 2) (match_dup 3)]
+ UNSPEC_MULSAQ_S_W_PH))])]
+ "ISA_HAS_DSP && !TARGET_64BIT"
+ "mulsaq_s.w.ph\t%q0,%2,%3"
+ [(set_attr "type" "dspmac")
+ (set_attr "accum_in" "1")
+ (set_attr "mode" "SI")])
+
+;; DPAQ*
+(define_insn "mips_dpaq_sa_l_w"
+ [(parallel
+ [(set (match_operand:DI 0 "register_operand" "=a")
+ (unspec:DI [(match_operand:DI 1 "register_operand" "0")
+ (match_operand:SI 2 "register_operand" "d")
+ (match_operand:SI 3 "register_operand" "d")]
+ UNSPEC_DPAQ_SA_L_W))
+ (set (reg:CCDSP CCDSP_OU_REGNUM)
+ (unspec:CCDSP [(match_dup 1) (match_dup 2) (match_dup 3)]
+ UNSPEC_DPAQ_SA_L_W))])]
+ "ISA_HAS_DSP && !TARGET_64BIT"
+ "dpaq_sa.l.w\t%q0,%2,%3"
+ [(set_attr "type" "dspmacsat")
+ (set_attr "accum_in" "1")
+ (set_attr "mode" "SI")])
+
+;; DPSQ*
+(define_insn "mips_dpsq_sa_l_w"
+ [(parallel
+ [(set (match_operand:DI 0 "register_operand" "=a")
+ (unspec:DI [(match_operand:DI 1 "register_operand" "0")
+ (match_operand:SI 2 "register_operand" "d")
+ (match_operand:SI 3 "register_operand" "d")]
+ UNSPEC_DPSQ_SA_L_W))
+ (set (reg:CCDSP CCDSP_OU_REGNUM)
+ (unspec:CCDSP [(match_dup 1) (match_dup 2) (match_dup 3)]
+ UNSPEC_DPSQ_SA_L_W))])]
+ "ISA_HAS_DSP && !TARGET_64BIT"
+ "dpsq_sa.l.w\t%q0,%2,%3"
+ [(set_attr "type" "dspmacsat")
+ (set_attr "accum_in" "1")
+ (set_attr "mode" "SI")])
+
+;; MAQ*
+(define_insn "mips_maq_s_w_phl"
+ [(parallel
+ [(set (match_operand:DI 0 "register_operand" "=a")
+ (unspec:DI [(match_operand:DI 1 "register_operand" "0")
+ (match_operand:V2HI 2 "register_operand" "d")
+ (match_operand:V2HI 3 "register_operand" "d")]
+ UNSPEC_MAQ_S_W_PHL))
+ (set (reg:CCDSP CCDSP_OU_REGNUM)
+ (unspec:CCDSP [(match_dup 1) (match_dup 2) (match_dup 3)]
+ UNSPEC_MAQ_S_W_PHL))])]
+ "ISA_HAS_DSP && !TARGET_64BIT"
+ "maq_s.w.phl\t%q0,%2,%3"
+ [(set_attr "type" "dspmac")
+ (set_attr "accum_in" "1")
+ (set_attr "mode" "SI")])
+
+(define_insn "mips_maq_s_w_phr"
+ [(parallel
+ [(set (match_operand:DI 0 "register_operand" "=a")
+ (unspec:DI [(match_operand:DI 1 "register_operand" "0")
+ (match_operand:V2HI 2 "register_operand" "d")
+ (match_operand:V2HI 3 "register_operand" "d")]
+ UNSPEC_MAQ_S_W_PHR))
+ (set (reg:CCDSP CCDSP_OU_REGNUM)
+ (unspec:CCDSP [(match_dup 1) (match_dup 2) (match_dup 3)]
+ UNSPEC_MAQ_S_W_PHR))])]
+ "ISA_HAS_DSP && !TARGET_64BIT"
+ "maq_s.w.phr\t%q0,%2,%3"
+ [(set_attr "type" "dspmac")
+ (set_attr "accum_in" "1")
+ (set_attr "mode" "SI")])
+
+;; MAQ_SA*
+(define_insn "mips_maq_sa_w_phl"
+ [(parallel
+ [(set (match_operand:DI 0 "register_operand" "=a")
+ (unspec:DI [(match_operand:DI 1 "register_operand" "0")
+ (match_operand:V2HI 2 "register_operand" "d")
+ (match_operand:V2HI 3 "register_operand" "d")]
+ UNSPEC_MAQ_SA_W_PHL))
+ (set (reg:CCDSP CCDSP_OU_REGNUM)
+ (unspec:CCDSP [(match_dup 1) (match_dup 2) (match_dup 3)]
+ UNSPEC_MAQ_SA_W_PHL))])]
+ "ISA_HAS_DSP && !TARGET_64BIT"
+ "maq_sa.w.phl\t%q0,%2,%3"
+ [(set_attr "type" "dspmacsat")
+ (set_attr "accum_in" "1")
+ (set_attr "mode" "SI")])
+
+(define_insn "mips_maq_sa_w_phr"
+ [(parallel
+ [(set (match_operand:DI 0 "register_operand" "=a")
+ (unspec:DI [(match_operand:DI 1 "register_operand" "0")
+ (match_operand:V2HI 2 "register_operand" "d")
+ (match_operand:V2HI 3 "register_operand" "d")]
+ UNSPEC_MAQ_SA_W_PHR))
+ (set (reg:CCDSP CCDSP_OU_REGNUM)
+ (unspec:CCDSP [(match_dup 1) (match_dup 2) (match_dup 3)]
+ UNSPEC_MAQ_SA_W_PHR))])]
+ "ISA_HAS_DSP && !TARGET_64BIT"
+ "maq_sa.w.phr\t%q0,%2,%3"
+ [(set_attr "type" "dspmacsat")
+ (set_attr "accum_in" "1")
+ (set_attr "mode" "SI")])
+
+;; Table 2-4. MIPS DSP ASE Instructions: General Bit/Manipulation
+;; BITREV
+(define_insn "mips_bitrev"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (unspec:SI [(match_operand:SI 1 "register_operand" "d")]
+ UNSPEC_BITREV))]
+ "ISA_HAS_DSP"
+ "bitrev\t%0,%1"
+ [(set_attr "type" "dspalu")
+ (set_attr "mode" "SI")])
+
+;; INSV
+;; INSV: insert bits of %2 into %0.  The bit position and size come
+;; implicitly from the DSPControl register, which is why the pattern
+;; lists the scinsert (SC) and pos (PO) fields as unspec inputs — this
+;; keeps the insn ordered after any wrdsp that sets them.
+(define_insn "mips_insv"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (unspec:SI [(match_operand:SI 1 "register_operand" "0")
+ (match_operand:SI 2 "register_operand" "d")
+ (reg:CCDSP CCDSP_SC_REGNUM)
+ (reg:CCDSP CCDSP_PO_REGNUM)]
+ UNSPEC_INSV))]
+ "ISA_HAS_DSP"
+ "insv\t%0,%2"
+ [(set_attr "type" "dspalu")
+ (set_attr "mode" "SI")])
+
+;; REPL*
+;; REPL.QB: replicate a byte value into all four lanes of a V4QI
+;; register.  Alternative 0 takes an immediate (repl.qb, masked to
+;; 8 bits at output time); alternative 1 takes a register (replv.qb).
+(define_insn "mips_repl_qb"
+ [(set (match_operand:V4QI 0 "register_operand" "=d,d")
+ (unspec:V4QI [(match_operand:SI 1 "arith_operand" "I,d")]
+ UNSPEC_REPL_QB))]
+ "ISA_HAS_DSP"
+{
+ if (which_alternative == 0)
+ {
+ if (INTVAL (operands[1]) & ~(unsigned HOST_WIDE_INT) 0xff)
+ operands[1] = GEN_INT (INTVAL (operands[1]) & 0xff);
+ return "repl.qb\t%0,%1";
+ }
+ return "replv.qb\t%0,%1";
+}
+ [(set_attr "type" "dspalu")
+ (set_attr "mode" "SI")])
+
+(define_insn "mips_repl_ph"
+ [(set (match_operand:V2HI 0 "register_operand" "=d,d")
+ (unspec:V2HI [(match_operand:SI 1 "reg_imm10_operand" "YB,d")]
+ UNSPEC_REPL_PH))]
+ "ISA_HAS_DSP"
+ "@
+ repl.ph\t%0,%1
+ replv.ph\t%0,%1"
+ [(set_attr "type" "dspalu")
+ (set_attr "mode" "SI")])
+
+;; Table 2-5. MIPS DSP ASE Instructions: Compare-Pick
+;; CMPU.* CMP.*
+(define_insn "mips_cmp<DSPV:dspfmt1_1>_eq_<DSPV:dspfmt2>"
+ [(set (reg:CCDSP CCDSP_CC_REGNUM)
+ (unspec:CCDSP [(match_operand:DSPV 0 "register_operand" "d")
+ (match_operand:DSPV 1 "register_operand" "d")
+ (reg:CCDSP CCDSP_CC_REGNUM)]
+ UNSPEC_CMP_EQ))]
+ "ISA_HAS_DSP"
+ "cmp<DSPV:dspfmt1_1>.eq.<DSPV:dspfmt2>\t%0,%1"
+ [(set_attr "type" "dspalu")
+ (set_attr "mode" "SI")])
+
+(define_insn "mips_cmp<DSPV:dspfmt1_1>_lt_<DSPV:dspfmt2>"
+ [(set (reg:CCDSP CCDSP_CC_REGNUM)
+ (unspec:CCDSP [(match_operand:DSPV 0 "register_operand" "d")
+ (match_operand:DSPV 1 "register_operand" "d")
+ (reg:CCDSP CCDSP_CC_REGNUM)]
+ UNSPEC_CMP_LT))]
+ "ISA_HAS_DSP"
+ "cmp<DSPV:dspfmt1_1>.lt.<DSPV:dspfmt2>\t%0,%1"
+ [(set_attr "type" "dspalu")
+ (set_attr "mode" "SI")])
+
+(define_insn "mips_cmp<DSPV:dspfmt1_1>_le_<DSPV:dspfmt2>"
+ [(set (reg:CCDSP CCDSP_CC_REGNUM)
+ (unspec:CCDSP [(match_operand:DSPV 0 "register_operand" "d")
+ (match_operand:DSPV 1 "register_operand" "d")
+ (reg:CCDSP CCDSP_CC_REGNUM)]
+ UNSPEC_CMP_LE))]
+ "ISA_HAS_DSP"
+ "cmp<DSPV:dspfmt1_1>.le.<DSPV:dspfmt2>\t%0,%1"
+ [(set_attr "type" "dspalu")
+ (set_attr "mode" "SI")])
+
+(define_insn "mips_cmpgu_eq_qb"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (unspec:SI [(match_operand:V4QI 1 "register_operand" "d")
+ (match_operand:V4QI 2 "register_operand" "d")]
+ UNSPEC_CMPGU_EQ_QB))]
+ "ISA_HAS_DSP"
+ "cmpgu.eq.qb\t%0,%1,%2"
+ [(set_attr "type" "dspalu")
+ (set_attr "mode" "SI")])
+
+(define_insn "mips_cmpgu_lt_qb"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (unspec:SI [(match_operand:V4QI 1 "register_operand" "d")
+ (match_operand:V4QI 2 "register_operand" "d")]
+ UNSPEC_CMPGU_LT_QB))]
+ "ISA_HAS_DSP"
+ "cmpgu.lt.qb\t%0,%1,%2"
+ [(set_attr "type" "dspalu")
+ (set_attr "mode" "SI")])
+
+(define_insn "mips_cmpgu_le_qb"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (unspec:SI [(match_operand:V4QI 1 "register_operand" "d")
+ (match_operand:V4QI 2 "register_operand" "d")]
+ UNSPEC_CMPGU_LE_QB))]
+ "ISA_HAS_DSP"
+ "cmpgu.le.qb\t%0,%1,%2"
+ [(set_attr "type" "dspalu")
+ (set_attr "mode" "SI")])
+
+;; PICK*
+(define_insn "mips_pick_<DSPV:dspfmt2>"
+ [(set (match_operand:DSPV 0 "register_operand" "=d")
+ (unspec:DSPV [(match_operand:DSPV 1 "register_operand" "d")
+ (match_operand:DSPV 2 "register_operand" "d")
+ (reg:CCDSP CCDSP_CC_REGNUM)]
+ UNSPEC_PICK))]
+ "ISA_HAS_DSP"
+ "pick.<DSPV:dspfmt2>\t%0,%1,%2"
+ [(set_attr "type" "dspalu")
+ (set_attr "mode" "SI")])
+
+;; PACKRL*
+(define_insn "mips_packrl_ph"
+ [(set (match_operand:V2HI 0 "register_operand" "=d")
+ (unspec:V2HI [(match_operand:V2HI 1 "register_operand" "d")
+ (match_operand:V2HI 2 "register_operand" "d")]
+ UNSPEC_PACKRL_PH))]
+ "ISA_HAS_DSP"
+ "packrl.ph\t%0,%1,%2"
+ [(set_attr "type" "dspalu")
+ (set_attr "mode" "SI")])
+
+;; Table 2-6. MIPS DSP ASE Instructions: Accumulator and DSPControl Access
+;; EXTR*
+(define_insn "mips_extr_w"
+ [(parallel
+ [(set (match_operand:SI 0 "register_operand" "=d,d")
+ (unspec:SI [(match_operand:DI 1 "register_operand" "a,a")
+ (match_operand:SI 2 "arith_operand" "I,d")]
+ UNSPEC_EXTR_W))
+ (set (reg:CCDSP CCDSP_OU_REGNUM)
+ (unspec:CCDSP [(match_dup 1) (match_dup 2)] UNSPEC_EXTR_W))])]
+ "ISA_HAS_DSP && !TARGET_64BIT"
+{
+ if (which_alternative == 0)
+ {
+ if (INTVAL (operands[2]) & ~(unsigned HOST_WIDE_INT) 0x1f)
+ operands[2] = GEN_INT (INTVAL (operands[2]) & 0x1f);
+ return "extr.w\t%0,%q1,%2";
+ }
+ return "extrv.w\t%0,%q1,%2";
+}
+ [(set_attr "type" "accext")
+ (set_attr "mode" "SI")])
+
+(define_insn "mips_extr_r_w"
+ [(parallel
+ [(set (match_operand:SI 0 "register_operand" "=d,d")
+ (unspec:SI [(match_operand:DI 1 "register_operand" "a,a")
+ (match_operand:SI 2 "arith_operand" "I,d")]
+ UNSPEC_EXTR_R_W))
+ (set (reg:CCDSP CCDSP_OU_REGNUM)
+ (unspec:CCDSP [(match_dup 1) (match_dup 2)] UNSPEC_EXTR_R_W))])]
+ "ISA_HAS_DSP && !TARGET_64BIT"
+{
+ if (which_alternative == 0)
+ {
+ if (INTVAL (operands[2]) & ~(unsigned HOST_WIDE_INT) 0x1f)
+ operands[2] = GEN_INT (INTVAL (operands[2]) & 0x1f);
+ return "extr_r.w\t%0,%q1,%2";
+ }
+ return "extrv_r.w\t%0,%q1,%2";
+}
+ [(set_attr "type" "accext")
+ (set_attr "mode" "SI")])
+
+(define_insn "mips_extr_rs_w"
+ [(parallel
+ [(set (match_operand:SI 0 "register_operand" "=d,d")
+ (unspec:SI [(match_operand:DI 1 "register_operand" "a,a")
+ (match_operand:SI 2 "arith_operand" "I,d")]
+ UNSPEC_EXTR_RS_W))
+ (set (reg:CCDSP CCDSP_OU_REGNUM)
+ (unspec:CCDSP [(match_dup 1) (match_dup 2)] UNSPEC_EXTR_RS_W))])]
+ "ISA_HAS_DSP && !TARGET_64BIT"
+{
+ if (which_alternative == 0)
+ {
+ if (INTVAL (operands[2]) & ~(unsigned HOST_WIDE_INT) 0x1f)
+ operands[2] = GEN_INT (INTVAL (operands[2]) & 0x1f);
+ return "extr_rs.w\t%0,%q1,%2";
+ }
+ return "extrv_rs.w\t%0,%q1,%2";
+}
+ [(set_attr "type" "accext")
+ (set_attr "mode" "SI")])
+
+;; EXTR*_S.H
+(define_insn "mips_extr_s_h"
+ [(parallel
+ [(set (match_operand:SI 0 "register_operand" "=d,d")
+ (unspec:SI [(match_operand:DI 1 "register_operand" "a,a")
+ (match_operand:SI 2 "arith_operand" "I,d")]
+ UNSPEC_EXTR_S_H))
+ (set (reg:CCDSP CCDSP_OU_REGNUM)
+ (unspec:CCDSP [(match_dup 1) (match_dup 2)] UNSPEC_EXTR_S_H))])]
+ "ISA_HAS_DSP && !TARGET_64BIT"
+{
+ if (which_alternative == 0)
+ {
+ if (INTVAL (operands[2]) & ~(unsigned HOST_WIDE_INT) 0x1f)
+ operands[2] = GEN_INT (INTVAL (operands[2]) & 0x1f);
+ return "extr_s.h\t%0,%q1,%2";
+ }
+ return "extrv_s.h\t%0,%q1,%2";
+}
+ [(set_attr "type" "accext")
+ (set_attr "mode" "SI")])
+
+;; EXTP*
+(define_insn "mips_extp"
+ [(parallel
+ [(set (match_operand:SI 0 "register_operand" "=d,d")
+ (unspec:SI [(match_operand:DI 1 "register_operand" "a,a")
+ (match_operand:SI 2 "arith_operand" "I,d")
+ (reg:CCDSP CCDSP_PO_REGNUM)]
+ UNSPEC_EXTP))
+ (set (reg:CCDSP CCDSP_EF_REGNUM)
+ (unspec:CCDSP [(match_dup 1) (match_dup 2)] UNSPEC_EXTP))])]
+ "ISA_HAS_DSP && !TARGET_64BIT"
+{
+ if (which_alternative == 0)
+ {
+ if (INTVAL (operands[2]) & ~(unsigned HOST_WIDE_INT) 0x1f)
+ operands[2] = GEN_INT (INTVAL (operands[2]) & 0x1f);
+ return "extp\t%0,%q1,%2";
+ }
+ return "extpv\t%0,%q1,%2";
+}
+ [(set_attr "type" "accext")
+ (set_attr "mode" "SI")])
+
+(define_insn "mips_extpdp"
+ [(parallel
+ [(set (match_operand:SI 0 "register_operand" "=d,d")
+ (unspec:SI [(match_operand:DI 1 "register_operand" "a,a")
+ (match_operand:SI 2 "arith_operand" "I,d")
+ (reg:CCDSP CCDSP_PO_REGNUM)]
+ UNSPEC_EXTPDP))
+ (set (reg:CCDSP CCDSP_PO_REGNUM)
+ (unspec:CCDSP [(match_dup 1) (match_dup 2)
+ (reg:CCDSP CCDSP_PO_REGNUM)] UNSPEC_EXTPDP))
+ (set (reg:CCDSP CCDSP_EF_REGNUM)
+ (unspec:CCDSP [(match_dup 1) (match_dup 2)] UNSPEC_EXTPDP))])]
+ "ISA_HAS_DSP && !TARGET_64BIT"
+{
+ if (which_alternative == 0)
+ {
+ if (INTVAL (operands[2]) & ~(unsigned HOST_WIDE_INT) 0x1f)
+ operands[2] = GEN_INT (INTVAL (operands[2]) & 0x1f);
+ return "extpdp\t%0,%q1,%2";
+ }
+ return "extpdpv\t%0,%q1,%2";
+}
+ [(set_attr "type" "accext")
+ (set_attr "mode" "SI")])
+
+;; SHILO*
+;; SHILO: shift the HI/LO accumulator pair by a signed amount.  Two
+;; alternatives: immediate form (shilo) and register form (shilov).
+;; The immediate is a signed 6-bit field, hence the -32..31 range test
+;; followed by a 6-bit mask rather than the unsigned-mask idiom used
+;; by the other shift patterns in this file.
+(define_insn "mips_shilo"
+ [(set (match_operand:DI 0 "register_operand" "=a,a")
+ (unspec:DI [(match_operand:DI 1 "register_operand" "0,0")
+ (match_operand:SI 2 "arith_operand" "I,d")]
+ UNSPEC_SHILO))]
+ "ISA_HAS_DSP && !TARGET_64BIT"
+{
+ if (which_alternative == 0)
+ {
+ if (INTVAL (operands[2]) < -32 || INTVAL (operands[2]) > 31)
+ operands[2] = GEN_INT (INTVAL (operands[2]) & 0x3f)
;
+ return "shilo\t%q0,%2";
+ }
+ return "shilov\t%q0,%2";
+}
+ [(set_attr "type" "accmod")
+ (set_attr "mode" "SI")])
+
+;; MTHLIP*
+(define_insn "mips_mthlip"
+ [(parallel
+ [(set (match_operand:DI 0 "register_operand" "=a")
+ (unspec:DI [(match_operand:DI 1 "register_operand" "0")
+ (match_operand:SI 2 "register_operand" "d")
+ (reg:CCDSP CCDSP_PO_REGNUM)]
+ UNSPEC_MTHLIP))
+ (set (reg:CCDSP CCDSP_PO_REGNUM)
+ (unspec:CCDSP [(match_dup 1) (match_dup 2)
+ (reg:CCDSP CCDSP_PO_REGNUM)] UNSPEC_MTHLIP))])]
+ "ISA_HAS_DSP && !TARGET_64BIT"
+ "mthlip\t%2,%q0"
+ [(set_attr "type" "accmod")
+ (set_attr "mode" "SI")])
+
+;; WRDSP
+(define_insn "mips_wrdsp"
+ [(parallel
+ [(set (reg:CCDSP CCDSP_PO_REGNUM)
+ (unspec:CCDSP [(match_operand:SI 0 "register_operand" "d")
+ (match_operand:SI 1 "const_uimm6_operand" "YA")]
+ UNSPEC_WRDSP))
+ (set (reg:CCDSP CCDSP_SC_REGNUM)
+ (unspec:CCDSP [(match_dup 0) (match_dup 1)] UNSPEC_WRDSP))
+ (set (reg:CCDSP CCDSP_CA_REGNUM)
+ (unspec:CCDSP [(match_dup 0) (match_dup 1)] UNSPEC_WRDSP))
+ (set (reg:CCDSP CCDSP_OU_REGNUM)
+ (unspec:CCDSP [(match_dup 0) (match_dup 1)] UNSPEC_WRDSP))
+ (set (reg:CCDSP CCDSP_CC_REGNUM)
+ (unspec:CCDSP [(match_dup 0) (match_dup 1)] UNSPEC_WRDSP))
+ (set (reg:CCDSP CCDSP_EF_REGNUM)
+ (unspec:CCDSP [(match_dup 0) (match_dup 1)] UNSPEC_WRDSP))])]
+ "ISA_HAS_DSP"
+ "wrdsp\t%0,%1"
+ [(set_attr "type" "dspalu")
+ (set_attr "mode" "SI")])
+
+;; RDDSP
+(define_insn "mips_rddsp"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (unspec:SI [(match_operand:SI 1 "const_uimm6_operand" "YA")
+ (reg:CCDSP CCDSP_PO_REGNUM)
+ (reg:CCDSP CCDSP_SC_REGNUM)
+ (reg:CCDSP CCDSP_CA_REGNUM)
+ (reg:CCDSP CCDSP_OU_REGNUM)
+ (reg:CCDSP CCDSP_CC_REGNUM)
+ (reg:CCDSP CCDSP_EF_REGNUM)]
+ UNSPEC_RDDSP))]
+ "ISA_HAS_DSP"
+ "rddsp\t%0,%1"
+ [(set_attr "type" "dspalu")
+ (set_attr "mode" "SI")])
+
+;; Table 2-7. MIPS DSP ASE Instructions: Indexed-Load
+;; L*X
+(define_expand "mips_lbux"
+ [(match_operand:SI 0 "register_operand")
+ (match_operand 1 "pmode_register_operand")
+ (match_operand:SI 2 "register_operand")]
+ "ISA_HAS_DSP"
+{
+ operands[2] = convert_to_mode (Pmode, operands[2], false);
+ emit_insn (PMODE_INSN (gen_mips_lbux_extsi,
+ (operands[0], operands[1], operands[2])));
+ DONE;
+})
+
+(define_insn "mips_l<SHORT:size><u>x_ext<GPR:mode>_<P:mode>"
+ [(set (match_operand:GPR 0 "register_operand" "=d")
+ (any_extend:GPR
+ (mem:SHORT (plus:P (match_operand:P 1 "register_operand" "d")
+ (match_operand:P 2 "register_operand" "d")))))]
+ "ISA_HAS_L<SHORT:SIZE><U>X"
+ "l<SHORT:size><u>x\t%0,%2(%1)"
+ [(set_attr "type" "load")
+ (set_attr "mode" "<GPR:MODE>")])
+
+(define_expand "mips_lhx"
+ [(match_operand:SI 0 "register_operand")
+ (match_operand 1 "pmode_register_operand")
+ (match_operand:SI 2 "register_operand")]
+ "ISA_HAS_DSP"
+{
+ operands[2] = convert_to_mode (Pmode, operands[2], false);
+ emit_insn (PMODE_INSN (gen_mips_lhx_extsi,
+ (operands[0], operands[1], operands[2])));
+ DONE;
+})
+
+(define_expand "mips_l<size>x"
+ [(match_operand:GPR 0 "register_operand")
+ (match_operand 1 "pmode_register_operand")
+ (match_operand:SI 2 "register_operand")]
+ "ISA_HAS_DSP"
+{
+ operands[2] = convert_to_mode (Pmode, operands[2], false);
+ emit_insn (PMODE_INSN (gen_mips_l<size>x,
+ (operands[0], operands[1], operands[2])));
+ DONE;
+})
+
+(define_insn "mips_l<GPR:size>x_<P:mode>"
+ [(set (match_operand:GPR 0 "register_operand" "=d")
+ (mem:GPR (plus:P (match_operand:P 1 "register_operand" "d")
+ (match_operand:P 2 "register_operand" "d"))))]
+ "ISA_HAS_L<GPR:SIZE>X"
+ "l<GPR:size>x\t%0,%2(%1)"
+ [(set_attr "type" "load")
+ (set_attr "mode" "<GPR:MODE>")])
+
+(define_insn "*mips_lw<u>x_<P:mode>_ext"
+ [(set (match_operand:DI 0 "register_operand" "=d")
+ (any_extend:DI
+ (mem:SI (plus:P (match_operand:P 1 "register_operand" "d")
+ (match_operand:P 2 "register_operand" "d")))))]
+ "ISA_HAS_LW<U>X && TARGET_64BIT"
+ "lw<u>x\t%0,%2(%1)"
+ [(set_attr "type" "load")
+ (set_attr "mode" "DI")])
+
+;; Table 2-8. MIPS DSP ASE Instructions: Branch
+;; BPOSGE32
+;; BPOSGE32: branch to %0 when the DSPControl pos field is >= the
+;; immediate in operand 1 (the immediate is folded into the mnemonic
+;; via %1).  %* and %/ are the MIPS delay-slot annotations used by all
+;; branch patterns in this port.
+(define_insn "mips_bposge"
+ [(set (pc)
+ (if_then_else (ge (reg:CCDSP CCDSP_PO_REGNUM)
+ (match_operand:SI 1 "immediate_operand" "I"))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ "ISA_HAS_DSP"
+ "%*bposge%1\t%0%/"
+ [(set_attr "type" "branch")])
+
+(define_expand "mips_madd<u>"
+ [(set (match_operand:DI 0 "register_operand")
+ (plus:DI
+ (mult:DI (any_extend:DI (match_operand:SI 2 "register_operand"))
+ (any_extend:DI (match_operand:SI 3 "register_operand")))
+ (match_operand:DI 1 "register_operand")))]
+ "ISA_HAS_DSP && !TARGET_64BIT")
+
+(define_expand "mips_msub<u>"
+ [(set (match_operand:DI 0 "register_operand")
+ (minus:DI
+ (match_operand:DI 1 "register_operand")
+ (mult:DI (any_extend:DI (match_operand:SI 2 "register_operand"))
+ (any_extend:DI (match_operand:SI 3 "register_operand")))))]
+ "ISA_HAS_DSP && !TARGET_64BIT")
diff --git a/gcc-4.9/gcc/config/mips/mips-dspr2.md b/gcc-4.9/gcc/config/mips/mips-dspr2.md
new file mode 100644
index 000000000..3fe401f9f
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/mips-dspr2.md
@@ -0,0 +1,632 @@
+;; Copyright (C) 2007-2014 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+;;
+; MIPS DSP ASE REV 2 Revision 0.02 11/24/2006
+
+(define_c_enum "unspec" [
+ UNSPEC_ABSQ_S_QB
+ UNSPEC_ADDU_PH
+ UNSPEC_ADDU_S_PH
+ UNSPEC_ADDUH_QB
+ UNSPEC_ADDUH_R_QB
+ UNSPEC_APPEND
+ UNSPEC_BALIGN
+ UNSPEC_CMPGDU_EQ_QB
+ UNSPEC_CMPGDU_LT_QB
+ UNSPEC_CMPGDU_LE_QB
+ UNSPEC_DPA_W_PH
+ UNSPEC_DPS_W_PH
+ UNSPEC_MADD
+ UNSPEC_MADDU
+ UNSPEC_MSUB
+ UNSPEC_MSUBU
+ UNSPEC_MUL_PH
+ UNSPEC_MUL_S_PH
+ UNSPEC_MULQ_RS_W
+ UNSPEC_MULQ_S_PH
+ UNSPEC_MULQ_S_W
+ UNSPEC_MULSA_W_PH
+ UNSPEC_MULT
+ UNSPEC_MULTU
+ UNSPEC_PRECR_QB_PH
+ UNSPEC_PRECR_SRA_PH_W
+ UNSPEC_PRECR_SRA_R_PH_W
+ UNSPEC_PREPEND
+ UNSPEC_SHRA_QB
+ UNSPEC_SHRA_R_QB
+ UNSPEC_SHRL_PH
+ UNSPEC_SUBU_PH
+ UNSPEC_SUBU_S_PH
+ UNSPEC_SUBUH_QB
+ UNSPEC_SUBUH_R_QB
+ UNSPEC_ADDQH_PH
+ UNSPEC_ADDQH_R_PH
+ UNSPEC_ADDQH_W
+ UNSPEC_ADDQH_R_W
+ UNSPEC_SUBQH_PH
+ UNSPEC_SUBQH_R_PH
+ UNSPEC_SUBQH_W
+ UNSPEC_SUBQH_R_W
+ UNSPEC_DPAX_W_PH
+ UNSPEC_DPSX_W_PH
+ UNSPEC_DPAQX_S_W_PH
+ UNSPEC_DPAQX_SA_W_PH
+ UNSPEC_DPSQX_S_W_PH
+ UNSPEC_DPSQX_SA_W_PH
+])
+
+(define_insn "mips_absq_s_qb"
+ [(parallel
+ [(set (match_operand:V4QI 0 "register_operand" "=d")
+ (unspec:V4QI [(match_operand:V4QI 1 "reg_or_0_operand" "dYG")]
+ UNSPEC_ABSQ_S_QB))
+ (set (reg:CCDSP CCDSP_OU_REGNUM)
+ (unspec:CCDSP [(match_dup 1)] UNSPEC_ABSQ_S_QB))])]
+ "ISA_HAS_DSPR2"
+ "absq_s.qb\t%0,%z1"
+ [(set_attr "type" "dspalusat")
+ (set_attr "mode" "SI")])
+
+(define_insn "mips_addu_ph"
+ [(parallel
+ [(set (match_operand:V2HI 0 "register_operand" "=d")
+ (plus:V2HI (match_operand:V2HI 1 "reg_or_0_operand" "dYG")
+ (match_operand:V2HI 2 "reg_or_0_operand" "dYG")))
+ (set (reg:CCDSP CCDSP_OU_REGNUM)
+ (unspec:CCDSP [(match_dup 1) (match_dup 2)] UNSPEC_ADDU_PH))])]
+ "ISA_HAS_DSPR2"
+ "addu.ph\t%0,%z1,%z2"
+ [(set_attr "type" "dspalu")
+ (set_attr "mode" "SI")])
+
+(define_insn "mips_addu_s_ph"
+ [(parallel
+ [(set (match_operand:V2HI 0 "register_operand" "=d")
+ (unspec:V2HI [(match_operand:V2HI 1 "reg_or_0_operand" "dYG")
+ (match_operand:V2HI 2 "reg_or_0_operand" "dYG")]
+ UNSPEC_ADDU_S_PH))
+ (set (reg:CCDSP CCDSP_OU_REGNUM)
+ (unspec:CCDSP [(match_dup 1) (match_dup 2)] UNSPEC_ADDU_S_PH))])]
+ "ISA_HAS_DSPR2"
+ "addu_s.ph\t%0,%z1,%z2"
+ [(set_attr "type" "dspalusat")
+ (set_attr "mode" "SI")])
+
+(define_insn "mips_adduh_qb"
+ [(set (match_operand:V4QI 0 "register_operand" "=d")
+ (unspec:V4QI [(match_operand:V4QI 1 "reg_or_0_operand" "dYG")
+ (match_operand:V4QI 2 "reg_or_0_operand" "dYG")]
+ UNSPEC_ADDUH_QB))]
+ "ISA_HAS_DSPR2"
+ "adduh.qb\t%0,%z1,%z2"
+ [(set_attr "type" "dspalu")
+ (set_attr "mode" "SI")])
+
+(define_insn "mips_adduh_r_qb"
+ [(set (match_operand:V4QI 0 "register_operand" "=d")
+ (unspec:V4QI [(match_operand:V4QI 1 "reg_or_0_operand" "dYG")
+ (match_operand:V4QI 2 "reg_or_0_operand" "dYG")]
+ UNSPEC_ADDUH_R_QB))]
+ "ISA_HAS_DSPR2"
+ "adduh_r.qb\t%0,%z1,%z2"
+ [(set_attr "type" "dspalusat")
+ (set_attr "mode" "SI")])
+
+;; APPEND: shift %0 left by the 5-bit immediate in operand 3 and insert
+;; that many low-order bits of %2.  Operand 3 must be reduced to the
+;; architectural 0..31 range before the instruction is printed.
+(define_insn "mips_append"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (unspec:SI [(match_operand:SI 1 "register_operand" "0")
+ (match_operand:SI 2 "reg_or_0_operand" "dJ")
+ (match_operand:SI 3 "const_int_operand" "n")]
+ UNSPEC_APPEND))]
+ "ISA_HAS_DSPR2"
+{
+ /* Bug fix: mask the shift amount itself (operand 3).  The old code
+    tested operand 3 but rewrote operand 2, which both clobbered the
+    register operand's RTL and emitted the out-of-range immediate
+    unchanged.  mips_prepend below shows the intended form.  */
+ if (INTVAL (operands[3]) & ~(unsigned HOST_WIDE_INT) 31)
+ operands[3] = GEN_INT (INTVAL (operands[3]) & 31);
+ return "append\t%0,%z2,%3";
+}
+ [(set_attr "type" "dspalu")
+ (set_attr "mode" "SI")])
+
+;; BALIGN: byte-align %0 using %2; operand 3 is a 2-bit byte position
+;; (0..3) and must be masked into range before printing.
+(define_insn "mips_balign"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (unspec:SI [(match_operand:SI 1 "register_operand" "0")
+ (match_operand:SI 2 "reg_or_0_operand" "dJ")
+ (match_operand:SI 3 "const_int_operand" "n")]
+ UNSPEC_BALIGN))]
+ "ISA_HAS_DSPR2"
+{
+ /* Bug fix: mask operand 3 (the byte position), not operand 2.
+    The old code rewrote the register operand and left an illegal
+    immediate in the emitted instruction.  */
+ if (INTVAL (operands[3]) & ~(unsigned HOST_WIDE_INT) 3)
+ operands[3] = GEN_INT (INTVAL (operands[3]) & 3);
+ return "balign\t%0,%z2,%3";
+}
+ [(set_attr "type" "dspalu")
+ (set_attr "mode" "SI")])
+
+(define_insn "mips_cmpgdu_eq_qb"
+ [(parallel
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (unspec:SI [(match_operand:V4QI 1 "reg_or_0_operand" "dYG")
+ (match_operand:V4QI 2 "reg_or_0_operand" "dYG")]
+ UNSPEC_CMPGDU_EQ_QB))
+ (set (reg:CCDSP CCDSP_CC_REGNUM)
+ (unspec:CCDSP [(match_dup 1) (match_dup 2)
+ (reg:CCDSP CCDSP_CC_REGNUM)]
+ UNSPEC_CMPGDU_EQ_QB))])]
+ "ISA_HAS_DSPR2"
+ "cmpgdu.eq.qb\t%0,%z1,%z2"
+ [(set_attr "type" "dspalu")
+ (set_attr "mode" "SI")])
+
+(define_insn "mips_cmpgdu_lt_qb"
+ [(parallel
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (unspec:SI [(match_operand:V4QI 1 "reg_or_0_operand" "dYG")
+ (match_operand:V4QI 2 "reg_or_0_operand" "dYG")]
+ UNSPEC_CMPGDU_LT_QB))
+ (set (reg:CCDSP CCDSP_CC_REGNUM)
+ (unspec:CCDSP [(match_dup 1) (match_dup 2)
+ (reg:CCDSP CCDSP_CC_REGNUM)]
+ UNSPEC_CMPGDU_LT_QB))])]
+ "ISA_HAS_DSPR2"
+ "cmpgdu.lt.qb\t%0,%z1,%z2"
+ [(set_attr "type" "dspalu")
+ (set_attr "mode" "SI")])
+
+(define_insn "mips_cmpgdu_le_qb"
+ [(parallel
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (unspec:SI [(match_operand:V4QI 1 "reg_or_0_operand" "dYG")
+ (match_operand:V4QI 2 "reg_or_0_operand" "dYG")]
+ UNSPEC_CMPGDU_LE_QB))
+ (set (reg:CCDSP CCDSP_CC_REGNUM)
+ (unspec:CCDSP [(match_dup 1) (match_dup 2)
+ (reg:CCDSP CCDSP_CC_REGNUM)]
+ UNSPEC_CMPGDU_LE_QB))])]
+ "ISA_HAS_DSPR2"
+ "cmpgdu.le.qb\t%0,%z1,%z2"
+ [(set_attr "type" "dspalu")
+ (set_attr "mode" "SI")])
+
+(define_insn "mips_dpa_w_ph"
+ [(set (match_operand:DI 0 "register_operand" "=a")
+ (unspec:DI [(match_operand:DI 1 "register_operand" "0")
+ (match_operand:V2HI 2 "reg_or_0_operand" "dYG")
+ (match_operand:V2HI 3 "reg_or_0_operand" "dYG")]
+ UNSPEC_DPA_W_PH))]
+ "ISA_HAS_DSPR2 && !TARGET_64BIT"
+ "dpa.w.ph\t%q0,%z2,%z3"
+ [(set_attr "type" "dspmac")
+ (set_attr "accum_in" "1")
+ (set_attr "mode" "SI")])
+
+(define_insn "mips_dps_w_ph"
+ [(set (match_operand:DI 0 "register_operand" "=a")
+ (unspec:DI [(match_operand:DI 1 "register_operand" "0")
+ (match_operand:V2HI 2 "reg_or_0_operand" "dYG")
+ (match_operand:V2HI 3 "reg_or_0_operand" "dYG")]
+ UNSPEC_DPS_W_PH))]
+ "ISA_HAS_DSPR2 && !TARGET_64BIT"
+ "dps.w.ph\t%q0,%z2,%z3"
+ [(set_attr "type" "dspmac")
+ (set_attr "accum_in" "1")
+ (set_attr "mode" "SI")])
+
+(define_insn "mulv2hi3"
+ [(parallel
+ [(set (match_operand:V2HI 0 "register_operand" "=d")
+ (mult:V2HI (match_operand:V2HI 1 "register_operand" "d")
+ (match_operand:V2HI 2 "register_operand" "d")))
+ (set (reg:CCDSP CCDSP_OU_REGNUM)
+ (unspec:CCDSP [(match_dup 1) (match_dup 2)] UNSPEC_MUL_PH))
+ (clobber (match_scratch:DI 3 "=x"))])]
+ "ISA_HAS_DSPR2"
+ "mul.ph\t%0,%1,%2"
+ [(set_attr "type" "imul3")
+ (set_attr "mode" "SI")])
+
+(define_insn "mips_mul_s_ph"
+ [(parallel
+ [(set (match_operand:V2HI 0 "register_operand" "=d")
+ (unspec:V2HI [(match_operand:V2HI 1 "reg_or_0_operand" "dYG")
+ (match_operand:V2HI 2 "reg_or_0_operand" "dYG")]
+ UNSPEC_MUL_S_PH))
+ (set (reg:CCDSP CCDSP_OU_REGNUM)
+ (unspec:CCDSP [(match_dup 1) (match_dup 2)] UNSPEC_MUL_S_PH))
+ (clobber (match_scratch:DI 3 "=x"))])]
+ "ISA_HAS_DSPR2"
+ "mul_s.ph\t%0,%z1,%z2"
+ [(set_attr "type" "imul3")
+ (set_attr "mode" "SI")])
+
+(define_insn "mips_mulq_rs_w"
+ [(parallel
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (unspec:SI [(match_operand:SI 1 "reg_or_0_operand" "dJ")
+ (match_operand:SI 2 "reg_or_0_operand" "dJ")]
+ UNSPEC_MULQ_RS_W))
+ (set (reg:CCDSP CCDSP_OU_REGNUM)
+ (unspec:CCDSP [(match_dup 1) (match_dup 2)] UNSPEC_MULQ_RS_W))
+ (clobber (match_scratch:DI 3 "=x"))])]
+ "ISA_HAS_DSPR2"
+ "mulq_rs.w\t%0,%z1,%z2"
+ [(set_attr "type" "imul3")
+ (set_attr "mode" "SI")])
+
+(define_insn "mips_mulq_s_ph"
+ [(parallel
+ [(set (match_operand:V2HI 0 "register_operand" "=d")
+ (unspec:V2HI [(match_operand:V2HI 1 "reg_or_0_operand" "dYG")
+ (match_operand:V2HI 2 "reg_or_0_operand" "dYG")]
+ UNSPEC_MULQ_S_PH))
+ (set (reg:CCDSP CCDSP_OU_REGNUM)
+ (unspec:CCDSP [(match_dup 1) (match_dup 2)] UNSPEC_MULQ_S_PH))
+ (clobber (match_scratch:DI 3 "=x"))])]
+ "ISA_HAS_DSPR2"
+ "mulq_s.ph\t%0,%z1,%z2"
+ [(set_attr "type" "imul3")
+ (set_attr "mode" "SI")])
+
+(define_insn "mips_mulq_s_w"
+ [(parallel
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (unspec:SI [(match_operand:SI 1 "reg_or_0_operand" "dJ")
+ (match_operand:SI 2 "reg_or_0_operand" "dJ")]
+ UNSPEC_MULQ_S_W))
+ (set (reg:CCDSP CCDSP_OU_REGNUM)
+ (unspec:CCDSP [(match_dup 1) (match_dup 2)] UNSPEC_MULQ_S_W))
+ (clobber (match_scratch:DI 3 "=x"))])]
+ "ISA_HAS_DSPR2"
+ "mulq_s.w\t%0,%z1,%z2"
+ [(set_attr "type" "imul3")
+ (set_attr "mode" "SI")])
+
+(define_insn "mips_mulsa_w_ph"
+ [(set (match_operand:DI 0 "register_operand" "=a")
+ (unspec:DI [(match_operand:DI 1 "register_operand" "0")
+ (match_operand:V2HI 2 "reg_or_0_operand" "dYG")
+ (match_operand:V2HI 3 "reg_or_0_operand" "dYG")]
+ UNSPEC_MULSA_W_PH))]
+ "ISA_HAS_DSPR2 && !TARGET_64BIT"
+ "mulsa.w.ph\t%q0,%z2,%z3"
+ [(set_attr "type" "dspmac")
+ (set_attr "accum_in" "1")
+ (set_attr "mode" "SI")])
+
+(define_insn "mips_precr_qb_ph"
+ [(set (match_operand:V4QI 0 "register_operand" "=d")
+ (unspec:V4QI [(match_operand:V2HI 1 "reg_or_0_operand" "dYG")
+ (match_operand:V2HI 2 "reg_or_0_operand" "dYG")]
+ UNSPEC_PRECR_QB_PH))]
+ "ISA_HAS_DSPR2"
+ "precr.qb.ph\t%0,%z1,%z2"
+ [(set_attr "type" "dspalu")
+ (set_attr "mode" "SI")])
+
+;; PRECR_SRA.PH.W: precision-reduce %1/%2 to packed halves after an
+;; arithmetic right shift by the 5-bit immediate in operand 3.
+(define_insn "mips_precr_sra_ph_w"
+ [(set (match_operand:V2HI 0 "register_operand" "=d")
+ (unspec:V2HI [(match_operand:SI 1 "register_operand" "0")
+ (match_operand:SI 2 "reg_or_0_operand" "dJ")
+ (match_operand:SI 3 "const_int_operand" "n")]
+ UNSPEC_PRECR_SRA_PH_W))]
+ "ISA_HAS_DSPR2"
+{
+ /* Bug fix: reduce the shift amount (operand 3) into 0..31; the old
+    code rewrote operand 2 instead, clobbering the register operand
+    while leaving the out-of-range immediate in the output.  */
+ if (INTVAL (operands[3]) & ~(unsigned HOST_WIDE_INT) 31)
+ operands[3] = GEN_INT (INTVAL (operands[3]) & 31);
+ return "precr_sra.ph.w\t%0,%z2,%3";
+}
+ [(set_attr "type" "dspalu")
+ (set_attr "mode" "SI")])
+
+(define_insn "mips_precr_sra_r_ph_w"
+ [(set (match_operand:V2HI 0 "register_operand" "=d")
+ (unspec:V2HI [(match_operand:SI 1 "register_operand" "0")
+ (match_operand:SI 2 "reg_or_0_operand" "dJ")
+ (match_operand:SI 3 "const_int_operand" "n")]
+ UNSPEC_PRECR_SRA_R_PH_W))]
+ "ISA_HAS_DSPR2"
+{
+ if (INTVAL (operands[3]) & ~(unsigned HOST_WIDE_INT) 31)
+ operands[2] = GEN_INT (INTVAL (operands[2]) & 31);
+ return "precr_sra_r.ph.w\t%0,%z2,%3";
+}
+ [(set_attr "type" "dspalu")
+ (set_attr "mode" "SI")])
+
+(define_insn "mips_prepend"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (unspec:SI [(match_operand:SI 1 "register_operand" "0")
+ (match_operand:SI 2 "reg_or_0_operand" "dJ")
+ (match_operand:SI 3 "const_int_operand" "n")]
+ UNSPEC_PREPEND))]
+ "ISA_HAS_DSPR2"
+{
+ if (INTVAL (operands[3]) & ~(unsigned HOST_WIDE_INT) 31)
+ operands[3] = GEN_INT (INTVAL (operands[3]) & 31);
+ return "prepend\t%0,%z2,%3";
+}
+ [(set_attr "type" "dspalu")
+ (set_attr "mode" "SI")])
+
+(define_insn "mips_shra_qb"
+ [(set (match_operand:V4QI 0 "register_operand" "=d,d")
+ (unspec:V4QI [(match_operand:V4QI 1 "reg_or_0_operand" "dYG,dYG")
+ (match_operand:SI 2 "arith_operand" "I,d")]
+ UNSPEC_SHRA_QB))]
+ "ISA_HAS_DSPR2"
+{
+ if (which_alternative == 0)
+ {
+ if (INTVAL (operands[2]) & ~(unsigned HOST_WIDE_INT) 7)
+ operands[2] = GEN_INT (INTVAL (operands[2]) & 7);
+ return "shra.qb\t%0,%z1,%2";
+ }
+ return "shrav.qb\t%0,%z1,%2";
+}
+ [(set_attr "type" "dspalu")
+ (set_attr "mode" "SI")])
+
+
+(define_insn "mips_shra_r_qb"
+ [(set (match_operand:V4QI 0 "register_operand" "=d,d")
+ (unspec:V4QI [(match_operand:V4QI 1 "reg_or_0_operand" "dYG,dYG")
+ (match_operand:SI 2 "arith_operand" "I,d")]
+ UNSPEC_SHRA_R_QB))]
+ "ISA_HAS_DSPR2"
+{
+ if (which_alternative == 0)
+ {
+ if (INTVAL (operands[2]) & ~(unsigned HOST_WIDE_INT) 7)
+ operands[2] = GEN_INT (INTVAL (operands[2]) & 7);
+ return "shra_r.qb\t%0,%z1,%2";
+ }
+ return "shrav_r.qb\t%0,%z1,%2";
+}
+ [(set_attr "type" "dspalu")
+ (set_attr "mode" "SI")])
+
+(define_insn "mips_shrl_ph"
+ [(set (match_operand:V2HI 0 "register_operand" "=d,d")
+ (unspec:V2HI [(match_operand:V2HI 1 "reg_or_0_operand" "dYG,dYG")
+ (match_operand:SI 2 "arith_operand" "I,d")]
+ UNSPEC_SHRL_PH))]
+ "ISA_HAS_DSPR2"
+{
+ if (which_alternative == 0)
+ {
+ if (INTVAL (operands[2]) & ~(unsigned HOST_WIDE_INT) 15)
+ operands[2] = GEN_INT (INTVAL (operands[2]) & 15);
+ return "shrl.ph\t%0,%z1,%2";
+ }
+ return "shrlv.ph\t%0,%z1,%2";
+}
+ [(set_attr "type" "dspalu")
+ (set_attr "mode" "SI")])
+
+(define_insn "mips_subu_ph"
+ [(parallel
+ [(set (match_operand:V2HI 0 "register_operand" "=d")
+ (unspec:V2HI [(match_operand:V2HI 1 "reg_or_0_operand" "dYG")
+ (match_operand:V2HI 2 "reg_or_0_operand" "dYG")]
+ UNSPEC_SUBU_PH))
+ (set (reg:CCDSP CCDSP_OU_REGNUM)
+ (unspec:CCDSP [(match_dup 1) (match_dup 2)] UNSPEC_SUBU_PH))])]
+ "ISA_HAS_DSPR2"
+ "subu.ph\t%0,%z1,%z2"
+ [(set_attr "type" "dspalu")
+ (set_attr "mode" "SI")])
+
+(define_insn "mips_subu_s_ph"
+ [(parallel
+ [(set (match_operand:V2HI 0 "register_operand" "=d")
+ (unspec:V2HI [(match_operand:V2HI 1 "reg_or_0_operand" "dYG")
+ (match_operand:V2HI 2 "reg_or_0_operand" "dYG")]
+ UNSPEC_SUBU_S_PH))
+ (set (reg:CCDSP CCDSP_OU_REGNUM)
+ (unspec:CCDSP [(match_dup 1) (match_dup 2)] UNSPEC_SUBU_S_PH))])]
+ "ISA_HAS_DSPR2"
+ "subu_s.ph\t%0,%z1,%z2"
+ [(set_attr "type" "dspalusat")
+ (set_attr "mode" "SI")])
+
+(define_insn "mips_subuh_qb"
+ [(set (match_operand:V4QI 0 "register_operand" "=d")
+ (unspec:V4QI [(match_operand:V4QI 1 "reg_or_0_operand" "dYG")
+ (match_operand:V4QI 2 "reg_or_0_operand" "dYG")]
+ UNSPEC_SUBUH_QB))]
+ "ISA_HAS_DSPR2"
+ "subuh.qb\t%0,%z1,%z2"
+ [(set_attr "type" "dspalu")
+ (set_attr "mode" "SI")])
+
+(define_insn "mips_subuh_r_qb"
+ [(set (match_operand:V4QI 0 "register_operand" "=d")
+ (unspec:V4QI [(match_operand:V4QI 1 "reg_or_0_operand" "dYG")
+ (match_operand:V4QI 2 "reg_or_0_operand" "dYG")]
+ UNSPEC_SUBUH_R_QB))]
+ "ISA_HAS_DSPR2"
+ "subuh_r.qb\t%0,%z1,%z2"
+ [(set_attr "type" "dspalu")
+ (set_attr "mode" "SI")])
+
+(define_insn "mips_addqh_ph"
+ [(set (match_operand:V2HI 0 "register_operand" "=d")
+ (unspec:V2HI [(match_operand:V2HI 1 "reg_or_0_operand" "dYG")
+ (match_operand:V2HI 2 "reg_or_0_operand" "dYG")]
+ UNSPEC_ADDQH_PH))]
+ "ISA_HAS_DSPR2"
+ "addqh.ph\t%0,%z1,%z2"
+ [(set_attr "type" "dspalu")
+ (set_attr "mode" "SI")])
+
+(define_insn "mips_addqh_r_ph"
+ [(set (match_operand:V2HI 0 "register_operand" "=d")
+ (unspec:V2HI [(match_operand:V2HI 1 "reg_or_0_operand" "dYG")
+ (match_operand:V2HI 2 "reg_or_0_operand" "dYG")]
+ UNSPEC_ADDQH_R_PH))]
+ "ISA_HAS_DSPR2"
+ "addqh_r.ph\t%0,%z1,%z2"
+ [(set_attr "type" "dspalu")
+ (set_attr "mode" "SI")])
+
+(define_insn "mips_addqh_w"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (unspec:SI [(match_operand:SI 1 "reg_or_0_operand" "dJ")
+ (match_operand:SI 2 "reg_or_0_operand" "dJ")]
+ UNSPEC_ADDQH_W))]
+ "ISA_HAS_DSPR2"
+ "addqh.w\t%0,%z1,%z2"
+ [(set_attr "type" "dspalu")
+ (set_attr "mode" "SI")])
+
+(define_insn "mips_addqh_r_w"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (unspec:SI [(match_operand:SI 1 "reg_or_0_operand" "dJ")
+ (match_operand:SI 2 "reg_or_0_operand" "dJ")]
+ UNSPEC_ADDQH_R_W))]
+ "ISA_HAS_DSPR2"
+ "addqh_r.w\t%0,%z1,%z2"
+ [(set_attr "type" "dspalu")
+ (set_attr "mode" "SI")])
+
+(define_insn "mips_subqh_ph"
+ [(set (match_operand:V2HI 0 "register_operand" "=d")
+ (unspec:V2HI [(match_operand:V2HI 1 "reg_or_0_operand" "dYG")
+ (match_operand:V2HI 2 "reg_or_0_operand" "dYG")]
+ UNSPEC_SUBQH_PH))]
+ "ISA_HAS_DSPR2"
+ "subqh.ph\t%0,%z1,%z2"
+ [(set_attr "type" "dspalu")
+ (set_attr "mode" "SI")])
+
+(define_insn "mips_subqh_r_ph"
+ [(set (match_operand:V2HI 0 "register_operand" "=d")
+ (unspec:V2HI [(match_operand:V2HI 1 "reg_or_0_operand" "dYG")
+ (match_operand:V2HI 2 "reg_or_0_operand" "dYG")]
+ UNSPEC_SUBQH_R_PH))]
+ "ISA_HAS_DSPR2"
+ "subqh_r.ph\t%0,%z1,%z2"
+ [(set_attr "type" "dspalu")
+ (set_attr "mode" "SI")])
+
+(define_insn "mips_subqh_w"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (unspec:SI [(match_operand:SI 1 "reg_or_0_operand" "dJ")
+ (match_operand:SI 2 "reg_or_0_operand" "dJ")]
+ UNSPEC_SUBQH_W))]
+ "ISA_HAS_DSPR2"
+ "subqh.w\t%0,%z1,%z2"
+ [(set_attr "type" "dspalu")
+ (set_attr "mode" "SI")])
+
+(define_insn "mips_subqh_r_w"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (unspec:SI [(match_operand:SI 1 "reg_or_0_operand" "dJ")
+ (match_operand:SI 2 "reg_or_0_operand" "dJ")]
+ UNSPEC_SUBQH_R_W))]
+ "ISA_HAS_DSPR2"
+ "subqh_r.w\t%0,%z1,%z2"
+ [(set_attr "type" "dspalu")
+ (set_attr "mode" "SI")])
+
+(define_insn "mips_dpax_w_ph"
+ [(set (match_operand:DI 0 "register_operand" "=a")
+ (unspec:DI [(match_operand:DI 1 "register_operand" "0")
+ (match_operand:V2HI 2 "reg_or_0_operand" "dYG")
+ (match_operand:V2HI 3 "reg_or_0_operand" "dYG")]
+ UNSPEC_DPAX_W_PH))]
+ "ISA_HAS_DSPR2 && !TARGET_64BIT"
+ "dpax.w.ph\t%q0,%z2,%z3"
+ [(set_attr "type" "dspmac")
+ (set_attr "accum_in" "1")
+ (set_attr "mode" "SI")])
+
+(define_insn "mips_dpsx_w_ph"
+ [(set (match_operand:DI 0 "register_operand" "=a")
+ (unspec:DI [(match_operand:DI 1 "register_operand" "0")
+ (match_operand:V2HI 2 "reg_or_0_operand" "dYG")
+ (match_operand:V2HI 3 "reg_or_0_operand" "dYG")]
+ UNSPEC_DPSX_W_PH))]
+ "ISA_HAS_DSPR2 && !TARGET_64BIT"
+ "dpsx.w.ph\t%q0,%z2,%z3"
+ [(set_attr "type" "dspmac")
+ (set_attr "accum_in" "1")
+ (set_attr "mode" "SI")])
+
+(define_insn "mips_dpaqx_s_w_ph"
+ [(parallel
+ [(set (match_operand:DI 0 "register_operand" "=a")
+ (unspec:DI [(match_operand:DI 1 "register_operand" "0")
+ (match_operand:V2HI 2 "reg_or_0_operand" "dYG")
+ (match_operand:V2HI 3 "reg_or_0_operand" "dYG")]
+ UNSPEC_DPAQX_S_W_PH))
+ (set (reg:CCDSP CCDSP_OU_REGNUM)
+ (unspec:CCDSP [(match_dup 1) (match_dup 2) (match_dup 3)]
+ UNSPEC_DPAQX_S_W_PH))])]
+ "ISA_HAS_DSPR2 && !TARGET_64BIT"
+ "dpaqx_s.w.ph\t%q0,%z2,%z3"
+ [(set_attr "type" "dspmac")
+ (set_attr "accum_in" "1")
+ (set_attr "mode" "SI")])
+
+(define_insn "mips_dpaqx_sa_w_ph"
+ [(parallel
+ [(set (match_operand:DI 0 "register_operand" "=a")
+ (unspec:DI [(match_operand:DI 1 "register_operand" "0")
+ (match_operand:V2HI 2 "reg_or_0_operand" "dYG")
+ (match_operand:V2HI 3 "reg_or_0_operand" "dYG")]
+ UNSPEC_DPAQX_SA_W_PH))
+ (set (reg:CCDSP CCDSP_OU_REGNUM)
+ (unspec:CCDSP [(match_dup 1) (match_dup 2) (match_dup 3)]
+ UNSPEC_DPAQX_SA_W_PH))])]
+ "ISA_HAS_DSPR2 && !TARGET_64BIT"
+ "dpaqx_sa.w.ph\t%q0,%z2,%z3"
+ [(set_attr "type" "dspmacsat")
+ (set_attr "accum_in" "1")
+ (set_attr "mode" "SI")])
+
+(define_insn "mips_dpsqx_s_w_ph"
+ [(parallel
+ [(set (match_operand:DI 0 "register_operand" "=a")
+ (unspec:DI [(match_operand:DI 1 "register_operand" "0")
+ (match_operand:V2HI 2 "reg_or_0_operand" "dYG")
+ (match_operand:V2HI 3 "reg_or_0_operand" "dYG")]
+ UNSPEC_DPSQX_S_W_PH))
+ (set (reg:CCDSP CCDSP_OU_REGNUM)
+ (unspec:CCDSP [(match_dup 1) (match_dup 2) (match_dup 3)]
+ UNSPEC_DPSQX_S_W_PH))])]
+ "ISA_HAS_DSPR2 && !TARGET_64BIT"
+ "dpsqx_s.w.ph\t%q0,%z2,%z3"
+ [(set_attr "type" "dspmac")
+ (set_attr "accum_in" "1")
+ (set_attr "mode" "SI")])
+
+(define_insn "mips_dpsqx_sa_w_ph"
+ [(parallel
+ [(set (match_operand:DI 0 "register_operand" "=a")
+ (unspec:DI [(match_operand:DI 1 "register_operand" "0")
+ (match_operand:V2HI 2 "reg_or_0_operand" "dYG")
+ (match_operand:V2HI 3 "reg_or_0_operand" "dYG")]
+ UNSPEC_DPSQX_SA_W_PH))
+ (set (reg:CCDSP CCDSP_OU_REGNUM)
+ (unspec:CCDSP [(match_dup 1) (match_dup 2) (match_dup 3)]
+ UNSPEC_DPSQX_SA_W_PH))])]
+ "ISA_HAS_DSPR2 && !TARGET_64BIT"
+ "dpsqx_sa.w.ph\t%q0,%z2,%z3"
+ [(set_attr "type" "dspmacsat")
+ (set_attr "accum_in" "1")
+ (set_attr "mode" "SI")])
diff --git a/gcc-4.9/gcc/config/mips/mips-fixed.md b/gcc-4.9/gcc/config/mips/mips-fixed.md
new file mode 100644
index 000000000..dbb60f4ca
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/mips-fixed.md
@@ -0,0 +1,156 @@
+;; Copyright (C) 2007-2014 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+;;
+;; This file contains MIPS instructions that support fixed-point operations.
+
+;; All supported fixed-point modes
+(define_mode_iterator FIXED [(QQ "") (HQ "") (SQ "") (DQ "TARGET_64BIT")
+ (UQQ "") (UHQ "") (USQ "") (UDQ "TARGET_64BIT")
+ (HA "") (SA "") (DA "TARGET_64BIT")
+ (UHA "") (USA "") (UDA "TARGET_64BIT")])
+
+;; For signed add/sub with saturation
+(define_mode_iterator ADDSUB [(HQ "") (SQ "") (HA "") (SA "") (V2HQ "")
+ (V2HA "")])
+(define_mode_attr addsubfmt [(HQ "ph") (SQ "w") (HA "ph") (SA "w")
+ (V2HQ "ph") (V2HA "ph")])
+
+;; For unsigned add/sub with saturation
+(define_mode_iterator UADDSUB [(UQQ "ISA_HAS_DSP") (UHQ "ISA_HAS_DSPR2")
+ (UHA "ISA_HAS_DSPR2") (V4UQQ "ISA_HAS_DSP")
+ (V2UHQ "ISA_HAS_DSPR2") (V2UHA "ISA_HAS_DSPR2")])
+(define_mode_attr uaddsubfmt [(UQQ "qb") (UHQ "ph") (UHA "ph")
+ (V4UQQ "qb") (V2UHQ "ph") (V2UHA "ph")])
+
+;; For signed multiplication with saturation
+(define_mode_iterator MULQ [(V2HQ "ISA_HAS_DSP") (HQ "ISA_HAS_DSP")
+ (SQ "ISA_HAS_DSPR2")])
+(define_mode_attr mulqfmt [(V2HQ "ph") (HQ "ph") (SQ "w")])
+
+(define_insn "add<mode>3"
+ [(set (match_operand:FIXED 0 "register_operand" "=d")
+ (plus:FIXED (match_operand:FIXED 1 "register_operand" "d")
+ (match_operand:FIXED 2 "register_operand" "d")))]
+ ""
+ "<d>addu\t%0,%1,%2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "<IMODE>")])
+
+(define_insn "usadd<mode>3"
+ [(parallel
+ [(set (match_operand:UADDSUB 0 "register_operand" "=d")
+ (us_plus:UADDSUB (match_operand:UADDSUB 1 "register_operand" "d")
+ (match_operand:UADDSUB 2 "register_operand" "d")))
+ (set (reg:CCDSP CCDSP_OU_REGNUM)
+ (unspec:CCDSP [(match_dup 1) (match_dup 2)] UNSPEC_ADDQ_S))])]
+ ""
+ "addu_s.<uaddsubfmt>\t%0,%1,%2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "<IMODE>")])
+
+(define_insn "ssadd<mode>3"
+ [(parallel
+ [(set (match_operand:ADDSUB 0 "register_operand" "=d")
+ (ss_plus:ADDSUB (match_operand:ADDSUB 1 "register_operand" "d")
+ (match_operand:ADDSUB 2 "register_operand" "d")))
+ (set (reg:CCDSP CCDSP_OU_REGNUM)
+ (unspec:CCDSP [(match_dup 1) (match_dup 2)] UNSPEC_ADDQ_S))])]
+ "ISA_HAS_DSP"
+ "addq_s.<addsubfmt>\t%0,%1,%2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "<IMODE>")])
+
+(define_insn "sub<mode>3"
+ [(set (match_operand:FIXED 0 "register_operand" "=d")
+ (minus:FIXED (match_operand:FIXED 1 "register_operand" "d")
+ (match_operand:FIXED 2 "register_operand" "d")))]
+ ""
+ "<d>subu\t%0,%1,%2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "<IMODE>")])
+
+(define_insn "ussub<mode>3"
+ [(parallel
+ [(set (match_operand:UADDSUB 0 "register_operand" "=d")
+ (us_minus:UADDSUB (match_operand:UADDSUB 1 "register_operand" "d")
+ (match_operand:UADDSUB 2 "register_operand" "d")))
+ (set (reg:CCDSP CCDSP_OU_REGNUM)
+ (unspec:CCDSP [(match_dup 1) (match_dup 2)] UNSPEC_SUBQ_S))])]
+ ""
+ "subu_s.<uaddsubfmt>\t%0,%1,%2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "<IMODE>")])
+
+(define_insn "sssub<mode>3"
+ [(parallel
+ [(set (match_operand:ADDSUB 0 "register_operand" "=d")
+ (ss_minus:ADDSUB (match_operand:ADDSUB 1 "register_operand" "d")
+ (match_operand:ADDSUB 2 "register_operand" "d")))
+ (set (reg:CCDSP CCDSP_OU_REGNUM)
+ (unspec:CCDSP [(match_dup 1) (match_dup 2)] UNSPEC_SUBQ_S))])]
+ "ISA_HAS_DSP"
+ "subq_s.<addsubfmt>\t%0,%1,%2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "<IMODE>")])
+
+(define_insn "ssmul<mode>3"
+ [(parallel
+ [(set (match_operand:MULQ 0 "register_operand" "=d")
+ (ss_mult:MULQ (match_operand:MULQ 1 "register_operand" "d")
+ (match_operand:MULQ 2 "register_operand" "d")))
+ (set (reg:CCDSP CCDSP_OU_REGNUM)
+ (unspec:CCDSP [(match_dup 1) (match_dup 2)] UNSPEC_MULQ_RS_PH))
+ (clobber (match_scratch:DI 3 "=x"))])]
+ ""
+ "mulq_rs.<mulqfmt>\t%0,%1,%2"
+ [(set_attr "type" "imul3")
+ (set_attr "mode" "<IMODE>")])
+
+(define_insn "ssmaddsqdq4"
+ [(parallel
+ [(set (match_operand:DQ 0 "register_operand" "=a")
+ (ss_plus:DQ
+ (ss_mult:DQ (sat_fract:DQ (match_operand:SQ 1
+ "register_operand" "d"))
+ (sat_fract:DQ (match_operand:SQ 2
+ "register_operand" "d")))
+ (match_operand:DQ 3 "register_operand" "0")))
+ (set (reg:CCDSP CCDSP_OU_REGNUM)
+ (unspec:CCDSP [(match_dup 1) (match_dup 2) (match_dup 3)]
+ UNSPEC_DPAQ_SA_L_W))])]
+ "ISA_HAS_DSP && !TARGET_64BIT"
+ "dpaq_sa.l.w\t%q0,%1,%2"
+ [(set_attr "type" "imadd")
+ (set_attr "mode" "SI")])
+
+(define_insn "ssmsubsqdq4"
+ [(parallel
+ [(set (match_operand:DQ 0 "register_operand" "=a")
+ (ss_minus:DQ
+ (match_operand:DQ 3 "register_operand" "0")
+ (ss_mult:DQ (sat_fract:DQ (match_operand:SQ 1
+ "register_operand" "d"))
+ (sat_fract:DQ (match_operand:SQ 2
+ "register_operand" "d")))))
+ (set (reg:CCDSP CCDSP_OU_REGNUM)
+ (unspec:CCDSP [(match_dup 1) (match_dup 2) (match_dup 3)]
+ UNSPEC_DPSQ_SA_L_W))])]
+ "ISA_HAS_DSP && !TARGET_64BIT"
+ "dpsq_sa.l.w\t%q0,%1,%2"
+ [(set_attr "type" "imadd")
+ (set_attr "mode" "SI")])
diff --git a/gcc-4.9/gcc/config/mips/mips-ftypes.def b/gcc-4.9/gcc/config/mips/mips-ftypes.def
new file mode 100644
index 000000000..078a595a8
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/mips-ftypes.def
@@ -0,0 +1,129 @@
+/* Definitions of prototypes for MIPS built-in functions. -*- C -*-
+ Copyright (C) 2007-2014 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* Invoke DEF_MIPS_FTYPE (NARGS, LIST) for each prototype used by
+ MIPS built-in functions, where:
+
+ NARGS is the number of arguments.
+ LIST contains the return-type code followed by the codes for each
+ argument type.
+
+ Argument- and return-type codes are either modes or one of the following:
+
+ VOID for void_type_node
+ INT for integer_type_node
+ POINTER for ptr_type_node
+
+ (we don't use PTR because that's a ANSI-compatibillity macro).
+
+ Please keep this list lexicographically sorted by the LIST argument. */
+DEF_MIPS_FTYPE (1, (DF, DF))
+DEF_MIPS_FTYPE (2, (DF, DF, DF))
+
+DEF_MIPS_FTYPE (2, (DI, DI, DI))
+DEF_MIPS_FTYPE (2, (DI, DI, SI))
+DEF_MIPS_FTYPE (3, (DI, DI, SI, SI))
+DEF_MIPS_FTYPE (3, (DI, DI, USI, USI))
+DEF_MIPS_FTYPE (3, (DI, DI, V2HI, V2HI))
+DEF_MIPS_FTYPE (3, (DI, DI, V4QI, V4QI))
+DEF_MIPS_FTYPE (2, (DI, SI, SI))
+DEF_MIPS_FTYPE (2, (DI, USI, USI))
+
+DEF_MIPS_FTYPE (2, (INT, DF, DF))
+DEF_MIPS_FTYPE (2, (INT, SF, SF))
+DEF_MIPS_FTYPE (2, (INT, V2SF, V2SF))
+DEF_MIPS_FTYPE (4, (INT, V2SF, V2SF, V2SF, V2SF))
+
+DEF_MIPS_FTYPE (2, (SI, DI, SI))
+DEF_MIPS_FTYPE (2, (SI, POINTER, SI))
+DEF_MIPS_FTYPE (2, (DI, POINTER, SI))
+DEF_MIPS_FTYPE (1, (SI, SI))
+DEF_MIPS_FTYPE (2, (SI, SI, SI))
+DEF_MIPS_FTYPE (3, (SI, SI, SI, SI))
+DEF_MIPS_FTYPE (1, (SI, V2HI))
+DEF_MIPS_FTYPE (2, (SI, V2HI, V2HI))
+DEF_MIPS_FTYPE (1, (SI, V4QI))
+DEF_MIPS_FTYPE (2, (SI, V4QI, V4QI))
+DEF_MIPS_FTYPE (1, (SI, VOID))
+
+DEF_MIPS_FTYPE (1, (SF, SF))
+DEF_MIPS_FTYPE (2, (SF, SF, SF))
+DEF_MIPS_FTYPE (1, (SF, V2SF))
+
+DEF_MIPS_FTYPE (2, (UDI, UDI, UDI))
+DEF_MIPS_FTYPE (2, (UDI, UV2SI, UV2SI))
+
+DEF_MIPS_FTYPE (1, (USI, VOID))
+
+DEF_MIPS_FTYPE (2, (UV2SI, UV2SI, UQI))
+DEF_MIPS_FTYPE (2, (UV2SI, UV2SI, UV2SI))
+
+DEF_MIPS_FTYPE (2, (UV4HI, UV4HI, UQI))
+DEF_MIPS_FTYPE (2, (UV4HI, UV4HI, USI))
+DEF_MIPS_FTYPE (3, (UV4HI, UV4HI, UV4HI, UQI))
+DEF_MIPS_FTYPE (3, (UV4HI, UV4HI, UV4HI, USI))
+DEF_MIPS_FTYPE (2, (UV4HI, UV4HI, UV4HI))
+DEF_MIPS_FTYPE (1, (UV4HI, UV8QI))
+DEF_MIPS_FTYPE (2, (UV4HI, UV8QI, UV8QI))
+
+DEF_MIPS_FTYPE (2, (UV8QI, UV4HI, UV4HI))
+DEF_MIPS_FTYPE (1, (UV8QI, UV8QI))
+DEF_MIPS_FTYPE (2, (UV8QI, UV8QI, UV8QI))
+
+DEF_MIPS_FTYPE (1, (V2HI, SI))
+DEF_MIPS_FTYPE (2, (V2HI, SI, SI))
+DEF_MIPS_FTYPE (3, (V2HI, SI, SI, SI))
+DEF_MIPS_FTYPE (1, (V2HI, V2HI))
+DEF_MIPS_FTYPE (2, (V2HI, V2HI, SI))
+DEF_MIPS_FTYPE (2, (V2HI, V2HI, V2HI))
+DEF_MIPS_FTYPE (1, (V2HI, V4QI))
+DEF_MIPS_FTYPE (2, (V2HI, V4QI, V2HI))
+
+DEF_MIPS_FTYPE (2, (V2SF, SF, SF))
+DEF_MIPS_FTYPE (1, (V2SF, V2SF))
+DEF_MIPS_FTYPE (2, (V2SF, V2SF, V2SF))
+DEF_MIPS_FTYPE (3, (V2SF, V2SF, V2SF, INT))
+DEF_MIPS_FTYPE (4, (V2SF, V2SF, V2SF, V2SF, V2SF))
+
+DEF_MIPS_FTYPE (2, (V2SI, V2SI, UQI))
+DEF_MIPS_FTYPE (2, (V2SI, V2SI, V2SI))
+DEF_MIPS_FTYPE (2, (V2SI, V4HI, V4HI))
+
+DEF_MIPS_FTYPE (2, (V4HI, V2SI, V2SI))
+DEF_MIPS_FTYPE (2, (V4HI, V4HI, UQI))
+DEF_MIPS_FTYPE (2, (V4HI, V4HI, USI))
+DEF_MIPS_FTYPE (2, (V4HI, V4HI, V4HI))
+DEF_MIPS_FTYPE (3, (V4HI, V4HI, V4HI, UQI))
+DEF_MIPS_FTYPE (3, (V4HI, V4HI, V4HI, USI))
+
+DEF_MIPS_FTYPE (1, (V4QI, SI))
+DEF_MIPS_FTYPE (2, (V4QI, V2HI, V2HI))
+DEF_MIPS_FTYPE (1, (V4QI, V4QI))
+DEF_MIPS_FTYPE (2, (V4QI, V4QI, SI))
+DEF_MIPS_FTYPE (2, (V4QI, V4QI, V4QI))
+
+DEF_MIPS_FTYPE (2, (V8QI, V4HI, V4HI))
+DEF_MIPS_FTYPE (1, (V8QI, V8QI))
+DEF_MIPS_FTYPE (2, (V8QI, V8QI, V8QI))
+
+DEF_MIPS_FTYPE (2, (VOID, SI, CVPOINTER))
+DEF_MIPS_FTYPE (2, (VOID, SI, SI))
+DEF_MIPS_FTYPE (1, (VOID, USI))
+DEF_MIPS_FTYPE (2, (VOID, V2HI, V2HI))
+DEF_MIPS_FTYPE (2, (VOID, V4QI, V4QI))
diff --git a/gcc-4.9/gcc/config/mips/mips-modes.def b/gcc-4.9/gcc/config/mips/mips-modes.def
new file mode 100644
index 000000000..fa1d1e7d6
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/mips-modes.def
@@ -0,0 +1,48 @@
+/* MIPS extra machine modes.
+ Copyright (C) 2003-2014 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+FLOAT_MODE (TF, 16, ieee_quad_format);
+
+/* Vector modes. */
+VECTOR_MODES (INT, 4); /* V4QI V2HI */
+VECTOR_MODES (INT, 8); /* V8QI V4HI V2SI */
+VECTOR_MODES (FLOAT, 8); /* V4HF V2SF */
+
+/* Double-sized vector modes for vec_concat. */
+VECTOR_MODE (INT, QI, 16); /* V16QI */
+VECTOR_MODE (INT, HI, 8); /* V8HI */
+VECTOR_MODE (INT, SI, 4); /* V4SI */
+VECTOR_MODE (FLOAT, SF, 4); /* V4SF */
+
+VECTOR_MODES (FRACT, 4); /* V4QQ V2HQ */
+VECTOR_MODES (UFRACT, 4); /* V4UQQ V2UHQ */
+VECTOR_MODES (ACCUM, 4); /* V2HA */
+VECTOR_MODES (UACCUM, 4); /* V2UHA */
+
+/* Paired single comparison instructions use 2 or 4 CC. */
+CC_MODE (CCV2);
+ADJUST_BYTESIZE (CCV2, 8);
+ADJUST_ALIGNMENT (CCV2, 8);
+
+CC_MODE (CCV4);
+ADJUST_BYTESIZE (CCV4, 16);
+ADJUST_ALIGNMENT (CCV4, 16);
+
+/* For MIPS DSP control registers. */
+CC_MODE (CCDSP);
diff --git a/gcc-4.9/gcc/config/mips/mips-opts.h b/gcc-4.9/gcc/config/mips/mips-opts.h
new file mode 100644
index 000000000..be288d64c
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/mips-opts.h
@@ -0,0 +1,50 @@
+/* Definitions for option handling for MIPS.
+ Copyright (C) 1989-2014 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef MIPS_OPTS_H
+#define MIPS_OPTS_H
+
+/* Enumerates the setting of the -mcode-readable option. */
+enum mips_code_readable_setting {
+ CODE_READABLE_NO,
+ CODE_READABLE_PCREL,
+ CODE_READABLE_YES
+};
+
+/* Enumerates the setting of the -mabs and -mnan options. */
+enum mips_ieee_754_setting {
+ MIPS_IEEE_754_DEFAULT,
+ MIPS_IEEE_754_LEGACY,
+ MIPS_IEEE_754_2008
+};
+
+/* Enumerates the setting of the -mr10k-cache-barrier option. */
+enum mips_r10k_cache_barrier_setting {
+ R10K_CACHE_BARRIER_NONE,
+ R10K_CACHE_BARRIER_STORE,
+ R10K_CACHE_BARRIER_LOAD_STORE
+};
+
+/* No enumeration is defined to index the -march= values (entries in
+ mips_cpu_info_table), with the type int being used instead, but we
+ need to distinguish the special "from-abi" and "native" values. */
+#define MIPS_ARCH_OPTION_FROM_ABI -1
+#define MIPS_ARCH_OPTION_NATIVE -2
+
+#endif
diff --git a/gcc-4.9/gcc/config/mips/mips-protos.h b/gcc-4.9/gcc/config/mips/mips-protos.h
new file mode 100644
index 000000000..3d59b7b51
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/mips-protos.h
@@ -0,0 +1,362 @@
+/* Prototypes of target machine for GNU compiler. MIPS version.
+ Copyright (C) 1989-2014 Free Software Foundation, Inc.
+ Contributed by A. Lichnewsky (lich@inria.inria.fr).
+ Changed by Michael Meissner (meissner@osf.org).
+ 64-bit r4000 support by Ian Lance Taylor (ian@cygnus.com) and
+ Brendan Eich (brendan@microunity.com).
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_MIPS_PROTOS_H
+#define GCC_MIPS_PROTOS_H
+
+/* Describes how a symbol is used.
+
+ SYMBOL_CONTEXT_CALL
+ The symbol is used as the target of a call instruction.
+
+ SYMBOL_CONTEXT_LEA
+ The symbol is used in a load-address operation.
+
+ SYMBOL_CONTEXT_MEM
+ The symbol is used as the address in a MEM. */
+enum mips_symbol_context {
+ SYMBOL_CONTEXT_CALL,
+ SYMBOL_CONTEXT_LEA,
+ SYMBOL_CONTEXT_MEM
+};
+
+/* Classifies a SYMBOL_REF, LABEL_REF or UNSPEC address.
+
+ SYMBOL_ABSOLUTE
+ The symbol's value will be calculated using absolute relocations,
+ such as %hi and %lo.
+
+ SYMBOL_GP_RELATIVE
+ The symbol's value will be calculated by adding a 16-bit offset
+ from $gp.
+
+ SYMBOL_PC_RELATIVE
+ The symbol's value will be calculated using a MIPS16 PC-relative
+ calculation.
+
+ SYMBOL_GOT_PAGE_OFST
+ The symbol's value will be calculated by loading an address
+ from the GOT and then applying a 16-bit offset.
+
+ SYMBOL_GOT_DISP
+ The symbol's value will be loaded directly from the GOT.
+
+ SYMBOL_GOTOFF_PAGE
+ An UNSPEC wrapper around a SYMBOL_GOT_PAGE_OFST. It represents the
+ offset from _gp of the GOT entry.
+
+ SYMBOL_GOTOFF_DISP
+ An UNSPEC wrapper around a SYMBOL_GOT_DISP. It represents the
+ offset from _gp of the symbol's GOT entry.
+
+ SYMBOL_GOTOFF_CALL
+ Like SYMBOL_GOTOFF_DISP, but used when calling a global function.
+ The GOT entry is allowed to point to a stub rather than to the
+ function itself.
+
+ SYMBOL_GOTOFF_LOADGP
+ An UNSPEC wrapper around a function's address. It represents the
+ offset of _gp from the start of the function.
+
+ SYMBOL_TLS
+ A thread-local symbol.
+
+ SYMBOL_TLSGD
+ SYMBOL_TLSLDM
+ SYMBOL_DTPREL
+ SYMBOL_GOTTPREL
+ SYMBOL_TPREL
+ UNSPEC wrappers around SYMBOL_TLS, corresponding to the
+ thread-local storage relocation operators.
+
+ SYMBOL_64_HIGH
+ For a 64-bit symbolic address X, this is the value of
+ (%highest(X) << 16) + %higher(X).
+
+ SYMBOL_64_MID
+ For a 64-bit symbolic address X, this is the value of
+ (%higher(X) << 16) + %hi(X).
+
+ SYMBOL_64_LOW
+ For a 64-bit symbolic address X, this is the value of
+ (%hi(X) << 16) + %lo(X).
+
+ SYMBOL_HALF
+ An UNSPEC wrapper around any kind of address. It represents the
+ low 16 bits of that address. */
+enum mips_symbol_type {
+ SYMBOL_ABSOLUTE,
+ SYMBOL_GP_RELATIVE,
+ SYMBOL_PC_RELATIVE,
+ SYMBOL_GOT_PAGE_OFST,
+ SYMBOL_GOT_DISP,
+ SYMBOL_GOTOFF_PAGE,
+ SYMBOL_GOTOFF_DISP,
+ SYMBOL_GOTOFF_CALL,
+ SYMBOL_GOTOFF_LOADGP,
+ SYMBOL_TLS,
+ SYMBOL_TLSGD,
+ SYMBOL_TLSLDM,
+ SYMBOL_DTPREL,
+ SYMBOL_GOTTPREL,
+ SYMBOL_TPREL,
+ SYMBOL_64_HIGH,
+ SYMBOL_64_MID,
+ SYMBOL_64_LOW,
+ SYMBOL_HALF
+};
+#define NUM_SYMBOL_TYPES (SYMBOL_HALF + 1)
+
+/* Identifiers a style of $gp initialization sequence.
+
+ LOADGP_NONE
+ No initialization sequence is needed.
+
+ LOADGP_OLDABI
+ The o32 and o64 PIC sequence (the kind traditionally generated
+ by .cpload).
+
+ LOADGP_NEWABI
+ The n32 and n64 PIC sequence (the kind traditionally generated
+ by .cpsetup).
+
+ LOADGP_ABSOLUTE
+ The GNU absolute sequence, as generated by loadgp_absolute.
+
+ LOADGP_RTP
+ The VxWorks RTP PIC sequence, as generated by loadgp_rtp. */
+enum mips_loadgp_style {
+ LOADGP_NONE,
+ LOADGP_OLDABI,
+ LOADGP_NEWABI,
+ LOADGP_ABSOLUTE,
+ LOADGP_RTP
+};
+
+struct mips16e_save_restore_info;
+
+/* Classifies a type of call.
+
+ MIPS_CALL_NORMAL
+ A normal call or call_value pattern.
+
+ MIPS_CALL_SIBCALL
+ A sibcall or sibcall_value pattern.
+
+ MIPS_CALL_EPILOGUE
+ A call inserted in the epilogue. */
+enum mips_call_type {
+ MIPS_CALL_NORMAL,
+ MIPS_CALL_SIBCALL,
+ MIPS_CALL_EPILOGUE
+};
+
+/* Controls the conditions under which certain instructions are split.
+
+ SPLIT_IF_NECESSARY
+ Only perform splits that are necessary for correctness
+ (because no unsplit version exists).
+
+ SPLIT_FOR_SPEED
+ Perform splits that are necessary for correctness or
+ beneficial for code speed.
+
+ SPLIT_FOR_SIZE
+ Perform splits that are necessary for correctness or
+ beneficial for code size. */
+enum mips_split_type {
+ SPLIT_IF_NECESSARY,
+ SPLIT_FOR_SPEED,
+ SPLIT_FOR_SIZE
+};
+
+extern bool mips_symbolic_constant_p (rtx, enum mips_symbol_context,
+ enum mips_symbol_type *);
+extern int mips_regno_mode_ok_for_base_p (int, enum machine_mode, bool);
+extern bool mips_stack_address_p (rtx, enum machine_mode);
+extern int mips_address_insns (rtx, enum machine_mode, bool);
+extern int mips_const_insns (rtx);
+extern int mips_split_const_insns (rtx);
+extern int mips_load_store_insns (rtx, rtx);
+extern int mips_idiv_insns (void);
+extern rtx mips_emit_move (rtx, rtx);
+#ifdef RTX_CODE
+extern void mips_emit_binary (enum rtx_code, rtx, rtx, rtx);
+#endif
+extern rtx mips_pic_base_register (rtx);
+extern rtx mips_got_load (rtx, rtx, enum mips_symbol_type);
+extern bool mips_split_symbol (rtx, rtx, enum machine_mode, rtx *);
+extern rtx mips_unspec_address (rtx, enum mips_symbol_type);
+extern rtx mips_strip_unspec_address (rtx);
+extern void mips_move_integer (rtx, rtx, unsigned HOST_WIDE_INT);
+extern bool mips_legitimize_move (enum machine_mode, rtx, rtx);
+
+extern rtx mips_subword (rtx, bool);
+extern bool mips_split_move_p (rtx, rtx, enum mips_split_type);
+extern void mips_split_move (rtx, rtx, enum mips_split_type);
+extern bool mips_split_move_insn_p (rtx, rtx, rtx);
+extern void mips_split_move_insn (rtx, rtx, rtx);
+extern const char *mips_output_move (rtx, rtx);
+extern bool mips_cfun_has_cprestore_slot_p (void);
+extern bool mips_cprestore_address_p (rtx, bool);
+extern void mips_save_gp_to_cprestore_slot (rtx, rtx, rtx, rtx);
+extern void mips_restore_gp_from_cprestore_slot (rtx);
+#ifdef RTX_CODE
+extern void mips_expand_scc (rtx *);
+extern void mips_expand_conditional_branch (rtx *);
+extern void mips_expand_vcondv2sf (rtx, rtx, rtx, enum rtx_code, rtx, rtx);
+extern void mips_expand_conditional_move (rtx *);
+extern void mips_expand_conditional_trap (rtx);
+#endif
+extern bool mips_use_pic_fn_addr_reg_p (const_rtx);
+extern rtx mips_expand_call (enum mips_call_type, rtx, rtx, rtx, rtx, bool);
+extern void mips_split_call (rtx, rtx);
+extern bool mips_get_pic_call_symbol (rtx *, int);
+extern void mips_expand_fcc_reload (rtx, rtx, rtx);
+extern void mips_set_return_address (rtx, rtx);
+extern bool mips_move_by_pieces_p (unsigned HOST_WIDE_INT, unsigned int);
+extern bool mips_store_by_pieces_p (unsigned HOST_WIDE_INT, unsigned int);
+extern bool mips_expand_block_move (rtx, rtx, rtx);
+extern void mips_expand_synci_loop (rtx, rtx);
+
+extern void mips_init_cumulative_args (CUMULATIVE_ARGS *, tree);
+extern bool mips_pad_arg_upward (enum machine_mode, const_tree);
+extern bool mips_pad_reg_upward (enum machine_mode, tree);
+
+extern bool mips_expand_ext_as_unaligned_load (rtx, rtx, HOST_WIDE_INT,
+ HOST_WIDE_INT, bool);
+extern bool mips_expand_ins_as_unaligned_store (rtx, rtx, HOST_WIDE_INT,
+ HOST_WIDE_INT);
+extern bool mips_mem_fits_mode_p (enum machine_mode mode, rtx x);
+extern void mips_order_regs_for_local_alloc (void);
+extern HOST_WIDE_INT mips_debugger_offset (rtx, HOST_WIDE_INT);
+
+extern void mips_push_asm_switch (struct mips_asm_switch *);
+extern void mips_pop_asm_switch (struct mips_asm_switch *);
+extern void mips_output_external (FILE *, tree, const char *);
+extern void mips_output_ascii (FILE *, const char *, size_t);
+extern const char *mips_output_tls_reloc_directive (rtx *);
+extern void mips_output_aligned_decl_common (FILE *, tree, const char *,
+ unsigned HOST_WIDE_INT,
+ unsigned int);
+extern void mips_declare_common_object (FILE *, const char *,
+ const char *, unsigned HOST_WIDE_INT,
+ unsigned int, bool);
+extern void mips_declare_object (FILE *, const char *, const char *,
+ const char *, ...) ATTRIBUTE_PRINTF_4;
+extern void mips_declare_object_name (FILE *, const char *, tree);
+extern void mips_finish_declare_object (FILE *, tree, int, int);
+
+extern bool mips_small_data_pattern_p (rtx);
+extern rtx mips_rewrite_small_data (rtx);
+extern HOST_WIDE_INT mips_initial_elimination_offset (int, int);
+extern rtx mips_return_addr (int, rtx);
+extern bool mips_must_initialize_gp_p (void);
+extern enum mips_loadgp_style mips_current_loadgp_style (void);
+extern void mips_emit_save_slot_move (rtx, rtx, rtx);
+extern void mips_expand_prologue (void);
+extern void mips_expand_before_return (void);
+extern void mips_expand_epilogue (bool);
+extern bool mips_can_use_return_insn (void);
+
+extern bool mips_cannot_change_mode_class (enum machine_mode,
+ enum machine_mode, enum reg_class);
+extern bool mips_dangerous_for_la25_p (rtx);
+extern bool mips_modes_tieable_p (enum machine_mode, enum machine_mode);
+extern enum reg_class mips_secondary_reload_class (enum reg_class,
+ enum machine_mode,
+ rtx, bool);
+extern int mips_class_max_nregs (enum reg_class, enum machine_mode);
+
+extern int mips_adjust_insn_length (rtx, int);
+extern void mips_output_load_label (rtx);
+extern const char *mips_output_conditional_branch (rtx, rtx *, const char *,
+ const char *);
+extern const char *mips_output_order_conditional_branch (rtx, rtx *, bool);
+extern const char *mips_output_sync (void);
+extern const char *mips_output_sync_loop (rtx, rtx *);
+extern unsigned int mips_sync_loop_insns (rtx, rtx *);
+extern const char *mips_output_division (const char *, rtx *);
+extern const char *mips_output_probe_stack_range (rtx, rtx);
+extern unsigned int mips_hard_regno_nregs (int, enum machine_mode);
+extern bool mips_linked_madd_p (rtx, rtx);
+extern bool mips_store_data_bypass_p (rtx, rtx);
+extern int mips_dspalu_bypass_p (rtx, rtx);
+extern rtx mips_prefetch_cookie (rtx, rtx);
+
+extern const char *current_section_name (void);
+extern unsigned int current_section_flags (void);
+extern bool mips_use_ins_ext_p (rtx, HOST_WIDE_INT, HOST_WIDE_INT);
+
+extern const char *mips16e_output_save_restore (rtx, HOST_WIDE_INT);
+extern bool mips16e_save_restore_pattern_p (rtx, HOST_WIDE_INT,
+ struct mips16e_save_restore_info *);
+
+extern bool mask_low_and_shift_p (enum machine_mode, rtx, rtx, int);
+extern int mask_low_and_shift_len (enum machine_mode, rtx, rtx);
+extern bool and_operands_ok (enum machine_mode, rtx, rtx);
+
+union mips_gen_fn_ptrs
+{
+ rtx (*fn_6) (rtx, rtx, rtx, rtx, rtx, rtx);
+ rtx (*fn_5) (rtx, rtx, rtx, rtx, rtx);
+ rtx (*fn_4) (rtx, rtx, rtx, rtx);
+};
+
+extern void mips_expand_atomic_qihi (union mips_gen_fn_ptrs,
+ rtx, rtx, rtx, rtx);
+
+extern void mips_expand_vector_init (rtx, rtx);
+extern bool mips_expand_vec_perm_const (rtx op[4]);
+extern void mips_expand_vec_unpack (rtx op[2], bool, bool);
+extern void mips_expand_vec_reduc (rtx, rtx, rtx (*)(rtx, rtx, rtx));
+extern void mips_expand_vec_minmax (rtx, rtx, rtx,
+ rtx (*) (rtx, rtx, rtx), bool);
+
+extern bool mips_signed_immediate_p (unsigned HOST_WIDE_INT, int, int);
+extern bool mips_unsigned_immediate_p (unsigned HOST_WIDE_INT, int, int);
+extern const char *umips_output_save_restore (bool, rtx);
+extern bool umips_save_restore_pattern_p (bool, rtx);
+extern bool umips_load_store_pair_p (bool, rtx *);
+extern void umips_output_load_store_pair (bool, rtx *);
+extern bool umips_movep_target_p (rtx, rtx);
+extern bool umips_12bit_offset_address_p (rtx, enum machine_mode);
+extern bool lwsp_swsp_address_p (rtx, enum machine_mode);
+extern bool m16_based_address_p (rtx, enum machine_mode,
+ int (*)(rtx_def*, machine_mode));
+extern rtx mips_expand_thread_pointer (rtx);
+extern void mips16_expand_get_fcsr (rtx);
+extern void mips16_expand_set_fcsr (rtx);
+
+extern bool mips_eh_uses (unsigned int);
+extern bool mips_epilogue_uses (unsigned int);
+extern void mips_final_prescan_insn (rtx, rtx *, int);
+extern int mips_trampoline_code_size (void);
+extern void mips_function_profiler (FILE *);
+
+typedef rtx (*mulsidi3_gen_fn) (rtx, rtx, rtx);
+#ifdef RTX_CODE
+extern mulsidi3_gen_fn mips_mulsidi3_gen_fn (enum rtx_code);
+#endif
+
+#endif /* ! GCC_MIPS_PROTOS_H */
diff --git a/gcc-4.9/gcc/config/mips/mips-ps-3d.md b/gcc-4.9/gcc/config/mips/mips-ps-3d.md
new file mode 100644
index 000000000..549080b88
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/mips-ps-3d.md
@@ -0,0 +1,764 @@
+;; MIPS Paired-Single Floating and MIPS-3D Instructions.
+;; Copyright (C) 2004-2014 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+(define_c_enum "unspec" [
+ UNSPEC_MOVE_TF_PS
+ UNSPEC_C
+
+ ;; MIPS64/MIPS32R2 alnv.ps
+ UNSPEC_ALNV_PS
+
+ ;; MIPS-3D instructions
+ UNSPEC_CABS
+
+ UNSPEC_ADDR_PS
+ UNSPEC_CVT_PW_PS
+ UNSPEC_CVT_PS_PW
+ UNSPEC_MULR_PS
+ UNSPEC_ABS_PS
+
+ UNSPEC_RSQRT1
+ UNSPEC_RSQRT2
+ UNSPEC_RECIP1
+ UNSPEC_RECIP2
+ UNSPEC_SINGLE_CC
+ UNSPEC_SCC
+])
+
+(define_insn "*movcc_v2sf_<mode>"
+ [(set (match_operand:V2SF 0 "register_operand" "=f,f")
+ (if_then_else:V2SF
+ (match_operator:GPR 4 "equality_operator"
+ [(match_operand:GPR 1 "register_operand" "d,d")
+ (const_int 0)])
+ (match_operand:V2SF 2 "register_operand" "f,0")
+ (match_operand:V2SF 3 "register_operand" "0,f")))]
+ "TARGET_HARD_FLOAT && TARGET_PAIRED_SINGLE_FLOAT"
+ "@
+ mov%T4.ps\t%0,%2,%1
+ mov%t4.ps\t%0,%3,%1"
+ [(set_attr "type" "condmove")
+ (set_attr "mode" "SF")])
+
+(define_insn "mips_cond_move_tf_ps"
+ [(set (match_operand:V2SF 0 "register_operand" "=f,f")
+ (unspec:V2SF [(match_operand:V2SF 1 "register_operand" "f,0")
+ (match_operand:V2SF 2 "register_operand" "0,f")
+ (match_operand:CCV2 3 "register_operand" "z,z")]
+ UNSPEC_MOVE_TF_PS))]
+ "TARGET_HARD_FLOAT && TARGET_PAIRED_SINGLE_FLOAT"
+ "@
+ movt.ps\t%0,%1,%3
+ movf.ps\t%0,%2,%3"
+ [(set_attr "type" "condmove")
+ (set_attr "mode" "SF")])
+
+(define_expand "movv2sfcc"
+ [(set (match_dup 4) (match_operand 1 "comparison_operator"))
+ (set (match_operand:V2SF 0 "register_operand")
+ (if_then_else:V2SF (match_dup 5)
+ (match_operand:V2SF 2 "register_operand")
+ (match_operand:V2SF 3 "register_operand")))]
+ "TARGET_HARD_FLOAT && TARGET_PAIRED_SINGLE_FLOAT"
+{
+ /* We can only support MOVN.PS and MOVZ.PS.
+ NOTE: MOVT.PS and MOVF.PS have different semantics from MOVN.PS and
+ MOVZ.PS. MOVT.PS and MOVF.PS depend on two CC values and move
+ each item independently. */
+
+ if (GET_MODE_CLASS (GET_MODE (XEXP (operands[1], 0))) != MODE_INT)
+ FAIL;
+
+ mips_expand_conditional_move (operands);
+ DONE;
+})
+
+(define_insn "vec_perm_const_ps"
+ [(set (match_operand:V2SF 0 "register_operand" "=f")
+ (vec_select:V2SF
+ (vec_concat:V4SF
+ (match_operand:V2SF 1 "register_operand" "f")
+ (match_operand:V2SF 2 "register_operand" "f"))
+ (parallel [(match_operand:SI 3 "const_0_or_1_operand" "")
+ (match_operand:SI 4 "const_2_or_3_operand" "")])))]
+ "TARGET_HARD_FLOAT && TARGET_PAIRED_SINGLE_FLOAT"
+{
+ /* Let <op>L be the lower part of operand <op> and <op>U be the upper part.
+ The P[UL][UL].PS instruction always specifies the upper part of the
+ result first, so the instruction is:
+
+ P<aUL><bUL>.PS %0,<aop>,<bop>
+
+ where 0U == <aop><aUL> and 0L == <bop><bUL>.
+
+ GCC's vector indices are specified in memory order, which means
+ that vector element 0 is the lower part (L) on little-endian targets
+ and the upper part (U) on big-endian targets. vec_concat likewise
+ concatenates in memory order, which means that operand 3 (being
+ 0 or 1) selects part of operand 1 and operand 4 (being 2 or 3)
+ selects part of operand 2.
+
+ Let:
+
+ I3 = INTVAL (operands[3])
+ I4 = INTVAL (operands[4]) - 2
+
+ Taking the two endiannesses in turn:
+
+ Little-endian:
+
+ The semantics of the RTL pattern are:
+
+ { 0L, 0U } = { X[I3], X[I4 + 2] }, where X = { 1L, 1U, 2L, 2U }
+
+ so: 0L = { 1L, 1U }[I3] (= <bop><bUL>)
+ 0U = { 2L, 2U }[I4] (= <aop><aUL>)
+
+ <aop> = 2, <aUL> = I4 ? U : L
+ <bop> = 1, <bUL> = I3 ? U : L
+
+ [LL] !I4 && !I3 [UL] I4 && !I3
+ [LU] !I4 && I3 [UU] I4 && I3
+
+ Big-endian:
+
+ The semantics of the RTL pattern are:
+
+ { 0U, 0L } = { X[I3], X[I4 + 2] }, where X = { 1U, 1L, 2U, 2L }
+
+ so: 0U = { 1U, 1L }[I3] (= <aop><aUL>)
+ 0L = { 2U, 2L }[I4] (= <bop><bUL>)
+
+ <aop> = 1, <aUL> = I3 ? L : U
+ <bop> = 2, <bUL> = I4 ? L : U
+
+ [UU] !I3 && !I4 [UL] !I3 && I4
+ [LU] I3 && !I4 [LL] I3 && I4. */
+
+ static const char * const mnemonics[2][4] = {
+ /* LE */ { "pll.ps\t%0,%2,%1", "pul.ps\t%0,%2,%1",
+ "plu.ps\t%0,%2,%1", "puu.ps\t%0,%2,%1" },
+ /* BE */ { "puu.ps\t%0,%1,%2", "pul.ps\t%0,%1,%2",
+ "plu.ps\t%0,%1,%2", "pll.ps\t%0,%1,%2" },
+ };
+
+ unsigned mask = INTVAL (operands[3]) * 2 + (INTVAL (operands[4]) - 2);
+ return mnemonics[BYTES_BIG_ENDIAN][mask];
+}
+ [(set_attr "type" "fmove")
+ (set_attr "mode" "SF")])
+
+(define_expand "vec_perm_constv2sf"
+ [(match_operand:V2SF 0 "register_operand" "")
+ (match_operand:V2SF 1 "register_operand" "")
+ (match_operand:V2SF 2 "register_operand" "")
+ (match_operand:V2SI 3 "" "")]
+ "TARGET_HARD_FLOAT && TARGET_PAIRED_SINGLE_FLOAT"
+{
+ if (mips_expand_vec_perm_const (operands))
+ DONE;
+ else
+ FAIL;
+})
+
+;; Expanders for builtins. The instruction:
+;;
+;; P[UL][UL].PS <result>, <a>, <b>
+;;
+;; says that the upper part of <result> is taken from half of <a> and
+;; the lower part of <result> is taken from half of <b>. This means
+;; that the P[UL][UL].PS operand order matches memory order on big-endian
+;; targets; <a> is element 0 of the V2SF result while <b> is element 1.
+;; However, the P[UL][UL].PS operand order is the reverse of memory order
+;; on little-endian targets; <a> is element 1 of the V2SF result while
+;; <b> is element 0. The arguments to vec_perm_const_ps are always in
+;; memory order.
+;;
+;; Similarly, "U" corresponds to element 0 on big-endian targets but
+;; to element 1 on little-endian targets.
+
+(define_expand "mips_puu_ps"
+ [(match_operand:V2SF 0 "register_operand" "")
+ (match_operand:V2SF 1 "register_operand" "")
+ (match_operand:V2SF 2 "register_operand" "")]
+ "TARGET_HARD_FLOAT && TARGET_PAIRED_SINGLE_FLOAT"
+{
+ if (BYTES_BIG_ENDIAN)
+ emit_insn (gen_vec_perm_const_ps (operands[0], operands[1], operands[2],
+ const0_rtx, const2_rtx));
+ else
+ emit_insn (gen_vec_perm_const_ps (operands[0], operands[2], operands[1],
+ const1_rtx, GEN_INT (3)));
+ DONE;
+})
+
+(define_expand "mips_pul_ps"
+ [(match_operand:V2SF 0 "register_operand" "")
+ (match_operand:V2SF 1 "register_operand" "")
+ (match_operand:V2SF 2 "register_operand" "")]
+ "TARGET_HARD_FLOAT && TARGET_PAIRED_SINGLE_FLOAT"
+{
+ if (BYTES_BIG_ENDIAN)
+ emit_insn (gen_vec_perm_const_ps (operands[0], operands[1], operands[2],
+ const0_rtx, GEN_INT (3)));
+ else
+ emit_insn (gen_vec_perm_const_ps (operands[0], operands[2], operands[1],
+ const0_rtx, GEN_INT (3)));
+ DONE;
+})
+
+(define_expand "mips_plu_ps"
+ [(match_operand:V2SF 0 "register_operand" "")
+ (match_operand:V2SF 1 "register_operand" "")
+ (match_operand:V2SF 2 "register_operand" "")]
+ "TARGET_HARD_FLOAT && TARGET_PAIRED_SINGLE_FLOAT"
+{
+ if (BYTES_BIG_ENDIAN)
+ emit_insn (gen_vec_perm_const_ps (operands[0], operands[1], operands[2],
+ const1_rtx, const2_rtx));
+ else
+ emit_insn (gen_vec_perm_const_ps (operands[0], operands[2], operands[1],
+ const1_rtx, const2_rtx));
+ DONE;
+})
+
+(define_expand "mips_pll_ps"
+ [(match_operand:V2SF 0 "register_operand" "")
+ (match_operand:V2SF 1 "register_operand" "")
+ (match_operand:V2SF 2 "register_operand" "")]
+ "TARGET_HARD_FLOAT && TARGET_PAIRED_SINGLE_FLOAT"
+{
+ if (BYTES_BIG_ENDIAN)
+ emit_insn (gen_vec_perm_const_ps (operands[0], operands[1], operands[2],
+ const1_rtx, GEN_INT (3)));
+ else
+ emit_insn (gen_vec_perm_const_ps (operands[0], operands[2], operands[1],
+ const0_rtx, const2_rtx));
+ DONE;
+})
+
+; vec_init
+(define_expand "vec_initv2sf"
+ [(match_operand:V2SF 0 "register_operand")
+ (match_operand:V2SF 1 "")]
+ "TARGET_HARD_FLOAT && TARGET_PAIRED_SINGLE_FLOAT"
+{
+ mips_expand_vector_init (operands[0], operands[1]);
+ DONE;
+})
+
+(define_insn "vec_concatv2sf"
+ [(set (match_operand:V2SF 0 "register_operand" "=f")
+ (vec_concat:V2SF
+ (match_operand:SF 1 "register_operand" "f")
+ (match_operand:SF 2 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_PAIRED_SINGLE_FLOAT"
+{
+ if (BYTES_BIG_ENDIAN)
+ return "cvt.ps.s\t%0,%1,%2";
+ else
+ return "cvt.ps.s\t%0,%2,%1";
+}
+ [(set_attr "type" "fcvt")
+ (set_attr "mode" "SF")])
+
+;; ??? This is only generated if we perform a vector operation that has to be
+;; emulated. There is no other way to get a vector mode bitfield extract
+;; currently.
+
+(define_insn "vec_extractv2sf"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (vec_select:SF (match_operand:V2SF 1 "register_operand" "f")
+ (parallel
+ [(match_operand 2 "const_0_or_1_operand" "")])))]
+ "TARGET_HARD_FLOAT && TARGET_PAIRED_SINGLE_FLOAT"
+{
+ if (INTVAL (operands[2]) == !BYTES_BIG_ENDIAN)
+ return "cvt.s.pu\t%0,%1";
+ else
+ return "cvt.s.pl\t%0,%1";
+}
+ [(set_attr "type" "fcvt")
+ (set_attr "mode" "SF")])
+
+;; ??? This is only generated if we disable the vec_init pattern. There is
+;; no other way to get a vector mode bitfield store currently.
+
+(define_expand "vec_setv2sf"
+ [(set (match_operand:V2SF 0 "register_operand" "")
+ (vec_select:V2SF
+ (vec_concat:V4SF
+ (match_operand:SF 1 "register_operand" "")
+ (match_dup 0))
+ (parallel [(match_operand 2 "const_0_or_1_operand" "")
+ (match_dup 3)])))]
+ "TARGET_HARD_FLOAT && TARGET_PAIRED_SINGLE_FLOAT"
+{
+ /* We don't have an insert instruction, so we duplicate the float, and
+ then use a PUL instruction. */
+ rtx temp = gen_reg_rtx (V2SFmode);
+ emit_insn (gen_vec_concatv2sf (temp, operands[1], operands[1]));
+ operands[1] = temp;
+ operands[3] = GEN_INT (1 - INTVAL (operands[2]) + 2);
+})
+
+; cvt.ps.s - Floating Point Convert Pair to Paired Single
+(define_expand "mips_cvt_ps_s"
+ [(match_operand:V2SF 0 "register_operand")
+ (match_operand:SF 1 "register_operand")
+ (match_operand:SF 2 "register_operand")]
+ "TARGET_HARD_FLOAT && TARGET_PAIRED_SINGLE_FLOAT"
+{
+ if (BYTES_BIG_ENDIAN)
+ emit_insn (gen_vec_concatv2sf (operands[0], operands[1], operands[2]));
+ else
+ emit_insn (gen_vec_concatv2sf (operands[0], operands[2], operands[1]));
+ DONE;
+})
+
+; cvt.s.pl - Floating Point Convert Pair Lower to Single Floating Point
+(define_expand "mips_cvt_s_pl"
+ [(set (match_operand:SF 0 "register_operand")
+ (vec_select:SF (match_operand:V2SF 1 "register_operand")
+ (parallel [(match_dup 2)])))]
+ "TARGET_HARD_FLOAT && TARGET_PAIRED_SINGLE_FLOAT"
+ { operands[2] = GEN_INT (BYTES_BIG_ENDIAN); })
+
+; cvt.s.pu - Floating Point Convert Pair Upper to Single Floating Point
+(define_expand "mips_cvt_s_pu"
+ [(set (match_operand:SF 0 "register_operand")
+ (vec_select:SF (match_operand:V2SF 1 "register_operand")
+ (parallel [(match_dup 2)])))]
+ "TARGET_HARD_FLOAT && TARGET_PAIRED_SINGLE_FLOAT"
+ { operands[2] = GEN_INT (!BYTES_BIG_ENDIAN); })
+
+; alnv.ps - Floating Point Align Variable
+(define_insn "mips_alnv_ps"
+ [(set (match_operand:V2SF 0 "register_operand" "=f")
+ (unspec:V2SF [(match_operand:V2SF 1 "register_operand" "f")
+ (match_operand:V2SF 2 "register_operand" "f")
+ (match_operand:SI 3 "register_operand" "d")]
+ UNSPEC_ALNV_PS))]
+ "TARGET_HARD_FLOAT && TARGET_PAIRED_SINGLE_FLOAT"
+ "alnv.ps\t%0,%1,%2,%3"
+ [(set_attr "type" "fmove")
+ (set_attr "mode" "SF")])
+
+; addr.ps - Floating Point Reduction Add
+(define_insn "mips_addr_ps"
+ [(set (match_operand:V2SF 0 "register_operand" "=f")
+ (unspec:V2SF [(match_operand:V2SF 1 "register_operand" "f")
+ (match_operand:V2SF 2 "register_operand" "f")]
+ UNSPEC_ADDR_PS))]
+ "TARGET_HARD_FLOAT && TARGET_MIPS3D"
+ "addr.ps\t%0,%1,%2"
+ [(set_attr "type" "fadd")
+ (set_attr "mode" "SF")])
+
+(define_insn "reduc_splus_v2sf"
+ [(set (match_operand:V2SF 0 "register_operand" "=f")
+ (unspec:V2SF [(match_operand:V2SF 1 "register_operand" "f")
+ (match_dup 1)]
+ UNSPEC_ADDR_PS))]
+ "TARGET_HARD_FLOAT && TARGET_MIPS3D"
+ "")
+
+; cvt.pw.ps - Floating Point Convert Paired Single to Paired Word
+(define_insn "mips_cvt_pw_ps"
+ [(set (match_operand:V2SF 0 "register_operand" "=f")
+ (unspec:V2SF [(match_operand:V2SF 1 "register_operand" "f")]
+ UNSPEC_CVT_PW_PS))]
+ "TARGET_HARD_FLOAT && TARGET_MIPS3D"
+ "cvt.pw.ps\t%0,%1"
+ [(set_attr "type" "fcvt")
+ (set_attr "mode" "SF")])
+
+; cvt.ps.pw - Floating Point Convert Paired Word to Paired Single
+(define_insn "mips_cvt_ps_pw"
+ [(set (match_operand:V2SF 0 "register_operand" "=f")
+ (unspec:V2SF [(match_operand:V2SF 1 "register_operand" "f")]
+ UNSPEC_CVT_PS_PW))]
+ "TARGET_HARD_FLOAT && TARGET_MIPS3D"
+ "cvt.ps.pw\t%0,%1"
+ [(set_attr "type" "fcvt")
+ (set_attr "mode" "SF")])
+
+; mulr.ps - Floating Point Reduction Multiply
+(define_insn "mips_mulr_ps"
+ [(set (match_operand:V2SF 0 "register_operand" "=f")
+ (unspec:V2SF [(match_operand:V2SF 1 "register_operand" "f")
+ (match_operand:V2SF 2 "register_operand" "f")]
+ UNSPEC_MULR_PS))]
+ "TARGET_HARD_FLOAT && TARGET_MIPS3D"
+ "mulr.ps\t%0,%1,%2"
+ [(set_attr "type" "fmul")
+ (set_attr "mode" "SF")])
+
+; abs.ps
+(define_expand "mips_abs_ps"
+ [(set (match_operand:V2SF 0 "register_operand")
+ (unspec:V2SF [(match_operand:V2SF 1 "register_operand")]
+ UNSPEC_ABS_PS))]
+ "TARGET_HARD_FLOAT && TARGET_PAIRED_SINGLE_FLOAT"
+{
+ /* If we can ignore NaNs, this operation is equivalent to the
+ rtl ABS code. */
+ if (!HONOR_NANS (V2SFmode))
+ {
+ emit_insn (gen_absv2sf2 (operands[0], operands[1]));
+ DONE;
+ }
+})
+
+(define_insn "*mips_abs_ps"
+ [(set (match_operand:V2SF 0 "register_operand" "=f")
+ (unspec:V2SF [(match_operand:V2SF 1 "register_operand" "f")]
+ UNSPEC_ABS_PS))]
+ "TARGET_HARD_FLOAT && TARGET_PAIRED_SINGLE_FLOAT"
+ "abs.ps\t%0,%1"
+ [(set_attr "type" "fabs")
+ (set_attr "mode" "SF")])
+
+;----------------------------------------------------------------------------
+; Floating Point Comparisons for Scalars
+;----------------------------------------------------------------------------
+
+(define_insn "mips_cabs_cond_<fmt>"
+ [(set (match_operand:CC 0 "register_operand" "=z")
+ (unspec:CC [(match_operand:SCALARF 1 "register_operand" "f")
+ (match_operand:SCALARF 2 "register_operand" "f")
+ (match_operand 3 "const_int_operand" "")]
+ UNSPEC_CABS))]
+ "TARGET_HARD_FLOAT && TARGET_MIPS3D"
+ "cabs.%Y3.<fmt>\t%0,%1,%2"
+ [(set_attr "type" "fcmp")
+ (set_attr "mode" "FPSW")])
+
+
+;----------------------------------------------------------------------------
+; Floating Point Comparisons for Four Singles
+;----------------------------------------------------------------------------
+
+(define_insn_and_split "mips_c_cond_4s"
+ [(set (match_operand:CCV4 0 "register_operand" "=z")
+ (unspec:CCV4 [(match_operand:V2SF 1 "register_operand" "f")
+ (match_operand:V2SF 2 "register_operand" "f")
+ (match_operand:V2SF 3 "register_operand" "f")
+ (match_operand:V2SF 4 "register_operand" "f")
+ (match_operand 5 "const_int_operand" "")]
+ UNSPEC_C))]
+ "TARGET_HARD_FLOAT && TARGET_PAIRED_SINGLE_FLOAT"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 6)
+ (unspec:CCV2 [(match_dup 1)
+ (match_dup 2)
+ (match_dup 5)]
+ UNSPEC_C))
+ (set (match_dup 7)
+ (unspec:CCV2 [(match_dup 3)
+ (match_dup 4)
+ (match_dup 5)]
+ UNSPEC_C))]
+{
+ operands[6] = simplify_gen_subreg (CCV2mode, operands[0], CCV4mode, 0);
+ operands[7] = simplify_gen_subreg (CCV2mode, operands[0], CCV4mode, 8);
+}
+ [(set_attr "type" "fcmp")
+ (set_attr "insn_count" "2")
+ (set_attr "mode" "FPSW")])
+
+(define_insn_and_split "mips_cabs_cond_4s"
+ [(set (match_operand:CCV4 0 "register_operand" "=z")
+ (unspec:CCV4 [(match_operand:V2SF 1 "register_operand" "f")
+ (match_operand:V2SF 2 "register_operand" "f")
+ (match_operand:V2SF 3 "register_operand" "f")
+ (match_operand:V2SF 4 "register_operand" "f")
+ (match_operand 5 "const_int_operand" "")]
+ UNSPEC_CABS))]
+ "TARGET_HARD_FLOAT && TARGET_MIPS3D"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 6)
+ (unspec:CCV2 [(match_dup 1)
+ (match_dup 2)
+ (match_dup 5)]
+ UNSPEC_CABS))
+ (set (match_dup 7)
+ (unspec:CCV2 [(match_dup 3)
+ (match_dup 4)
+ (match_dup 5)]
+ UNSPEC_CABS))]
+{
+ operands[6] = simplify_gen_subreg (CCV2mode, operands[0], CCV4mode, 0);
+ operands[7] = simplify_gen_subreg (CCV2mode, operands[0], CCV4mode, 8);
+}
+ [(set_attr "type" "fcmp")
+ (set_attr "insn_count" "2")
+ (set_attr "mode" "FPSW")])
+
+
+;----------------------------------------------------------------------------
+; Floating Point Comparisons for Paired Singles
+;----------------------------------------------------------------------------
+
+(define_insn "mips_c_cond_ps"
+ [(set (match_operand:CCV2 0 "register_operand" "=z")
+ (unspec:CCV2 [(match_operand:V2SF 1 "register_operand" "f")
+ (match_operand:V2SF 2 "register_operand" "f")
+ (match_operand 3 "const_int_operand" "")]
+ UNSPEC_C))]
+ "TARGET_HARD_FLOAT && TARGET_PAIRED_SINGLE_FLOAT"
+ "c.%Y3.ps\t%0,%1,%2"
+ [(set_attr "type" "fcmp")
+ (set_attr "mode" "FPSW")])
+
+(define_insn "mips_cabs_cond_ps"
+ [(set (match_operand:CCV2 0 "register_operand" "=z")
+ (unspec:CCV2 [(match_operand:V2SF 1 "register_operand" "f")
+ (match_operand:V2SF 2 "register_operand" "f")
+ (match_operand 3 "const_int_operand" "")]
+ UNSPEC_CABS))]
+ "TARGET_HARD_FLOAT && TARGET_MIPS3D"
+ "cabs.%Y3.ps\t%0,%1,%2"
+ [(set_attr "type" "fcmp")
+ (set_attr "mode" "FPSW")])
+
+;; An expander for generating an scc operation.
+(define_expand "scc_ps"
+ [(set (match_operand:CCV2 0)
+ (unspec:CCV2 [(match_operand 1)] UNSPEC_SCC))])
+
+(define_insn "s<code>_ps"
+ [(set (match_operand:CCV2 0 "register_operand" "=z")
+ (unspec:CCV2
+ [(fcond (match_operand:V2SF 1 "register_operand" "f")
+ (match_operand:V2SF 2 "register_operand" "f"))]
+ UNSPEC_SCC))]
+ "TARGET_HARD_FLOAT && TARGET_PAIRED_SINGLE_FLOAT"
+ "c.<fcond>.ps\t%0,%1,%2"
+ [(set_attr "type" "fcmp")
+ (set_attr "mode" "FPSW")])
+
+(define_insn "s<code>_ps"
+ [(set (match_operand:CCV2 0 "register_operand" "=z")
+ (unspec:CCV2
+ [(swapped_fcond (match_operand:V2SF 1 "register_operand" "f")
+ (match_operand:V2SF 2 "register_operand" "f"))]
+ UNSPEC_SCC))]
+ "TARGET_HARD_FLOAT && TARGET_PAIRED_SINGLE_FLOAT"
+ "c.<swapped_fcond>.ps\t%0,%2,%1"
+ [(set_attr "type" "fcmp")
+ (set_attr "mode" "FPSW")])
+
+;----------------------------------------------------------------------------
+; Floating Point Branch Instructions.
+;----------------------------------------------------------------------------
+
+; Branch on Any of Four Floating Point Condition Codes True
+(define_insn "bc1any4t"
+ [(set (pc)
+ (if_then_else (ne (match_operand:CCV4 1 "register_operand" "z")
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ "TARGET_HARD_FLOAT && TARGET_MIPS3D"
+ "%*bc1any4t\t%1,%0%/"
+ [(set_attr "type" "branch")])
+
+; Branch on Any of Four Floating Point Condition Codes False
+(define_insn "bc1any4f"
+ [(set (pc)
+ (if_then_else (ne (match_operand:CCV4 1 "register_operand" "z")
+ (const_int -1))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ "TARGET_HARD_FLOAT && TARGET_MIPS3D"
+ "%*bc1any4f\t%1,%0%/"
+ [(set_attr "type" "branch")])
+
+; Branch on Any of Two Floating Point Condition Codes True
+(define_insn "bc1any2t"
+ [(set (pc)
+ (if_then_else (ne (match_operand:CCV2 1 "register_operand" "z")
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ "TARGET_HARD_FLOAT && TARGET_MIPS3D"
+ "%*bc1any2t\t%1,%0%/"
+ [(set_attr "type" "branch")])
+
+; Branch on Any of Two Floating Point Condition Codes False
+(define_insn "bc1any2f"
+ [(set (pc)
+ (if_then_else (ne (match_operand:CCV2 1 "register_operand" "z")
+ (const_int -1))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ "TARGET_HARD_FLOAT && TARGET_MIPS3D"
+ "%*bc1any2f\t%1,%0%/"
+ [(set_attr "type" "branch")])
+
+; Used to access one register in a CCV2 pair. Operand 0 is the register
+; pair and operand 1 is the index of the register we want (a CONST_INT).
+(define_expand "single_cc"
+ [(ne (unspec:CC [(match_operand 0) (match_operand 1)] UNSPEC_SINGLE_CC)
+ (const_int 0))])
+
+; This is a normal floating-point branch pattern, but rather than check
+; a single CCmode register, it checks one register in a CCV2 pair.
+; Operand 2 is the register pair and operand 3 is the index of the
+; register we want.
+(define_insn "*branch_upper_lower"
+ [(set (pc)
+ (if_then_else
+ (match_operator 1 "equality_operator"
+ [(unspec:CC [(match_operand:CCV2 2 "register_operand" "z")
+ (match_operand 3 "const_int_operand")]
+ UNSPEC_SINGLE_CC)
+ (const_int 0)])
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ "TARGET_HARD_FLOAT"
+{
+ operands[2]
+ = gen_rtx_REG (CCmode, REGNO (operands[2]) + INTVAL (operands[3]));
+ return mips_output_conditional_branch (insn, operands,
+ MIPS_BRANCH ("b%F1", "%2,%0"),
+ MIPS_BRANCH ("b%W1", "%2,%0"));
+}
+ [(set_attr "type" "branch")])
+
+; As above, but with the sense of the condition reversed.
+(define_insn "*branch_upper_lower_inverted"
+ [(set (pc)
+ (if_then_else
+ (match_operator 1 "equality_operator"
+ [(unspec:CC [(match_operand:CCV2 2 "register_operand" "z")
+ (match_operand 3 "const_int_operand")]
+ UNSPEC_SINGLE_CC)
+ (const_int 0)])
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ "TARGET_HARD_FLOAT"
+{
+ operands[2]
+ = gen_rtx_REG (CCmode, REGNO (operands[2]) + INTVAL (operands[3]));
+ return mips_output_conditional_branch (insn, operands,
+ MIPS_BRANCH ("b%W1", "%2,%0"),
+ MIPS_BRANCH ("b%F1", "%2,%0"));
+}
+ [(set_attr "type" "branch")])
+
+;----------------------------------------------------------------------------
+; Floating Point Reduced Precision Reciprocal Square Root Instructions.
+;----------------------------------------------------------------------------
+
+(define_insn "mips_rsqrt1_<fmt>"
+ [(set (match_operand:ANYF 0 "register_operand" "=f")
+ (unspec:ANYF [(match_operand:ANYF 1 "register_operand" "f")]
+ UNSPEC_RSQRT1))]
+ "TARGET_HARD_FLOAT && TARGET_MIPS3D"
+ "rsqrt1.<fmt>\t%0,%1"
+ [(set_attr "type" "frsqrt1")
+ (set_attr "mode" "<UNITMODE>")])
+
+(define_insn "mips_rsqrt2_<fmt>"
+ [(set (match_operand:ANYF 0 "register_operand" "=f")
+ (unspec:ANYF [(match_operand:ANYF 1 "register_operand" "f")
+ (match_operand:ANYF 2 "register_operand" "f")]
+ UNSPEC_RSQRT2))]
+ "TARGET_HARD_FLOAT && TARGET_MIPS3D"
+ "rsqrt2.<fmt>\t%0,%1,%2"
+ [(set_attr "type" "frsqrt2")
+ (set_attr "mode" "<UNITMODE>")])
+
+(define_insn "mips_recip1_<fmt>"
+ [(set (match_operand:ANYF 0 "register_operand" "=f")
+ (unspec:ANYF [(match_operand:ANYF 1 "register_operand" "f")]
+ UNSPEC_RECIP1))]
+ "TARGET_HARD_FLOAT && TARGET_MIPS3D"
+ "recip1.<fmt>\t%0,%1"
+ [(set_attr "type" "frdiv1")
+ (set_attr "mode" "<UNITMODE>")])
+
+(define_insn "mips_recip2_<fmt>"
+ [(set (match_operand:ANYF 0 "register_operand" "=f")
+ (unspec:ANYF [(match_operand:ANYF 1 "register_operand" "f")
+ (match_operand:ANYF 2 "register_operand" "f")]
+ UNSPEC_RECIP2))]
+ "TARGET_HARD_FLOAT && TARGET_MIPS3D"
+ "recip2.<fmt>\t%0,%1,%2"
+ [(set_attr "type" "frdiv2")
+ (set_attr "mode" "<UNITMODE>")])
+
+(define_expand "vcondv2sfv2sf"
+ [(set (match_operand:V2SF 0 "register_operand")
+ (if_then_else:V2SF
+ (match_operator 3 ""
+ [(match_operand:V2SF 4 "register_operand")
+ (match_operand:V2SF 5 "register_operand")])
+ (match_operand:V2SF 1 "register_operand")
+ (match_operand:V2SF 2 "register_operand")))]
+ "TARGET_HARD_FLOAT && TARGET_PAIRED_SINGLE_FLOAT"
+{
+ mips_expand_vcondv2sf (operands[0], operands[1], operands[2],
+ GET_CODE (operands[3]), operands[4], operands[5]);
+ DONE;
+})
+
+(define_expand "sminv2sf3"
+ [(set (match_operand:V2SF 0 "register_operand")
+ (smin:V2SF (match_operand:V2SF 1 "register_operand")
+ (match_operand:V2SF 2 "register_operand")))]
+ "TARGET_HARD_FLOAT && TARGET_PAIRED_SINGLE_FLOAT"
+{
+ mips_expand_vcondv2sf (operands[0], operands[1], operands[2],
+ LE, operands[1], operands[2]);
+ DONE;
+})
+
+(define_expand "smaxv2sf3"
+ [(set (match_operand:V2SF 0 "register_operand")
+ (smax:V2SF (match_operand:V2SF 1 "register_operand")
+ (match_operand:V2SF 2 "register_operand")))]
+ "TARGET_HARD_FLOAT && TARGET_PAIRED_SINGLE_FLOAT"
+{
+ mips_expand_vcondv2sf (operands[0], operands[1], operands[2],
+ LE, operands[2], operands[1]);
+ DONE;
+})
+
+(define_expand "reduc_smin_v2sf"
+ [(match_operand:V2SF 0 "register_operand")
+ (match_operand:V2SF 1 "register_operand")]
+ "TARGET_HARD_FLOAT && TARGET_PAIRED_SINGLE_FLOAT"
+{
+ mips_expand_vec_reduc (operands[0], operands[1], gen_sminv2sf3);
+ DONE;
+})
+
+(define_expand "reduc_smax_v2sf"
+ [(match_operand:V2SF 0 "register_operand")
+ (match_operand:V2SF 1 "register_operand")]
+ "TARGET_HARD_FLOAT && TARGET_PAIRED_SINGLE_FLOAT"
+{
+ mips_expand_vec_reduc (operands[0], operands[1], gen_smaxv2sf3);
+ DONE;
+})
diff --git a/gcc-4.9/gcc/config/mips/mips-tables.opt b/gcc-4.9/gcc/config/mips/mips-tables.opt
new file mode 100644
index 000000000..760b764e3
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/mips-tables.opt
@@ -0,0 +1,644 @@
+; -*- buffer-read-only: t -*-
+; Generated automatically by genopt.sh from mips-cpus.def.
+
+; Copyright (C) 2011-2014 Free Software Foundation, Inc.
+;
+; This file is part of GCC.
+;
+; GCC is free software; you can redistribute it and/or modify it under
+; the terms of the GNU General Public License as published by the Free
+; Software Foundation; either version 3, or (at your option) any later
+; version.
+;
+; GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+; WARRANTY; without even the implied warranty of MERCHANTABILITY or
+; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+; for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with GCC; see the file COPYING3. If not see
+; <http://www.gnu.org/licenses/>.
+
+Enum
+Name(mips_arch_opt_value) Type(int)
+Known MIPS CPUs (for use with the -march= and -mtune= options):
+
+Enum
+Name(mips_mips_opt_value) Type(int)
+Known MIPS ISA levels (for use with the -mips option):
+
+EnumValue
+Enum(mips_arch_opt_value) String(from-abi) Value(MIPS_ARCH_OPTION_FROM_ABI)
+
+EnumValue
+Enum(mips_arch_opt_value) String(native) Value(MIPS_ARCH_OPTION_NATIVE) DriverOnly
+
+EnumValue
+Enum(mips_arch_opt_value) String(mips1) Value(0) Canonical
+
+EnumValue
+Enum(mips_mips_opt_value) String(1) Value(0)
+
+EnumValue
+Enum(mips_arch_opt_value) String(mips2) Value(1) Canonical
+
+EnumValue
+Enum(mips_mips_opt_value) String(2) Value(1)
+
+EnumValue
+Enum(mips_arch_opt_value) String(mips3) Value(2) Canonical
+
+EnumValue
+Enum(mips_mips_opt_value) String(3) Value(2)
+
+EnumValue
+Enum(mips_arch_opt_value) String(mips4) Value(3) Canonical
+
+EnumValue
+Enum(mips_mips_opt_value) String(4) Value(3)
+
+EnumValue
+Enum(mips_arch_opt_value) String(mips32) Value(4) Canonical
+
+EnumValue
+Enum(mips_mips_opt_value) String(32) Value(4)
+
+EnumValue
+Enum(mips_arch_opt_value) String(mips32r2) Value(5) Canonical
+
+EnumValue
+Enum(mips_mips_opt_value) String(32r2) Value(5)
+
+EnumValue
+Enum(mips_arch_opt_value) String(mips64) Value(6) Canonical
+
+EnumValue
+Enum(mips_mips_opt_value) String(64) Value(6)
+
+EnumValue
+Enum(mips_arch_opt_value) String(mips64r2) Value(7) Canonical
+
+EnumValue
+Enum(mips_mips_opt_value) String(64r2) Value(7)
+
+EnumValue
+Enum(mips_arch_opt_value) String(r3000) Value(8) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(r3k) Value(8)
+
+EnumValue
+Enum(mips_arch_opt_value) String(3000) Value(8)
+
+EnumValue
+Enum(mips_arch_opt_value) String(3k) Value(8)
+
+EnumValue
+Enum(mips_arch_opt_value) String(r2000) Value(9) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(r2k) Value(9)
+
+EnumValue
+Enum(mips_arch_opt_value) String(2000) Value(9)
+
+EnumValue
+Enum(mips_arch_opt_value) String(2k) Value(9)
+
+EnumValue
+Enum(mips_arch_opt_value) String(r3900) Value(10) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(3900) Value(10)
+
+EnumValue
+Enum(mips_arch_opt_value) String(r6000) Value(11) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(r6k) Value(11)
+
+EnumValue
+Enum(mips_arch_opt_value) String(6000) Value(11)
+
+EnumValue
+Enum(mips_arch_opt_value) String(6k) Value(11)
+
+EnumValue
+Enum(mips_arch_opt_value) String(r4000) Value(12) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(r4k) Value(12)
+
+EnumValue
+Enum(mips_arch_opt_value) String(4000) Value(12)
+
+EnumValue
+Enum(mips_arch_opt_value) String(4k) Value(12)
+
+EnumValue
+Enum(mips_arch_opt_value) String(vr4100) Value(13) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(4100) Value(13)
+
+EnumValue
+Enum(mips_arch_opt_value) String(r4100) Value(13)
+
+EnumValue
+Enum(mips_arch_opt_value) String(vr4111) Value(14) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(4111) Value(14)
+
+EnumValue
+Enum(mips_arch_opt_value) String(r4111) Value(14)
+
+EnumValue
+Enum(mips_arch_opt_value) String(vr4120) Value(15) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(4120) Value(15)
+
+EnumValue
+Enum(mips_arch_opt_value) String(r4120) Value(15)
+
+EnumValue
+Enum(mips_arch_opt_value) String(vr4130) Value(16) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(4130) Value(16)
+
+EnumValue
+Enum(mips_arch_opt_value) String(r4130) Value(16)
+
+EnumValue
+Enum(mips_arch_opt_value) String(vr4300) Value(17) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(4300) Value(17)
+
+EnumValue
+Enum(mips_arch_opt_value) String(r4300) Value(17)
+
+EnumValue
+Enum(mips_arch_opt_value) String(r4400) Value(18) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(4400) Value(18)
+
+EnumValue
+Enum(mips_arch_opt_value) String(r4600) Value(19) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(4600) Value(19)
+
+EnumValue
+Enum(mips_arch_opt_value) String(orion) Value(20) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(r4650) Value(21) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(4650) Value(21)
+
+EnumValue
+Enum(mips_arch_opt_value) String(r4700) Value(22) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(4700) Value(22)
+
+EnumValue
+Enum(mips_arch_opt_value) String(r5900) Value(23) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(5900) Value(23)
+
+EnumValue
+Enum(mips_arch_opt_value) String(loongson2e) Value(24) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(loongson2f) Value(25) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(r8000) Value(26) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(r8k) Value(26)
+
+EnumValue
+Enum(mips_arch_opt_value) String(8000) Value(26)
+
+EnumValue
+Enum(mips_arch_opt_value) String(8k) Value(26)
+
+EnumValue
+Enum(mips_arch_opt_value) String(r10000) Value(27) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(r10k) Value(27)
+
+EnumValue
+Enum(mips_arch_opt_value) String(10000) Value(27)
+
+EnumValue
+Enum(mips_arch_opt_value) String(10k) Value(27)
+
+EnumValue
+Enum(mips_arch_opt_value) String(r12000) Value(28) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(r12k) Value(28)
+
+EnumValue
+Enum(mips_arch_opt_value) String(12000) Value(28)
+
+EnumValue
+Enum(mips_arch_opt_value) String(12k) Value(28)
+
+EnumValue
+Enum(mips_arch_opt_value) String(r14000) Value(29) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(r14k) Value(29)
+
+EnumValue
+Enum(mips_arch_opt_value) String(14000) Value(29)
+
+EnumValue
+Enum(mips_arch_opt_value) String(14k) Value(29)
+
+EnumValue
+Enum(mips_arch_opt_value) String(r16000) Value(30) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(r16k) Value(30)
+
+EnumValue
+Enum(mips_arch_opt_value) String(16000) Value(30)
+
+EnumValue
+Enum(mips_arch_opt_value) String(16k) Value(30)
+
+EnumValue
+Enum(mips_arch_opt_value) String(vr5000) Value(31) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(vr5k) Value(31)
+
+EnumValue
+Enum(mips_arch_opt_value) String(5000) Value(31)
+
+EnumValue
+Enum(mips_arch_opt_value) String(5k) Value(31)
+
+EnumValue
+Enum(mips_arch_opt_value) String(r5000) Value(31)
+
+EnumValue
+Enum(mips_arch_opt_value) String(r5k) Value(31)
+
+EnumValue
+Enum(mips_arch_opt_value) String(vr5400) Value(32) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(5400) Value(32)
+
+EnumValue
+Enum(mips_arch_opt_value) String(r5400) Value(32)
+
+EnumValue
+Enum(mips_arch_opt_value) String(vr5500) Value(33) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(5500) Value(33)
+
+EnumValue
+Enum(mips_arch_opt_value) String(r5500) Value(33)
+
+EnumValue
+Enum(mips_arch_opt_value) String(rm7000) Value(34) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(rm7k) Value(34)
+
+EnumValue
+Enum(mips_arch_opt_value) String(7000) Value(34)
+
+EnumValue
+Enum(mips_arch_opt_value) String(7k) Value(34)
+
+EnumValue
+Enum(mips_arch_opt_value) String(r7000) Value(34)
+
+EnumValue
+Enum(mips_arch_opt_value) String(r7k) Value(34)
+
+EnumValue
+Enum(mips_arch_opt_value) String(rm9000) Value(35) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(rm9k) Value(35)
+
+EnumValue
+Enum(mips_arch_opt_value) String(9000) Value(35)
+
+EnumValue
+Enum(mips_arch_opt_value) String(9k) Value(35)
+
+EnumValue
+Enum(mips_arch_opt_value) String(r9000) Value(35)
+
+EnumValue
+Enum(mips_arch_opt_value) String(r9k) Value(35)
+
+EnumValue
+Enum(mips_arch_opt_value) String(4kc) Value(36) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(r4kc) Value(36)
+
+EnumValue
+Enum(mips_arch_opt_value) String(4km) Value(37) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(r4km) Value(37)
+
+EnumValue
+Enum(mips_arch_opt_value) String(4kp) Value(38) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(r4kp) Value(38)
+
+EnumValue
+Enum(mips_arch_opt_value) String(4ksc) Value(39) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(r4ksc) Value(39)
+
+EnumValue
+Enum(mips_arch_opt_value) String(m4k) Value(40) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(m14kc) Value(41) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(m14k) Value(42) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(m14ke) Value(43) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(m14kec) Value(44) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(4kec) Value(45) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(r4kec) Value(45)
+
+EnumValue
+Enum(mips_arch_opt_value) String(4kem) Value(46) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(r4kem) Value(46)
+
+EnumValue
+Enum(mips_arch_opt_value) String(4kep) Value(47) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(r4kep) Value(47)
+
+EnumValue
+Enum(mips_arch_opt_value) String(4ksd) Value(48) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(r4ksd) Value(48)
+
+EnumValue
+Enum(mips_arch_opt_value) String(24kc) Value(49) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(r24kc) Value(49)
+
+EnumValue
+Enum(mips_arch_opt_value) String(24kf2_1) Value(50) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(r24kf2_1) Value(50)
+
+EnumValue
+Enum(mips_arch_opt_value) String(24kf) Value(51) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(r24kf) Value(51)
+
+EnumValue
+Enum(mips_arch_opt_value) String(24kf1_1) Value(52) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(r24kf1_1) Value(52)
+
+EnumValue
+Enum(mips_arch_opt_value) String(24kfx) Value(53) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(r24kfx) Value(53)
+
+EnumValue
+Enum(mips_arch_opt_value) String(24kx) Value(54) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(r24kx) Value(54)
+
+EnumValue
+Enum(mips_arch_opt_value) String(24kec) Value(55) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(r24kec) Value(55)
+
+EnumValue
+Enum(mips_arch_opt_value) String(24kef2_1) Value(56) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(r24kef2_1) Value(56)
+
+EnumValue
+Enum(mips_arch_opt_value) String(24kef) Value(57) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(r24kef) Value(57)
+
+EnumValue
+Enum(mips_arch_opt_value) String(24kef1_1) Value(58) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(r24kef1_1) Value(58)
+
+EnumValue
+Enum(mips_arch_opt_value) String(24kefx) Value(59) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(r24kefx) Value(59)
+
+EnumValue
+Enum(mips_arch_opt_value) String(24kex) Value(60) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(r24kex) Value(60)
+
+EnumValue
+Enum(mips_arch_opt_value) String(34kc) Value(61) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(r34kc) Value(61)
+
+EnumValue
+Enum(mips_arch_opt_value) String(34kf2_1) Value(62) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(r34kf2_1) Value(62)
+
+EnumValue
+Enum(mips_arch_opt_value) String(34kf) Value(63) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(r34kf) Value(63)
+
+EnumValue
+Enum(mips_arch_opt_value) String(34kf1_1) Value(64) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(r34kf1_1) Value(64)
+
+EnumValue
+Enum(mips_arch_opt_value) String(34kfx) Value(65) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(r34kfx) Value(65)
+
+EnumValue
+Enum(mips_arch_opt_value) String(34kx) Value(66) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(r34kx) Value(66)
+
+EnumValue
+Enum(mips_arch_opt_value) String(34kn) Value(67) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(r34kn) Value(67)
+
+EnumValue
+Enum(mips_arch_opt_value) String(74kc) Value(68) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(r74kc) Value(68)
+
+EnumValue
+Enum(mips_arch_opt_value) String(74kf2_1) Value(69) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(r74kf2_1) Value(69)
+
+EnumValue
+Enum(mips_arch_opt_value) String(74kf) Value(70) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(r74kf) Value(70)
+
+EnumValue
+Enum(mips_arch_opt_value) String(74kf1_1) Value(71) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(r74kf1_1) Value(71)
+
+EnumValue
+Enum(mips_arch_opt_value) String(74kfx) Value(72) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(r74kfx) Value(72)
+
+EnumValue
+Enum(mips_arch_opt_value) String(74kx) Value(73) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(r74kx) Value(73)
+
+EnumValue
+Enum(mips_arch_opt_value) String(74kf3_2) Value(74) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(r74kf3_2) Value(74)
+
+EnumValue
+Enum(mips_arch_opt_value) String(1004kc) Value(75) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(r1004kc) Value(75)
+
+EnumValue
+Enum(mips_arch_opt_value) String(1004kf2_1) Value(76) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(r1004kf2_1) Value(76)
+
+EnumValue
+Enum(mips_arch_opt_value) String(1004kf) Value(77) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(r1004kf) Value(77)
+
+EnumValue
+Enum(mips_arch_opt_value) String(1004kf1_1) Value(78) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(r1004kf1_1) Value(78)
+
+EnumValue
+Enum(mips_arch_opt_value) String(5kc) Value(79) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(r5kc) Value(79)
+
+EnumValue
+Enum(mips_arch_opt_value) String(5kf) Value(80) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(r5kf) Value(80)
+
+EnumValue
+Enum(mips_arch_opt_value) String(20kc) Value(81) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(r20kc) Value(81)
+
+EnumValue
+Enum(mips_arch_opt_value) String(sb1) Value(82) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(sb1a) Value(83) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(sr71000) Value(84) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(sr71k) Value(84)
+
+EnumValue
+Enum(mips_arch_opt_value) String(xlr) Value(85) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(loongson3a) Value(86) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(octeon) Value(87) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(octeon+) Value(88) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(octeon2) Value(89) Canonical
+
+EnumValue
+Enum(mips_arch_opt_value) String(xlp) Value(90) Canonical
+
diff --git a/gcc-4.9/gcc/config/mips/mips.c b/gcc-4.9/gcc/config/mips/mips.c
new file mode 100644
index 000000000..143169bc1
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/mips.c
@@ -0,0 +1,19139 @@
+/* Subroutines used for MIPS code generation.
+ Copyright (C) 1989-2014 Free Software Foundation, Inc.
+ Contributed by A. Lichnewsky, lich@inria.inria.fr.
+ Changes by Michael Meissner, meissner@osf.org.
+ 64-bit r4000 support by Ian Lance Taylor, ian@cygnus.com, and
+ Brendan Eich, brendan@microunity.com.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "rtl.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "insn-config.h"
+#include "conditions.h"
+#include "insn-attr.h"
+#include "recog.h"
+#include "output.h"
+#include "tree.h"
+#include "varasm.h"
+#include "stringpool.h"
+#include "stor-layout.h"
+#include "calls.h"
+#include "function.h"
+#include "expr.h"
+#include "optabs.h"
+#include "libfuncs.h"
+#include "flags.h"
+#include "reload.h"
+#include "tm_p.h"
+#include "ggc.h"
+#include "gstab.h"
+#include "hash-table.h"
+#include "debug.h"
+#include "target.h"
+#include "target-def.h"
+#include "common/common-target.h"
+#include "langhooks.h"
+#include "sched-int.h"
+#include "pointer-set.h"
+#include "vec.h"
+#include "basic-block.h"
+#include "tree-ssa-alias.h"
+#include "internal-fn.h"
+#include "gimple-fold.h"
+#include "tree-eh.h"
+#include "gimple-expr.h"
+#include "is-a.h"
+#include "gimple.h"
+#include "gimplify.h"
+#include "bitmap.h"
+#include "diagnostic.h"
+#include "target-globals.h"
+#include "opts.h"
+#include "tree-pass.h"
+#include "context.h"
+
+/* True if X is an UNSPEC wrapper around a SYMBOL_REF or LABEL_REF. */
+#define UNSPEC_ADDRESS_P(X) \
+ (GET_CODE (X) == UNSPEC \
+ && XINT (X, 1) >= UNSPEC_ADDRESS_FIRST \
+ && XINT (X, 1) < UNSPEC_ADDRESS_FIRST + NUM_SYMBOL_TYPES)
+
+/* Extract the symbol or label from UNSPEC wrapper X. */
+#define UNSPEC_ADDRESS(X) \
+ XVECEXP (X, 0, 0)
+
+/* Extract the symbol type from UNSPEC wrapper X. */
+#define UNSPEC_ADDRESS_TYPE(X) \
+ ((enum mips_symbol_type) (XINT (X, 1) - UNSPEC_ADDRESS_FIRST))
+
+/* The maximum distance between the top of the stack frame and the
+ value $sp has when we save and restore registers.
+
+ The value for normal-mode code must be a SMALL_OPERAND and must
+ preserve the maximum stack alignment. We therefore use a value
+ of 0x7ff0 in this case.
+
+ microMIPS LWM and SWM support 12-bit offsets (from -0x800 to 0x7ff),
+ so we use a maximum of 0x7f0 for TARGET_MICROMIPS.
+
+ MIPS16e SAVE and RESTORE instructions can adjust the stack pointer by
+ up to 0x7f8 bytes and can usually save or restore all the registers
+ that we need to save or restore. (Note that we can only use these
+ instructions for o32, for which the stack alignment is 8 bytes.)
+
+ We use a maximum gap of 0x100 or 0x400 for MIPS16 code when SAVE and
+ RESTORE are not available. We can then use unextended instructions
+ to save and restore registers, and to allocate and deallocate the top
+ part of the frame. */
+#define MIPS_MAX_FIRST_STACK_STEP \
+ (!TARGET_COMPRESSION ? 0x7ff0 \
+ : TARGET_MICROMIPS || GENERATE_MIPS16E_SAVE_RESTORE ? 0x7f8 \
+ : TARGET_64BIT ? 0x100 : 0x400)
+
+/* True if INSN is a mips.md pattern or asm statement. */
+/* ??? This test exists through the compiler, perhaps it should be
+ moved to rtl.h. */
+#define USEFUL_INSN_P(INSN) \
+ (NONDEBUG_INSN_P (INSN) \
+ && GET_CODE (PATTERN (INSN)) != USE \
+ && GET_CODE (PATTERN (INSN)) != CLOBBER)
+
+/* If INSN is a delayed branch sequence, return the first instruction
+ in the sequence, otherwise return INSN itself. */
+#define SEQ_BEGIN(INSN) \
+ (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
+ ? XVECEXP (PATTERN (INSN), 0, 0) \
+ : (INSN))
+
+/* Likewise for the last instruction in a delayed branch sequence. */
+#define SEQ_END(INSN) \
+ (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
+ ? XVECEXP (PATTERN (INSN), 0, XVECLEN (PATTERN (INSN), 0) - 1) \
+ : (INSN))
+
+/* Execute the following loop body with SUBINSN set to each instruction
+ between SEQ_BEGIN (INSN) and SEQ_END (INSN) inclusive. */
+#define FOR_EACH_SUBINSN(SUBINSN, INSN) \
+ for ((SUBINSN) = SEQ_BEGIN (INSN); \
+ (SUBINSN) != NEXT_INSN (SEQ_END (INSN)); \
+ (SUBINSN) = NEXT_INSN (SUBINSN))
+
+/* True if bit BIT is set in VALUE.  BIT must be smaller than the bit
+ width of int, since the mask is formed with an int shift. */
+#define BITSET_P(VALUE, BIT) (((VALUE) & (1 << (BIT))) != 0)
+
+/* Return the opcode for a ptr_mode load of the form:
+
+ l[wd] DEST, OFFSET(BASE).
+
+ The major opcode is chosen by ptr_mode: 0x37 for the doubleword
+ load, 0x23 for the word load. */
+#define MIPS_LOAD_PTR(DEST, OFFSET, BASE) \
+ (((ptr_mode == DImode ? 0x37 : 0x23) << 26) \
+ | ((BASE) << 21) \
+ | ((DEST) << 16) \
+ | (OFFSET))
+
+/* Return the opcode to move register SRC into register DEST. */
+#define MIPS_MOVE(DEST, SRC) \
+ ((TARGET_64BIT ? 0x2d : 0x21) \
+ | ((DEST) << 11) \
+ | ((SRC) << 21))
+
+/* Return the opcode for:
+
+ lui DEST, VALUE. */
+#define MIPS_LUI(DEST, VALUE) \
+ ((0xf << 26) | ((DEST) << 16) | (VALUE))
+
+/* Return the opcode to jump to register DEST. */
+#define MIPS_JR(DEST) \
+ (((DEST) << 21) | 0x8)
+
+/* Return the opcode for:
+
+ bal . + (1 + OFFSET) * 4. */
+#define MIPS_BAL(OFFSET) \
+ ((0x1 << 26) | (0x11 << 16) | (OFFSET))
+
+/* Return the usual opcode for a nop. */
+#define MIPS_NOP 0
+
+/* Classifies an address.
+
+ ADDRESS_REG
+ A natural register + offset address. The register satisfies
+ mips_valid_base_register_p and the offset is a const_arith_operand.
+
+ ADDRESS_LO_SUM
+ A LO_SUM rtx. The first operand is a valid base register and
+ the second operand is a symbolic address.
+
+ ADDRESS_CONST_INT
+ A signed 16-bit constant address.
+
+ ADDRESS_SYMBOLIC:
+ A constant symbolic address.
+
+ See also struct mips_address_info below, which records which of
+ these cases applies together with the decomposed operands. */
+enum mips_address_type {
+ ADDRESS_REG,
+ ADDRESS_LO_SUM,
+ ADDRESS_CONST_INT,
+ ADDRESS_SYMBOLIC
+};
+
+/* Macros to create an enumeration identifier for a function prototype.
+ MIPS_FTYPE_NAMEn pastes a return type and n argument types into a
+ single identifier, e.g. MIPS_FTYPE_NAME2 (SI, SI, SI) yields
+ MIPS_SI_FTYPE_SI_SI. */
+#define MIPS_FTYPE_NAME1(A, B) MIPS_##A##_FTYPE_##B
+#define MIPS_FTYPE_NAME2(A, B, C) MIPS_##A##_FTYPE_##B##_##C
+#define MIPS_FTYPE_NAME3(A, B, C, D) MIPS_##A##_FTYPE_##B##_##C##_##D
+#define MIPS_FTYPE_NAME4(A, B, C, D, E) MIPS_##A##_FTYPE_##B##_##C##_##D##_##E
+
+/* Classifies the prototype of a built-in function.  One enumerator is
+ generated per DEF_MIPS_FTYPE entry in mips-ftypes.def, using the
+ MIPS_FTYPE_NAMEn macros above to build each name. */
+enum mips_function_type {
+#define DEF_MIPS_FTYPE(NARGS, LIST) MIPS_FTYPE_NAME##NARGS LIST,
+#include "config/mips/mips-ftypes.def"
+#undef DEF_MIPS_FTYPE
+ MIPS_MAX_FTYPE_MAX
+};
+
+/* Specifies how a built-in function should be converted into rtl. */
+enum mips_builtin_type {
+ /* The function corresponds directly to an .md pattern. The return
+ value is mapped to operand 0 and the arguments are mapped to
+ operands 1 and above. */
+ MIPS_BUILTIN_DIRECT,
+
+ /* The function corresponds directly to an .md pattern. There is no return
+ value and the arguments are mapped to operands 0 and above. */
+ MIPS_BUILTIN_DIRECT_NO_TARGET,
+
+ /* The function corresponds to a comparison instruction followed by
+ a mips_cond_move_tf_ps pattern. The first two arguments are the
+ values to compare and the second two arguments are the vector
+ operands for the movt.ps or movf.ps instruction (in assembly order). */
+ MIPS_BUILTIN_MOVF,
+ MIPS_BUILTIN_MOVT,
+
+ /* The function corresponds to a V2SF comparison instruction. Operand 0
+ of this instruction is the result of the comparison, which has mode
+ CCV2 or CCV4. The function arguments are mapped to operands 1 and
+ above. The function's return value is an SImode boolean that is
+ true under the following conditions:
+
+ MIPS_BUILTIN_CMP_ANY: one of the registers is true
+ MIPS_BUILTIN_CMP_ALL: all of the registers are true
+ MIPS_BUILTIN_CMP_LOWER: the first register is true
+ MIPS_BUILTIN_CMP_UPPER: the second register is true. */
+ MIPS_BUILTIN_CMP_ANY,
+ MIPS_BUILTIN_CMP_ALL,
+ MIPS_BUILTIN_CMP_UPPER,
+ MIPS_BUILTIN_CMP_LOWER,
+
+ /* As above, but the instruction only sets a single $fcc register. */
+ MIPS_BUILTIN_CMP_SINGLE,
+
+ /* For generating bposge32 branch instructions in MIPS32 DSP ASE. */
+ MIPS_BUILTIN_BPOSGE32
+};
+
+/* Invoke MACRO (COND) for each C.cond.fmt condition.  The order below
+ fixes the numeric value of each MIPS_FP_COND_<X> enumerator and the
+ index of its name in mips_fp_conditions, so the three uses of this
+ list must stay in sync by construction. */
+#define MIPS_FP_CONDITIONS(MACRO) \
+ MACRO (f), \
+ MACRO (un), \
+ MACRO (eq), \
+ MACRO (ueq), \
+ MACRO (olt), \
+ MACRO (ult), \
+ MACRO (ole), \
+ MACRO (ule), \
+ MACRO (sf), \
+ MACRO (ngle), \
+ MACRO (seq), \
+ MACRO (ngl), \
+ MACRO (lt), \
+ MACRO (nge), \
+ MACRO (le), \
+ MACRO (ngt)
+
+/* Enumerates the codes above as MIPS_FP_COND_<X>. */
+#define DECLARE_MIPS_COND(X) MIPS_FP_COND_ ## X
+enum mips_fp_condition {
+ MIPS_FP_CONDITIONS (DECLARE_MIPS_COND)
+};
+#undef DECLARE_MIPS_COND
+
+/* Index X provides the string representation of MIPS_FP_COND_<X>. */
+#define STRINGIFY(X) #X
+static const char *const mips_fp_conditions[] = {
+ MIPS_FP_CONDITIONS (STRINGIFY)
+};
+#undef STRINGIFY
+
+/* A class used to control a comdat-style stub that we output in each
+ translation unit that needs it.  (This file is compiled as C++, as
+ the use of "class" and "virtual" here requires.)  Concrete subclasses
+ supply the stub's symbol name and its assembly body. */
+class mips_one_only_stub {
+public:
+ virtual ~mips_one_only_stub () {}
+
+ /* Return the name of the stub. */
+ virtual const char *get_name () = 0;
+
+ /* Output the body of the function to asm_out_file. */
+ virtual void output_body () = 0;
+};
+
+/* Tuning information that is automatically derived from other sources
+ (such as the scheduler).  Cached lazily: initialized_p records whether
+ the entry is valid for the arch/tune/mips16_p combination stored in
+ the first three fields. */
+static struct {
+ /* The architecture and tuning settings that this structure describes. */
+ enum processor arch;
+ enum processor tune;
+
+ /* True if this structure describes MIPS16 settings. */
+ bool mips16_p;
+
+ /* True if the structure has been initialized. */
+ bool initialized_p;
+
+ /* True if "MULT $0, $0" is preferable to "MTLO $0; MTHI $0"
+ when optimizing for speed. */
+ bool fast_mult_zero_zero_p;
+} mips_tuning_info;
+
+/* Information about a function's frame layout.  All sizes and offsets
+ are in bytes.  GTY-marked because it is embedded in the GC-managed
+ machine_function below. */
+struct GTY(()) mips_frame_info {
+ /* The size of the frame in bytes. */
+ HOST_WIDE_INT total_size;
+
+ /* The number of bytes allocated to variables. */
+ HOST_WIDE_INT var_size;
+
+ /* The number of bytes allocated to outgoing function arguments. */
+ HOST_WIDE_INT args_size;
+
+ /* The number of bytes allocated to the .cprestore slot, or 0 if there
+ is no such slot. */
+ HOST_WIDE_INT cprestore_size;
+
+ /* Bit X is set if the function saves or restores GPR X. */
+ unsigned int mask;
+
+ /* Likewise FPR X. */
+ unsigned int fmask;
+
+ /* Likewise doubleword accumulator X ($acX). */
+ unsigned int acc_mask;
+
+ /* The number of GPRs, FPRs, doubleword accumulators and COP0
+ registers saved. */
+ unsigned int num_gp;
+ unsigned int num_fp;
+ unsigned int num_acc;
+ unsigned int num_cop0_regs;
+
+ /* The offset of the topmost GPR, FPR, accumulator and COP0-register
+ save slots from the top of the frame, or zero if no such slots are
+ needed. */
+ HOST_WIDE_INT gp_save_offset;
+ HOST_WIDE_INT fp_save_offset;
+ HOST_WIDE_INT acc_save_offset;
+ HOST_WIDE_INT cop0_save_offset;
+
+ /* Likewise, but giving offsets from the bottom of the frame. */
+ HOST_WIDE_INT gp_sp_offset;
+ HOST_WIDE_INT fp_sp_offset;
+ HOST_WIDE_INT acc_sp_offset;
+ HOST_WIDE_INT cop0_sp_offset;
+
+ /* Similar, but the value passed to _mcount. */
+ HOST_WIDE_INT ra_fp_offset;
+
+ /* The offset of arg_pointer_rtx from the bottom of the frame. */
+ HOST_WIDE_INT arg_pointer_offset;
+
+ /* The offset of hard_frame_pointer_rtx from the bottom of the frame. */
+ HOST_WIDE_INT hard_frame_pointer_offset;
+};
+
+/* MIPS-specific per-function state, garbage-collected along with the
+ function it describes. */
+struct GTY(()) machine_function {
+ /* The next floating-point condition-code register to allocate
+ for ISA_HAS_8CC targets, relative to ST_REG_FIRST. */
+ unsigned int next_fcc;
+
+ /* The register returned by mips16_gp_pseudo_reg; see there for details. */
+ rtx mips16_gp_pseudo_rtx;
+
+ /* The number of extra stack bytes taken up by register varargs.
+ This area is allocated by the callee at the very top of the frame. */
+ int varargs_size;
+
+ /* The current frame information, calculated by mips_compute_frame_info. */
+ struct mips_frame_info frame;
+
+ /* The register to use as the function's global pointer, or INVALID_REGNUM
+ if the function doesn't need one. */
+ unsigned int global_pointer;
+
+ /* How many instructions it takes to load a label into $AT, or 0 if
+ this property hasn't yet been calculated. */
+ unsigned int load_label_num_insns;
+
+ /* True if mips_adjust_insn_length should ignore an instruction's
+ hazard attribute. */
+ bool ignore_hazard_length_p;
+
+ /* True if the whole function is suitable for .set noreorder and
+ .set nomacro. */
+ bool all_noreorder_p;
+
+ /* True if the function has "inflexible" and "flexible" references
+ to the global pointer. See mips_cfun_has_inflexible_gp_ref_p
+ and mips_cfun_has_flexible_gp_ref_p for details. */
+ bool has_inflexible_gp_insn_p;
+ bool has_flexible_gp_insn_p;
+
+ /* True if the function's prologue must load the global pointer
+ value into pic_offset_table_rtx and store the same value in
+ the function's cprestore slot (if any). Even if this value
+ is currently false, we may decide to set it to true later;
+ see mips_must_initialize_gp_p () for details. */
+ bool must_initialize_gp_p;
+
+ /* True if the current function must restore $gp after any potential
+ clobber. This value is only meaningful during the first post-epilogue
+ split_insns pass; see mips_must_initialize_gp_p () for details. */
+ bool must_restore_gp_when_clobbered_p;
+
+ /* True if this is an interrupt handler. */
+ bool interrupt_handler_p;
+
+ /* True if this is an interrupt handler that uses shadow registers. */
+ bool use_shadow_register_set_p;
+
+ /* True if this is an interrupt handler that should keep interrupts
+ masked. */
+ bool keep_interrupts_masked_p;
+
+ /* True if this is an interrupt handler that should use DERET
+ instead of ERET. */
+ bool use_debug_exception_return_p;
+};
+
+/* Information about a single argument.  "Words" below are register-sized
+ units; an argument may be split between registers and the stack, in
+ which case both reg_words and stack_words are nonzero. */
+struct mips_arg_info {
+ /* True if the argument is passed in a floating-point register, or
+ would have been if we hadn't run out of registers. */
+ bool fpr_p;
+
+ /* The number of words passed in registers, rounded up. */
+ unsigned int reg_words;
+
+ /* For EABI, the offset of the first register from GP_ARG_FIRST or
+ FP_ARG_FIRST. For other ABIs, the offset of the first register from
+ the start of the ABI's argument structure (see the CUMULATIVE_ARGS
+ comment for details).
+
+ The value is MAX_ARGS_IN_REGISTERS if the argument is passed entirely
+ on the stack. */
+ unsigned int reg_offset;
+
+ /* The number of words that must be passed on the stack, rounded up. */
+ unsigned int stack_words;
+
+ /* The offset from the start of the stack overflow area of the argument's
+ first stack word. Only meaningful when STACK_WORDS is nonzero. */
+ unsigned int stack_offset;
+};
+
+/* Information about an address described by mips_address_type.
+ Which fields are meaningful depends on TYPE:
+
+ ADDRESS_CONST_INT
+ No fields are used.
+
+ ADDRESS_REG
+ REG is the base register and OFFSET is the constant offset.
+
+ ADDRESS_LO_SUM
+ REG and OFFSET are the operands to the LO_SUM and SYMBOL_TYPE
+ is the type of symbol it references.
+
+ ADDRESS_SYMBOLIC
+ SYMBOL_TYPE is the type of symbol that the address references. */
+struct mips_address_info {
+ enum mips_address_type type;
+ rtx reg;
+ rtx offset;
+ enum mips_symbol_type symbol_type;
+};
+
+/* One stage in a constant building sequence. These sequences have
+ the form:
+
+ A = VALUE[0]
+ A = A CODE[1] VALUE[1]
+ A = A CODE[2] VALUE[2]
+ ...
+
+ where A is an accumulator, each CODE[i] is a binary rtl operation
+ and each VALUE[i] is a constant integer. CODE[0] is undefined. */
+struct mips_integer_op {
+ enum rtx_code code;
+ unsigned HOST_WIDE_INT value;
+};
+
+/* The largest number of operations needed to load an integer constant.
+ The worst accepted case for 64-bit constants is LUI,ORI,SLL,ORI,SLL,ORI
+ (six operations).  When the lowest bit is clear, we can try, but reject,
+ a sequence with an extra SLL at the end -- hence a limit of 7. */
+#define MIPS_MAX_INTEGER_OPS 7
+
+/* Information about a MIPS16e SAVE or RESTORE instruction. */
+struct mips16e_save_restore_info {
+ /* The number of argument registers saved by a SAVE instruction.
+ 0 for RESTORE instructions. */
+ unsigned int nargs;
+
+ /* Bit X is set if the instruction saves or restores GPR X. */
+ unsigned int mask;
+
+ /* The total number of bytes to allocate (SAVE) or deallocate
+ (RESTORE). */
+ HOST_WIDE_INT size;
+};
+
+/* Costs of various operations on the different architectures.  One
+ instance exists per processor; the fields are relative cost values
+ consumed by the rtx-cost hooks, except memory_latency, which is a
+ latency figure. */
+struct mips_rtx_cost_data
+{
+ unsigned short fp_add;
+ unsigned short fp_mult_sf;
+ unsigned short fp_mult_df;
+ unsigned short fp_div_sf;
+ unsigned short fp_div_df;
+ unsigned short int_mult_si;
+ unsigned short int_mult_di;
+ unsigned short int_div_si;
+ unsigned short int_div_di;
+ unsigned short branch_cost;
+ unsigned short memory_latency;
+};
+
+/* Global variables for machine-dependent things. */
+
+/* The -G setting, or the configuration's default small-data limit if
+   no -G option is given. */
+static unsigned int mips_small_data_threshold;
+
+/* The number of file directives written by mips_output_filename. */
+int num_source_filenames;
+
+/* The name that appeared in the last .file directive written by
+   mips_output_filename, or "" if mips_output_filename hasn't
+   written anything yet. */
+const char *current_function_file = "";
+
+/* Arrays that map GCC register numbers to debugger register numbers. */
+int mips_dbx_regno[FIRST_PSEUDO_REGISTER];
+int mips_dwarf_regno[FIRST_PSEUDO_REGISTER];
+
+/* Information about the current function's epilogue, used only while
+   expanding it. */
+static struct {
+  /* A list of queued REG_CFA_RESTORE notes. */
+  rtx cfa_restores;
+
+  /* The CFA is currently defined as CFA_REG + CFA_OFFSET. */
+  rtx cfa_reg;
+  HOST_WIDE_INT cfa_offset;
+
+  /* The offset of the CFA from the stack pointer while restoring
+     registers. */
+  HOST_WIDE_INT cfa_restore_sp_offset;
+} mips_epilogue;
+
+/* The nesting depth of the PRINT_OPERAND '%(', '%<' and '%[' constructs.
+   NOTE(review): each switch pairs an assembler ".set" flag name with an
+   initial nesting depth of 0 -- confirm against the definition of
+   struct mips_asm_switch. */
+struct mips_asm_switch mips_noreorder = { "reorder", 0 };
+struct mips_asm_switch mips_nomacro = { "macro", 0 };
+struct mips_asm_switch mips_noat = { "at", 0 };
+
+/* True if we're writing out a branch-likely instruction rather than a
+   normal branch. */
+static bool mips_branch_likely;
+
+/* The current instruction-set architecture. */
+enum processor mips_arch;
+const struct mips_cpu_info *mips_arch_info;
+
+/* The processor that we should tune the code for. */
+enum processor mips_tune;
+const struct mips_cpu_info *mips_tune_info;
+
+/* The ISA level associated with mips_arch. */
+int mips_isa;
+
+/* The architecture selected by -mipsN, or null if -mipsN wasn't used. */
+static const struct mips_cpu_info *mips_isa_option_info;
+
+/* Which cost information to use. */
+static const struct mips_rtx_cost_data *mips_cost;
+
+/* The ambient target flags, excluding MASK_MIPS16. */
+static int mips_base_target_flags;
+
+/* The default compression mode (a mask of MASK_MIPS16/MASK_MICROMIPS
+   bits -- see mips_get_compress_mode). */
+unsigned int mips_base_compression_flags;
+
+/* The ambient values of other global variables, saved so they can be
+   restored when switching modes. */
+static int mips_base_schedule_insns; /* flag_schedule_insns */
+static int mips_base_reorder_blocks_and_partition; /* flag_reorder... */
+static int mips_base_move_loop_invariants; /* flag_move_loop_invariants */
+static int mips_base_align_loops; /* align_loops */
+static int mips_base_align_jumps; /* align_jumps */
+static int mips_base_align_functions; /* align_functions */
+
+/* Index [M][R] is true if register R is allowed to hold a value of mode M. */
+bool mips_hard_regno_mode_ok[(int) MAX_MACHINE_MODE][FIRST_PSEUDO_REGISTER];
+
+/* Index C is true if character C is a valid PRINT_OPERAND punctation
+   character. */
+static bool mips_print_operand_punct[256];
+
+/* Nonzero until mips_output_filename has written its first directive.  */
+static GTY (()) int mips_output_filename_first_time = 1;
+
+/* mips_split_p[X] is true if symbols of type X can be split by
+   mips_split_symbol. */
+bool mips_split_p[NUM_SYMBOL_TYPES];
+
+/* mips_split_hi_p[X] is true if the high parts of symbols of type X
+   can be split by mips_split_symbol. */
+bool mips_split_hi_p[NUM_SYMBOL_TYPES];
+
+/* mips_use_pcrel_pool_p[X] is true if symbols of type X should be
+   forced into a PC-relative constant pool. */
+bool mips_use_pcrel_pool_p[NUM_SYMBOL_TYPES];
+
+/* mips_lo_relocs[X] is the relocation to use when a symbol of type X
+   appears in a LO_SUM. It can be null if such LO_SUMs aren't valid or
+   if they are matched by a special .md file pattern. */
+const char *mips_lo_relocs[NUM_SYMBOL_TYPES];
+
+/* Likewise for HIGHs. */
+const char *mips_hi_relocs[NUM_SYMBOL_TYPES];
+
+/* Target state for MIPS16. */
+struct target_globals *mips16_globals;
+
+/* Cached value of can_issue_more. This is cached in mips_variable_issue hook
+   and returned from mips_sched_reorder2. */
+static int cached_can_issue_more;
+
+/* The stubs for various MIPS16 support functions, if used. */
+static mips_one_only_stub *mips16_rdhwr_stub;
+static mips_one_only_stub *mips16_get_fcsr_stub;
+static mips_one_only_stub *mips16_set_fcsr_stub;
+
+/* Index R is the smallest register class that contains register R.
+   Layout (by hard register number): 0-31 general registers, 32-63
+   floating-point registers, 64-65 HI/LO, 67-74 condition-code (ST)
+   registers, 77-78 frame-related registers, 80-111 COP0, 112-143 COP2,
+   144-175 COP3, 176-181 DSP accumulators, 182-187 remaining registers. */
+const enum reg_class mips_regno_to_class[FIRST_PSEUDO_REGISTER] = {
+  /* 0-31: general-purpose registers.  */
+  LEA_REGS, LEA_REGS, M16_REGS, V1_REG,
+  M16_REGS, M16_REGS, M16_REGS, M16_REGS,
+  LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
+  LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
+  M16_REGS, M16_REGS, LEA_REGS, LEA_REGS,
+  LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
+  T_REG, PIC_FN_ADDR_REG, LEA_REGS, LEA_REGS,
+  LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
+  /* 32-63: floating-point registers.  */
+  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
+  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
+  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
+  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
+  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
+  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
+  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
+  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
+  /* 64-79: MD (HI/LO), condition-code and frame registers.  */
+  MD0_REG, MD1_REG, NO_REGS, ST_REGS,
+  ST_REGS, ST_REGS, ST_REGS, ST_REGS,
+  ST_REGS, ST_REGS, ST_REGS, NO_REGS,
+  NO_REGS, FRAME_REGS, FRAME_REGS, NO_REGS,
+  /* 80-111: coprocessor 0 registers.  */
+  COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
+  COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
+  COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
+  COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
+  COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
+  COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
+  COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
+  COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
+  /* 112-143: coprocessor 2 registers.  */
+  COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
+  COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
+  COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
+  COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
+  COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
+  COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
+  COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
+  COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
+  /* 144-175: coprocessor 3 registers.  */
+  COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
+  COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
+  COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
+  COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
+  COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
+  COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
+  COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
+  COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
+  /* 176-187: DSP accumulators and remaining registers.  */
+  DSP_ACC_REGS, DSP_ACC_REGS, DSP_ACC_REGS, DSP_ACC_REGS,
+  DSP_ACC_REGS, DSP_ACC_REGS, ALL_REGS, ALL_REGS,
+  ALL_REGS, ALL_REGS, ALL_REGS, ALL_REGS
+};
+
+/* The value of TARGET_ATTRIBUTE_TABLE.  The table must end with the
+   all-NULL sentinel entry. */
+static const struct attribute_spec mips_attribute_table[] = {
+  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
+       om_diagnostic } */
+  { "long_call", 0, 0, false, true, true, NULL, false },
+  { "far", 0, 0, false, true, true, NULL, false },
+  { "near", 0, 0, false, true, true, NULL, false },
+  /* We would really like to treat "mips16" and "nomips16" as type
+     attributes, but GCC doesn't provide the hooks we need to support
+     the right conversion rules. As declaration attributes, they affect
+     code generation but don't carry other semantics. */
+  { "mips16", 0, 0, true, false, false, NULL, false },
+  { "nomips16", 0, 0, true, false, false, NULL, false },
+  { "micromips", 0, 0, true, false, false, NULL, false },
+  { "nomicromips", 0, 0, true, false, false, NULL, false },
+  { "nocompression", 0, 0, true, false, false, NULL, false },
+  /* Allow functions to be specified as interrupt handlers */
+  { "interrupt", 0, 0, false, true, true, NULL, false },
+  { "use_shadow_register_set", 0, 0, false, true, true, NULL, false },
+  { "keep_interrupts_masked", 0, 0, false, true, true, NULL, false },
+  { "use_debug_exception_return", 0, 0, false, true, true, NULL, false },
+  { NULL, 0, 0, false, false, false, NULL, false }
+};
+
+/* A table describing all the processors GCC knows about; see
+   mips-cpus.def for details.  Each MIPS_CPU invocation in that file
+   expands to one initializer here. */
+static const struct mips_cpu_info mips_cpu_info_table[] = {
+#define MIPS_CPU(NAME, CPU, ISA, FLAGS) \
+  { NAME, CPU, ISA, FLAGS },
+#include "mips-cpus.def"
+#undef MIPS_CPU
+};
+
+/* Default costs. If these are used for a processor we should look
+   up the actual costs.  Expands to a full mips_rtx_cost_data
+   initializer. */
+#define DEFAULT_COSTS COSTS_N_INSNS (6), /* fp_add */ \
+ COSTS_N_INSNS (7), /* fp_mult_sf */ \
+ COSTS_N_INSNS (8), /* fp_mult_df */ \
+ COSTS_N_INSNS (23), /* fp_div_sf */ \
+ COSTS_N_INSNS (36), /* fp_div_df */ \
+ COSTS_N_INSNS (10), /* int_mult_si */ \
+ COSTS_N_INSNS (10), /* int_mult_di */ \
+ COSTS_N_INSNS (69), /* int_div_si */ \
+ COSTS_N_INSNS (69), /* int_div_di */ \
+ 2, /* branch_cost */ \
+ 4 /* memory_latency */
+
+/* Floating-point costs for processors without an FPU. Just assume that
+   all floating-point libcalls are very expensive.  Unlike DEFAULT_COSTS
+   this covers only the five fp_* fields; the integer, branch and memory
+   fields must follow at the point of use. */
+#define SOFT_FP_COSTS COSTS_N_INSNS (256), /* fp_add */ \
+ COSTS_N_INSNS (256), /* fp_mult_sf */ \
+ COSTS_N_INSNS (256), /* fp_mult_df */ \
+ COSTS_N_INSNS (256), /* fp_div_sf */ \
+ COSTS_N_INSNS (256) /* fp_div_df */
+
+/* Costs to use when optimizing for size.  All operations are given a
+   uniform one-instruction cost so that rtx costs degenerate to a pure
+   instruction count. */
+static const struct mips_rtx_cost_data mips_rtx_cost_optimize_size = {
+  COSTS_N_INSNS (1), /* fp_add */
+  COSTS_N_INSNS (1), /* fp_mult_sf */
+  COSTS_N_INSNS (1), /* fp_mult_df */
+  COSTS_N_INSNS (1), /* fp_div_sf */
+  COSTS_N_INSNS (1), /* fp_div_df */
+  COSTS_N_INSNS (1), /* int_mult_si */
+  COSTS_N_INSNS (1), /* int_mult_di */
+  COSTS_N_INSNS (1), /* int_div_si */
+  COSTS_N_INSNS (1), /* int_div_di */
+  2, /* branch_cost */
+  4 /* memory_latency */
+};
+
+/* Costs to use when optimizing for speed, indexed by processor.
+   NOTE(review): entries must be kept in the same order as the
+   PROCESSOR_* values generated from mips-cpus.def -- nothing beyond
+   the NUM_PROCESSOR_VALUES bound checks this correspondence. */
+static const struct mips_rtx_cost_data
+  mips_rtx_cost_data[NUM_PROCESSOR_VALUES] = {
+  { /* R3000 */
+    COSTS_N_INSNS (2), /* fp_add */
+    COSTS_N_INSNS (4), /* fp_mult_sf */
+    COSTS_N_INSNS (5), /* fp_mult_df */
+    COSTS_N_INSNS (12), /* fp_div_sf */
+    COSTS_N_INSNS (19), /* fp_div_df */
+    COSTS_N_INSNS (12), /* int_mult_si */
+    COSTS_N_INSNS (12), /* int_mult_di */
+    COSTS_N_INSNS (35), /* int_div_si */
+    COSTS_N_INSNS (35), /* int_div_di */
+    1, /* branch_cost */
+    4 /* memory_latency */
+  },
+  { /* 4KC */
+    SOFT_FP_COSTS,
+    COSTS_N_INSNS (6), /* int_mult_si */
+    COSTS_N_INSNS (6), /* int_mult_di */
+    COSTS_N_INSNS (36), /* int_div_si */
+    COSTS_N_INSNS (36), /* int_div_di */
+    1, /* branch_cost */
+    4 /* memory_latency */
+  },
+  { /* 4KP */
+    SOFT_FP_COSTS,
+    COSTS_N_INSNS (36), /* int_mult_si */
+    COSTS_N_INSNS (36), /* int_mult_di */
+    COSTS_N_INSNS (37), /* int_div_si */
+    COSTS_N_INSNS (37), /* int_div_di */
+    1, /* branch_cost */
+    4 /* memory_latency */
+  },
+  { /* 5KC */
+    SOFT_FP_COSTS,
+    COSTS_N_INSNS (4), /* int_mult_si */
+    COSTS_N_INSNS (11), /* int_mult_di */
+    COSTS_N_INSNS (36), /* int_div_si */
+    COSTS_N_INSNS (68), /* int_div_di */
+    1, /* branch_cost */
+    4 /* memory_latency */
+  },
+  { /* 5KF */
+    COSTS_N_INSNS (4), /* fp_add */
+    COSTS_N_INSNS (4), /* fp_mult_sf */
+    COSTS_N_INSNS (5), /* fp_mult_df */
+    COSTS_N_INSNS (17), /* fp_div_sf */
+    COSTS_N_INSNS (32), /* fp_div_df */
+    COSTS_N_INSNS (4), /* int_mult_si */
+    COSTS_N_INSNS (11), /* int_mult_di */
+    COSTS_N_INSNS (36), /* int_div_si */
+    COSTS_N_INSNS (68), /* int_div_di */
+    1, /* branch_cost */
+    4 /* memory_latency */
+  },
+  { /* 20KC */
+    COSTS_N_INSNS (4), /* fp_add */
+    COSTS_N_INSNS (4), /* fp_mult_sf */
+    COSTS_N_INSNS (5), /* fp_mult_df */
+    COSTS_N_INSNS (17), /* fp_div_sf */
+    COSTS_N_INSNS (32), /* fp_div_df */
+    COSTS_N_INSNS (4), /* int_mult_si */
+    COSTS_N_INSNS (7), /* int_mult_di */
+    COSTS_N_INSNS (42), /* int_div_si */
+    COSTS_N_INSNS (72), /* int_div_di */
+    1, /* branch_cost */
+    4 /* memory_latency */
+  },
+  { /* 24KC */
+    SOFT_FP_COSTS,
+    COSTS_N_INSNS (5), /* int_mult_si */
+    COSTS_N_INSNS (5), /* int_mult_di */
+    COSTS_N_INSNS (41), /* int_div_si */
+    COSTS_N_INSNS (41), /* int_div_di */
+    1, /* branch_cost */
+    4 /* memory_latency */
+  },
+  { /* 24KF2_1 */
+    COSTS_N_INSNS (8), /* fp_add */
+    COSTS_N_INSNS (8), /* fp_mult_sf */
+    COSTS_N_INSNS (10), /* fp_mult_df */
+    COSTS_N_INSNS (34), /* fp_div_sf */
+    COSTS_N_INSNS (64), /* fp_div_df */
+    COSTS_N_INSNS (5), /* int_mult_si */
+    COSTS_N_INSNS (5), /* int_mult_di */
+    COSTS_N_INSNS (41), /* int_div_si */
+    COSTS_N_INSNS (41), /* int_div_di */
+    1, /* branch_cost */
+    4 /* memory_latency */
+  },
+  { /* 24KF1_1 */
+    COSTS_N_INSNS (4), /* fp_add */
+    COSTS_N_INSNS (4), /* fp_mult_sf */
+    COSTS_N_INSNS (5), /* fp_mult_df */
+    COSTS_N_INSNS (17), /* fp_div_sf */
+    COSTS_N_INSNS (32), /* fp_div_df */
+    COSTS_N_INSNS (5), /* int_mult_si */
+    COSTS_N_INSNS (5), /* int_mult_di */
+    COSTS_N_INSNS (41), /* int_div_si */
+    COSTS_N_INSNS (41), /* int_div_di */
+    1, /* branch_cost */
+    4 /* memory_latency */
+  },
+  { /* 74KC */
+    SOFT_FP_COSTS,
+    COSTS_N_INSNS (5), /* int_mult_si */
+    COSTS_N_INSNS (5), /* int_mult_di */
+    COSTS_N_INSNS (41), /* int_div_si */
+    COSTS_N_INSNS (41), /* int_div_di */
+    1, /* branch_cost */
+    4 /* memory_latency */
+  },
+  { /* 74KF2_1 */
+    COSTS_N_INSNS (8), /* fp_add */
+    COSTS_N_INSNS (8), /* fp_mult_sf */
+    COSTS_N_INSNS (10), /* fp_mult_df */
+    COSTS_N_INSNS (34), /* fp_div_sf */
+    COSTS_N_INSNS (64), /* fp_div_df */
+    COSTS_N_INSNS (5), /* int_mult_si */
+    COSTS_N_INSNS (5), /* int_mult_di */
+    COSTS_N_INSNS (41), /* int_div_si */
+    COSTS_N_INSNS (41), /* int_div_di */
+    1, /* branch_cost */
+    4 /* memory_latency */
+  },
+  { /* 74KF1_1 */
+    COSTS_N_INSNS (4), /* fp_add */
+    COSTS_N_INSNS (4), /* fp_mult_sf */
+    COSTS_N_INSNS (5), /* fp_mult_df */
+    COSTS_N_INSNS (17), /* fp_div_sf */
+    COSTS_N_INSNS (32), /* fp_div_df */
+    COSTS_N_INSNS (5), /* int_mult_si */
+    COSTS_N_INSNS (5), /* int_mult_di */
+    COSTS_N_INSNS (41), /* int_div_si */
+    COSTS_N_INSNS (41), /* int_div_di */
+    1, /* branch_cost */
+    4 /* memory_latency */
+  },
+  { /* 74KF3_2 */
+    COSTS_N_INSNS (6), /* fp_add */
+    COSTS_N_INSNS (6), /* fp_mult_sf */
+    COSTS_N_INSNS (7), /* fp_mult_df */
+    COSTS_N_INSNS (25), /* fp_div_sf */
+    COSTS_N_INSNS (48), /* fp_div_df */
+    COSTS_N_INSNS (5), /* int_mult_si */
+    COSTS_N_INSNS (5), /* int_mult_di */
+    COSTS_N_INSNS (41), /* int_div_si */
+    COSTS_N_INSNS (41), /* int_div_di */
+    1, /* branch_cost */
+    4 /* memory_latency */
+  },
+  { /* Loongson-2E */
+    DEFAULT_COSTS
+  },
+  { /* Loongson-2F */
+    DEFAULT_COSTS
+  },
+  { /* Loongson-3A */
+    DEFAULT_COSTS
+  },
+  { /* M4k */
+    DEFAULT_COSTS
+  },
+  /* Octeon */
+  {
+    SOFT_FP_COSTS,
+    COSTS_N_INSNS (5), /* int_mult_si */
+    COSTS_N_INSNS (5), /* int_mult_di */
+    COSTS_N_INSNS (72), /* int_div_si */
+    COSTS_N_INSNS (72), /* int_div_di */
+    1, /* branch_cost */
+    4 /* memory_latency */
+  },
+  /* Octeon II */
+  {
+    SOFT_FP_COSTS,
+    COSTS_N_INSNS (6), /* int_mult_si */
+    COSTS_N_INSNS (6), /* int_mult_di */
+    COSTS_N_INSNS (18), /* int_div_si */
+    COSTS_N_INSNS (35), /* int_div_di */
+    4, /* branch_cost */
+    4 /* memory_latency */
+  },
+  { /* R3900 */
+    COSTS_N_INSNS (2), /* fp_add */
+    COSTS_N_INSNS (4), /* fp_mult_sf */
+    COSTS_N_INSNS (5), /* fp_mult_df */
+    COSTS_N_INSNS (12), /* fp_div_sf */
+    COSTS_N_INSNS (19), /* fp_div_df */
+    COSTS_N_INSNS (2), /* int_mult_si */
+    COSTS_N_INSNS (2), /* int_mult_di */
+    COSTS_N_INSNS (35), /* int_div_si */
+    COSTS_N_INSNS (35), /* int_div_di */
+    1, /* branch_cost */
+    4 /* memory_latency */
+  },
+  { /* R6000 */
+    COSTS_N_INSNS (3), /* fp_add */
+    COSTS_N_INSNS (5), /* fp_mult_sf */
+    COSTS_N_INSNS (6), /* fp_mult_df */
+    COSTS_N_INSNS (15), /* fp_div_sf */
+    COSTS_N_INSNS (16), /* fp_div_df */
+    COSTS_N_INSNS (17), /* int_mult_si */
+    COSTS_N_INSNS (17), /* int_mult_di */
+    COSTS_N_INSNS (38), /* int_div_si */
+    COSTS_N_INSNS (38), /* int_div_di */
+    2, /* branch_cost */
+    6 /* memory_latency */
+  },
+  { /* R4000 */
+    COSTS_N_INSNS (6), /* fp_add */
+    COSTS_N_INSNS (7), /* fp_mult_sf */
+    COSTS_N_INSNS (8), /* fp_mult_df */
+    COSTS_N_INSNS (23), /* fp_div_sf */
+    COSTS_N_INSNS (36), /* fp_div_df */
+    COSTS_N_INSNS (10), /* int_mult_si */
+    COSTS_N_INSNS (10), /* int_mult_di */
+    COSTS_N_INSNS (69), /* int_div_si */
+    COSTS_N_INSNS (69), /* int_div_di */
+    2, /* branch_cost */
+    6 /* memory_latency */
+  },
+  { /* R4100 */
+    DEFAULT_COSTS
+  },
+  { /* R4111 */
+    DEFAULT_COSTS
+  },
+  { /* R4120 */
+    DEFAULT_COSTS
+  },
+  { /* R4130 */
+    /* The only costs that appear to be updated here are
+       integer multiplication. */
+    SOFT_FP_COSTS,
+    COSTS_N_INSNS (4), /* int_mult_si */
+    COSTS_N_INSNS (6), /* int_mult_di */
+    COSTS_N_INSNS (69), /* int_div_si */
+    COSTS_N_INSNS (69), /* int_div_di */
+    1, /* branch_cost */
+    4 /* memory_latency */
+  },
+  { /* R4300 */
+    DEFAULT_COSTS
+  },
+  { /* R4600 */
+    DEFAULT_COSTS
+  },
+  { /* R4650 */
+    DEFAULT_COSTS
+  },
+  { /* R4700 */
+    DEFAULT_COSTS
+  },
+  { /* R5000 */
+    COSTS_N_INSNS (6), /* fp_add */
+    COSTS_N_INSNS (4), /* fp_mult_sf */
+    COSTS_N_INSNS (5), /* fp_mult_df */
+    COSTS_N_INSNS (23), /* fp_div_sf */
+    COSTS_N_INSNS (36), /* fp_div_df */
+    COSTS_N_INSNS (5), /* int_mult_si */
+    COSTS_N_INSNS (5), /* int_mult_di */
+    COSTS_N_INSNS (36), /* int_div_si */
+    COSTS_N_INSNS (36), /* int_div_di */
+    1, /* branch_cost */
+    4 /* memory_latency */
+  },
+  { /* R5400 */
+    COSTS_N_INSNS (6), /* fp_add */
+    COSTS_N_INSNS (5), /* fp_mult_sf */
+    COSTS_N_INSNS (6), /* fp_mult_df */
+    COSTS_N_INSNS (30), /* fp_div_sf */
+    COSTS_N_INSNS (59), /* fp_div_df */
+    COSTS_N_INSNS (3), /* int_mult_si */
+    COSTS_N_INSNS (4), /* int_mult_di */
+    COSTS_N_INSNS (42), /* int_div_si */
+    COSTS_N_INSNS (74), /* int_div_di */
+    1, /* branch_cost */
+    4 /* memory_latency */
+  },
+  { /* R5500 */
+    COSTS_N_INSNS (6), /* fp_add */
+    COSTS_N_INSNS (5), /* fp_mult_sf */
+    COSTS_N_INSNS (6), /* fp_mult_df */
+    COSTS_N_INSNS (30), /* fp_div_sf */
+    COSTS_N_INSNS (59), /* fp_div_df */
+    COSTS_N_INSNS (5), /* int_mult_si */
+    COSTS_N_INSNS (9), /* int_mult_di */
+    COSTS_N_INSNS (42), /* int_div_si */
+    COSTS_N_INSNS (74), /* int_div_di */
+    1, /* branch_cost */
+    4 /* memory_latency */
+  },
+  { /* R5900 */
+    COSTS_N_INSNS (4), /* fp_add */
+    COSTS_N_INSNS (4), /* fp_mult_sf */
+    COSTS_N_INSNS (256), /* fp_mult_df */
+    COSTS_N_INSNS (8), /* fp_div_sf */
+    COSTS_N_INSNS (256), /* fp_div_df */
+    COSTS_N_INSNS (4), /* int_mult_si */
+    COSTS_N_INSNS (256), /* int_mult_di */
+    COSTS_N_INSNS (37), /* int_div_si */
+    COSTS_N_INSNS (256), /* int_div_di */
+    1, /* branch_cost */
+    4 /* memory_latency */
+  },
+  { /* R7000 */
+    /* The only costs that are changed here are
+       integer multiplication. */
+    COSTS_N_INSNS (6), /* fp_add */
+    COSTS_N_INSNS (7), /* fp_mult_sf */
+    COSTS_N_INSNS (8), /* fp_mult_df */
+    COSTS_N_INSNS (23), /* fp_div_sf */
+    COSTS_N_INSNS (36), /* fp_div_df */
+    COSTS_N_INSNS (5), /* int_mult_si */
+    COSTS_N_INSNS (9), /* int_mult_di */
+    COSTS_N_INSNS (69), /* int_div_si */
+    COSTS_N_INSNS (69), /* int_div_di */
+    1, /* branch_cost */
+    4 /* memory_latency */
+  },
+  { /* R8000 */
+    DEFAULT_COSTS
+  },
+  { /* R9000 */
+    /* The only costs that are changed here are
+       integer multiplication. */
+    COSTS_N_INSNS (6), /* fp_add */
+    COSTS_N_INSNS (7), /* fp_mult_sf */
+    COSTS_N_INSNS (8), /* fp_mult_df */
+    COSTS_N_INSNS (23), /* fp_div_sf */
+    COSTS_N_INSNS (36), /* fp_div_df */
+    COSTS_N_INSNS (3), /* int_mult_si */
+    COSTS_N_INSNS (8), /* int_mult_di */
+    COSTS_N_INSNS (69), /* int_div_si */
+    COSTS_N_INSNS (69), /* int_div_di */
+    1, /* branch_cost */
+    4 /* memory_latency */
+  },
+  { /* R1x000 */
+    COSTS_N_INSNS (2), /* fp_add */
+    COSTS_N_INSNS (2), /* fp_mult_sf */
+    COSTS_N_INSNS (2), /* fp_mult_df */
+    COSTS_N_INSNS (12), /* fp_div_sf */
+    COSTS_N_INSNS (19), /* fp_div_df */
+    COSTS_N_INSNS (5), /* int_mult_si */
+    COSTS_N_INSNS (9), /* int_mult_di */
+    COSTS_N_INSNS (34), /* int_div_si */
+    COSTS_N_INSNS (66), /* int_div_di */
+    1, /* branch_cost */
+    4 /* memory_latency */
+  },
+  { /* SB1 */
+    /* These costs are the same as the SB-1A below. */
+    COSTS_N_INSNS (4), /* fp_add */
+    COSTS_N_INSNS (4), /* fp_mult_sf */
+    COSTS_N_INSNS (4), /* fp_mult_df */
+    COSTS_N_INSNS (24), /* fp_div_sf */
+    COSTS_N_INSNS (32), /* fp_div_df */
+    COSTS_N_INSNS (3), /* int_mult_si */
+    COSTS_N_INSNS (4), /* int_mult_di */
+    COSTS_N_INSNS (36), /* int_div_si */
+    COSTS_N_INSNS (68), /* int_div_di */
+    1, /* branch_cost */
+    4 /* memory_latency */
+  },
+  { /* SB1-A */
+    /* These costs are the same as the SB-1 above. */
+    COSTS_N_INSNS (4), /* fp_add */
+    COSTS_N_INSNS (4), /* fp_mult_sf */
+    COSTS_N_INSNS (4), /* fp_mult_df */
+    COSTS_N_INSNS (24), /* fp_div_sf */
+    COSTS_N_INSNS (32), /* fp_div_df */
+    COSTS_N_INSNS (3), /* int_mult_si */
+    COSTS_N_INSNS (4), /* int_mult_di */
+    COSTS_N_INSNS (36), /* int_div_si */
+    COSTS_N_INSNS (68), /* int_div_di */
+    1, /* branch_cost */
+    4 /* memory_latency */
+  },
+  { /* SR71000 */
+    DEFAULT_COSTS
+  },
+  { /* XLR */
+    SOFT_FP_COSTS,
+    COSTS_N_INSNS (8), /* int_mult_si */
+    COSTS_N_INSNS (8), /* int_mult_di */
+    COSTS_N_INSNS (72), /* int_div_si */
+    COSTS_N_INSNS (72), /* int_div_di */
+    1, /* branch_cost */
+    4 /* memory_latency */
+  },
+  { /* XLP */
+    /* These costs are the same as 5KF above. */
+    COSTS_N_INSNS (4), /* fp_add */
+    COSTS_N_INSNS (4), /* fp_mult_sf */
+    COSTS_N_INSNS (5), /* fp_mult_df */
+    COSTS_N_INSNS (17), /* fp_div_sf */
+    COSTS_N_INSNS (32), /* fp_div_df */
+    COSTS_N_INSNS (4), /* int_mult_si */
+    COSTS_N_INSNS (11), /* int_mult_di */
+    COSTS_N_INSNS (36), /* int_div_si */
+    COSTS_N_INSNS (68), /* int_div_di */
+    1, /* branch_cost */
+    4 /* memory_latency */
+  }
+};
+
+/* Forward declarations of hooks and helpers defined later in this file.  */
+static rtx mips_find_pic_call_symbol (rtx, rtx, bool);
+static int mips_register_move_cost (enum machine_mode, reg_class_t,
+				    reg_class_t);
+static unsigned int mips_function_arg_boundary (enum machine_mode, const_tree);
+
+/* This hash table keeps track of implicit "mips16" and "nomips16" attributes
+   for -mflip_mips16. It maps decl names onto a boolean mode setting. */
+struct GTY (()) mflip_mips16_entry {
+  /* The decl's IDENTIFIER_POINTER name (the hash key).  */
+  const char *name;
+  /* True if this decl was assigned the "mips16" attribute.  */
+  bool mips16_p;
+};
+static GTY ((param_is (struct mflip_mips16_entry))) htab_t mflip_mips16_htab;
+
+/* Hash table callbacks for mflip_mips16_htab. */
+
+/* Htab hash callback for mflip_mips16_htab: hash an entry by the
+   string it uses as its key.  */
+
+static hashval_t
+mflip_mips16_htab_hash (const void *entry)
+{
+  const struct mflip_mips16_entry *e
+    = (const struct mflip_mips16_entry *) entry;
+  return htab_hash_string (e->name);
+}
+
+/* Htab equality callback for mflip_mips16_htab: compare an entry's
+   key string against the lookup key NAME.  */
+
+static int
+mflip_mips16_htab_eq (const void *entry, const void *name)
+{
+  const struct mflip_mips16_entry *e
+    = (const struct mflip_mips16_entry *) entry;
+  const char *key = (const char *) name;
+  return strcmp (e->name, key) == 0;
+}
+
+/* True if -mflip-mips16 should next add an attribute for the default MIPS16
+   mode, false if it should next add an attribute for the opposite mode.
+   Toggled each time mflip_mips16_use_mips16_p sees a new decl name.  */
+static GTY(()) bool mips16_flipper;
+
+/* DECL is a function that needs a default "mips16" or "nomips16" attribute
+   for -mflip-mips16. Return true if it should use "mips16" and false if
+   it should use "nomips16". */
+
+static bool
+mflip_mips16_use_mips16_p (tree decl)
+{
+  struct mflip_mips16_entry *entry;
+  const char *name;
+  hashval_t hash;
+  void **slot;
+  bool base_is_mips16 = (mips_base_compression_flags & MASK_MIPS16) != 0;
+
+  /* Use the opposite of the command-line setting for anonymous decls. */
+  if (!DECL_NAME (decl))
+    return !base_is_mips16;
+
+  /* Lazily create the GC-rooted name -> mode table.  */
+  if (!mflip_mips16_htab)
+    mflip_mips16_htab = htab_create_ggc (37, mflip_mips16_htab_hash,
+					 mflip_mips16_htab_eq, NULL);
+
+  name = IDENTIFIER_POINTER (DECL_NAME (decl));
+  hash = htab_hash_string (name);
+  slot = htab_find_slot_with_hash (mflip_mips16_htab, name, hash, INSERT);
+  entry = (struct mflip_mips16_entry *) *slot;
+  if (!entry)
+    {
+      /* First time we see this name: flip the toggle and record the
+	 resulting mode, so duplicate declarations of the same function
+	 get a consistent answer.  */
+      mips16_flipper = !mips16_flipper;
+      entry = ggc_alloc_mflip_mips16_entry ();
+      entry->name = name;
+      entry->mips16_p = mips16_flipper ? !base_is_mips16 : base_is_mips16;
+      *slot = entry;
+    }
+  return entry->mips16_p;
+}
+
+/* Predicates to test for presence of "near" and "far"/"long_call"
+ attributes on the given TYPE. */
+
+static bool
+mips_near_type_p (const_tree type)
+{
+  if (lookup_attribute ("near", TYPE_ATTRIBUTES (type)))
+    return true;
+  return false;
+}
+
+static bool
+mips_far_type_p (const_tree type)
+{
+  if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (type)))
+    return true;
+  return lookup_attribute ("far", TYPE_ATTRIBUTES (type)) != NULL;
+}
+
+
+/* Return true if TYPE carries the "interrupt" attribute, i.e. the
+   function is an interrupt handler. */
+
+static bool
+mips_interrupt_type_p (tree type)
+{
+  if (lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type)))
+    return true;
+  return false;
+}
+
+/* Return true if TYPE carries the "use_shadow_register_set"
+   attribute. */
+
+static bool
+mips_use_shadow_register_set_p (tree type)
+{
+  if (lookup_attribute ("use_shadow_register_set", TYPE_ATTRIBUTES (type)))
+    return true;
+  return false;
+}
+
+/* Return true if TYPE carries the "keep_interrupts_masked"
+   attribute. */
+
+static bool
+mips_keep_interrupts_masked_p (tree type)
+{
+  if (lookup_attribute ("keep_interrupts_masked", TYPE_ATTRIBUTES (type)))
+    return true;
+  return false;
+}
+
+/* Return true if TYPE carries the "use_debug_exception_return"
+   attribute. */
+
+static bool
+mips_use_debug_exception_return_p (tree type)
+{
+  if (lookup_attribute ("use_debug_exception_return",
+			TYPE_ATTRIBUTES (type)))
+    return true;
+  return false;
+}
+
+/* Return the set of compression modes (MASK_MIPS16 / MASK_MICROMIPS)
+   that are explicitly required by the attributes in ATTRIBUTES. */
+
+static unsigned int
+mips_get_compress_on_flags (tree attributes)
+{
+  return ((lookup_attribute ("mips16", attributes) != NULL
+	   ? MASK_MIPS16 : 0)
+	  | (lookup_attribute ("micromips", attributes) != NULL
+	     ? MASK_MICROMIPS : 0));
+}
+
+/* Return the set of compression modes (MASK_MIPS16 / MASK_MICROMIPS)
+   that are explicitly forbidden by the attributes in ATTRIBUTES.
+   "nocompression" forbids both modes at once. */
+
+static unsigned int
+mips_get_compress_off_flags (tree attributes)
+{
+  unsigned int forbidden = 0;
+
+  if (lookup_attribute ("nocompression", attributes) != NULL)
+    forbidden |= MASK_MIPS16 | MASK_MICROMIPS;
+  if (lookup_attribute ("nomips16", attributes) != NULL)
+    forbidden |= MASK_MIPS16;
+  if (lookup_attribute ("nomicromips", attributes) != NULL)
+    forbidden |= MASK_MICROMIPS;
+
+  return forbidden;
+}
+
+/* Return the compression mode that should be used for function DECL.
+   Return the ambient setting if DECL is null.
+
+   Precedence: an explicit "on" attribute wins outright; otherwise the
+   ambient flags minus any explicit "off" attributes apply. */
+
+static unsigned int
+mips_get_compress_mode (tree decl)
+{
+  unsigned int flags, force_on;
+
+  flags = mips_base_compression_flags;
+  if (decl)
+    {
+      /* Nested functions must use the same frame pointer as their
+	 parent and must therefore use the same ISA mode. */
+      tree parent = decl_function_context (decl);
+      if (parent)
+	decl = parent;
+      force_on = mips_get_compress_on_flags (DECL_ATTRIBUTES (decl));
+      if (force_on)
+	return force_on;
+      flags &= ~mips_get_compress_off_flags (DECL_ATTRIBUTES (decl));
+    }
+  return flags;
+}
+
+/* Return the attribute name associated with MASK_MIPS16 and MASK_MICROMIPS
+   flags FLAGS. */
+
+static const char *
+mips_get_compress_on_name (unsigned int flags)
+{
+  return flags == MASK_MIPS16 ? "mips16" : "micromips";
+}
+
+/* Return the attribute name that forbids MASK_MIPS16 and MASK_MICROMIPS
+   flags FLAGS. */
+
+static const char *
+mips_get_compress_off_name (unsigned int flags)
+{
+  return (flags == MASK_MIPS16 ? "nomips16"
+	  : flags == MASK_MICROMIPS ? "nomicromips"
+	  : "nocompression");
+}
+
+/* Implement TARGET_COMP_TYPE_ATTRIBUTES.  Two function types are
+   incompatible if one is "far"/"long_call" and the other is "near". */
+
+static int
+mips_comp_type_attributes (const_tree type1, const_tree type2)
+{
+  /* Disallow mixed near/far attributes. */
+  if ((mips_far_type_p (type1) && mips_near_type_p (type2))
+      || (mips_near_type_p (type1) && mips_far_type_p (type2)))
+    return 0;
+  return 1;
+}
+
+/* Implement TARGET_INSERT_ATTRIBUTES.  Validate the compression
+   attributes on DECL ("mips16"/"nomips16", "micromips"/"nomicromips",
+   "nocompression") and, under -mflip-mips16, add an implicit mode
+   attribute to functions that have none. */
+
+static void
+mips_insert_attributes (tree decl, tree *attributes)
+{
+  const char *name;
+  unsigned int compression_flags, nocompression_flags;
+
+  /* Check for "mips16" and "nomips16" attributes. */
+  compression_flags = mips_get_compress_on_flags (*attributes);
+  nocompression_flags = mips_get_compress_off_flags (*attributes);
+
+  if (TREE_CODE (decl) != FUNCTION_DECL)
+    {
+      if (nocompression_flags)
+	error ("%qs attribute only applies to functions",
+	       mips_get_compress_off_name (nocompression_flags));
+
+      /* Name the offending "on" attribute.  This used to pass
+	 NOCOMPRESSION_FLAGS, which named the wrong attribute (and,
+	 since those flags are zero on this path, always printed
+	 "micromips"). */
+      if (compression_flags)
+	error ("%qs attribute only applies to functions",
+	       mips_get_compress_on_name (compression_flags));
+    }
+  else
+    {
+      compression_flags |= mips_get_compress_on_flags (DECL_ATTRIBUTES (decl));
+      nocompression_flags |=
+	mips_get_compress_off_flags (DECL_ATTRIBUTES (decl));
+
+      if (compression_flags && nocompression_flags)
+	error ("%qE cannot have both %qs and %qs attributes",
+	       DECL_NAME (decl), mips_get_compress_on_name (compression_flags),
+	       mips_get_compress_off_name (nocompression_flags));
+
+      if (compression_flags & MASK_MIPS16
+	  && compression_flags & MASK_MICROMIPS)
+	error ("%qE cannot have both %qs and %qs attributes",
+	       DECL_NAME (decl), "mips16", "micromips");
+
+      if (TARGET_FLIP_MIPS16
+	  && !DECL_ARTIFICIAL (decl)
+	  && compression_flags == 0
+	  && nocompression_flags == 0)
+	{
+	  /* Implement -mflip-mips16. If DECL has neither a "nomips16" nor a
+	     "mips16" attribute, arbitrarily pick one. We must pick the same
+	     setting for duplicate declarations of a function. */
+	  name = mflip_mips16_use_mips16_p (decl) ? "mips16" : "nomips16";
+	  *attributes = tree_cons (get_identifier (name), NULL, *attributes);
+	  name = "nomicromips";
+	  *attributes = tree_cons (get_identifier (name), NULL, *attributes);
+	}
+    }
+}
+
+/* Implement TARGET_MERGE_DECL_ATTRIBUTES.  Diagnose redeclarations
+   whose compression attributes conflict, then merge the two lists. */
+
+static tree
+mips_merge_decl_attributes (tree olddecl, tree newdecl)
+{
+  tree old_attrs = DECL_ATTRIBUTES (olddecl);
+  tree new_attrs = DECL_ATTRIBUTES (newdecl);
+  unsigned int on_diff, off_diff;
+
+  /* Flags set on exactly one of the two declarations conflict.  */
+  on_diff = (mips_get_compress_on_flags (old_attrs)
+	     ^ mips_get_compress_on_flags (new_attrs));
+  if (on_diff != 0)
+    error ("%qE redeclared with conflicting %qs attributes",
+	   DECL_NAME (newdecl), mips_get_compress_on_name (on_diff));
+
+  off_diff = (mips_get_compress_off_flags (old_attrs)
+	      ^ mips_get_compress_off_flags (new_attrs));
+  if (off_diff != 0)
+    error ("%qE redeclared with conflicting %qs attributes",
+	   DECL_NAME (newdecl), mips_get_compress_off_name (off_diff));
+
+  return merge_attributes (old_attrs, new_attrs);
+}
+
+/* Implement TARGET_CAN_INLINE_P.  Refuse to inline across different
+   compression modes; otherwise defer to the default hook. */
+
+static bool
+mips_can_inline_p (tree caller, tree callee)
+{
+  if (mips_get_compress_mode (callee) == mips_get_compress_mode (caller))
+    return default_target_can_inline_p (caller, callee);
+  return false;
+}
+
+/* If X is a PLUS of a CONST_INT, return the two terms in *BASE_PTR
+   and *OFFSET_PTR. Return X in *BASE_PTR and 0 in *OFFSET_PTR otherwise. */
+
+static void
+mips_split_plus (rtx x, rtx *base_ptr, HOST_WIDE_INT *offset_ptr)
+{
+  rtx base = x;
+  HOST_WIDE_INT offset = 0;
+
+  if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)))
+    {
+      base = XEXP (x, 0);
+      offset = INTVAL (XEXP (x, 1));
+    }
+
+  *base_ptr = base;
+  *offset_ptr = offset;
+}
+
+/* Forward declaration: mips_build_shift and mips_build_lower recurse
+   into mips_build_integer, which is defined below them.  */
+static unsigned int mips_build_integer (struct mips_integer_op *,
+					unsigned HOST_WIDE_INT);
+
+/* A subroutine of mips_build_integer, with the same interface.
+   Assume that the final action in the sequence should be a left shift. */
+
+static unsigned int
+mips_build_shift (struct mips_integer_op *codes, HOST_WIDE_INT value)
+{
+  unsigned int i, shift;
+
+  /* Shift VALUE right until its lowest bit is set. Shift arithmetically
+     since signed numbers are easier to load than unsigned ones.
+     Note VALUE is only divided while its low bit is clear, so the
+     signed division below is exact and equivalent to an arithmetic
+     right shift even for negative values. */
+  shift = 0;
+  while ((value & 1) == 0)
+    value /= 2, shift++;
+
+  /* Load the reduced value, then append the final left shift.  */
+  i = mips_build_integer (codes, value);
+  codes[i].code = ASHIFT;
+  codes[i].value = shift;
+  return i + 1;
+}
+
+/* As for mips_build_shift, but assume that the final action will be
+   an IOR or PLUS operation. */
+
+static unsigned int
+mips_build_lower (struct mips_integer_op *codes, unsigned HOST_WIDE_INT value)
+{
+  unsigned HOST_WIDE_INT high;
+  unsigned int i;
+
+  /* HIGH is VALUE with its low 16 bits cleared.  */
+  high = value & ~(unsigned HOST_WIDE_INT) 0xffff;
+  if (!LUI_OPERAND (high) && (value & 0x18000) == 0x18000)
+    {
+      /* The constant is too complex to load with a simple LUI/ORI pair,
+	 so we want to give the recursive call as many trailing zeros as
+	 possible. In this case, we know bit 16 is set and that the
+	 low 16 bits form a negative number. If we subtract that number
+	 from VALUE, we will clear at least the lowest 17 bits, maybe more. */
+      i = mips_build_integer (codes, CONST_HIGH_PART (value));
+      codes[i].code = PLUS;
+      codes[i].value = CONST_LOW_PART (value);
+    }
+  else
+    {
+      /* Either this is a simple LUI/ORI pair, or clearing the lowest 16
+	 bits gives a value with at least 17 trailing zeros. */
+      i = mips_build_integer (codes, high);
+      codes[i].code = IOR;
+      codes[i].value = value & 0xffff;
+    }
+  return i + 1;
+}
+
+/* Fill CODES with a sequence of rtl operations to load VALUE.
+   Return the number of operations needed.  */
+
+static unsigned int
+mips_build_integer (struct mips_integer_op *codes,
+                    unsigned HOST_WIDE_INT value)
+{
+  if (SMALL_OPERAND (value)
+      || SMALL_OPERAND_UNSIGNED (value)
+      || LUI_OPERAND (value))
+    {
+      /* The value can be loaded with a single instruction.  */
+      codes[0].code = UNKNOWN;
+      codes[0].value = value;
+      return 1;
+    }
+  else if ((value & 1) != 0 || LUI_OPERAND (CONST_HIGH_PART (value)))
+    {
+      /* Either the constant is a simple LUI/ORI combination or its
+         lowest bit is set.  We don't want to shift in this case.  */
+      return mips_build_lower (codes, value);
+    }
+  else if ((value & 0xffff) == 0)
+    {
+      /* The constant will need at least three actions.  The lowest
+         16 bits are clear, so the final action will be a shift.  */
+      return mips_build_shift (codes, value);
+    }
+  else
+    {
+      /* The final action could be a shift, add or inclusive OR.
+         Rather than use a complex condition to select the best
+         approach, try both mips_build_shift and mips_build_lower
+         and pick the one that gives the shortest sequence.
+         Note that this case is only used once per constant.  */
+      struct mips_integer_op alt_codes[MIPS_MAX_INTEGER_OPS];
+      unsigned int cost, alt_cost;
+
+      cost = mips_build_shift (codes, value);
+      alt_cost = mips_build_lower (alt_codes, value);
+      if (alt_cost < cost)
+        {
+          /* The IOR/PLUS sequence won; copy it over the shift one.  */
+          memcpy (codes, alt_codes, alt_cost * sizeof (codes[0]));
+          cost = alt_cost;
+        }
+      return cost;
+    }
+}
+
+/* Implement TARGET_LEGITIMATE_CONSTANT_P.  A constant is legitimate
+   whenever some instruction sequence can synthesize it.  */
+
+static bool
+mips_legitimate_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
+{
+  /* mips_const_insns returns 0 for constants it cannot load.  */
+  int ninsns = mips_const_insns (x);
+  return ninsns > 0;
+}
+
+/* Return a SYMBOL_REF for a MIPS16 function called NAME.  */
+
+static rtx
+mips16_stub_function (const char *name)
+{
+  rtx x;
+
+  /* GC-allocate a copy of NAME so the SYMBOL_REF's string stays live.  */
+  x = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
+  /* The stub is an externally-defined function.  */
+  SYMBOL_REF_FLAGS (x) |= (SYMBOL_FLAG_EXTERNAL | SYMBOL_FLAG_FUNCTION);
+  return x;
+}
+
+/* Return a legitimate call address for STUB, given that STUB is a MIPS16
+   support function.  */
+
+static rtx
+mips16_stub_call_address (mips_one_only_stub *stub)
+{
+  rtx fn = mips16_stub_function (stub->get_name ());
+  /* Mark the stub as locally binding.  */
+  SYMBOL_REF_FLAGS (fn) |= SYMBOL_FLAG_LOCAL;
+  /* Force the address into a register if it cannot be used directly
+     as a call operand.  */
+  if (!call_insn_operand (fn, VOIDmode))
+    fn = force_reg (Pmode, fn);
+  return fn;
+}
+
+/* A stub for moving the thread pointer into TLS_GET_TP_REGNUM.  */
+
+class mips16_rdhwr_one_only_stub : public mips_one_only_stub
+{
+  virtual const char *get_name ();
+  virtual void output_body ();
+};
+
+/* Return the assembly-level name of the stub.  */
+
+const char *
+mips16_rdhwr_one_only_stub::get_name ()
+{
+  return "__mips16_rdhwr";
+}
+
+/* Emit the stub's instructions.  RDHWR is not available in MIPS16 mode,
+   so the body temporarily switches the assembler to mips32r2.
+   NOTE(review): $3 is presumably TLS_GET_TP_REGNUM -- confirm against
+   the register definitions in mips.h.  */
+
+void
+mips16_rdhwr_one_only_stub::output_body ()
+{
+  fprintf (asm_out_file,
+           "\t.set\tpush\n"
+           "\t.set\tmips32r2\n"
+           "\t.set\tnoreorder\n"
+           "\trdhwr\t$3,$29\n"
+           "\t.set\tpop\n"
+           "\tj\t$31\n");
+}
+
+/* A stub for moving the FCSR into GET_FCSR_REGNUM.  */
+class mips16_get_fcsr_one_only_stub : public mips_one_only_stub
+{
+  virtual const char *get_name ();
+  virtual void output_body ();
+};
+
+/* Return the assembly-level name of the stub.  */
+
+const char *
+mips16_get_fcsr_one_only_stub::get_name ()
+{
+  return "__mips16_get_fcsr";
+}
+
+/* Emit the stub body: copy FP control register $31 into
+   GET_FCSR_REGNUM with CFC1, then return through $31.  */
+
+void
+mips16_get_fcsr_one_only_stub::output_body ()
+{
+  fprintf (asm_out_file,
+           "\tcfc1\t%s,$31\n"
+           "\tj\t$31\n", reg_names[GET_FCSR_REGNUM]);
+}
+
+/* A stub for moving SET_FCSR_REGNUM into the FCSR.  */
+class mips16_set_fcsr_one_only_stub : public mips_one_only_stub
+{
+  virtual const char *get_name ();
+  virtual void output_body ();
+};
+
+/* Return the assembly-level name of the stub.  */
+
+const char *
+mips16_set_fcsr_one_only_stub::get_name ()
+{
+  return "__mips16_set_fcsr";
+}
+
+/* Emit the stub body: copy SET_FCSR_REGNUM into FP control register
+   $31 with CTC1, then return through $31.  */
+
+void
+mips16_set_fcsr_one_only_stub::output_body ()
+{
+  fprintf (asm_out_file,
+           "\tctc1\t%s,$31\n"
+           "\tj\t$31\n", reg_names[SET_FCSR_REGNUM]);
+}
+
+/* Return true if symbols of type TYPE require a GOT access.  */
+
+static bool
+mips_got_symbol_type_p (enum mips_symbol_type type)
+{
+  /* Only the two GOT-loading symbol types need the GOT itself.  */
+  return type == SYMBOL_GOT_PAGE_OFST || type == SYMBOL_GOT_DISP;
+}
+
+/* Return true if X is a thread-local symbol.  */
+
+static bool
+mips_tls_symbol_p (rtx x)
+{
+  if (GET_CODE (x) != SYMBOL_REF)
+    return false;
+  return SYMBOL_REF_TLS_MODEL (x) != 0;
+}
+
+/* Return true if SYMBOL_REF X is associated with a global symbol
+   (in the STB_GLOBAL sense).  */
+
+static bool
+mips_global_symbol_p (const_rtx x)
+{
+  const_tree decl = SYMBOL_REF_DECL (x);
+
+  /* With no declaration to inspect, rely on the SYMBOL_REF flags alone.  */
+  if (!decl)
+    return !SYMBOL_REF_LOCAL_P (x) || SYMBOL_REF_EXTERNAL_P (x);
+
+  /* Weakref symbols are not TREE_PUBLIC, but their targets are global
+     or weak symbols.  Relocations in the object file will be against
+     the target symbol, so it's that symbol's binding that matters here.  */
+  return DECL_P (decl) && (TREE_PUBLIC (decl) || DECL_WEAK (decl));
+}
+
+/* Return true if function X is a libgcc MIPS16 stub function.  */
+
+static bool
+mips16_stub_function_p (const_rtx x)
+{
+  /* All such stubs share the "__mips16_" name prefix.  */
+  if (GET_CODE (x) != SYMBOL_REF)
+    return false;
+  return strncmp (XSTR (x, 0), "__mips16_", 9) == 0;
+}
+
+/* Return true if function X is a locally-defined and locally-binding
+   MIPS16 function.  */
+
+static bool
+mips16_local_function_p (const_rtx x)
+{
+  /* The symbol must bind locally, be defined in this object, and carry
+     the MIPS16 compression attribute on its declaration.  */
+  return (GET_CODE (x) == SYMBOL_REF
+          && SYMBOL_REF_LOCAL_P (x)
+          && !SYMBOL_REF_EXTERNAL_P (x)
+          && (mips_get_compress_mode (SYMBOL_REF_DECL (x)) & MASK_MIPS16));
+}
+
+/* Return true if SYMBOL_REF X binds locally.  */
+
+static bool
+mips_symbol_binds_local_p (const_rtx x)
+{
+  /* Prefer the target hook when a declaration is available; otherwise
+     fall back on the flag recorded in the SYMBOL_REF itself.  */
+  if (SYMBOL_REF_DECL (x))
+    return targetm.binds_local_p (SYMBOL_REF_DECL (x));
+  return SYMBOL_REF_LOCAL_P (x);
+}
+
+/* Return true if rtx constants of mode MODE should be put into a small
+   data section.  */
+
+static bool
+mips_rtx_constant_in_small_data_p (enum machine_mode mode)
+{
+  /* Small data is disabled entirely for embedded data or when local
+     sdata is switched off.  */
+  if (TARGET_EMBEDDED_DATA || !TARGET_LOCAL_SDATA)
+    return false;
+  return GET_MODE_SIZE (mode) <= mips_small_data_threshold;
+}
+
+/* Return true if X should not be moved directly into register $25.
+   We need this because many versions of GAS will treat "la $25,foo" as
+   part of a call sequence and so allow a global "foo" to be lazily bound.  */
+
+bool
+mips_dangerous_for_la25_p (rtx x)
+{
+  /* The problem only arises for GOT-based code without explicit relocs.  */
+  if (TARGET_EXPLICIT_RELOCS || !TARGET_USE_GOT)
+    return false;
+  return GET_CODE (x) == SYMBOL_REF && mips_global_symbol_p (x);
+}
+
+/* Return true if calls to X might need $25 to be valid on entry.  */
+
+bool
+mips_use_pic_fn_addr_reg_p (const_rtx x)
+{
+  if (!TARGET_USE_PIC_FN_ADDR_REG)
+    return false;
+
+  /* MIPS16 stub functions are guaranteed not to use $25.  */
+  if (mips16_stub_function_p (x))
+    return false;
+
+  if (GET_CODE (x) == SYMBOL_REF)
+    {
+      /* If PLTs and copy relocations are available, the static linker
+         will make sure that $25 is valid on entry to the target function.  */
+      if (TARGET_ABICALLS_PIC0)
+        return false;
+
+      /* Locally-defined functions use absolute accesses to set up
+         the global pointer.  */
+      if (TARGET_ABSOLUTE_ABICALLS
+          && mips_symbol_binds_local_p (x)
+          && !SYMBOL_REF_EXTERNAL_P (x))
+        return false;
+    }
+
+  /* Conservatively assume $25 is needed for anything else (including
+     indirect calls through registers).  */
+  return true;
+}
+
+/* Return the method that should be used to access SYMBOL_REF or
+   LABEL_REF X in context CONTEXT.  */
+
+static enum mips_symbol_type
+mips_classify_symbol (const_rtx x, enum mips_symbol_context context)
+{
+  /* VxWorks RTP mode routes everything through the GOT.  */
+  if (TARGET_RTP_PIC)
+    return SYMBOL_GOT_DISP;
+
+  if (GET_CODE (x) == LABEL_REF)
+    {
+      /* Only return SYMBOL_PC_RELATIVE if we are generating MIPS16
+         code and if we know that the label is in the current function's
+         text section.  LABEL_REFs are used for jump tables as well as
+         text labels, so we must check whether jump tables live in the
+         text section.  */
+      if (TARGET_MIPS16_SHORT_JUMP_TABLES
+          && !LABEL_REF_NONLOCAL_P (x))
+        return SYMBOL_PC_RELATIVE;
+
+      if (TARGET_ABICALLS && !TARGET_ABSOLUTE_ABICALLS)
+        return SYMBOL_GOT_PAGE_OFST;
+
+      return SYMBOL_ABSOLUTE;
+    }
+
+  gcc_assert (GET_CODE (x) == SYMBOL_REF);
+
+  if (SYMBOL_REF_TLS_MODEL (x))
+    return SYMBOL_TLS;
+
+  if (CONSTANT_POOL_ADDRESS_P (x))
+    {
+      if (TARGET_MIPS16_TEXT_LOADS)
+        return SYMBOL_PC_RELATIVE;
+
+      if (TARGET_MIPS16_PCREL_LOADS && context == SYMBOL_CONTEXT_MEM)
+        return SYMBOL_PC_RELATIVE;
+
+      if (mips_rtx_constant_in_small_data_p (get_pool_mode (x)))
+        return SYMBOL_GP_RELATIVE;
+    }
+
+  /* Do not use small-data accesses for weak symbols; they may end up
+     being zero.  */
+  if (TARGET_GPOPT && SYMBOL_REF_SMALL_P (x) && !SYMBOL_REF_WEAK (x))
+    return SYMBOL_GP_RELATIVE;
+
+  /* Don't use GOT accesses for locally-binding symbols when -mno-shared
+     is in effect.  */
+  if (TARGET_ABICALLS_PIC2
+      && !(TARGET_ABSOLUTE_ABICALLS && mips_symbol_binds_local_p (x)))
+    {
+      /* There are three cases to consider:
+
+         - o32 PIC (either with or without explicit relocs)
+         - n32/n64 PIC without explicit relocs
+         - n32/n64 PIC with explicit relocs
+
+         In the first case, both local and global accesses will use an
+         R_MIPS_GOT16 relocation.  We must correctly predict which of
+         the two semantics (local or global) the assembler and linker
+         will apply.  The choice depends on the symbol's binding rather
+         than its visibility.
+
+         In the second case, the assembler will not use R_MIPS_GOT16
+         relocations, but it chooses between local and global accesses
+         in the same way as for o32 PIC.
+
+         In the third case we have more freedom since both forms of
+         access will work for any kind of symbol.  However, there seems
+         little point in doing things differently.  */
+      if (mips_global_symbol_p (x))
+        return SYMBOL_GOT_DISP;
+
+      return SYMBOL_GOT_PAGE_OFST;
+    }
+
+  return SYMBOL_ABSOLUTE;
+}
+
+/* Classify the base of symbolic expression X, given that X appears in
+   context CONTEXT.  */
+
+static enum mips_symbol_type
+mips_classify_symbolic_expression (rtx x, enum mips_symbol_context context)
+{
+  rtx base, offset;
+
+  /* Strip any constant offset, then look at the base term.  */
+  split_const (x, &base, &offset);
+  if (UNSPEC_ADDRESS_P (base))
+    return UNSPEC_ADDRESS_TYPE (base);
+  return mips_classify_symbol (base, context);
+}
+
+/* Return true if OFFSET is within the range [0, ALIGN), where ALIGN
+   is the alignment in bytes of SYMBOL_REF X.  */
+
+static bool
+mips_offset_within_alignment_p (rtx x, HOST_WIDE_INT offset)
+{
+  tree decl = SYMBOL_REF_DECL (x);
+  /* Without a declaration, conservatively assume byte alignment.  */
+  HOST_WIDE_INT align = decl ? DECL_ALIGN_UNIT (decl) : 1;
+  return IN_RANGE (offset, 0, align - 1);
+}
+
+/* Return true if X is a symbolic constant that can be used in context
+   CONTEXT.  If it is, store the type of the symbol in *SYMBOL_TYPE.  */
+
+bool
+mips_symbolic_constant_p (rtx x, enum mips_symbol_context context,
+                          enum mips_symbol_type *symbol_type)
+{
+  rtx offset;
+
+  split_const (x, &x, &offset);
+  if (UNSPEC_ADDRESS_P (x))
+    {
+      *symbol_type = UNSPEC_ADDRESS_TYPE (x);
+      x = UNSPEC_ADDRESS (x);
+    }
+  else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
+    {
+      *symbol_type = mips_classify_symbol (x, context);
+      if (*symbol_type == SYMBOL_TLS)
+        return false;
+    }
+  else
+    return false;
+
+  /* A bare symbol with no offset is valid for any non-TLS symbol type.  */
+  if (offset == const0_rtx)
+    return true;
+
+  /* Check whether a nonzero offset is valid for the underlying
+     relocations.  */
+  switch (*symbol_type)
+    {
+    case SYMBOL_ABSOLUTE:
+    case SYMBOL_64_HIGH:
+    case SYMBOL_64_MID:
+    case SYMBOL_64_LOW:
+      /* If the target has 64-bit pointers and the object file only
+         supports 32-bit symbols, the values of those symbols will be
+         sign-extended.  In this case we can't allow an arbitrary offset
+         in case the 32-bit value X + OFFSET has a different sign from X.  */
+      if (Pmode == DImode && !ABI_HAS_64BIT_SYMBOLS)
+        return offset_within_block_p (x, INTVAL (offset));
+
+      /* In other cases the relocations can handle any offset.  */
+      return true;
+
+    case SYMBOL_PC_RELATIVE:
+      /* Allow constant pool references to be converted to LABEL+CONSTANT.
+         In this case, we no longer have access to the underlying constant,
+         but the original symbol-based access was known to be valid.  */
+      if (GET_CODE (x) == LABEL_REF)
+        return true;
+
+      /* Fall through.  */
+
+    case SYMBOL_GP_RELATIVE:
+      /* Make sure that the offset refers to something within the
+         same object block.  This should guarantee that the final
+         PC- or GP-relative offset is within the 16-bit limit.  */
+      return offset_within_block_p (x, INTVAL (offset));
+
+    case SYMBOL_GOT_PAGE_OFST:
+    case SYMBOL_GOTOFF_PAGE:
+      /* If the symbol is global, the GOT entry will contain the symbol's
+         address, and we will apply a 16-bit offset after loading it.
+         If the symbol is local, the linker should provide enough local
+         GOT entries for a 16-bit offset, but larger offsets may lead
+         to GOT overflow.  */
+      return SMALL_INT (offset);
+
+    case SYMBOL_TPREL:
+    case SYMBOL_DTPREL:
+      /* There is no carry between the HI and LO REL relocations, so the
+         offset is only valid if we know it won't lead to such a carry.  */
+      return mips_offset_within_alignment_p (x, INTVAL (offset));
+
+    case SYMBOL_GOT_DISP:
+    case SYMBOL_GOTOFF_DISP:
+    case SYMBOL_GOTOFF_CALL:
+    case SYMBOL_GOTOFF_LOADGP:
+    case SYMBOL_TLSGD:
+    case SYMBOL_TLSLDM:
+    case SYMBOL_GOTTPREL:
+    case SYMBOL_TLS:
+    case SYMBOL_HALF:
+      /* These relocation types never accept an addend.  */
+      return false;
+    }
+  gcc_unreachable ();
+}
+
+/* Like mips_symbol_insns, but treat extended MIPS16 instructions as a
+   single instruction.  We rely on the fact that, in the worst case,
+   all instructions involved in a MIPS16 address calculation are usually
+   extended ones.  */
+
+static int
+mips_symbol_insns_1 (enum mips_symbol_type type, enum machine_mode mode)
+{
+  if (mips_use_pcrel_pool_p[(int) type])
+    {
+      if (mode == MAX_MACHINE_MODE)
+        /* LEAs will be converted into constant-pool references by
+           mips_reorg.  */
+        type = SYMBOL_PC_RELATIVE;
+      else
+        /* The constant must be loaded and then dereferenced.  */
+        return 0;
+    }
+
+  switch (type)
+    {
+    case SYMBOL_ABSOLUTE:
+      /* When using 64-bit symbols, we need 5 preparatory instructions,
+         such as:
+
+             lui     $at,%highest(symbol)
+             daddiu  $at,$at,%higher(symbol)
+             dsll    $at,$at,16
+             daddiu  $at,$at,%hi(symbol)
+             dsll    $at,$at,16
+
+         The final address is then $at + %lo(symbol).  With 32-bit
+         symbols we just need a preparatory LUI for normal mode and
+         a preparatory LI and SLL for MIPS16.  */
+      return ABI_HAS_64BIT_SYMBOLS ? 6 : TARGET_MIPS16 ? 3 : 2;
+
+    case SYMBOL_GP_RELATIVE:
+      /* Treat GP-relative accesses as taking a single instruction on
+         MIPS16 too; the copy of $gp can often be shared.  */
+      return 1;
+
+    case SYMBOL_PC_RELATIVE:
+      /* PC-relative constants can only be used with ADDIUPC,
+         DADDIUPC, LWPC and LDPC.  */
+      if (mode == MAX_MACHINE_MODE
+          || GET_MODE_SIZE (mode) == 4
+          || GET_MODE_SIZE (mode) == 8)
+        return 1;
+
+      /* The constant must be loaded using ADDIUPC or DADDIUPC first.  */
+      return 0;
+
+    case SYMBOL_GOT_DISP:
+      /* The constant will have to be loaded from the GOT before it
+         is used in an address.  */
+      if (mode != MAX_MACHINE_MODE)
+        return 0;
+
+      /* Fall through.  */
+
+    case SYMBOL_GOT_PAGE_OFST:
+      /* Unless -funit-at-a-time is in effect, we can't be sure whether the
+         local/global classification is accurate.  The worst cases are:
+
+         (1) For local symbols when generating o32 or o64 code.  The assembler
+             will use:
+
+                 lw           $at,%got(symbol)
+                 nop
+
+             ...and the final address will be $at + %lo(symbol).
+
+         (2) For global symbols when -mxgot.  The assembler will use:
+
+                 lui     $at,%got_hi(symbol)
+                 (d)addu $at,$at,$gp
+
+             ...and the final address will be $at + %got_lo(symbol).  */
+      return 3;
+
+    case SYMBOL_GOTOFF_PAGE:
+    case SYMBOL_GOTOFF_DISP:
+    case SYMBOL_GOTOFF_CALL:
+    case SYMBOL_GOTOFF_LOADGP:
+    case SYMBOL_64_HIGH:
+    case SYMBOL_64_MID:
+    case SYMBOL_64_LOW:
+    case SYMBOL_TLSGD:
+    case SYMBOL_TLSLDM:
+    case SYMBOL_DTPREL:
+    case SYMBOL_GOTTPREL:
+    case SYMBOL_TPREL:
+    case SYMBOL_HALF:
+      /* A 16-bit constant formed by a single relocation, or a 32-bit
+         constant formed from a high 16-bit relocation and a low 16-bit
+         relocation.  Use mips_split_p to determine which.  32-bit
+         constants need an "lui; addiu" sequence for normal mode and
+         an "li; sll; addiu" sequence for MIPS16 mode.  */
+      return !mips_split_p[type] ? 1 : TARGET_MIPS16 ? 3 : 2;
+
+    case SYMBOL_TLS:
+      /* We don't treat a bare TLS symbol as a constant.  */
+      return 0;
+    }
+  gcc_unreachable ();
+}
+
+/* If MODE is MAX_MACHINE_MODE, return the number of instructions needed
+   to load symbols of type TYPE into a register.  Return 0 if the given
+   type of symbol cannot be used as an immediate operand.
+
+   Otherwise, return the number of instructions needed to load or store
+   values of mode MODE to or from addresses of type TYPE.  Return 0 if
+   the given type of symbol is not valid in addresses.
+
+   In both cases, instruction counts are based off BASE_INSN_LENGTH.  */
+
+static int
+mips_symbol_insns (enum mips_symbol_type type, enum machine_mode mode)
+{
+  /* Assume every MIPS16 instruction counted by mips_symbol_insns_1
+     is extended, i.e. twice BASE_INSN_LENGTH.  */
+  int count = mips_symbol_insns_1 (type, mode);
+  return TARGET_MIPS16 ? count * 2 : count;
+}
+
+/* A for_each_rtx callback.  Stop the search if *X references a
+   thread-local symbol.  */
+
+static int
+mips_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
+{
+  /* A nonzero return value makes for_each_rtx stop the traversal.  */
+  return mips_tls_symbol_p (*x);
+}
+
+/* Implement TARGET_CANNOT_FORCE_CONST_MEM.  */
+
+static bool
+mips_cannot_force_const_mem (enum machine_mode mode, rtx x)
+{
+  enum mips_symbol_type type;
+  rtx base, offset;
+
+  /* There is no assembler syntax for expressing an address-sized
+     high part.  */
+  if (GET_CODE (x) == HIGH)
+    return true;
+
+  /* As an optimization, reject constants that mips_legitimize_move
+     can expand inline.
+
+     Suppose we have a multi-instruction sequence that loads constant C
+     into register R.  If R does not get allocated a hard register, and
+     R is used in an operand that allows both registers and memory
+     references, reload will consider forcing C into memory and using
+     one of the instruction's memory alternatives.  Returning false
+     here will force it to use an input reload instead.  */
+  if (CONST_INT_P (x) && mips_legitimate_constant_p (mode, x))
+    return true;
+
+  split_const (x, &base, &offset);
+  if (mips_symbolic_constant_p (base, SYMBOL_CONTEXT_LEA, &type))
+    {
+      /* See whether we explicitly want these symbols in the pool.  */
+      if (mips_use_pcrel_pool_p[(int) type])
+        return false;
+
+      /* The same optimization as for CONST_INT.  */
+      if (SMALL_INT (offset) && mips_symbol_insns (type, MAX_MACHINE_MODE) > 0)
+        return true;
+
+      /* If MIPS16 constant pools live in the text section, they should
+         not refer to anything that might need run-time relocation.  */
+      if (TARGET_MIPS16_PCREL_LOADS && mips_got_symbol_type_p (type))
+        return true;
+    }
+
+  /* TLS symbols must be computed by mips_legitimize_move.  */
+  if (for_each_rtx (&x, &mips_tls_symbol_ref_1, NULL))
+    return true;
+
+  return false;
+}
+
+/* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P.  We can't use blocks for
+   constants when we're using a per-function constant pool.  */
+
+static bool
+mips_use_blocks_for_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED,
+                                const_rtx x ATTRIBUTE_UNUSED)
+{
+  /* MIPS16 PC-relative loads imply per-function pools.  */
+  if (TARGET_MIPS16_PCREL_LOADS)
+    return false;
+  return true;
+}
+
+/* Return true if register REGNO is a valid base register for mode MODE.
+   STRICT_P is true if REG_OK_STRICT is in effect.  */
+
+int
+mips_regno_mode_ok_for_base_p (int regno, enum machine_mode mode,
+                               bool strict_p)
+{
+  if (!HARD_REGISTER_NUM_P (regno))
+    {
+      /* Any pseudo is acceptable before register allocation; afterwards
+         judge the hard register it was assigned.  */
+      if (!strict_p)
+        return true;
+      regno = reg_renumber[regno];
+    }
+
+  /* These fake registers will be eliminated to either the stack or
+     hard frame pointer, both of which are usually valid base registers.
+     Reload deals with the cases where the eliminated form isn't valid.  */
+  if (regno == ARG_POINTER_REGNUM || regno == FRAME_POINTER_REGNUM)
+    return true;
+
+  /* In MIPS16 mode, the stack pointer can only address word and doubleword
+     values, nothing smaller.  There are two problems here:
+
+       (a) Instantiating virtual registers can introduce new uses of the
+           stack pointer.  If these virtual registers are valid addresses,
+           the stack pointer should be too.
+
+       (b) Most uses of the stack pointer are not made explicit until
+           FRAME_POINTER_REGNUM and ARG_POINTER_REGNUM have been eliminated.
+           We don't know until that stage whether we'll be eliminating to the
+           stack pointer (which needs the restriction) or the hard frame
+           pointer (which doesn't).
+
+     All in all, it seems more consistent to only enforce this restriction
+     during and after reload.  */
+  if (TARGET_MIPS16 && regno == STACK_POINTER_REGNUM)
+    return !strict_p || GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8;
+
+  return TARGET_MIPS16 ? M16_REG_P (regno) : GP_REG_P (regno);
+}
+
+/* Return true if X is a valid base register for mode MODE.
+   STRICT_P is true if REG_OK_STRICT is in effect.  */
+
+static bool
+mips_valid_base_register_p (rtx x, enum machine_mode mode, bool strict_p)
+{
+  /* Before reload, look through SUBREGs of pseudos.  */
+  if (!strict_p && GET_CODE (x) == SUBREG)
+    x = SUBREG_REG (x);
+
+  if (!REG_P (x))
+    return false;
+  return mips_regno_mode_ok_for_base_p (REGNO (x), mode, strict_p);
+}
+
+/* Return true if, for every base register BASE_REG, (plus BASE_REG X)
+   can address a value of mode MODE.  */
+
+static bool
+mips_valid_offset_p (rtx x, enum machine_mode mode)
+{
+  /* Check that X is a signed 16-bit number.  */
+  if (!const_arith_operand (x, Pmode))
+    return false;
+
+  /* We may need to split multiword moves, so make sure that every word
+     is accessible: the offset of the highest-addressed word must also
+     fit in 16 bits.  */
+  if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
+      && !SMALL_OPERAND (INTVAL (x) + GET_MODE_SIZE (mode) - UNITS_PER_WORD))
+    return false;
+
+  return true;
+}
+
+/* Return true if a LO_SUM can address a value of mode MODE when the
+   LO_SUM symbol has type SYMBOL_TYPE.  */
+
+static bool
+mips_valid_lo_sum_p (enum mips_symbol_type symbol_type, enum machine_mode mode)
+{
+  /* Check that symbols of type SYMBOL_TYPE can be used to access values
+     of mode MODE.  */
+  if (mips_symbol_insns (symbol_type, mode) == 0)
+    return false;
+
+  /* Check that there is a known low-part relocation.  */
+  if (mips_lo_relocs[symbol_type] == NULL)
+    return false;
+
+  /* We may need to split multiword moves, so make sure that each word
+     can be accessed without inducing a carry.  This is mainly needed
+     for o64, which has historically only guaranteed 64-bit alignment
+     for 128-bit types.  */
+  if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
+      && GET_MODE_BITSIZE (mode) > GET_MODE_ALIGNMENT (mode))
+    return false;
+
+  return true;
+}
+
+/* Return true if X is a valid address for machine mode MODE.  If it is,
+   fill in INFO appropriately.  STRICT_P is true if REG_OK_STRICT is in
+   effect.  */
+
+static bool
+mips_classify_address (struct mips_address_info *info, rtx x,
+                       enum machine_mode mode, bool strict_p)
+{
+  switch (GET_CODE (x))
+    {
+    case REG:
+    case SUBREG:
+      /* A bare register: treat as register + 0.  */
+      info->type = ADDRESS_REG;
+      info->reg = x;
+      info->offset = const0_rtx;
+      return mips_valid_base_register_p (info->reg, mode, strict_p);
+
+    case PLUS:
+      /* Register plus 16-bit constant offset.  */
+      info->type = ADDRESS_REG;
+      info->reg = XEXP (x, 0);
+      info->offset = XEXP (x, 1);
+      return (mips_valid_base_register_p (info->reg, mode, strict_p)
+              && mips_valid_offset_p (info->offset, mode));
+
+    case LO_SUM:
+      info->type = ADDRESS_LO_SUM;
+      info->reg = XEXP (x, 0);
+      info->offset = XEXP (x, 1);
+      /* We have to trust the creator of the LO_SUM to do something vaguely
+         sane.  Target-independent code that creates a LO_SUM should also
+         create and verify the matching HIGH.  Target-independent code that
+         adds an offset to a LO_SUM must prove that the offset will not
+         induce a carry.  Failure to do either of these things would be
+         a bug, and we are not required to check for it here.  The MIPS
+         backend itself should only create LO_SUMs for valid symbolic
+         constants, with the high part being either a HIGH or a copy
+         of _gp.  */
+      info->symbol_type
+        = mips_classify_symbolic_expression (info->offset, SYMBOL_CONTEXT_MEM);
+      return (mips_valid_base_register_p (info->reg, mode, strict_p)
+              && mips_valid_lo_sum_p (info->symbol_type, mode));
+
+    case CONST_INT:
+      /* Small-integer addresses don't occur very often, but they
+         are legitimate if $0 is a valid base register.  */
+      info->type = ADDRESS_CONST_INT;
+      return !TARGET_MIPS16 && SMALL_INT (x);
+
+    case CONST:
+    case LABEL_REF:
+    case SYMBOL_REF:
+      /* A direct symbolic address, valid only for unsplit symbol types.  */
+      info->type = ADDRESS_SYMBOLIC;
+      return (mips_symbolic_constant_p (x, SYMBOL_CONTEXT_MEM,
+                                        &info->symbol_type)
+              && mips_symbol_insns (info->symbol_type, mode) > 0
+              && !mips_split_p[info->symbol_type]);
+
+    default:
+      return false;
+    }
+}
+
+/* Implement TARGET_LEGITIMATE_ADDRESS_P.  */
+
+static bool
+mips_legitimate_address_p (enum machine_mode mode, rtx x, bool strict_p)
+{
+  /* The address is legitimate iff it can be classified; the details
+     recorded in INFO are discarded here.  */
+  struct mips_address_info info;
+
+  return mips_classify_address (&info, x, mode, strict_p);
+}
+
+/* Return true if X is a legitimate $sp-based address for mode MODE.  */
+
+bool
+mips_stack_address_p (rtx x, enum machine_mode mode)
+{
+  struct mips_address_info info;
+
+  if (!mips_classify_address (&info, x, mode, false))
+    return false;
+  return info.type == ADDRESS_REG && info.reg == stack_pointer_rtx;
+}
+
+/* Return true if ADDR matches the pattern for the LWXS load scaled indexed
+   address instruction.  Note that such addresses are not considered
+   legitimate in the TARGET_LEGITIMATE_ADDRESS_P sense, because their use
+   is so restricted.  */
+
+static bool
+mips_lwxs_address_p (rtx addr)
+{
+  rtx scaled;
+
+  /* The address must have the shape (plus (mult reg 4) reg).  */
+  if (!ISA_HAS_LWXS
+      || GET_CODE (addr) != PLUS
+      || !REG_P (XEXP (addr, 1)))
+    return false;
+
+  scaled = XEXP (addr, 0);
+  return (GET_CODE (scaled) == MULT
+          && REG_P (XEXP (scaled, 0))
+          && CONST_INT_P (XEXP (scaled, 1))
+          && INTVAL (XEXP (scaled, 1)) == 4);
+}
+
+/* Return true if ADDR matches the pattern for the L{B,H,W,D}{,U}X load
+   indexed address instruction.  Note that such addresses are
+   not considered legitimate in the TARGET_LEGITIMATE_ADDRESS_P
+   sense, because their use is so restricted.  */
+
+static bool
+mips_lx_address_p (rtx addr, enum machine_mode mode)
+{
+  /* An indexed address is a plain sum of two registers.  */
+  if (GET_CODE (addr) != PLUS
+      || !REG_P (XEXP (addr, 0))
+      || !REG_P (XEXP (addr, 1)))
+    return false;
+
+  /* Each access size is gated by its own ISA feature.  */
+  switch (mode)
+    {
+    case QImode:
+      return ISA_HAS_LBX;
+    case HImode:
+      return ISA_HAS_LHX;
+    case SImode:
+      return ISA_HAS_LWX;
+    case DImode:
+      return ISA_HAS_LDX;
+    default:
+      return false;
+    }
+}
+
+/* Return true if a value at OFFSET bytes from base register BASE can be
+   accessed using an unextended MIPS16 instruction.  MODE is the mode of
+   the value.
+
+   Usually the offset in an unextended instruction is a 5-bit field.
+   The offset is unsigned and shifted left once for LH and SH, twice
+   for LW and SW, and so on.  An exception is LWSP and SWSP, which have
+   an 8-bit immediate field that's shifted left twice.  */
+
+static bool
+mips16_unextended_reference_p (enum machine_mode mode, rtx base,
+                               unsigned HOST_WIDE_INT offset)
+{
+  unsigned HOST_WIDE_INT field_range;
+
+  /* BLKmode references and misaligned offsets always need the
+     extended form.  */
+  if (mode == BLKmode || offset % GET_MODE_SIZE (mode) != 0)
+    return false;
+
+  /* $sp-relative word accesses get the larger 8-bit field; everything
+     else has the standard 5-bit field.  */
+  if (GET_MODE_SIZE (mode) == 4 && base == stack_pointer_rtx)
+    field_range = 256U;
+  else
+    field_range = 32U;
+  return offset < field_range * GET_MODE_SIZE (mode);
+}
+
+/* Return the number of instructions needed to load or store a value
+   of mode MODE at address X, assuming that BASE_INSN_LENGTH is the
+   length of one instruction.  Return 0 if X isn't valid for MODE.
+   Assume that multiword moves may need to be split into word moves
+   if MIGHT_SPLIT_P, otherwise assume that a single load or store is
+   enough.  */
+
+int
+mips_address_insns (rtx x, enum machine_mode mode, bool might_split_p)
+{
+  struct mips_address_info addr;
+  int factor;
+
+  /* BLKmode is used for single unaligned loads and stores and should
+     not count as a multiword mode.  (GET_MODE_SIZE (BLKmode) is pretty
+     meaningless, so we have to single it out as a special case one way
+     or the other.)  */
+  if (mode != BLKmode && might_split_p)
+    factor = (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
+  else
+    factor = 1;
+
+  if (mips_classify_address (&addr, x, mode, false))
+    switch (addr.type)
+      {
+      case ADDRESS_REG:
+        /* MIPS16 references outside the unextended range need an
+           extended (double-length) instruction.  */
+        if (TARGET_MIPS16
+            && !mips16_unextended_reference_p (mode, addr.reg,
+                                               UINTVAL (addr.offset)))
+          return factor * 2;
+        return factor;
+
+      case ADDRESS_LO_SUM:
+        return TARGET_MIPS16 ? factor * 2 : factor;
+
+      case ADDRESS_CONST_INT:
+        return factor;
+
+      case ADDRESS_SYMBOLIC:
+        return factor * mips_symbol_insns (addr.symbol_type, mode);
+      }
+  return 0;
+}
+
+/* Return true if X fits within an unsigned field of BITS bits that is
+   shifted left SHIFT bits before being used.  */
+
+bool
+mips_unsigned_immediate_p (unsigned HOST_WIDE_INT x, int bits, int shift = 0)
+{
+  /* Reject values with nonzero bits below the shift point.  */
+  if ((x & ((1 << shift) - 1)) != 0)
+    return false;
+  /* The remaining value must fit in BITS bits above the shift point.  */
+  return x < ((unsigned) 1 << (shift + bits));
+}
+
+/* Return true if X fits within a signed field of BITS bits that is
+   shifted left SHIFT bits before being used.  */
+
+bool
+mips_signed_immediate_p (unsigned HOST_WIDE_INT x, int bits, int shift = 0)
+{
+  /* Bias X so that the valid signed range maps onto the unsigned one,
+     then reuse the unsigned check.  */
+  unsigned HOST_WIDE_INT biased = x + (1 << (bits + shift - 1));
+  return mips_unsigned_immediate_p (biased, bits, shift);
+}
+
+/* Return true if X is legitimate for accessing values of mode MODE,
+   if it is based on a MIPS16 register, and if the offset satisfies
+   OFFSET_PREDICATE.  */
+
+bool
+m16_based_address_p (rtx x, enum machine_mode mode,
+                     insn_operand_predicate_fn offset_predicate)
+{
+  struct mips_address_info info;
+
+  if (!mips_classify_address (&info, x, mode, false)
+      || info.type != ADDRESS_REG)
+    return false;
+  return (M16_REG_P (REGNO (info.reg))
+          && offset_predicate (info.offset, mode));
+}
+
+/* Return true if X is a legitimate address that conforms to the requirements
+   for a microMIPS LWSP or SWSP insn.  */
+
+bool
+lwsp_swsp_address_p (rtx x, enum machine_mode mode)
+{
+  struct mips_address_info info;
+
+  if (!mips_classify_address (&info, x, mode, false)
+      || info.type != ADDRESS_REG)
+    return false;
+  /* The base must be $sp and the offset an unsigned scaled 5-bit value.  */
+  return (REGNO (info.reg) == STACK_POINTER_REGNUM
+          && uw5_operand (info.offset, mode));
+}
+
+/* Return true if X is a legitimate address with a 12-bit offset.
+   MODE is the mode of the value being accessed.  */
+
+bool
+umips_12bit_offset_address_p (rtx x, enum machine_mode mode)
+{
+  struct mips_address_info info;
+
+  if (!mips_classify_address (&info, x, mode, false)
+      || info.type != ADDRESS_REG)
+    return false;
+  return (CONST_INT_P (info.offset)
+          && UMIPS_12BIT_OFFSET_P (INTVAL (info.offset)));
+}
+
+/* Return the number of instructions needed to load constant X,
+   assuming that BASE_INSN_LENGTH is the length of one instruction.
+   Return 0 if X isn't a valid constant.  */
+
+int
+mips_const_insns (rtx x)
+{
+  struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
+  enum mips_symbol_type symbol_type;
+  rtx offset;
+
+  switch (GET_CODE (x))
+    {
+    case HIGH:
+      /* Only split symbols have a usable high part.  */
+      if (!mips_symbolic_constant_p (XEXP (x, 0), SYMBOL_CONTEXT_LEA,
+                                     &symbol_type)
+          || !mips_split_p[symbol_type])
+        return 0;
+
+      /* This is simply an LUI for normal mode.  It is an extended
+         LI followed by an extended SLL for MIPS16.  */
+      return TARGET_MIPS16 ? 4 : 1;
+
+    case CONST_INT:
+      if (TARGET_MIPS16)
+        /* Unsigned 8-bit constants can be loaded using an unextended
+           LI instruction.  Unsigned 16-bit constants can be loaded
+           using an extended LI.  Negative constants must be loaded
+           using LI and then negated.  */
+        return (IN_RANGE (INTVAL (x), 0, 255) ? 1
+                : SMALL_OPERAND_UNSIGNED (INTVAL (x)) ? 2
+                : IN_RANGE (-INTVAL (x), 0, 255) ? 2
+                : SMALL_OPERAND_UNSIGNED (-INTVAL (x)) ? 3
+                : 0);
+
+      return mips_build_integer (codes, INTVAL (x));
+
+    case CONST_DOUBLE:
+    case CONST_VECTOR:
+      /* Allow zeros for normal mode, where we can use $0.  */
+      return !TARGET_MIPS16 && x == CONST0_RTX (GET_MODE (x)) ? 1 : 0;
+
+    case CONST:
+      if (CONST_GP_P (x))
+        return 1;
+
+      /* See if we can refer to X directly.  */
+      if (mips_symbolic_constant_p (x, SYMBOL_CONTEXT_LEA, &symbol_type))
+        return mips_symbol_insns (symbol_type, MAX_MACHINE_MODE);
+
+      /* Otherwise try splitting the constant into a base and offset.
+         If the offset is a 16-bit value, we can load the base address
+         into a register and then use (D)ADDIU to add in the offset.
+         If the offset is larger, we can load the base and offset
+         into separate registers and add them together with (D)ADDU.
+         However, the latter is only possible before reload; during
+         and after reload, we must have the option of forcing the
+         constant into the pool instead.  */
+      split_const (x, &x, &offset);
+      if (offset != 0)
+        {
+          int n = mips_const_insns (x);
+          if (n != 0)
+            {
+              if (SMALL_INT (offset))
+                return n + 1;
+              else if (!targetm.cannot_force_const_mem (GET_MODE (x), x))
+                return n + 1 + mips_build_integer (codes, INTVAL (offset));
+            }
+        }
+      return 0;
+
+    case SYMBOL_REF:
+    case LABEL_REF:
+      return mips_symbol_insns (mips_classify_symbol (x, SYMBOL_CONTEXT_LEA),
+                                MAX_MACHINE_MODE);
+
+    default:
+      return 0;
+    }
+}
+
+/* X is a doubleword constant that can be handled by splitting it into
+ two words and loading each word separately. Return the number of
+ instructions required to do this, assuming that BASE_INSN_LENGTH
+ is the length of one instruction. */
+
+int
+mips_split_const_insns (rtx x)
+{
+ unsigned int low, high;
+
+ low = mips_const_insns (mips_subword (x, false));
+ high = mips_const_insns (mips_subword (x, true));
+ gcc_assert (low > 0 && high > 0);
+ return low + high;
+}
+
+/* Return the number of instructions needed to implement INSN,
+ given that it loads from or stores to MEM. Assume that
+ BASE_INSN_LENGTH is the length of one instruction. */
+
+int
+mips_load_store_insns (rtx mem, rtx insn)
+{
+ enum machine_mode mode;
+ bool might_split_p;
+ rtx set;
+
+ gcc_assert (MEM_P (mem));
+ mode = GET_MODE (mem);
+
+ /* Try to prove that INSN does not need to be split. */
+ might_split_p = GET_MODE_SIZE (mode) > UNITS_PER_WORD;
+ if (might_split_p)
+ {
+ set = single_set (insn);
+ if (set && !mips_split_move_insn_p (SET_DEST (set), SET_SRC (set), insn))
+ might_split_p = false;
+ }
+
+ return mips_address_insns (XEXP (mem, 0), mode, might_split_p);
+}
+
+/* Return the number of instructions needed for an integer division,
+   assuming that BASE_INSN_LENGTH is the length of one instruction.  */
+
+int
+mips_idiv_insns (void)
+{
+  /* Start with the division instruction itself.  */
+  int count = 1;
+
+  /* A zero-divide check costs one extra instruction when a trap can
+     be generated and two otherwise.  */
+  if (TARGET_CHECK_ZERO_DIV)
+    count += GENERATE_DIVIDE_TRAPS ? 1 : 2;
+
+  /* The R4000/R4400 errata workarounds add one more instruction.  */
+  if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
+    count++;
+
+  return count;
+}
+
+/* Emit a move from SRC to DEST.  Assume that the move expanders can
+   handle all moves if !can_create_pseudo_p ().  The distinction is
+   important because, unlike emit_move_insn, the move expanders know
+   how to force Pmode objects into the constant pool even when the
+   constant pool address is not itself legitimate.  */
+
+rtx
+mips_emit_move (rtx dest, rtx src)
+{
+  if (can_create_pseudo_p ())
+    return emit_move_insn (dest, src);
+  return emit_move_insn_1 (dest, src);
+}
+
+/* Emit a move from SRC to DEST, splitting compound moves into individual
+   instructions.  SPLIT_TYPE is the type of split to perform.  */
+
+static void
+mips_emit_move_or_split (rtx dest, rtx src, enum mips_split_type split_type)
+{
+  /* Only split when this particular move needs it for SPLIT_TYPE.  */
+  if (mips_split_move_p (dest, src, split_type))
+    mips_split_move (dest, src, split_type);
+  else
+    mips_emit_move (dest, src);
+}
+
+/* Emit an instruction of the form (set TARGET (CODE OP0)).  */
+
+static void
+mips_emit_unary (enum rtx_code code, rtx target, rtx op0)
+{
+  rtx rhs = gen_rtx_fmt_e (code, GET_MODE (op0), op0);
+  emit_insn (gen_rtx_SET (VOIDmode, target, rhs));
+}
+
+/* Compute (CODE OP0) and store the result in a new register of mode MODE.
+   Return that new register.  */
+
+static rtx
+mips_force_unary (enum machine_mode mode, enum rtx_code code, rtx op0)
+{
+  rtx result = gen_reg_rtx (mode);
+  mips_emit_unary (code, result, op0);
+  return result;
+}
+
+/* Emit an instruction of the form (set TARGET (CODE OP0 OP1)).  */
+
+void
+mips_emit_binary (enum rtx_code code, rtx target, rtx op0, rtx op1)
+{
+  rtx rhs = gen_rtx_fmt_ee (code, GET_MODE (target), op0, op1);
+  emit_insn (gen_rtx_SET (VOIDmode, target, rhs));
+}
+
+/* Compute (CODE OP0 OP1) and store the result in a new register
+   of mode MODE.  Return that new register.  */
+
+static rtx
+mips_force_binary (enum machine_mode mode, enum rtx_code code, rtx op0, rtx op1)
+{
+  rtx result = gen_reg_rtx (mode);
+  mips_emit_binary (code, result, op0, op1);
+  return result;
+}
+
+/* Copy VALUE to a register and return that register.  If new pseudos
+   are allowed, copy it into a new register, otherwise use DEST.  */
+
+static rtx
+mips_force_temporary (rtx dest, rtx value)
+{
+  if (!can_create_pseudo_p ())
+    {
+      mips_emit_move (dest, value);
+      return dest;
+    }
+  return force_reg (Pmode, value);
+}
+
+/* Emit a call sequence with call pattern PATTERN and return the call
+   instruction itself (which is not necessarily the last instruction
+   emitted).  ORIG_ADDR is the original, unlegitimized address,
+   ADDR is the legitimized form, and LAZY_P is true if the call
+   address is lazily-bound.  */
+
+static rtx
+mips_emit_call_insn (rtx pattern, rtx orig_addr, rtx addr, bool lazy_p)
+{
+  rtx insn, reg;
+
+  insn = emit_call_insn (pattern);
+
+  if (TARGET_MIPS16 && mips_use_pic_fn_addr_reg_p (orig_addr))
+    {
+      /* MIPS16 JALRs only take MIPS16 registers.  If the target
+         function requires $25 to be valid on entry, we must copy it
+         there separately.  The move instruction can be put in the
+         call's delay slot.  */
+      reg = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
+      emit_insn_before (gen_move_insn (reg, addr), insn);
+      use_reg (&CALL_INSN_FUNCTION_USAGE (insn), reg);
+    }
+
+  if (lazy_p)
+    /* Lazy-binding stubs require $gp to be valid on entry.  */
+    use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
+
+  if (TARGET_USE_GOT)
+    {
+      /* See the comment above load_call<mode> for details.  */
+      use_reg (&CALL_INSN_FUNCTION_USAGE (insn),
+               gen_rtx_REG (Pmode, GOT_VERSION_REGNUM));
+      emit_insn (gen_update_got_version ());
+    }
+  return insn;
+}
+
+/* Wrap symbol or label BASE in an UNSPEC address of type SYMBOL_TYPE,
+   then add CONST_INT OFFSET to the result.  */
+
+static rtx
+mips_unspec_address_offset (rtx base, rtx offset,
+                            enum mips_symbol_type symbol_type)
+{
+  base = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, base),
+                         UNSPEC_ADDRESS_FIRST + symbol_type);
+  /* Omit the PLUS when there is no offset to add.  */
+  if (offset != const0_rtx)
+    base = gen_rtx_PLUS (Pmode, base, offset);
+  return gen_rtx_CONST (Pmode, base);
+}
+
+/* Return an UNSPEC address with underlying address ADDRESS and symbol
+   type SYMBOL_TYPE.  */
+
+rtx
+mips_unspec_address (rtx address, enum mips_symbol_type symbol_type)
+{
+  rtx base, offset;
+
+  split_const (address, &base, &offset);
+  return mips_unspec_address_offset (base, offset, symbol_type);
+}
+
+/* If OP is an UNSPEC address, return the address to which it refers,
+   otherwise return OP itself.  */
+
+rtx
+mips_strip_unspec_address (rtx op)
+{
+  rtx base, offset;
+
+  split_const (op, &base, &offset);
+  if (UNSPEC_ADDRESS_P (base))
+    op = plus_constant (Pmode, UNSPEC_ADDRESS (base), INTVAL (offset));
+  return op;
+}
+
+/* If mips_unspec_address (ADDR, SYMBOL_TYPE) is a 32-bit value, add the
+   high part to BASE and return the result.  Just return BASE otherwise.
+   TEMP is as for mips_force_temporary.
+
+   The returned expression can be used as the first operand to a LO_SUM.  */
+
+static rtx
+mips_unspec_offset_high (rtx temp, rtx base, rtx addr,
+                         enum mips_symbol_type symbol_type)
+{
+  if (mips_split_p[symbol_type])
+    {
+      addr = gen_rtx_HIGH (Pmode, mips_unspec_address (addr, symbol_type));
+      addr = mips_force_temporary (temp, addr);
+      base = mips_force_temporary (temp, gen_rtx_PLUS (Pmode, addr, base));
+    }
+  return base;
+}
+
+/* Return an instruction that copies $gp into register REG.  We want
+   GCC to treat the register's value as constant, so that its value
+   can be rematerialized on demand.  */
+
+static rtx
+gen_load_const_gp (rtx reg)
+{
+  /* PMODE_INSN selects the SImode or DImode generator to match Pmode.  */
+  return PMODE_INSN (gen_load_const_gp, (reg));
+}
+
+/* Return a pseudo register that contains the value of $gp throughout
+   the current function.  Such registers are needed by MIPS16 functions,
+   for which $gp itself is not a valid base register or addition operand.
+   The pseudo is created lazily and cached in cfun->machine.  */
+
+static rtx
+mips16_gp_pseudo_reg (void)
+{
+  if (cfun->machine->mips16_gp_pseudo_rtx == NULL_RTX)
+    {
+      rtx insn, scan;
+
+      cfun->machine->mips16_gp_pseudo_rtx = gen_reg_rtx (Pmode);
+
+      push_topmost_sequence ();
+
+      /* Advance past anything at the start of the function that is
+         not a real insn, so the load is emitted before the first one.  */
+      scan = get_insns ();
+      while (NEXT_INSN (scan) && !INSN_P (NEXT_INSN (scan)))
+        scan = NEXT_INSN (scan);
+
+      insn = gen_load_const_gp (cfun->machine->mips16_gp_pseudo_rtx);
+      insn = emit_insn_after (insn, scan);
+      INSN_LOCATION (insn) = 0;
+
+      pop_topmost_sequence ();
+    }
+
+  return cfun->machine->mips16_gp_pseudo_rtx;
+}
+
+/* Return a base register that holds pic_offset_table_rtx.
+   TEMP, if nonnull, is a scratch Pmode base register.  */
+
+rtx
+mips_pic_base_register (rtx temp)
+{
+  if (!TARGET_MIPS16)
+    return pic_offset_table_rtx;
+
+  if (currently_expanding_to_rtl)
+    return mips16_gp_pseudo_reg ();
+
+  if (can_create_pseudo_p ())
+    temp = gen_reg_rtx (Pmode);
+
+  if (TARGET_USE_GOT)
+    /* The first post-reload split exposes all references to $gp
+       (both uses and definitions).  All references must remain
+       explicit after that point.
+
+       It is safe to introduce uses of $gp at any time, so for
+       simplicity, we do that before the split too.  */
+    mips_emit_move (temp, pic_offset_table_rtx);
+  else
+    emit_insn (gen_load_const_gp (temp));
+  return temp;
+}
+
+/* Return the RHS of a load_call<mode> insn.  */
+
+static rtx
+mips_unspec_call (rtx reg, rtx symbol)
+{
+  rtvec vec;
+
+  vec = gen_rtvec (3, reg, symbol, gen_rtx_REG (SImode, GOT_VERSION_REGNUM));
+  return gen_rtx_UNSPEC (Pmode, vec, UNSPEC_LOAD_CALL);
+}
+
+/* If SRC is the RHS of a load_call<mode> insn, return the underlying symbol
+   reference.  Return NULL_RTX otherwise.  */
+
+static rtx
+mips_strip_unspec_call (rtx src)
+{
+  if (GET_CODE (src) == UNSPEC && XINT (src, 1) == UNSPEC_LOAD_CALL)
+    return mips_strip_unspec_address (XVECEXP (src, 0, 1));
+  return NULL_RTX;
+}
+
+/* Create and return a GOT reference of type TYPE for address ADDR.
+   TEMP, if nonnull, is a scratch Pmode base register.  */
+
+rtx
+mips_got_load (rtx temp, rtx addr, enum mips_symbol_type type)
+{
+  rtx base, high, lo_sum_symbol;
+
+  base = mips_pic_base_register (temp);
+
+  /* If we used the temporary register to load $gp, we can't use
+     it for the high part as well.  */
+  if (temp != NULL && reg_overlap_mentioned_p (base, temp))
+    temp = NULL;
+
+  high = mips_unspec_offset_high (temp, base, addr, type);
+  lo_sum_symbol = mips_unspec_address (addr, type);
+
+  if (type == SYMBOL_GOTOFF_CALL)
+    return mips_unspec_call (high, lo_sum_symbol);
+  else
+    return PMODE_INSN (gen_unspec_got, (high, lo_sum_symbol));
+}
+
+/* If MODE is MAX_MACHINE_MODE, ADDR appears as a move operand, otherwise
+   it appears in a MEM of that mode.  Return true if ADDR is a legitimate
+   constant in that context and can be split into high and low parts.
+   If so, and if LOW_OUT is nonnull, emit the high part and store the
+   low part in *LOW_OUT.  Leave *LOW_OUT unchanged otherwise.
+
+   TEMP is as for mips_force_temporary and is used to load the high
+   part into a register.
+
+   When MODE is MAX_MACHINE_MODE, the low part is guaranteed to be
+   a legitimate SET_SRC for an .md pattern, otherwise the low part
+   is guaranteed to be a legitimate address for mode MODE.  */
+
+bool
+mips_split_symbol (rtx temp, rtx addr, enum machine_mode mode, rtx *low_out)
+{
+  enum mips_symbol_context context;
+  enum mips_symbol_type symbol_type;
+  rtx high;
+
+  context = (mode == MAX_MACHINE_MODE
+             ? SYMBOL_CONTEXT_LEA
+             : SYMBOL_CONTEXT_MEM);
+  if (GET_CODE (addr) == HIGH && context == SYMBOL_CONTEXT_LEA)
+    {
+      /* ADDR is an explicit HIGH: handle symbols whose high part must
+         itself be loaded specially.  */
+      addr = XEXP (addr, 0);
+      if (mips_symbolic_constant_p (addr, context, &symbol_type)
+          && mips_symbol_insns (symbol_type, mode) > 0
+          && mips_split_hi_p[symbol_type])
+        {
+          if (low_out)
+            switch (symbol_type)
+              {
+              case SYMBOL_GOT_PAGE_OFST:
+                /* The high part of a page/ofst pair is loaded from the GOT.  */
+                *low_out = mips_got_load (temp, addr, SYMBOL_GOTOFF_PAGE);
+                break;
+
+              default:
+                gcc_unreachable ();
+              }
+          return true;
+        }
+    }
+  else
+    {
+      if (mips_symbolic_constant_p (addr, context, &symbol_type)
+          && mips_symbol_insns (symbol_type, mode) > 0
+          && mips_split_p[symbol_type])
+        {
+          if (low_out)
+            switch (symbol_type)
+              {
+              case SYMBOL_GOT_DISP:
+                /* SYMBOL_GOT_DISP symbols are loaded from the GOT.  */
+                *low_out = mips_got_load (temp, addr, SYMBOL_GOTOFF_DISP);
+                break;
+
+              case SYMBOL_GP_RELATIVE:
+                /* Small-data accesses use $gp as the high part.  */
+                high = mips_pic_base_register (temp);
+                *low_out = gen_rtx_LO_SUM (Pmode, high, addr);
+                break;
+
+              default:
+                /* Otherwise use a conventional HIGH/LO_SUM pair.  */
+                high = gen_rtx_HIGH (Pmode, copy_rtx (addr));
+                high = mips_force_temporary (temp, high);
+                *low_out = gen_rtx_LO_SUM (Pmode, high, addr);
+                break;
+              }
+          return true;
+        }
+    }
+  return false;
+}
+
+/* Return a legitimate address for REG + OFFSET.  TEMP is as for
+   mips_force_temporary; it is only needed when OFFSET is not a
+   SMALL_OPERAND.  */
+
+static rtx
+mips_add_offset (rtx temp, rtx reg, HOST_WIDE_INT offset)
+{
+  if (!SMALL_OPERAND (offset))
+    {
+      rtx high;
+
+      if (TARGET_MIPS16)
+        {
+          /* Load the full offset into a register so that we can use
+             an unextended instruction for the address itself.  */
+          high = GEN_INT (offset);
+          offset = 0;
+        }
+      else
+        {
+          /* Leave OFFSET as a 16-bit offset and put the excess in HIGH.
+             The addition inside the macro CONST_HIGH_PART may cause an
+             overflow, so we need to force a sign-extension check.  */
+          high = gen_int_mode (CONST_HIGH_PART (offset), Pmode);
+          offset = CONST_LOW_PART (offset);
+        }
+      /* Add the high part to REG, leaving only OFFSET to apply below.  */
+      high = mips_force_temporary (temp, high);
+      reg = mips_force_temporary (temp, gen_rtx_PLUS (Pmode, high, reg));
+    }
+  return plus_constant (Pmode, reg, offset);
+}
+
+/* The __tls_get_addr symbol.  */
+static GTY(()) rtx mips_tls_symbol;
+
+/* Return an instruction sequence that calls __tls_get_addr.  SYM is
+   the TLS symbol we are referencing and TYPE is the symbol type to use
+   (either global dynamic or local dynamic).  V0 is an RTX for the
+   return value location.  */
+
+static rtx
+mips_call_tls_get_addr (rtx sym, enum mips_symbol_type type, rtx v0)
+{
+  rtx insn, loc, a0;
+
+  /* The first argument register, used to pass the TLS reference.  */
+  a0 = gen_rtx_REG (Pmode, GP_ARG_FIRST);
+
+  if (!mips_tls_symbol)
+    mips_tls_symbol = init_one_libfunc ("__tls_get_addr");
+
+  loc = mips_unspec_address (sym, type);
+
+  start_sequence ();
+
+  emit_insn (gen_rtx_SET (Pmode, a0,
+                          gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, loc)));
+  insn = mips_expand_call (MIPS_CALL_NORMAL, v0, mips_tls_symbol,
+                           const0_rtx, NULL_RTX, false);
+  /* The call has no side effects beyond its return value.  */
+  RTL_CONST_CALL_P (insn) = 1;
+  use_reg (&CALL_INSN_FUNCTION_USAGE (insn), a0);
+  insn = get_insns ();
+
+  end_sequence ();
+
+  return insn;
+}
+
+/* Emit code to load the current thread pointer into register TP
+   and return TP.  */
+
+rtx
+mips_expand_thread_pointer (rtx tp)
+{
+  rtx fn;
+
+  if (TARGET_MIPS16)
+    {
+      /* MIPS16 code reads the thread pointer via a one-only stub.  */
+      if (!mips16_rdhwr_stub)
+        mips16_rdhwr_stub = new mips16_rdhwr_one_only_stub ();
+      fn = mips16_stub_call_address (mips16_rdhwr_stub);
+      emit_insn (PMODE_INSN (gen_tls_get_tp_mips16, (tp, fn)));
+    }
+  else
+    emit_insn (PMODE_INSN (gen_tls_get_tp, (tp)));
+  return tp;
+}
+
+/* Return a new pseudo register that contains the current thread pointer.  */
+
+static rtx
+mips_get_tp (void)
+{
+  return mips_expand_thread_pointer (gen_reg_rtx (Pmode));
+}
+
+/* Generate the code to access LOC, a thread-local SYMBOL_REF, and return
+   its address.  The return value will be both a valid address and a valid
+   SET_SRC (either a REG or a LO_SUM).  */
+
+static rtx
+mips_legitimize_tls_address (rtx loc)
+{
+  rtx dest, insn, v0, tp, tmp1, tmp2, eqv, offset;
+  enum tls_model model;
+
+  model = SYMBOL_REF_TLS_MODEL (loc);
+  /* Only TARGET_ABICALLS code can have more than one module; other
+     code must be static and should not use a GOT.  All TLS models
+     reduce to local exec in this situation.  */
+  if (!TARGET_ABICALLS)
+    model = TLS_MODEL_LOCAL_EXEC;
+
+  switch (model)
+    {
+    case TLS_MODEL_GLOBAL_DYNAMIC:
+      /* Call __tls_get_addr with the symbol's TLSGD reference.  */
+      v0 = gen_rtx_REG (Pmode, GP_RETURN);
+      insn = mips_call_tls_get_addr (loc, SYMBOL_TLSGD, v0);
+      dest = gen_reg_rtx (Pmode);
+      emit_libcall_block (insn, dest, v0, loc);
+      break;
+
+    case TLS_MODEL_LOCAL_DYNAMIC:
+      /* Call __tls_get_addr once for the module (LDM) base ...  */
+      v0 = gen_rtx_REG (Pmode, GP_RETURN);
+      insn = mips_call_tls_get_addr (loc, SYMBOL_TLSLDM, v0);
+      tmp1 = gen_reg_rtx (Pmode);
+
+      /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
+         share the LDM result with other LD model accesses.  */
+      eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
+                            UNSPEC_TLS_LDM);
+      emit_libcall_block (insn, tmp1, v0, eqv);
+
+      /* ... then add the symbol's DTPREL offset to that base.  */
+      offset = mips_unspec_address (loc, SYMBOL_DTPREL);
+      if (mips_split_p[SYMBOL_DTPREL])
+        {
+          tmp2 = mips_unspec_offset_high (NULL, tmp1, loc, SYMBOL_DTPREL);
+          dest = gen_rtx_LO_SUM (Pmode, tmp2, offset);
+        }
+      else
+        dest = expand_binop (Pmode, add_optab, tmp1, offset,
+                             0, 0, OPTAB_DIRECT);
+      break;
+
+    case TLS_MODEL_INITIAL_EXEC:
+      /* Load the symbol's GOTTPREL offset from the GOT and add it to
+         the thread pointer.  */
+      tp = mips_get_tp ();
+      tmp1 = gen_reg_rtx (Pmode);
+      tmp2 = mips_unspec_address (loc, SYMBOL_GOTTPREL);
+      if (Pmode == DImode)
+        emit_insn (gen_load_gotdi (tmp1, pic_offset_table_rtx, tmp2));
+      else
+        emit_insn (gen_load_gotsi (tmp1, pic_offset_table_rtx, tmp2));
+      dest = gen_reg_rtx (Pmode);
+      emit_insn (gen_add3_insn (dest, tmp1, tp));
+      break;
+
+    case TLS_MODEL_LOCAL_EXEC:
+      /* Add the symbol's TPREL offset directly to the thread pointer.  */
+      tmp1 = mips_get_tp ();
+      offset = mips_unspec_address (loc, SYMBOL_TPREL);
+      if (mips_split_p[SYMBOL_TPREL])
+        {
+          tmp2 = mips_unspec_offset_high (NULL, tmp1, loc, SYMBOL_TPREL);
+          dest = gen_rtx_LO_SUM (Pmode, tmp2, offset);
+        }
+      else
+        dest = expand_binop (Pmode, add_optab, tmp1, offset,
+                             0, 0, OPTAB_DIRECT);
+      break;
+
+    default:
+      gcc_unreachable ();
+    }
+  return dest;
+}
+
+/* Implement "TARGET = __builtin_mips_get_fcsr ()" for MIPS16,
+   using a stub.  */
+
+void
+mips16_expand_get_fcsr (rtx target)
+{
+  if (!mips16_get_fcsr_stub)
+    mips16_get_fcsr_stub = new mips16_get_fcsr_one_only_stub ();
+  rtx fn = mips16_stub_call_address (mips16_get_fcsr_stub);
+  emit_insn (PMODE_INSN (gen_mips_get_fcsr_mips16, (fn)));
+  /* The stub leaves the FCSR value in GET_FCSR_REGNUM.  */
+  emit_move_insn (target, gen_rtx_REG (SImode, GET_FCSR_REGNUM));
+}
+
+/* Implement __builtin_mips_set_fcsr (TARGET) for MIPS16, using a stub.  */
+
+void
+mips16_expand_set_fcsr (rtx newval)
+{
+  if (!mips16_set_fcsr_stub)
+    mips16_set_fcsr_stub = new mips16_set_fcsr_one_only_stub ();
+  rtx fn = mips16_stub_call_address (mips16_set_fcsr_stub);
+  /* The stub expects the new FCSR value in SET_FCSR_REGNUM.  */
+  emit_move_insn (gen_rtx_REG (SImode, SET_FCSR_REGNUM), newval);
+  emit_insn (PMODE_INSN (gen_mips_set_fcsr_mips16, (fn)));
+}
+
+/* If X is not a valid address for mode MODE, force it into a register.  */
+
+static rtx
+mips_force_address (rtx x, enum machine_mode mode)
+{
+  if (mips_legitimate_address_p (mode, x, false))
+    return x;
+  return force_reg (Pmode, x);
+}
+
+/* This function is used to implement LEGITIMIZE_ADDRESS.  If X can
+   be legitimized in a way that the generic machinery might not expect,
+   return a new address, otherwise return NULL.  MODE is the mode of
+   the memory being accessed.  */
+
+static rtx
+mips_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
+                         enum machine_mode mode)
+{
+  rtx base, addr;
+  HOST_WIDE_INT offset;
+
+  if (mips_tls_symbol_p (x))
+    return mips_legitimize_tls_address (x);
+
+  /* See if the address can split into a high part and a LO_SUM.  */
+  if (mips_split_symbol (NULL, x, mode, &addr))
+    return mips_force_address (addr, mode);
+
+  /* Handle BASE + OFFSET using mips_add_offset.  */
+  mips_split_plus (x, &base, &offset);
+  if (offset != 0)
+    {
+      if (!mips_valid_base_register_p (base, mode, false))
+        base = copy_to_mode_reg (Pmode, base);
+      addr = mips_add_offset (NULL, base, offset);
+      return mips_force_address (addr, mode);
+    }
+
+  return x;
+}
+
+/* Load VALUE into DEST.  TEMP is as for mips_force_temporary.  */
+
+void
+mips_move_integer (rtx temp, rtx dest, unsigned HOST_WIDE_INT value)
+{
+  struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
+  enum machine_mode mode;
+  unsigned int i, num_ops;
+  rtx x;
+
+  mode = GET_MODE (dest);
+  num_ops = mips_build_integer (codes, value);
+
+  /* Apply each binary operation to X.  Invariant: X is a legitimate
+     source operand for a SET pattern.  */
+  x = GEN_INT (codes[0].value);
+  for (i = 1; i < num_ops; i++)
+    {
+      if (!can_create_pseudo_p ())
+        {
+          /* Reuse TEMP for each intermediate value when new pseudos
+             are not available.  */
+          emit_insn (gen_rtx_SET (VOIDmode, temp, x));
+          x = temp;
+        }
+      else
+        x = force_reg (mode, x);
+      x = gen_rtx_fmt_ee (codes[i].code, mode, x, GEN_INT (codes[i].value));
+    }
+
+  emit_insn (gen_rtx_SET (VOIDmode, dest, x));
+}
+
+/* Subroutine of mips_legitimize_move.  Move constant SRC into register
+   DEST given that SRC satisfies immediate_operand but doesn't satisfy
+   move_operand.  */
+
+static void
+mips_legitimize_const_move (enum machine_mode mode, rtx dest, rtx src)
+{
+  rtx base, offset;
+
+  /* Split moves of big integers into smaller pieces.  */
+  if (splittable_const_int_operand (src, mode))
+    {
+      mips_move_integer (dest, dest, INTVAL (src));
+      return;
+    }
+
+  /* Split moves of symbolic constants into high/low pairs.  */
+  if (mips_split_symbol (dest, src, MAX_MACHINE_MODE, &src))
+    {
+      emit_insn (gen_rtx_SET (VOIDmode, dest, src));
+      return;
+    }
+
+  /* Generate the appropriate access sequences for TLS symbols.  */
+  if (mips_tls_symbol_p (src))
+    {
+      mips_emit_move (dest, mips_legitimize_tls_address (src));
+      return;
+    }
+
+  /* If we have (const (plus symbol offset)), and that expression cannot
+     be forced into memory, load the symbol first and add in the offset.
+     In non-MIPS16 mode, prefer to do this even if the constant _can_ be
+     forced into memory, as it usually produces better code.  */
+  split_const (src, &base, &offset);
+  if (offset != const0_rtx
+      && (targetm.cannot_force_const_mem (mode, src)
+          || (!TARGET_MIPS16 && can_create_pseudo_p ())))
+    {
+      base = mips_force_temporary (dest, base);
+      mips_emit_move (dest, mips_add_offset (NULL, base, INTVAL (offset)));
+      return;
+    }
+
+  /* Otherwise fall back on loading SRC from the constant pool.  */
+  src = force_const_mem (mode, src);
+
+  /* When using explicit relocs, constant pool references are sometimes
+     not legitimate addresses.  */
+  mips_split_symbol (dest, XEXP (src, 0), mode, &XEXP (src, 0));
+  mips_emit_move (dest, src);
+}
+
+/* If (set DEST SRC) is not a valid move instruction, emit an equivalent
+   sequence that is valid.  */
+
+bool
+mips_legitimize_move (enum machine_mode mode, rtx dest, rtx src)
+{
+  /* Neither operand is a register: force SRC into one.  */
+  if (!register_operand (dest, mode) && !reg_or_0_operand (src, mode))
+    {
+      mips_emit_move (dest, force_reg (mode, src));
+      return true;
+    }
+
+  /* We need to deal with constants that would be legitimate
+     immediate_operands but aren't legitimate move_operands.  */
+  if (CONSTANT_P (src) && !move_operand (src, mode))
+    {
+      mips_legitimize_const_move (mode, dest, src);
+      set_unique_reg_note (get_last_insn (), REG_EQUAL, copy_rtx (src));
+      return true;
+    }
+  return false;
+}
+
+/* Return true if value X in context CONTEXT is a small-data address
+   that can be rewritten as a LO_SUM.  */
+
+static bool
+mips_rewrite_small_data_p (rtx x, enum mips_symbol_context context)
+{
+  enum mips_symbol_type symbol_type;
+
+  /* Only rewrite when %lo-style relocations exist for $gp-relative
+     symbols and no separate HIGH part is required.  */
+  return (mips_lo_relocs[SYMBOL_GP_RELATIVE]
+          && !mips_split_p[SYMBOL_GP_RELATIVE]
+          && mips_symbolic_constant_p (x, context, &symbol_type)
+          && symbol_type == SYMBOL_GP_RELATIVE);
+}
+
+/* A for_each_rtx callback for mips_small_data_pattern_p.  DATA is the
+   containing MEM, or null if none.  Returns 1 to stop the walk with a
+   match, -1 to skip the current subexpression, 0 to continue.  */
+
+static int
+mips_small_data_pattern_1 (rtx *loc, void *data)
+{
+  enum mips_symbol_context context;
+
+  /* Ignore things like "g" constraints in asms.  We make no particular
+     guarantee about which symbolic constants are acceptable as asm operands
+     versus which must be forced into a GPR.  */
+  if (GET_CODE (*loc) == LO_SUM || GET_CODE (*loc) == ASM_OPERANDS)
+    return -1;
+
+  if (MEM_P (*loc))
+    {
+      /* Walk the address with the MEM recorded as the new DATA.  */
+      if (for_each_rtx (&XEXP (*loc, 0), mips_small_data_pattern_1, *loc))
+        return 1;
+      return -1;
+    }
+
+  context = data ? SYMBOL_CONTEXT_MEM : SYMBOL_CONTEXT_LEA;
+  return mips_rewrite_small_data_p (*loc, context);
+}
+
+/* Return true if OP refers to small data symbols directly, not through
+   a LO_SUM.  */
+
+bool
+mips_small_data_pattern_p (rtx op)
+{
+  return for_each_rtx (&op, mips_small_data_pattern_1, NULL);
+}
+
+/* A for_each_rtx callback, used by mips_rewrite_small_data.
+   DATA is the containing MEM, or null if none.  */
+
+static int
+mips_rewrite_small_data_1 (rtx *loc, void *data)
+{
+  enum mips_symbol_context context;
+
+  if (MEM_P (*loc))
+    {
+      for_each_rtx (&XEXP (*loc, 0), mips_rewrite_small_data_1, *loc);
+      return -1;
+    }
+
+  context = data ? SYMBOL_CONTEXT_MEM : SYMBOL_CONTEXT_LEA;
+  if (mips_rewrite_small_data_p (*loc, context))
+    *loc = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, *loc);
+
+  /* Don't walk into a LO_SUM, e.g. the one just created above.  */
+  if (GET_CODE (*loc) == LO_SUM)
+    return -1;
+
+  return 0;
+}
+
+/* Rewrite instruction pattern PATTERN so that it refers to small data
+   using explicit relocations.  */
+
+rtx
+mips_rewrite_small_data (rtx pattern)
+{
+  /* Work on a copy; the caller's PATTERN is left untouched.  */
+  pattern = copy_insn (pattern);
+  for_each_rtx (&pattern, mips_rewrite_small_data_1, NULL);
+  return pattern;
+}
+
+/* The cost of loading values from the constant pool. It should be
+ larger than the cost of any constant we want to synthesize inline. */
+#define CONSTANT_POOL_COST COSTS_N_INSNS (TARGET_MIPS16 ? 4 : 8)
+
+/* Return the cost of X when used as an operand to the MIPS16 instruction
+   that implements CODE.  Return -1 if there is no such instruction, or if
+   X is not a valid immediate operand for it.  */
+
+static int
+mips16_constant_cost (int code, HOST_WIDE_INT x)
+{
+  switch (code)
+    {
+    case ASHIFT:
+    case ASHIFTRT:
+    case LSHIFTRT:
+      /* Shifts by between 1 and 8 bits (inclusive) are unextended,
+         other shifts are extended.  The shift patterns truncate the shift
+         count to the right size, so there are no out-of-range values.  */
+      if (IN_RANGE (x, 1, 8))
+        return 0;
+      return COSTS_N_INSNS (1);
+
+    case PLUS:
+      if (IN_RANGE (x, -128, 127))
+        return 0;
+      if (SMALL_OPERAND (x))
+        return COSTS_N_INSNS (1);
+      return -1;
+
+    case LEU:
+      /* Like LE, but reject the always-true case.  */
+      if (x == -1)
+        return -1;
+      /* Fall through.  */
+    case LE:
+      /* We add 1 to the immediate and use SLT.  */
+      x += 1;
+      /* Fall through.  */
+    case XOR:
+      /* We can use CMPI for an xor with an unsigned 16-bit X.  */
+      /* Fall through.  */
+    case LT:
+    case LTU:
+      if (IN_RANGE (x, 0, 255))
+        return 0;
+      if (SMALL_OPERAND_UNSIGNED (x))
+        return COSTS_N_INSNS (1);
+      return -1;
+
+    case EQ:
+    case NE:
+      /* Equality comparisons with 0 are cheap.  */
+      if (x == 0)
+        return 0;
+      return -1;
+
+    default:
+      return -1;
+    }
+}
+
+/* Return true if there is a non-MIPS16 instruction that implements CODE
+   and if that instruction accepts X as an immediate operand.  */
+
+static int
+mips_immediate_operand_p (int code, HOST_WIDE_INT x)
+{
+  /* CODE is an rtx code; X is the candidate immediate value.  */
+  switch (code)
+    {
+    case ASHIFT:
+    case ASHIFTRT:
+    case LSHIFTRT:
+      /* All shift counts are truncated to a valid constant.  */
+      return true;
+
+    case ROTATE:
+    case ROTATERT:
+      /* Likewise rotates, if the target supports rotates at all.  */
+      return ISA_HAS_ROR;
+
+    case AND:
+    case IOR:
+    case XOR:
+      /* These instructions take 16-bit unsigned immediates.  */
+      return SMALL_OPERAND_UNSIGNED (x);
+
+    case PLUS:
+    case LT:
+    case LTU:
+      /* These instructions take 16-bit signed immediates.  */
+      return SMALL_OPERAND (x);
+
+    case EQ:
+    case NE:
+    case GT:
+    case GTU:
+      /* The "immediate" forms of these instructions are really
+         implemented as comparisons with register 0.  */
+      return x == 0;
+
+    case GE:
+    case GEU:
+      /* Likewise, meaning that the only valid immediate operand is 1.  */
+      return x == 1;
+
+    case LE:
+      /* We add 1 to the immediate and use SLT.  */
+      return SMALL_OPERAND (x + 1);
+
+    case LEU:
+      /* Likewise SLTU, but reject the always-true case.  */
+      return SMALL_OPERAND (x + 1) && x + 1 != 0;
+
+    case SIGN_EXTRACT:
+    case ZERO_EXTRACT:
+      /* The bit position and size are immediate operands.  */
+      return ISA_HAS_EXT_INS;
+
+    default:
+      /* By default assume that $0 can be used for 0.  */
+      return x == 0;
+    }
+}
+
+/* Return the cost of binary operation X, given that the instruction
+   sequence for a word-sized or smaller operation has cost SINGLE_COST
+   and that the sequence of a double-word operation has cost DOUBLE_COST.
+   If SPEED is true, optimize for speed otherwise optimize for size.  */
+
+static int
+mips_binary_cost (rtx x, int single_cost, int double_cost, bool speed)
+{
+  int cost;
+
+  if (GET_MODE_SIZE (GET_MODE (x)) == UNITS_PER_WORD * 2)
+    cost = double_cost;
+  else
+    cost = single_cost;
+  /* Add the costs of the operands: operand 0 as a full SET source,
+     operand 1 as operand 1 of X's code.  */
+  return (cost
+          + set_src_cost (XEXP (x, 0), speed)
+          + rtx_cost (XEXP (x, 1), GET_CODE (x), 1, speed));
+}
+
+/* Return the cost of floating-point multiplications of mode MODE.  */
+
+static int
+mips_fp_mult_cost (enum machine_mode mode)
+{
+  if (mode == DFmode)
+    return mips_cost->fp_mult_df;
+  return mips_cost->fp_mult_sf;
+}
+
+/* Return the cost of floating-point divisions of mode MODE.  */
+
+static int
+mips_fp_div_cost (enum machine_mode mode)
+{
+  if (mode == DFmode)
+    return mips_cost->fp_div_df;
+  return mips_cost->fp_div_sf;
+}
+
+/* Return the cost of sign-extending OP to mode MODE, not including the
+   cost of OP itself.  */
+
+static int
+mips_sign_extend_cost (enum machine_mode mode, rtx op)
+{
+  if (MEM_P (op))
+    /* Extended loads are as cheap as unextended ones.  */
+    return 0;
+
+  if (TARGET_64BIT && mode == DImode && GET_MODE (op) == SImode)
+    /* A sign extension from SImode to DImode in 64-bit mode is free.  */
+    return 0;
+
+  if (ISA_HAS_SEB_SEH || GENERATE_MIPS16E)
+    /* We can use SEB or SEH.  */
+    return COSTS_N_INSNS (1);
+
+  /* We need to use a shift left and a shift right.  The MIPS16 count
+     is higher, presumably because the shifts need extended encodings
+     there -- TODO confirm.  */
+  return COSTS_N_INSNS (TARGET_MIPS16 ? 4 : 2);
+}
+
+/* Return the cost of zero-extending OP to mode MODE, not including the
+   cost of OP itself.  */
+
+static int
+mips_zero_extend_cost (enum machine_mode mode, rtx op)
+{
+  if (MEM_P (op))
+    /* Extended loads are as cheap as unextended ones.  */
+    return 0;
+
+  if (TARGET_64BIT && mode == DImode && GET_MODE (op) == SImode)
+    /* We need a shift left by 32 bits and a shift right by 32 bits.  */
+    return COSTS_N_INSNS (TARGET_MIPS16 ? 4 : 2);
+
+  if (GENERATE_MIPS16E)
+    /* We can use ZEB or ZEH.  */
+    return COSTS_N_INSNS (1);
+
+  if (TARGET_MIPS16)
+    /* We need to load 0xff or 0xffff into a register and use AND;
+       the mask depends on OP's mode (0xff for QImode).  */
+    return COSTS_N_INSNS (GET_MODE (op) == QImode ? 2 : 3);
+
+  /* We can use ANDI.  */
+  return COSTS_N_INSNS (1);
+}
+
+/* Return the cost of moving between two registers of mode MODE,
+   assuming that the move will be in pieces of at most UNITS bytes.  */
+
+static int
+mips_set_reg_reg_piece_cost (enum machine_mode mode, unsigned int units)
+{
+  /* One instruction per UNITS-sized piece, rounding up.  */
+  unsigned int pieces = (GET_MODE_SIZE (mode) + units - 1) / units;
+  return COSTS_N_INSNS (pieces);
+}
+
+/* Return the cost of moving between two registers of mode MODE.  */
+
+static int
+mips_set_reg_reg_cost (enum machine_mode mode)
+{
+  switch (GET_MODE_CLASS (mode))
+    {
+    case MODE_CC:
+      /* Condition codes move in CCmode-sized pieces.  */
+      return mips_set_reg_reg_piece_cost (mode, GET_MODE_SIZE (CCmode));
+
+    case MODE_FLOAT:
+    case MODE_COMPLEX_FLOAT:
+    case MODE_VECTOR_FLOAT:
+      if (TARGET_HARD_FLOAT)
+        return mips_set_reg_reg_piece_cost (mode, UNITS_PER_HWFPVALUE);
+      /* Fall through */
+
+    default:
+      /* Soft-float and integer modes move in word-sized pieces.  */
+      return mips_set_reg_reg_piece_cost (mode, UNITS_PER_WORD);
+    }
+}
+
+/* Implement TARGET_RTX_COSTS. */
+
/* X is the expression being costed, CODE is GET_CODE (X) and OUTER_CODE
   is the code of the containing expression.  SPEED selects costing for
   speed rather than size.  On success, store the cost in *TOTAL and
   return true; return false to let the caller also apply default costs
   to the operands.  */

static bool
mips_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
                int *total, bool speed)
{
  enum machine_mode mode = GET_MODE (x);
  bool float_mode_p = FLOAT_MODE_P (mode);
  int cost;
  rtx addr;

  /* The cost of a COMPARE is hard to define for MIPS.  COMPAREs don't
     appear in the instruction stream, and the cost of a comparison is
     really the cost of the branch or scc condition.  At the time of
     writing, GCC only uses an explicit outer COMPARE code when optabs
     is testing whether a constant is expensive enough to force into a
     register.  We want optabs to pass such constants through the MIPS
     expanders instead, so make all constants very cheap here.  */
  if (outer_code == COMPARE)
    {
      gcc_assert (CONSTANT_P (x));
      *total = 0;
      return true;
    }

  switch (code)
    {
    case CONST_INT:
      /* Treat *clear_upper32-style ANDs as having zero cost in the
         second operand.  The cost is entirely in the first operand.

         ??? This is needed because we would otherwise try to CSE
         the constant operand.  Although that's the right thing for
         instructions that continue to be a register operation throughout
         compilation, it is disastrous for instructions that could
         later be converted into a memory operation.  */
      if (TARGET_64BIT
          && outer_code == AND
          && UINTVAL (x) == 0xffffffff)
        {
          *total = 0;
          return true;
        }

      if (TARGET_MIPS16)
        {
          cost = mips16_constant_cost (outer_code, INTVAL (x));
          /* A negative cost means "no special MIPS16 handling";
             fall through to the generic constant handling below.  */
          if (cost >= 0)
            {
              *total = cost;
              return true;
            }
        }
      else
        {
          /* When not optimizing for size, we care more about the cost
             of hot code, and hot code is often in a loop.  If a constant
             operand needs to be forced into a register, we will often be
             able to hoist the constant load out of the loop, so the load
             should not contribute to the cost.  */
          if (speed || mips_immediate_operand_p (outer_code, INTVAL (x)))
            {
              *total = 0;
              return true;
            }
        }
      /* Fall through.  */

    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
    case CONST_DOUBLE:
      if (force_to_mem_operand (x, VOIDmode))
        {
          *total = COSTS_N_INSNS (1);
          return true;
        }
      cost = mips_const_insns (x);
      if (cost > 0)
        {
          /* If the constant is likely to be stored in a GPR, SETs of
             single-insn constants are as cheap as register sets; we
             never want to CSE them.

             Don't reduce the cost of storing a floating-point zero in
             FPRs.  If we have a zero in an FPR for other reasons, we
             can get better cfg-cleanup and delayed-branch results by
             using it consistently, rather than using $0 sometimes and
             an FPR at other times.  Also, moves between floating-point
             registers are sometimes cheaper than (D)MTC1 $0.  */
          if (cost == 1
              && outer_code == SET
              && !(float_mode_p && TARGET_HARD_FLOAT))
            cost = 0;
          /* When non-MIPS16 code loads a constant N>1 times, we rarely
             want to CSE the constant itself.  It is usually better to
             have N copies of the last operation in the sequence and one
             shared copy of the other operations.  (Note that this is
             not true for MIPS16 code, where the final operation in the
             sequence is often an extended instruction.)

             Also, if we have a CONST_INT, we don't know whether it is
             for a word or doubleword operation, so we cannot rely on
             the result of mips_build_integer.  */
          else if (!TARGET_MIPS16
                   && (outer_code == SET || mode == VOIDmode))
            cost = 1;
          *total = COSTS_N_INSNS (cost);
          return true;
        }
      /* The value will need to be fetched from the constant pool.  */
      *total = CONSTANT_POOL_COST;
      return true;

    case MEM:
      /* If the address is legitimate, return the number of
         instructions it needs.  */
      addr = XEXP (x, 0);
      cost = mips_address_insns (addr, mode, true);
      if (cost > 0)
        {
          /* The "+ 1" accounts for the load or store itself.  */
          *total = COSTS_N_INSNS (cost + 1);
          return true;
        }
      /* Check for a scaled indexed address.  */
      if (mips_lwxs_address_p (addr)
          || mips_lx_address_p (addr, mode))
        {
          *total = COSTS_N_INSNS (2);
          return true;
        }
      /* Otherwise use the default handling.  */
      return false;

    case FFS:
      *total = COSTS_N_INSNS (6);
      return false;

    case NOT:
      /* Double-word NOTs need one NOR per word.  */
      *total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 2 : 1);
      return false;

    case AND:
      /* Check for a *clear_upper32 pattern and treat it like a zero
         extension.  See the pattern's comment for details.  */
      if (TARGET_64BIT
          && mode == DImode
          && CONST_INT_P (XEXP (x, 1))
          && UINTVAL (XEXP (x, 1)) == 0xffffffff)
        {
          *total = (mips_zero_extend_cost (mode, XEXP (x, 0))
                    + set_src_cost (XEXP (x, 0), speed));
          return true;
        }
      /* A shift-left followed by a mask of the low bits can be a single
         CINS instruction on Octeon.  */
      if (ISA_HAS_CINS && CONST_INT_P (XEXP (x, 1)))
        {
          rtx op = XEXP (x, 0);
          if (GET_CODE (op) == ASHIFT
              && CONST_INT_P (XEXP (op, 1))
              && mask_low_and_shift_p (mode, XEXP (x, 1), XEXP (op, 1), 32))
            {
              *total = COSTS_N_INSNS (1) + set_src_cost (XEXP (op, 0), speed);
              return true;
            }
        }
      /* (AND (NOT op0) (NOT op1) is a nor operation that can be done in
         a single instruction.  */
      if (!TARGET_MIPS16
          && GET_CODE (XEXP (x, 0)) == NOT
          && GET_CODE (XEXP (x, 1)) == NOT)
        {
          cost = GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 2 : 1;
          *total = (COSTS_N_INSNS (cost)
                    + set_src_cost (XEXP (XEXP (x, 0), 0), speed)
                    + set_src_cost (XEXP (XEXP (x, 1), 0), speed));
          return true;
        }

      /* Fall through.  */

    case IOR:
    case XOR:
      /* Double-word operations use two single-word operations.  */
      *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (2),
                                 speed);
      return true;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
    case ROTATE:
    case ROTATERT:
      /* Variable double-word shifts are much more expensive than
         constant ones (12 insns vs. 4).  */
      if (CONSTANT_P (XEXP (x, 1)))
        *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (4),
                                   speed);
      else
        *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (12),
                                   speed);
      return true;

    case ABS:
      if (float_mode_p)
        *total = mips_cost->fp_add;
      else
        *total = COSTS_N_INSNS (4);
      return false;

    case LO_SUM:
      /* Low-part immediates need an extended MIPS16 instruction.  */
      *total = (COSTS_N_INSNS (TARGET_MIPS16 ? 2 : 1)
                + set_src_cost (XEXP (x, 0), speed));
      return true;

    case LT:
    case LTU:
    case LE:
    case LEU:
    case GT:
    case GTU:
    case GE:
    case GEU:
    case EQ:
    case NE:
    case UNORDERED:
    case LTGT:
      /* Branch comparisons have VOIDmode, so use the first operand's
         mode instead.  */
      mode = GET_MODE (XEXP (x, 0));
      if (FLOAT_MODE_P (mode))
        {
          *total = mips_cost->fp_add;
          return false;
        }
      *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (4),
                                 speed);
      return true;

    case MINUS:
      if (float_mode_p
          && (ISA_HAS_NMADD4_NMSUB4 || ISA_HAS_NMADD3_NMSUB3)
          && TARGET_FUSED_MADD
          && !HONOR_NANS (mode)
          && !HONOR_SIGNED_ZEROS (mode))
        {
          /* See if we can use NMADD or NMSUB.  See mips.md for the
             associated patterns.  */
          rtx op0 = XEXP (x, 0);
          rtx op1 = XEXP (x, 1);
          if (GET_CODE (op0) == MULT && GET_CODE (XEXP (op0, 0)) == NEG)
            {
              *total = (mips_fp_mult_cost (mode)
                        + set_src_cost (XEXP (XEXP (op0, 0), 0), speed)
                        + set_src_cost (XEXP (op0, 1), speed)
                        + set_src_cost (op1, speed));
              return true;
            }
          if (GET_CODE (op1) == MULT)
            {
              *total = (mips_fp_mult_cost (mode)
                        + set_src_cost (op0, speed)
                        + set_src_cost (XEXP (op1, 0), speed)
                        + set_src_cost (XEXP (op1, 1), speed));
              return true;
            }
        }
      /* Fall through.  */

    case PLUS:
      if (float_mode_p)
        {
          /* If this is part of a MADD or MSUB, treat the PLUS as
             being free.  */
          if ((ISA_HAS_FP_MADD4_MSUB4 || ISA_HAS_FP_MADD3_MSUB3)
              && TARGET_FUSED_MADD
              && GET_CODE (XEXP (x, 0)) == MULT)
            *total = 0;
          else
            *total = mips_cost->fp_add;
          return false;
        }

      /* Double-word operations require three single-word operations and
         an SLTU.  The MIPS16 version then needs to move the result of
         the SLTU from $24 to a MIPS16 register.  */
      *total = mips_binary_cost (x, COSTS_N_INSNS (1),
                                 COSTS_N_INSNS (TARGET_MIPS16 ? 5 : 4),
                                 speed);
      return true;

    case NEG:
      /* Note the HONOR_SIGNED_ZEROS condition here, as opposed to
         !HONOR_SIGNED_ZEROS in the MINUS case above: the (neg (plus ...))
         form of the nmadd/nmsub patterns is only used when signed zeros
         matter; otherwise combine produces the MINUS form instead.  */
      if (float_mode_p
          && (ISA_HAS_NMADD4_NMSUB4 || ISA_HAS_NMADD3_NMSUB3)
          && TARGET_FUSED_MADD
          && !HONOR_NANS (mode)
          && HONOR_SIGNED_ZEROS (mode))
        {
          /* See if we can use NMADD or NMSUB.  See mips.md for the
             associated patterns.  */
          rtx op = XEXP (x, 0);
          if ((GET_CODE (op) == PLUS || GET_CODE (op) == MINUS)
              && GET_CODE (XEXP (op, 0)) == MULT)
            {
              *total = (mips_fp_mult_cost (mode)
                        + set_src_cost (XEXP (XEXP (op, 0), 0), speed)
                        + set_src_cost (XEXP (XEXP (op, 0), 1), speed)
                        + set_src_cost (XEXP (op, 1), speed));
              return true;
            }
        }

      if (float_mode_p)
        *total = mips_cost->fp_add;
      else
        *total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 4 : 1);
      return false;

    case MULT:
      if (float_mode_p)
        *total = mips_fp_mult_cost (mode);
      else if (mode == DImode && !TARGET_64BIT)
        /* Synthesized from 2 mulsi3s, 1 mulsidi3 and two additions,
           where the mulsidi3 always includes an MFHI and an MFLO.  */
        *total = (speed
                  ? mips_cost->int_mult_si * 3 + 6
                  : COSTS_N_INSNS (ISA_HAS_MUL3 ? 7 : 9));
      else if (!speed)
        *total = COSTS_N_INSNS (ISA_HAS_MUL3 ? 1 : 2) + 1;
      else if (mode == DImode)
        *total = mips_cost->int_mult_di;
      else
        *total = mips_cost->int_mult_si;
      return false;

    case DIV:
      /* Check for a reciprocal.  */
      if (float_mode_p
          && ISA_HAS_FP_RECIP_RSQRT (mode)
          && flag_unsafe_math_optimizations
          && XEXP (x, 0) == CONST1_RTX (mode))
        {
          if (outer_code == SQRT || GET_CODE (XEXP (x, 1)) == SQRT)
            /* An rsqrt<mode>a or rsqrt<mode>b pattern.  Count the
               division as being free.  */
            *total = set_src_cost (XEXP (x, 1), speed);
          else
            *total = (mips_fp_div_cost (mode)
                      + set_src_cost (XEXP (x, 1), speed));
          return true;
        }
      /* Fall through.  */

    case SQRT:
    case MOD:
      if (float_mode_p)
        {
          *total = mips_fp_div_cost (mode);
          return false;
        }
      /* Fall through.  */

    case UDIV:
    case UMOD:
      if (!speed)
        {
          /* It is our responsibility to make division by a power of 2
             as cheap as 2 register additions if we want the division
             expanders to be used for such operations; see the setting
             of sdiv_pow2_cheap in optabs.c.  Using (D)DIV for MIPS16
             should always produce shorter code than using
             expand_sdiv2_pow2.  */
          if (TARGET_MIPS16
              && CONST_INT_P (XEXP (x, 1))
              && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
            {
              *total = COSTS_N_INSNS (2) + set_src_cost (XEXP (x, 0), speed);
              return true;
            }
          *total = COSTS_N_INSNS (mips_idiv_insns ());
        }
      else if (mode == DImode)
        *total = mips_cost->int_div_di;
      else
        *total = mips_cost->int_div_si;
      return false;

    case SIGN_EXTEND:
      *total = mips_sign_extend_cost (mode, XEXP (x, 0));
      return false;

    case ZERO_EXTEND:
      /* A zero-extended byte addition can be a single BADDU on Octeon;
         treat the extension itself as free in that case.  */
      if (outer_code == SET
          && ISA_HAS_BADDU
          && (GET_CODE (XEXP (x, 0)) == TRUNCATE
              || GET_CODE (XEXP (x, 0)) == SUBREG)
          && GET_MODE (XEXP (x, 0)) == QImode
          && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
        {
          *total = set_src_cost (XEXP (XEXP (x, 0), 0), speed);
          return true;
        }
      *total = mips_zero_extend_cost (mode, XEXP (x, 0));
      return false;

    case FLOAT:
    case UNSIGNED_FLOAT:
    case FIX:
    case FLOAT_EXTEND:
    case FLOAT_TRUNCATE:
      *total = mips_cost->fp_add;
      return false;

    case SET:
      /* Simple register-to-register (or zero-to-register) sets are
         costed per move; everything else uses default handling.  */
      if (register_operand (SET_DEST (x), VOIDmode)
          && reg_or_0_operand (SET_SRC (x), VOIDmode))
        {
          *total = mips_set_reg_reg_cost (GET_MODE (SET_DEST (x)));
          return true;
        }
      return false;

    default:
      return false;
    }
}
+
+/* Implement TARGET_ADDRESS_COST. */
+
+static int
+mips_address_cost (rtx addr, enum machine_mode mode,
+ addr_space_t as ATTRIBUTE_UNUSED,
+ bool speed ATTRIBUTE_UNUSED)
+{
+ return mips_address_insns (addr, mode, false);
+}
+
/* Information about a single instruction in a multi-instruction
   asm sequence.  */
struct mips_multi_member {
  /* True if this is a label, false if it is code.  */
  bool is_label_p;

  /* The output_asm_insn format of the instruction, or, for labels,
     the full label definition text (including the colon).  */
  const char *format;

  /* The operands to the instruction.  Unused for labels.  */
  rtx operands[MAX_RECOG_OPERANDS];
};
typedef struct mips_multi_member mips_multi_member;
+
/* The instructions that make up the current multi-insn sequence.
   Reset by mips_multi_start.  */
static vec<mips_multi_member> mips_multi_members;

/* How many instructions (as opposed to labels) are in the current
   multi-insn sequence.  */
static unsigned int mips_multi_num_insns;
+
+/* Start a new multi-insn sequence. */
+
+static void
+mips_multi_start (void)
+{
+ mips_multi_members.truncate (0);
+ mips_multi_num_insns = 0;
+}
+
+/* Add a new, uninitialized member to the current multi-insn sequence. */
+
+static struct mips_multi_member *
+mips_multi_add (void)
+{
+ mips_multi_member empty;
+ return mips_multi_members.safe_push (empty);
+}
+
+/* Add a normal insn with the given asm format to the current multi-insn
+ sequence. The other arguments are a null-terminated list of operands. */
+
+static void
+mips_multi_add_insn (const char *format, ...)
+{
+ struct mips_multi_member *member;
+ va_list ap;
+ unsigned int i;
+ rtx op;
+
+ member = mips_multi_add ();
+ member->is_label_p = false;
+ member->format = format;
+ va_start (ap, format);
+ i = 0;
+ while ((op = va_arg (ap, rtx)))
+ member->operands[i++] = op;
+ va_end (ap);
+ mips_multi_num_insns++;
+}
+
+/* Add the given label definition to the current multi-insn sequence.
+ The definition should include the colon. */
+
+static void
+mips_multi_add_label (const char *label)
+{
+ struct mips_multi_member *member;
+
+ member = mips_multi_add ();
+ member->is_label_p = true;
+ member->format = label;
+}
+
+/* Return the index of the last member of the current multi-insn sequence. */
+
+static unsigned int
+mips_multi_last_index (void)
+{
+ return mips_multi_members.length () - 1;
+}
+
+/* Add a copy of an existing instruction to the current multi-insn
+ sequence. I is the index of the instruction that should be copied. */
+
+static void
+mips_multi_copy_insn (unsigned int i)
+{
+ struct mips_multi_member *member;
+
+ member = mips_multi_add ();
+ memcpy (member, &mips_multi_members[i], sizeof (*member));
+ gcc_assert (!member->is_label_p);
+}
+
+/* Change the operand of an existing instruction in the current
+ multi-insn sequence. I is the index of the instruction,
+ OP is the index of the operand, and X is the new value. */
+
+static void
+mips_multi_set_operand (unsigned int i, unsigned int op, rtx x)
+{
+ mips_multi_members[i].operands[op] = x;
+}
+
+/* Write out the asm code for the current multi-insn sequence. */
+
+static void
+mips_multi_write (void)
+{
+ struct mips_multi_member *member;
+ unsigned int i;
+
+ FOR_EACH_VEC_ELT (mips_multi_members, i, member)
+ if (member->is_label_p)
+ fprintf (asm_out_file, "%s\n", member->format);
+ else
+ output_asm_insn (member->format, member->operands);
+}
+
+/* Return one word of double-word value OP, taking into account the fixed
+ endianness of certain registers. HIGH_P is true to select the high part,
+ false to select the low part. */
+
rtx
mips_subword (rtx op, bool high_p)
{
  unsigned int byte, offset;
  enum machine_mode mode;

  mode = GET_MODE (op);
  /* Mode-less constants are treated as register-pair-sized values.  */
  if (mode == VOIDmode)
    mode = TARGET_64BIT ? TImode : DImode;

  /* BYTE is the in-memory offset of the requested word: on big-endian
     targets the high word is at offset 0, on little-endian targets it
     is at offset UNITS_PER_WORD.  */
  if (TARGET_BIG_ENDIAN ? !high_p : high_p)
    byte = UNITS_PER_WORD;
  else
    byte = 0;

  if (FP_REG_RTX_P (op))
    {
      /* Paired FPRs are always ordered little-endian.  */
      offset = (UNITS_PER_WORD < UNITS_PER_HWFPVALUE ? high_p : byte != 0);
      return gen_rtx_REG (word_mode, REGNO (op) + offset);
    }

  if (MEM_P (op))
    return mips_rewrite_small_data (adjust_address (op, word_mode, byte));

  return simplify_gen_subreg (word_mode, op, mode, byte);
}
+
+/* Return true if SRC should be moved into DEST using "MULT $0, $0".
+ SPLIT_TYPE is the condition under which moves should be split. */
+
+static bool
+mips_mult_move_p (rtx dest, rtx src, enum mips_split_type split_type)
+{
+ return ((split_type != SPLIT_FOR_SPEED
+ || mips_tuning_info.fast_mult_zero_zero_p)
+ && src == const0_rtx
+ && REG_P (dest)
+ && GET_MODE_SIZE (GET_MODE (dest)) == 2 * UNITS_PER_WORD
+ && (ISA_HAS_DSP_MULT
+ ? ACC_REG_P (REGNO (dest))
+ : MD_REG_P (REGNO (dest))));
+}
+
+/* Return true if a move from SRC to DEST should be split into two.
+ SPLIT_TYPE describes the split condition. */
+
+bool
+mips_split_move_p (rtx dest, rtx src, enum mips_split_type split_type)
+{
+ /* Check whether the move can be done using some variant of MULT $0,$0. */
+ if (mips_mult_move_p (dest, src, split_type))
+ return false;
+
+ /* FPR-to-FPR moves can be done in a single instruction, if they're
+ allowed at all. */
+ unsigned int size = GET_MODE_SIZE (GET_MODE (dest));
+ if (size == 8 && FP_REG_RTX_P (src) && FP_REG_RTX_P (dest))
+ return false;
+
+ /* Check for floating-point loads and stores. */
+ if (size == 8 && ISA_HAS_LDC1_SDC1)
+ {
+ if (FP_REG_RTX_P (dest) && MEM_P (src))
+ return false;
+ if (FP_REG_RTX_P (src) && MEM_P (dest))
+ return false;
+ }
+
+ /* Otherwise split all multiword moves. */
+ return size > UNITS_PER_WORD;
+}
+
+/* Split a move from SRC to DEST, given that mips_split_move_p holds.
+ SPLIT_TYPE describes the split condition. */
+
void
mips_split_move (rtx dest, rtx src, enum mips_split_type split_type)
{
  rtx low_dest;

  gcc_checking_assert (mips_split_move_p (dest, src, split_type));
  if (FP_REG_RTX_P (dest) || FP_REG_RTX_P (src))
    {
      /* Doubleword moves involving an FPR are handled by mode-specific
         move_doubleword_fpr* expanders; dispatch on the destination mode.  */
      if (!TARGET_64BIT && GET_MODE (dest) == DImode)
        emit_insn (gen_move_doubleword_fprdi (dest, src));
      else if (!TARGET_64BIT && GET_MODE (dest) == DFmode)
        emit_insn (gen_move_doubleword_fprdf (dest, src));
      else if (!TARGET_64BIT && GET_MODE (dest) == V2SFmode)
        emit_insn (gen_move_doubleword_fprv2sf (dest, src));
      else if (!TARGET_64BIT && GET_MODE (dest) == V2SImode)
        emit_insn (gen_move_doubleword_fprv2si (dest, src));
      else if (!TARGET_64BIT && GET_MODE (dest) == V4HImode)
        emit_insn (gen_move_doubleword_fprv4hi (dest, src));
      else if (!TARGET_64BIT && GET_MODE (dest) == V8QImode)
        emit_insn (gen_move_doubleword_fprv8qi (dest, src));
      else if (TARGET_64BIT && GET_MODE (dest) == TFmode)
        emit_insn (gen_move_doubleword_fprtf (dest, src));
      else
        gcc_unreachable ();
    }
  else if (REG_P (dest) && REGNO (dest) == MD_REG_FIRST)
    {
      /* Writing HI/LO: move the low word first, then write HI with an
         insn that also mentions LO so the two halves stay together.  */
      low_dest = mips_subword (dest, false);
      mips_emit_move (low_dest, mips_subword (src, false));
      if (TARGET_64BIT)
        emit_insn (gen_mthidi_ti (dest, mips_subword (src, true), low_dest));
      else
        emit_insn (gen_mthisi_di (dest, mips_subword (src, true), low_dest));
    }
  else if (REG_P (src) && REGNO (src) == MD_REG_FIRST)
    {
      /* Reading HI/LO: MFLO for the low word, MFHI for the high word.  */
      mips_emit_move (mips_subword (dest, false), mips_subword (src, false));
      if (TARGET_64BIT)
        emit_insn (gen_mfhidi_ti (mips_subword (dest, true), src));
      else
        emit_insn (gen_mfhisi_di (mips_subword (dest, true), src));
    }
  else
    {
      /* The operation can be split into two normal moves.  Decide in
         which order to do them.  If the low word of the destination is
         an input to SRC, move the high word first so we don't clobber
         a source word before reading it.  */
      low_dest = mips_subword (dest, false);
      if (REG_P (low_dest)
          && reg_overlap_mentioned_p (low_dest, src))
        {
          mips_emit_move (mips_subword (dest, true), mips_subword (src, true));
          mips_emit_move (low_dest, mips_subword (src, false));
        }
      else
        {
          mips_emit_move (low_dest, mips_subword (src, false));
          mips_emit_move (mips_subword (dest, true), mips_subword (src, true));
        }
    }
}
+
+/* Return the split type for instruction INSN. */
+
+static enum mips_split_type
+mips_insn_split_type (rtx insn)
+{
+ basic_block bb = BLOCK_FOR_INSN (insn);
+ if (bb)
+ {
+ if (optimize_bb_for_speed_p (bb))
+ return SPLIT_FOR_SPEED;
+ else
+ return SPLIT_FOR_SIZE;
+ }
+ /* Once CFG information has been removed, we should trust the optimization
+ decisions made by previous passes and only split where necessary. */
+ return SPLIT_IF_NECESSARY;
+}
+
+/* Return true if a move from SRC to DEST in INSN should be split. */
+
+bool
+mips_split_move_insn_p (rtx dest, rtx src, rtx insn)
+{
+ return mips_split_move_p (dest, src, mips_insn_split_type (insn));
+}
+
+/* Split a move from SRC to DEST in INSN, given that mips_split_move_insn_p
+ holds. */
+
+void
+mips_split_move_insn (rtx dest, rtx src, rtx insn)
+{
+ mips_split_move (dest, src, mips_insn_split_type (insn));
+}
+
+/* Return the appropriate instructions to move SRC into DEST. Assume
+ that SRC is operand 1 and DEST is operand 0. */
+
const char *
mips_output_move (rtx dest, rtx src)
{
  enum rtx_code dest_code, src_code;
  enum machine_mode mode;
  enum mips_symbol_type symbol_type;
  bool dbl_p;

  dest_code = GET_CODE (dest);
  src_code = GET_CODE (src);
  mode = GET_MODE (dest);
  dbl_p = (GET_MODE_SIZE (mode) == 8);

  /* "#" tells the caller the insn must be split before output.  */
  if (mips_split_move_p (dest, src, SPLIT_IF_NECESSARY))
    return "#";

  if ((src_code == REG && GP_REG_P (REGNO (src)))
      || (!TARGET_MIPS16 && src == CONST0_RTX (mode)))
    {
      if (dest_code == REG)
        {
          if (GP_REG_P (REGNO (dest)))
            return "move\t%0,%z1";

          if (mips_mult_move_p (dest, src, SPLIT_IF_NECESSARY))
            {
              if (ISA_HAS_DSP_MULT)
                return "mult\t%q0,%.,%.";
              else
                return "mult\t%.,%.";
            }

          /* Moves to HI are handled by special .md insns.  */
          if (REGNO (dest) == LO_REGNUM)
            return "mtlo\t%z1";

          if (DSP_ACC_REG_P (REGNO (dest)))
            {
              /* Patch the accumulator suffix (taken from the register
                 name) into a static buffer; the returned string is a
                 pointer into that buffer.  */
              static char retval[] = "mt__\t%z1,%q0";

              retval[2] = reg_names[REGNO (dest)][4];
              retval[3] = reg_names[REGNO (dest)][5];
              return retval;
            }

          if (FP_REG_P (REGNO (dest)))
            return dbl_p ? "dmtc1\t%z1,%0" : "mtc1\t%z1,%0";

          if (ALL_COP_REG_P (REGNO (dest)))
            {
              static char retval[] = "dmtc_\t%z1,%0";

              retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
              /* "retval + 1" skips the leading "d" for the 32-bit form.  */
              return dbl_p ? retval : retval + 1;
            }
        }
      if (dest_code == MEM)
        switch (GET_MODE_SIZE (mode))
          {
          case 1: return "sb\t%z1,%0";
          case 2: return "sh\t%z1,%0";
          case 4: return "sw\t%z1,%0";
          case 8: return "sd\t%z1,%0";
          }
    }
  if (dest_code == REG && GP_REG_P (REGNO (dest)))
    {
      if (src_code == REG)
        {
          /* Moves from HI are handled by special .md insns.  */
          if (REGNO (src) == LO_REGNUM)
            {
              /* When generating VR4120 or VR4130 code, we use MACC and
                 DMACC instead of MFLO.  This avoids both the normal
                 MIPS III HI/LO hazards and the errata related to
                 -mfix-vr4130.  */
              if (ISA_HAS_MACCHI)
                return dbl_p ? "dmacc\t%0,%.,%." : "macc\t%0,%.,%.";
              return "mflo\t%0";
            }

          if (DSP_ACC_REG_P (REGNO (src)))
            {
              static char retval[] = "mf__\t%0,%q1";

              retval[2] = reg_names[REGNO (src)][4];
              retval[3] = reg_names[REGNO (src)][5];
              return retval;
            }

          if (FP_REG_P (REGNO (src)))
            return dbl_p ? "dmfc1\t%0,%1" : "mfc1\t%0,%1";

          if (ALL_COP_REG_P (REGNO (src)))
            {
              static char retval[] = "dmfc_\t%0,%1";

              retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
              return dbl_p ? retval : retval + 1;
            }
        }

      if (src_code == MEM)
        switch (GET_MODE_SIZE (mode))
          {
          case 1: return "lbu\t%0,%1";
          case 2: return "lhu\t%0,%1";
          case 4: return "lw\t%0,%1";
          case 8: return "ld\t%0,%1";
          }

      if (src_code == CONST_INT)
        {
          /* Don't use the X format for the operand itself, because that
             will give out-of-range numbers for 64-bit hosts and 32-bit
             targets.  */
          if (!TARGET_MIPS16)
            return "li\t%0,%1\t\t\t# %X1";

          if (SMALL_OPERAND_UNSIGNED (INTVAL (src)))
            return "li\t%0,%1";

          /* MIPS16 loads of small negated constants are split into
             a LI of the negation followed by a NEG.  */
          if (SMALL_OPERAND_UNSIGNED (-INTVAL (src)))
            return "#";
        }

      if (src_code == HIGH)
        return TARGET_MIPS16 ? "#" : "lui\t%0,%h1";

      if (CONST_GP_P (src))
        return "move\t%0,%1";

      if (mips_symbolic_constant_p (src, SYMBOL_CONTEXT_LEA, &symbol_type)
          && mips_lo_relocs[symbol_type] != 0)
        {
          /* A signed 16-bit constant formed by applying a relocation
             operator to a symbolic address.  */
          gcc_assert (!mips_split_p[symbol_type]);
          return "li\t%0,%R1";
        }

      if (symbolic_operand (src, VOIDmode))
        {
          gcc_assert (TARGET_MIPS16
                      ? TARGET_MIPS16_TEXT_LOADS
                      : !TARGET_EXPLICIT_RELOCS);
          return dbl_p ? "dla\t%0,%1" : "la\t%0,%1";
        }
    }
  if (src_code == REG && FP_REG_P (REGNO (src)))
    {
      if (dest_code == REG && FP_REG_P (REGNO (dest)))
        {
          if (GET_MODE (dest) == V2SFmode)
            return "mov.ps\t%0,%1";
          else
            return dbl_p ? "mov.d\t%0,%1" : "mov.s\t%0,%1";
        }

      if (dest_code == MEM)
        return dbl_p ? "sdc1\t%1,%0" : "swc1\t%1,%0";
    }
  if (dest_code == REG && FP_REG_P (REGNO (dest)))
    {
      if (src_code == MEM)
        return dbl_p ? "ldc1\t%0,%1" : "lwc1\t%0,%1";
    }
  if (dest_code == REG && ALL_COP_REG_P (REGNO (dest)) && src_code == MEM)
    {
      static char retval[] = "l_c_\t%0,%1";

      retval[1] = (dbl_p ? 'd' : 'w');
      retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
      return retval;
    }
  if (dest_code == MEM && src_code == REG && ALL_COP_REG_P (REGNO (src)))
    {
      static char retval[] = "s_c_\t%1,%0";

      retval[1] = (dbl_p ? 'd' : 'w');
      retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
      return retval;
    }
  /* Every move matched by the .md move patterns should be handled above.  */
  gcc_unreachable ();
}
+
+/* Return true if CMP1 is a suitable second operand for integer ordering
+ test CODE. See also the *sCC patterns in mips.md. */
+
+static bool
+mips_int_order_operand_ok_p (enum rtx_code code, rtx cmp1)
+{
+ switch (code)
+ {
+ case GT:
+ case GTU:
+ return reg_or_0_operand (cmp1, VOIDmode);
+
+ case GE:
+ case GEU:
+ return !TARGET_MIPS16 && cmp1 == const1_rtx;
+
+ case LT:
+ case LTU:
+ return arith_operand (cmp1, VOIDmode);
+
+ case LE:
+ return sle_operand (cmp1, VOIDmode);
+
+ case LEU:
+ return sleu_operand (cmp1, VOIDmode);
+
+ default:
+ gcc_unreachable ();
+ }
+}
+
+/* Return true if *CMP1 (of mode MODE) is a valid second operand for
+ integer ordering test *CODE, or if an equivalent combination can
+ be formed by adjusting *CODE and *CMP1. When returning true, update
+ *CODE and *CMP1 with the chosen code and operand, otherwise leave
+ them alone. */
+
static bool
mips_canonicalize_int_order_test (enum rtx_code *code, rtx *cmp1,
                                  enum machine_mode mode)
{
  HOST_WIDE_INT plus_one;

  /* Nothing to do if the operand is already acceptable as-is.  */
  if (mips_int_order_operand_ok_p (*code, *cmp1))
    return true;

  if (CONST_INT_P (*cmp1))
    switch (*code)
      {
      case LE:
        /* "x <= N" is "x < N + 1" provided N + 1 does not overflow
           in MODE; the comparison with the un-truncated increment
           detects the wraparound case.  */
        plus_one = trunc_int_for_mode (UINTVAL (*cmp1) + 1, mode);
        if (INTVAL (*cmp1) < plus_one)
          {
            *code = LT;
            *cmp1 = force_reg (mode, GEN_INT (plus_one));
            return true;
          }
        break;

      case LEU:
        /* Likewise "x <=u N" is "x <u N + 1" unless N + 1 wraps to 0.  */
        plus_one = trunc_int_for_mode (UINTVAL (*cmp1) + 1, mode);
        if (plus_one != 0)
          {
            *code = LTU;
            *cmp1 = force_reg (mode, GEN_INT (plus_one));
            return true;
          }
        break;

      default:
        break;
      }
  return false;
}
+
+/* Compare CMP0 and CMP1 using ordering test CODE and store the result
+ in TARGET. CMP0 and TARGET are register_operands. If INVERT_PTR
+ is nonnull, it's OK to set TARGET to the inverse of the result and
+ flip *INVERT_PTR instead. */
+
static void
mips_emit_int_order_test (enum rtx_code code, bool *invert_ptr,
                          rtx target, rtx cmp0, rtx cmp1)
{
  enum machine_mode mode;

  /* First see if there is a MIPS instruction that can do this operation.
     If not, try doing the same for the inverse operation.  If that also
     fails, force CMP1 into a register and try again.  */
  mode = GET_MODE (cmp0);
  if (mips_canonicalize_int_order_test (&code, &cmp1, mode))
    mips_emit_binary (code, target, cmp0, cmp1);
  else
    {
      enum rtx_code inv_code = reverse_condition (code);
      if (!mips_canonicalize_int_order_test (&inv_code, &cmp1, mode))
        {
          /* Neither direction works with this operand; retry with
             CMP1 in a register (recursion terminates here).  */
          cmp1 = force_reg (mode, cmp1);
          mips_emit_int_order_test (code, invert_ptr, target, cmp0, cmp1);
        }
      else if (invert_ptr == 0)
        {
          /* Caller cannot absorb an inverted result: compute the
             inverse test and flip it with XOR 1.  */
          rtx inv_target;

          inv_target = mips_force_binary (GET_MODE (target),
                                          inv_code, cmp0, cmp1);
          mips_emit_binary (XOR, target, inv_target, const1_rtx);
        }
      else
        {
          /* Emit the inverse test and tell the caller via *INVERT_PTR.  */
          *invert_ptr = !*invert_ptr;
          mips_emit_binary (inv_code, target, cmp0, cmp1);
        }
    }
}
+
+/* Return a register that is zero iff CMP0 and CMP1 are equal.
+ The register will have the same mode as CMP0. */
+
+static rtx
+mips_zero_if_equal (rtx cmp0, rtx cmp1)
+{
+ if (cmp1 == const0_rtx)
+ return cmp0;
+
+ if (uns_arith_operand (cmp1, VOIDmode))
+ return expand_binop (GET_MODE (cmp0), xor_optab,
+ cmp0, cmp1, 0, 0, OPTAB_DIRECT);
+
+ return expand_binop (GET_MODE (cmp0), sub_optab,
+ cmp0, cmp1, 0, 0, OPTAB_DIRECT);
+}
+
+/* Convert *CODE into a code that can be used in a floating-point
+ scc instruction (C.cond.fmt). Return true if the values of
+ the condition code registers will be inverted, with 0 indicating
+ that the condition holds. */
+
+static bool
+mips_reversed_fp_cond (enum rtx_code *code)
+{
+ switch (*code)
+ {
+ case NE:
+ case LTGT:
+ case ORDERED:
+ *code = reverse_condition_maybe_unordered (*code);
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+/* Allocate a floating-point condition-code register of mode MODE.
+
+ These condition code registers are used for certain kinds
+ of compound operation, such as compare and branches, vconds,
+ and built-in functions. At expand time, their use is entirely
+ controlled by MIPS-specific code and is entirely internal
+ to these compound operations.
+
+ We could (and did in the past) expose condition-code values
+ as pseudo registers and leave the register allocator to pick
+ appropriate registers. The problem is that it is not practically
+ possible for the rtl optimizers to guarantee that no spills will
+ be needed, even when AVOID_CCMODE_COPIES is defined. We would
+ therefore need spill and reload sequences to handle the worst case.
+
+ Although such sequences do exist, they are very expensive and are
+ not something we'd want to use. This is especially true of CCV2 and
+ CCV4, where all the shuffling would greatly outweigh whatever benefit
+ the vectorization itself provides.
+
+ The main benefit of having more than one condition-code register
+ is to allow the pipelining of operations, especially those involving
+ comparisons and conditional moves. We don't really expect the
+ registers to be live for long periods, and certainly never want
+ them to be live across calls.
+
+ Also, there should be no penalty attached to using all the available
+ registers. They are simply bits in the same underlying FPU control
+ register.
+
+ We therefore expose the hardware registers from the outset and use
+ a simple round-robin allocation scheme. */
+
static rtx
mips_allocate_fcc (enum machine_mode mode)
{
  unsigned int regno, count;

  gcc_assert (TARGET_HARD_FLOAT && ISA_HAS_8CC);

  if (mode == CCmode)
    count = 1;
  else if (mode == CCV2mode)
    count = 2;
  else if (mode == CCV4mode)
    count = 4;
  else
    gcc_unreachable ();

  /* COUNT is a power of two (1, 2 or 4), so "-x & (count - 1)" rounds
     next_fcc up to the next multiple of COUNT.  Then wrap around to 0
     if there isn't room for this allocation.  */
  cfun->machine->next_fcc += -cfun->machine->next_fcc & (count - 1);
  if (cfun->machine->next_fcc > ST_REG_LAST - ST_REG_FIRST)
    cfun->machine->next_fcc = 0;
  regno = ST_REG_FIRST + cfun->machine->next_fcc;
  cfun->machine->next_fcc += count;
  return gen_rtx_REG (mode, regno);
}
+
+/* Convert a comparison into something that can be used in a branch or
+ conditional move. On entry, *OP0 and *OP1 are the values being
+ compared and *CODE is the code used to compare them.
+
+ Update *CODE, *OP0 and *OP1 so that they describe the final comparison.
+ If NEED_EQ_NE_P, then only EQ or NE comparisons against zero are possible,
+ otherwise any standard branch condition can be used. The standard branch
+ conditions are:
+
+ - EQ or NE between two registers.
+ - any comparison between a register and zero. */
+
static void
mips_emit_compare (enum rtx_code *code, rtx *op0, rtx *op1, bool need_eq_ne_p)
{
  rtx cmp_op0 = *op0;
  rtx cmp_op1 = *op1;

  if (GET_MODE_CLASS (GET_MODE (*op0)) == MODE_INT)
    {
      /* Comparisons against zero are usable as-is unless the caller
         insists on EQ/NE.  */
      if (!need_eq_ne_p && *op1 == const0_rtx)
        ;
      else if (*code == EQ || *code == NE)
        {
          if (need_eq_ne_p)
            {
              /* Reduce to a comparison of a difference (or XOR)
                 against zero.  */
              *op0 = mips_zero_if_equal (cmp_op0, cmp_op1);
              *op1 = const0_rtx;
            }
          else
            *op1 = force_reg (GET_MODE (cmp_op0), cmp_op1);
        }
      else
        {
          /* The comparison needs a separate scc instruction.  Store the
             result of the scc in *OP0 and compare it against zero.  */
          bool invert = false;
          *op0 = gen_reg_rtx (GET_MODE (cmp_op0));
          mips_emit_int_order_test (*code, &invert, *op0, cmp_op0, cmp_op1);
          *code = (invert ? EQ : NE);
          *op1 = const0_rtx;
        }
    }
  else if (ALL_FIXED_POINT_MODE_P (GET_MODE (cmp_op0)))
    {
      /* DSP fixed-point comparisons set the CCDSP condition-code
         register; the branch then tests that register against zero.  */
      *op0 = gen_rtx_REG (CCDSPmode, CCDSP_CC_REGNUM);
      mips_emit_binary (*code, *op0, cmp_op0, cmp_op1);
      *code = NE;
      *op1 = const0_rtx;
    }
  else
    {
      enum rtx_code cmp_code;

      /* Floating-point tests use a separate C.cond.fmt comparison to
         set a condition code register.  The branch or conditional move
         will then compare that register against zero.

         Set CMP_CODE to the code of the comparison instruction and
         *CODE to the code that the branch or move should use.  */
      cmp_code = *code;
      *code = mips_reversed_fp_cond (&cmp_code) ? EQ : NE;
      *op0 = (ISA_HAS_8CC
              ? mips_allocate_fcc (CCmode)
              : gen_rtx_REG (CCmode, FPSW_REGNUM));
      *op1 = const0_rtx;
      mips_emit_binary (cmp_code, *op0, cmp_op0, cmp_op1);
    }
}
+
+/* Try performing the comparison in OPERANDS[1], whose arms are OPERANDS[2]
+ and OPERAND[3]. Store the result in OPERANDS[0].
+
+ On 64-bit targets, the mode of the comparison and target will always be
+ SImode, thus possibly narrower than that of the comparison's operands. */
+
+void
+mips_expand_scc (rtx operands[])
+{
+ rtx target = operands[0];
+ enum rtx_code code = GET_CODE (operands[1]);
+ rtx op0 = operands[2];
+ rtx op1 = operands[3];
+
+ gcc_assert (GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT);
+
+ if (code == EQ || code == NE)
+ {
+ if (ISA_HAS_SEQ_SNE
+ && reg_imm10_operand (op1, GET_MODE (op1)))
+ mips_emit_binary (code, target, op0, op1);
+ else
+ {
+ rtx zie = mips_zero_if_equal (op0, op1);
+ mips_emit_binary (code, target, zie, const0_rtx);
+ }
+ }
+ else
+ mips_emit_int_order_test (code, 0, target, op0, op1);
+}
+
+/* Compare OPERANDS[1] with OPERANDS[2] using comparison code
+ CODE and jump to OPERANDS[3] if the condition holds. */
+
+void
+mips_expand_conditional_branch (rtx *operands)
+{
+ enum rtx_code code = GET_CODE (operands[0]);
+ rtx op0 = operands[1];
+ rtx op1 = operands[2];
+ rtx condition;
+
+ /* mips_emit_compare may rewrite CODE/OP0/OP1 into a form that the
+ branch patterns accept directly (e.g. a compare against zero).
+ NOTE(review): the final argument appears to request EQ/NE-only
+ output for MIPS16 — confirm against mips_emit_compare's contract. */
+ mips_emit_compare (&code, &op0, &op1, TARGET_MIPS16);
+ condition = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
+ emit_jump_insn (gen_condjump (condition, operands[3]));
+}
+
+/* Implement:
+
+ (set temp (COND:CCV2 CMP_OP0 CMP_OP1))
+ (set DEST (unspec [TRUE_SRC FALSE_SRC temp] UNSPEC_MOVE_TF_PS)) */
+
+void
+mips_expand_vcondv2sf (rtx dest, rtx true_src, rtx false_src,
+ enum rtx_code cond, rtx cmp_op0, rtx cmp_op1)
+{
+ rtx cmp_result;
+ bool reversed_p;
+
+ /* Some FP conditions have no direct C.cond.fmt encoding; in that case
+ mips_reversed_fp_cond flips COND and reports it, and we compensate
+ by swapping the move's true/false sources below. */
+ reversed_p = mips_reversed_fp_cond (&cond);
+ cmp_result = mips_allocate_fcc (CCV2mode);
+ emit_insn (gen_scc_ps (cmp_result,
+ gen_rtx_fmt_ee (cond, VOIDmode, cmp_op0, cmp_op1)));
+ if (reversed_p)
+ emit_insn (gen_mips_cond_move_tf_ps (dest, false_src, true_src,
+ cmp_result))
+ else
+ emit_insn (gen_mips_cond_move_tf_ps (dest, true_src, false_src,
+ cmp_result));
+}
+
+/* Perform the comparison in OPERANDS[1]. Move OPERANDS[2] into OPERANDS[0]
+ if the condition holds, otherwise move OPERANDS[3] into OPERANDS[0]. */
+
+void
+mips_expand_conditional_move (rtx *operands)
+{
+ rtx cond;
+ enum rtx_code code = GET_CODE (operands[1]);
+ rtx op0 = XEXP (operands[1], 0);
+ rtx op1 = XEXP (operands[1], 1);
+
+ /* Unlike branches, the final argument here is always true; the
+ conditional-move patterns only accept the rewritten comparison. */
+ mips_emit_compare (&code, &op0, &op1, true);
+ cond = gen_rtx_fmt_ee (code, GET_MODE (op0), op0, op1);
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0],
+ gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]), cond,
+ operands[2], operands[3])));
+}
+
+/* Perform the comparison in COMPARISON, then trap if the condition holds. */
+
+void
+mips_expand_conditional_trap (rtx comparison)
+{
+ rtx op0, op1;
+ enum machine_mode mode;
+ enum rtx_code code;
+
+ /* MIPS conditional trap instructions don't have GT or LE flavors,
+ so we must swap the operands and convert to LT and GE respectively. */
+ code = GET_CODE (comparison);
+ switch (code)
+ {
+ case GT:
+ case LE:
+ case GTU:
+ case LEU:
+ code = swap_condition (code);
+ op0 = XEXP (comparison, 1);
+ op1 = XEXP (comparison, 0);
+ break;
+
+ default:
+ op0 = XEXP (comparison, 0);
+ op1 = XEXP (comparison, 1);
+ break;
+ }
+
+ /* OP0 must be a register; OP1 may stay as-is if it satisfies
+ arith_operand (presumably to allow the trap-immediate forms —
+ confirm against the trap patterns in mips.md). */
+ mode = GET_MODE (XEXP (comparison, 0));
+ op0 = force_reg (mode, op0);
+ if (!arith_operand (op1, mode))
+ op1 = force_reg (mode, op1);
+
+ emit_insn (gen_rtx_TRAP_IF (VOIDmode,
+ gen_rtx_fmt_ee (code, mode, op0, op1),
+ const0_rtx));
+}
+
+/* Initialize *CUM for a call to a function of type FNTYPE. */
+
+void
+mips_init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype)
+{
+ memset (cum, 0, sizeof (*cum));
+ cum->prototype = (fntype && prototype_p (fntype));
+ /* For a prototyped stdarg function, pretend a GP argument has already
+ been seen; mips_get_arg_info uses !gp_reg_found to gate the o32/o64
+ leading-FP-argument convention. */
+ cum->gp_reg_found = (cum->prototype && stdarg_p (fntype));
+}
+
+/* Fill INFO with information about a single argument. CUM is the
+ cumulative state for earlier arguments. MODE is the mode of this
+ argument and TYPE is its type (if known). NAMED is true if this
+ is a named (fixed) argument rather than a variable one. */
+
+static void
+mips_get_arg_info (struct mips_arg_info *info, const CUMULATIVE_ARGS *cum,
+ enum machine_mode mode, const_tree type, bool named)
+{
+ bool doubleword_aligned_p;
+ unsigned int num_bytes, num_words, max_regs;
+
+ /* Work out the size of the argument, rounded up to whole words. */
+ num_bytes = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
+ num_words = (num_bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
+
+ /* Decide whether it should go in a floating-point register, assuming
+ one is free. Later code checks for availability.
+
+ The checks against UNITS_PER_FPVALUE handle the soft-float and
+ single-float cases. */
+ switch (mips_abi)
+ {
+ case ABI_EABI:
+ /* The EABI conventions have traditionally been defined in terms
+ of TYPE_MODE, regardless of the actual type. */
+ info->fpr_p = ((GET_MODE_CLASS (mode) == MODE_FLOAT
+ || mode == V2SFmode)
+ && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
+ break;
+
+ case ABI_32:
+ case ABI_O64:
+ /* Only leading floating-point scalars are passed in
+ floating-point registers. We also handle vector floats the same
+ way, which is OK because they are not covered by the standard ABI. */
+ info->fpr_p = (!cum->gp_reg_found
+ && cum->arg_number < 2
+ && (type == 0
+ || SCALAR_FLOAT_TYPE_P (type)
+ || VECTOR_FLOAT_TYPE_P (type))
+ && (GET_MODE_CLASS (mode) == MODE_FLOAT
+ || mode == V2SFmode)
+ && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
+ break;
+
+ case ABI_N32:
+ case ABI_64:
+ /* Scalar, complex and vector floating-point types are passed in
+ floating-point registers, as long as this is a named rather
+ than a variable argument. */
+ info->fpr_p = (named
+ && (type == 0 || FLOAT_TYPE_P (type))
+ && (GET_MODE_CLASS (mode) == MODE_FLOAT
+ || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
+ || mode == V2SFmode)
+ && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_FPVALUE);
+
+ /* ??? According to the ABI documentation, the real and imaginary
+ parts of complex floats should be passed in individual registers.
+ The real and imaginary parts of stack arguments are supposed
+ to be contiguous and there should be an extra word of padding
+ at the end.
+
+ This has two problems. First, it makes it impossible to use a
+ single "void *" va_list type, since register and stack arguments
+ are passed differently. (At the time of writing, MIPSpro cannot
+ handle complex float varargs correctly.) Second, it's unclear
+ what should happen when there is only one register free.
+
+ For now, we assume that named complex floats should go into FPRs
+ if there are two FPRs free, otherwise they should be passed in the
+ same way as a struct containing two floats. */
+ if (info->fpr_p
+ && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
+ && GET_MODE_UNIT_SIZE (mode) < UNITS_PER_FPVALUE)
+ {
+ if (cum->num_gprs >= MAX_ARGS_IN_REGISTERS - 1)
+ info->fpr_p = false;
+ else
+ num_words = 2;
+ }
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ /* See whether the argument has doubleword alignment. */
+ doubleword_aligned_p = (mips_function_arg_boundary (mode, type)
+ > BITS_PER_WORD);
+
+ /* Set REG_OFFSET to the register count we're interested in.
+ The EABI allocates the floating-point registers separately,
+ but the other ABIs allocate them like integer registers. */
+ info->reg_offset = (mips_abi == ABI_EABI && info->fpr_p
+ ? cum->num_fprs
+ : cum->num_gprs);
+
+ /* Advance to an even register if the argument is doubleword-aligned. */
+ if (doubleword_aligned_p)
+ info->reg_offset += info->reg_offset & 1;
+
+ /* Work out the offset of a stack argument, applying the same
+ even-slot rounding as for registers. */
+ info->stack_offset = cum->stack_words;
+ if (doubleword_aligned_p)
+ info->stack_offset += info->stack_offset & 1;
+
+ max_regs = MAX_ARGS_IN_REGISTERS - info->reg_offset;
+
+ /* Partition the argument between registers and stack: the first
+ MIN (NUM_WORDS, MAX_REGS) words go in registers, the rest spill. */
+ info->reg_words = MIN (num_words, max_regs);
+ info->stack_words = num_words - info->reg_words;
+}
+
+/* INFO describes a register argument that has the normal format for the
+ argument's mode. Return the register it uses, assuming that FPRs are
+ available if HARD_FLOAT_P. */
+
+static unsigned int
+mips_arg_regno (const struct mips_arg_info *info, bool hard_float_p)
+{
+ /* Non-FP arguments (or any argument under soft-float) use GPRs. */
+ if (!info->fpr_p || !hard_float_p)
+ return GP_ARG_FIRST + info->reg_offset;
+ else if (mips_abi == ABI_32 && TARGET_DOUBLE_FLOAT && info->reg_offset > 0)
+ /* In o32, the second argument is always passed in $f14
+ for TARGET_DOUBLE_FLOAT, regardless of whether the
+ first argument was a word or doubleword. */
+ return FP_ARG_FIRST + 2;
+ else
+ return FP_ARG_FIRST + info->reg_offset;
+}
+
+/* Implement TARGET_STRICT_ARGUMENT_NAMING. The old (o32/o64) ABIs do
+ not distinguish named from anonymous arguments when allocating
+ registers; the NewABIs do. */
+
+static bool
+mips_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED)
+{
+ return !TARGET_OLDABI;
+}
+
+/* Implement TARGET_FUNCTION_ARG. Return the rtx for the register(s)
+ holding the argument, a PARALLEL for split register/stack or mixed
+ FPR/GPR cases, or NULL if the argument is passed entirely on the
+ stack. */
+
+static rtx
+mips_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
+ const_tree type, bool named)
+{
+ CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
+ struct mips_arg_info info;
+
+ /* We will be called with a mode of VOIDmode after the last argument
+ has been seen. Whatever we return will be passed to the call expander.
+ If we need a MIPS16 fp_code, return a REG with the code stored as
+ the mode. */
+ if (mode == VOIDmode)
+ {
+ if (TARGET_MIPS16 && cum->fp_code != 0)
+ return gen_rtx_REG ((enum machine_mode) cum->fp_code, 0);
+ else
+ return NULL;
+ }
+
+ mips_get_arg_info (&info, cum, mode, type, named);
+
+ /* Return straight away if the whole argument is passed on the stack. */
+ if (info.reg_offset == MAX_ARGS_IN_REGISTERS)
+ return NULL;
+
+ /* The n32 and n64 ABIs say that if any 64-bit chunk of the structure
+ contains a double in its entirety, then that 64-bit chunk is passed
+ in a floating-point register. */
+ if (TARGET_NEWABI
+ && TARGET_HARD_FLOAT
+ && named
+ && type != 0
+ && TREE_CODE (type) == RECORD_TYPE
+ && TYPE_SIZE_UNIT (type)
+ && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type)))
+ {
+ tree field;
+
+ /* First check to see if there is any such field. */
+ for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
+ if (TREE_CODE (field) == FIELD_DECL
+ && SCALAR_FLOAT_TYPE_P (TREE_TYPE (field))
+ && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD
+ && tree_fits_shwi_p (bit_position (field))
+ && int_bit_position (field) % BITS_PER_WORD == 0)
+ break;
+
+ if (field != 0)
+ {
+ /* Now handle the special case by returning a PARALLEL
+ indicating where each 64-bit chunk goes. INFO.REG_WORDS
+ chunks are passed in registers. */
+ unsigned int i;
+ HOST_WIDE_INT bitpos;
+ rtx ret;
+
+ /* assign_parms checks the mode of ENTRY_PARM, so we must
+ use the actual mode here. */
+ ret = gen_rtx_PARALLEL (mode, rtvec_alloc (info.reg_words));
+
+ bitpos = 0;
+ field = TYPE_FIELDS (type);
+ for (i = 0; i < info.reg_words; i++)
+ {
+ rtx reg;
+
+ /* Find the first field at or after BITPOS. */
+ for (; field; field = DECL_CHAIN (field))
+ if (TREE_CODE (field) == FIELD_DECL
+ && int_bit_position (field) >= bitpos)
+ break;
+
+ /* A word-sized float exactly at BITPOS goes in an FPR;
+ everything else uses a GPR. */
+ if (field
+ && int_bit_position (field) == bitpos
+ && SCALAR_FLOAT_TYPE_P (TREE_TYPE (field))
+ && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD)
+ reg = gen_rtx_REG (DFmode, FP_ARG_FIRST + info.reg_offset + i);
+ else
+ reg = gen_rtx_REG (DImode, GP_ARG_FIRST + info.reg_offset + i);
+
+ XVECEXP (ret, 0, i)
+ = gen_rtx_EXPR_LIST (VOIDmode, reg,
+ GEN_INT (bitpos / BITS_PER_UNIT));
+
+ bitpos += BITS_PER_WORD;
+ }
+ return ret;
+ }
+ }
+
+ /* Handle the n32/n64 conventions for passing complex floating-point
+ arguments in FPR pairs. The real part goes in the lower register
+ and the imaginary part goes in the upper register. */
+ if (TARGET_NEWABI
+ && info.fpr_p
+ && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
+ {
+ rtx real, imag;
+ enum machine_mode inner;
+ unsigned int regno;
+
+ inner = GET_MODE_INNER (mode);
+ regno = FP_ARG_FIRST + info.reg_offset;
+ if (info.reg_words * UNITS_PER_WORD == GET_MODE_SIZE (inner))
+ {
+ /* Real part in registers, imaginary part on stack. */
+ gcc_assert (info.stack_words == info.reg_words);
+ return gen_rtx_REG (inner, regno);
+ }
+ else
+ {
+ gcc_assert (info.stack_words == 0);
+ real = gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_REG (inner, regno),
+ const0_rtx);
+ imag = gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_REG (inner,
+ regno + info.reg_words / 2),
+ GEN_INT (GET_MODE_SIZE (inner)));
+ return gen_rtx_PARALLEL (mode, gen_rtvec (2, real, imag));
+ }
+ }
+
+ return gen_rtx_REG (mode, mips_arg_regno (&info, TARGET_HARD_FLOAT));
+}
+
+/* Implement TARGET_FUNCTION_ARG_ADVANCE. Update *CUM to account for
+ the argument described by MODE/TYPE/NAMED. */
+
+static void
+mips_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
+ const_tree type, bool named)
+{
+ CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
+ struct mips_arg_info info;
+
+ mips_get_arg_info (&info, cum, mode, type, named);
+
+ /* Once a non-FP argument has been seen, o32/o64 stop using FPRs
+ for later arguments (see mips_get_arg_info). */
+ if (!info.fpr_p)
+ cum->gp_reg_found = true;
+
+ /* See the comment above the CUMULATIVE_ARGS structure in mips.h for
+ an explanation of what this code does. It assumes that we're using
+ either the o32 or the o64 ABI, both of which pass at most 2 arguments
+ in FPRs. */
+ if (cum->arg_number < 2 && info.fpr_p)
+ cum->fp_code += (mode == SFmode ? 1 : 2) << (cum->arg_number * 2);
+
+ /* Advance the register count. This has the effect of setting
+ num_gprs to MAX_ARGS_IN_REGISTERS if a doubleword-aligned
+ argument required us to skip the final GPR and pass the whole
+ argument on the stack. */
+ if (mips_abi != ABI_EABI || !info.fpr_p)
+ cum->num_gprs = info.reg_offset + info.reg_words;
+ else if (info.reg_words > 0)
+ cum->num_fprs += MAX_FPRS_PER_FMT;
+
+ /* Advance the stack word count. */
+ if (info.stack_words > 0)
+ cum->stack_words = info.stack_offset + info.stack_words;
+
+ cum->arg_number++;
+}
+
+/* Implement TARGET_ARG_PARTIAL_BYTES. Return the number of bytes of
+ the argument that are passed in registers when the argument is split
+ between registers and the stack, otherwise 0. */
+
+static int
+mips_arg_partial_bytes (cumulative_args_t cum,
+ enum machine_mode mode, tree type, bool named)
+{
+ struct mips_arg_info info;
+
+ mips_get_arg_info (&info, get_cumulative_args (cum), mode, type, named);
+ return info.stack_words > 0 ? info.reg_words * UNITS_PER_WORD : 0;
+}
+
+/* Implement TARGET_FUNCTION_ARG_BOUNDARY. Every parameter gets at
+ least PARM_BOUNDARY bits of alignment, but will be given anything up
+ to STACK_BOUNDARY bits if the type requires it. */
+
+static unsigned int
+mips_function_arg_boundary (enum machine_mode mode, const_tree type)
+{
+ unsigned int alignment;
+
+ /* Clamp the natural alignment to [PARM_BOUNDARY, STACK_BOUNDARY]. */
+ alignment = type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode);
+ if (alignment < PARM_BOUNDARY)
+ alignment = PARM_BOUNDARY;
+ if (alignment > STACK_BOUNDARY)
+ alignment = STACK_BOUNDARY;
+ return alignment;
+}
+
+/* Return true if FUNCTION_ARG_PADDING (MODE, TYPE) should return
+ upward rather than downward. In other words, return true if the
+ first byte of the stack slot has useful data, false if the last
+ byte does. */
+
+bool
+mips_pad_arg_upward (enum machine_mode mode, const_tree type)
+{
+ /* On little-endian targets, the first byte of every stack argument
+ is passed in the first byte of the stack slot. */
+ if (!BYTES_BIG_ENDIAN)
+ return true;
+
+ /* Otherwise, integral types are padded downward: the last byte of a
+ stack argument is passed in the last byte of the stack slot. */
+ if (type != 0
+ ? (INTEGRAL_TYPE_P (type)
+ || POINTER_TYPE_P (type)
+ || FIXED_POINT_TYPE_P (type))
+ : (SCALAR_INT_MODE_P (mode)
+ || ALL_SCALAR_FIXED_POINT_MODE_P (mode)))
+ return false;
+
+ /* Big-endian o64 pads floating-point arguments downward. */
+ if (mips_abi == ABI_O64)
+ if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
+ return false;
+
+ /* Other types are padded upward for o32, o64, n32 and n64. */
+ if (mips_abi != ABI_EABI)
+ return true;
+
+ /* For EABI only: arguments smaller than a stack slot are padded
+ downward, slot-sized or larger ones upward. */
+ if (mode != BLKmode)
+ return GET_MODE_BITSIZE (mode) >= PARM_BOUNDARY;
+ else
+ return int_size_in_bytes (type) >= (PARM_BOUNDARY / BITS_PER_UNIT);
+}
+
+/* Likewise BLOCK_REG_PADDING (MODE, TYPE, ...). Return !BYTES_BIG_ENDIAN
+ if the least significant byte of the register has useful data. Return
+ the opposite if the most significant byte does. */
+
+bool
+mips_pad_reg_upward (enum machine_mode mode, tree type)
+{
+ /* No shifting is required for floating-point arguments. */
+ if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
+ return !BYTES_BIG_ENDIAN;
+
+ /* Otherwise, apply the same padding to register arguments as we do
+ to stack arguments. */
+ return mips_pad_arg_upward (mode, type);
+}
+
+/* Return nonzero when an argument must be passed by reference.
+ Implements TARGET_PASS_BY_REFERENCE. */
+
+static bool
+mips_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
+ enum machine_mode mode, const_tree type,
+ bool named ATTRIBUTE_UNUSED)
+{
+ if (mips_abi == ABI_EABI)
+ {
+ int size;
+
+ /* ??? How should SCmode be handled? */
+ /* 64-bit scalar, fixed-point and accumulator modes are passed
+ by value even though they exceed UNITS_PER_WORD on 32-bit
+ targets. */
+ if (mode == DImode || mode == DFmode
+ || mode == DQmode || mode == UDQmode
+ || mode == DAmode || mode == UDAmode)
+ return 0;
+
+ /* Variable-sized types (size == -1) and anything wider than a
+ word go by reference. */
+ size = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
+ return size == -1 || size > UNITS_PER_WORD;
+ }
+ else
+ {
+ /* If we have a variable-sized parameter, we have no choice. */
+ return targetm.calls.must_pass_in_stack (mode, type);
+ }
+}
+
+/* Implement TARGET_CALLEE_COPIES. Only the EABI makes the callee
+ responsible for copying named by-reference arguments. */
+
+static bool
+mips_callee_copies (cumulative_args_t cum ATTRIBUTE_UNUSED,
+ enum machine_mode mode ATTRIBUTE_UNUSED,
+ const_tree type ATTRIBUTE_UNUSED, bool named)
+{
+ return mips_abi == ABI_EABI && named;
+}
+
+/* See whether VALTYPE is a record whose fields should be returned in
+ floating-point registers. If so, return the number of fields and
+ list them in FIELDS (which should have two elements). Return 0
+ otherwise.
+
+ For n32 & n64, a structure with one or two fields is returned in
+ floating-point registers as long as every field has a floating-point
+ type. */
+
+static int
+mips_fpr_return_fields (const_tree valtype, tree *fields)
+{
+ tree field;
+ int i;
+
+ if (!TARGET_NEWABI)
+ return 0;
+
+ if (TREE_CODE (valtype) != RECORD_TYPE)
+ return 0;
+
+ i = 0;
+ for (field = TYPE_FIELDS (valtype); field != 0; field = DECL_CHAIN (field))
+ {
+ /* Skip non-field chain entries (e.g. nested type declarations). */
+ if (TREE_CODE (field) != FIELD_DECL)
+ continue;
+
+ /* Any non-float field disqualifies the whole record. */
+ if (!SCALAR_FLOAT_TYPE_P (TREE_TYPE (field)))
+ return 0;
+
+ /* More than two fields also disqualifies it. */
+ if (i == 2)
+ return 0;
+
+ fields[i++] = field;
+ }
+ return i;
+}
+
+/* Implement TARGET_RETURN_IN_MSB. For n32 & n64, we should return
+ a value in the most significant part of $2/$3 if:
+
+ - the target is big-endian;
+
+ - the value has a structure or union type (we generalize this to
+ cover aggregates from other languages too); and
+
+ - the structure is not returned in floating-point registers. */
+
+static bool
+mips_return_in_msb (const_tree valtype)
+{
+ tree fields[2];
+
+ return (TARGET_NEWABI
+ && TARGET_BIG_ENDIAN
+ && AGGREGATE_TYPE_P (valtype)
+ && mips_fpr_return_fields (valtype, fields) == 0);
+}
+
+/* Return true if the function return value MODE will get returned in a
+ floating-point register. Covers scalar floats, paired-single
+ (V2SFmode) and complex floats whose parts fit a hardware FPR. */
+
+static bool
+mips_return_mode_in_fpr_p (enum machine_mode mode)
+{
+ return ((GET_MODE_CLASS (mode) == MODE_FLOAT
+ || mode == V2SFmode
+ || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
+ && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_HWFPVALUE);
+}
+
+/* Return the representation of an FPR return register when the
+ value being returned in FP_RETURN has mode VALUE_MODE and the
+ return type itself has mode TYPE_MODE. On NewABI targets,
+ the two modes may be different for structures like:
+
+ struct __attribute__((packed)) foo { float f; }
+
+ where we return the SFmode value of "f" in FP_RETURN, but where
+ the structure itself has mode BLKmode. */
+
+static rtx
+mips_return_fpr_single (enum machine_mode type_mode,
+ enum machine_mode value_mode)
+{
+ rtx x;
+
+ x = gen_rtx_REG (value_mode, FP_RETURN);
+ if (type_mode != value_mode)
+ {
+ /* Wrap the register in a single-entry PARALLEL so that the
+ value keeps VALUE_MODE while the whole has TYPE_MODE. */
+ x = gen_rtx_EXPR_LIST (VOIDmode, x, const0_rtx);
+ x = gen_rtx_PARALLEL (type_mode, gen_rtvec (1, x));
+ }
+ return x;
+}
+
+/* Return a composite value in a pair of floating-point registers.
+ MODE1 and OFFSET1 are the mode and byte offset for the first value,
+ likewise MODE2 and OFFSET2 for the second. MODE is the mode of the
+ complete value.
+
+ For n32 & n64, $f0 always holds the first value and $f2 the second.
+ Otherwise the values are packed together as closely as possible. */
+
+static rtx
+mips_return_fpr_pair (enum machine_mode mode,
+ enum machine_mode mode1, HOST_WIDE_INT offset1,
+ enum machine_mode mode2, HOST_WIDE_INT offset2)
+{
+ int inc;
+
+ /* INC is the register-number distance between the two halves. */
+ inc = (TARGET_NEWABI ? 2 : MAX_FPRS_PER_FMT);
+ return gen_rtx_PARALLEL
+ (mode,
+ gen_rtvec (2,
+ gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_REG (mode1, FP_RETURN),
+ GEN_INT (offset1)),
+ gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_REG (mode2, FP_RETURN + inc),
+ GEN_INT (offset2))));
+
+}
+
+/* Implement TARGET_FUNCTION_VALUE and TARGET_LIBCALL_VALUE.
+ For normal calls, VALTYPE is the return type and MODE is VOIDmode.
+ For libcalls, VALTYPE is null and MODE is the mode of the return value. */
+
+static rtx
+mips_function_value_1 (const_tree valtype, const_tree fn_decl_or_type,
+ enum machine_mode mode)
+{
+ if (valtype)
+ {
+ tree fields[2];
+ int unsigned_p;
+ const_tree func;
+
+ if (fn_decl_or_type && DECL_P (fn_decl_or_type))
+ func = fn_decl_or_type;
+ else
+ func = NULL;
+
+ mode = TYPE_MODE (valtype);
+ unsigned_p = TYPE_UNSIGNED (valtype);
+
+ /* Since TARGET_PROMOTE_FUNCTION_MODE unconditionally promotes,
+ return values, promote the mode here too. */
+ mode = promote_function_mode (valtype, mode, &unsigned_p, func, 1);
+
+ /* Handle structures whose fields are returned in $f0/$f2. */
+ switch (mips_fpr_return_fields (valtype, fields))
+ {
+ case 1:
+ return mips_return_fpr_single (mode,
+ TYPE_MODE (TREE_TYPE (fields[0])));
+
+ case 2:
+ return mips_return_fpr_pair (mode,
+ TYPE_MODE (TREE_TYPE (fields[0])),
+ int_byte_position (fields[0]),
+ TYPE_MODE (TREE_TYPE (fields[1])),
+ int_byte_position (fields[1]));
+ }
+
+ /* If a value is passed in the most significant part of a register, see
+ whether we have to round the mode up to a whole number of words. */
+ if (mips_return_in_msb (valtype))
+ {
+ HOST_WIDE_INT size = int_size_in_bytes (valtype);
+ if (size % UNITS_PER_WORD != 0)
+ {
+ size += UNITS_PER_WORD - size % UNITS_PER_WORD;
+ mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
+ }
+ }
+
+ /* For EABI, the class of return register depends entirely on MODE.
+ For example, "struct { some_type x; }" and "union { some_type x; }"
+ are returned in the same way as a bare "some_type" would be.
+ Other ABIs only use FPRs for scalar, complex or vector types. */
+ if (mips_abi != ABI_EABI && !FLOAT_TYPE_P (valtype))
+ return gen_rtx_REG (mode, GP_RETURN);
+ }
+
+ /* MIPS16 never returns values in FPRs; the FPR cases below are
+ bypassed entirely in that mode. */
+ if (!TARGET_MIPS16)
+ {
+ /* Handle long doubles for n32 & n64. */
+ if (mode == TFmode)
+ return mips_return_fpr_pair (mode,
+ DImode, 0,
+ DImode, GET_MODE_SIZE (mode) / 2);
+
+ if (mips_return_mode_in_fpr_p (mode))
+ {
+ if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
+ return mips_return_fpr_pair (mode,
+ GET_MODE_INNER (mode), 0,
+ GET_MODE_INNER (mode),
+ GET_MODE_SIZE (mode) / 2);
+ else
+ return gen_rtx_REG (mode, FP_RETURN);
+ }
+ }
+
+ return gen_rtx_REG (mode, GP_RETURN);
+}
+
+/* Implement TARGET_FUNCTION_VALUE. Thin wrapper: the mode is derived
+ from VALTYPE inside mips_function_value_1. */
+
+static rtx
+mips_function_value (const_tree valtype, const_tree fn_decl_or_type,
+ bool outgoing ATTRIBUTE_UNUSED)
+{
+ return mips_function_value_1 (valtype, fn_decl_or_type, VOIDmode);
+}
+
+/* Implement TARGET_LIBCALL_VALUE. Libcalls have no return type tree,
+ so the register is selected purely by MODE. */
+
+static rtx
+mips_libcall_value (enum machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
+{
+ return mips_function_value_1 (NULL_TREE, NULL_TREE, mode);
+}
+
+/* Implement TARGET_FUNCTION_VALUE_REGNO_P.
+
+ On the MIPS, R2 R3 and F0 F2 are the only register thus used.
+ Currently, R2 and F0 are only implemented here (C has no complex type).
+ FP_RETURN + 2 is additionally accepted for 128-bit long doubles, which
+ are returned in an FPR pair. */
+
+static bool
+mips_function_value_regno_p (const unsigned int regno)
+{
+ if (regno == GP_RETURN
+ || regno == FP_RETURN
+ || (LONG_DOUBLE_TYPE_SIZE == 128
+ && FP_RETURN != GP_RETURN
+ && regno == FP_RETURN + 2))
+ return true;
+
+ return false;
+}
+
+/* Implement TARGET_RETURN_IN_MEMORY. Under the o32 and o64 ABIs,
+ all BLKmode objects are returned in memory. Under the n32, n64
+ and embedded ABIs, small structures are returned in a register.
+ Objects with varying size must still be returned in memory, of
+ course. */
+
+static bool
+mips_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
+{
+ /* int_size_in_bytes returns -1 for variable-sized types, which
+ falls outside the IN_RANGE check and so forces memory. */
+ return (TARGET_OLDABI
+ ? TYPE_MODE (type) == BLKmode
+ : !IN_RANGE (int_size_in_bytes (type), 0, 2 * UNITS_PER_WORD));
+}
+
+/* Implement TARGET_SETUP_INCOMING_VARARGS. Spill any unnamed argument
+ registers to the stack so that va_arg can find them. */
+
+static void
+mips_setup_incoming_varargs (cumulative_args_t cum, enum machine_mode mode,
+ tree type, int *pretend_size ATTRIBUTE_UNUSED,
+ int no_rtl)
+{
+ CUMULATIVE_ARGS local_cum;
+ int gp_saved, fp_saved;
+
+ /* The caller has advanced CUM up to, but not beyond, the last named
+ argument. Advance a local copy of CUM past the last "real" named
+ argument, to find out how many registers are left over. */
+ local_cum = *get_cumulative_args (cum);
+ mips_function_arg_advance (pack_cumulative_args (&local_cum), mode, type,
+ true);
+
+ /* Find out how many registers we need to save. */
+ gp_saved = MAX_ARGS_IN_REGISTERS - local_cum.num_gprs;
+ fp_saved = (EABI_FLOAT_VARARGS_P
+ ? MAX_ARGS_IN_REGISTERS - local_cum.num_fprs
+ : 0);
+
+ if (!no_rtl)
+ {
+ if (gp_saved > 0)
+ {
+ rtx ptr, mem;
+
+ /* Save the leftover GPRs as one contiguous block just below
+ the incoming-argument area. */
+ ptr = plus_constant (Pmode, virtual_incoming_args_rtx,
+ REG_PARM_STACK_SPACE (cfun->decl)
+ - gp_saved * UNITS_PER_WORD);
+ mem = gen_frame_mem (BLKmode, ptr);
+ set_mem_alias_set (mem, get_varargs_alias_set ());
+
+ move_block_from_reg (local_cum.num_gprs + GP_ARG_FIRST,
+ mem, gp_saved);
+ }
+ if (fp_saved > 0)
+ {
+ /* We can't use move_block_from_reg, because it will use
+ the wrong mode. */
+ enum machine_mode mode;
+ int off, i;
+
+ /* Set OFF to the offset from virtual_incoming_args_rtx of
+ the first float register. The FP save area lies below
+ the integer one, and is aligned to UNITS_PER_FPVALUE bytes. */
+ off = (-gp_saved * UNITS_PER_WORD) & -UNITS_PER_FPVALUE;
+ off -= fp_saved * UNITS_PER_FPREG;
+
+ mode = TARGET_SINGLE_FLOAT ? SFmode : DFmode;
+
+ /* Save each leftover FPR individually in its natural mode. */
+ for (i = local_cum.num_fprs; i < MAX_ARGS_IN_REGISTERS;
+ i += MAX_FPRS_PER_FMT)
+ {
+ rtx ptr, mem;
+
+ ptr = plus_constant (Pmode, virtual_incoming_args_rtx, off);
+ mem = gen_frame_mem (mode, ptr);
+ set_mem_alias_set (mem, get_varargs_alias_set ());
+ mips_emit_move (mem, gen_rtx_REG (mode, FP_ARG_FIRST + i));
+ off += UNITS_PER_HWFPVALUE;
+ }
+ }
+ }
+ if (REG_PARM_STACK_SPACE (cfun->decl) == 0)
+ cfun->machine->varargs_size = (gp_saved * UNITS_PER_WORD
+ + fp_saved * UNITS_PER_FPREG);
+}
+
+/* Implement TARGET_BUILTIN_VA_LIST. Build the va_list record type for
+ EABI-with-FP-varargs targets; all other configurations use void *. */
+
+static tree
+mips_build_builtin_va_list (void)
+{
+ if (EABI_FLOAT_VARARGS_P)
+ {
+ /* We keep 3 pointers, and two offsets.
+
+ Two pointers are to the overflow area, which starts at the CFA.
+ One of these is constant, for addressing into the GPR save area
+ below it. The other is advanced up the stack through the
+ overflow region.
+
+ The third pointer is to the bottom of the GPR save area.
+ Since the FPR save area is just below it, we can address
+ FPR slots off this pointer.
+
+ We also keep two one-byte offsets, which are to be subtracted
+ from the constant pointers to yield addresses in the GPR and
+ FPR save areas. These are downcounted as float or non-float
+ arguments are used, and when they get to zero, the argument
+ must be obtained from the overflow region. */
+ tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff, f_res, record;
+ tree array, index;
+
+ record = lang_hooks.types.make_type (RECORD_TYPE);
+
+ f_ovfl = build_decl (BUILTINS_LOCATION,
+ FIELD_DECL, get_identifier ("__overflow_argptr"),
+ ptr_type_node);
+ f_gtop = build_decl (BUILTINS_LOCATION,
+ FIELD_DECL, get_identifier ("__gpr_top"),
+ ptr_type_node);
+ f_ftop = build_decl (BUILTINS_LOCATION,
+ FIELD_DECL, get_identifier ("__fpr_top"),
+ ptr_type_node);
+ f_goff = build_decl (BUILTINS_LOCATION,
+ FIELD_DECL, get_identifier ("__gpr_offset"),
+ unsigned_char_type_node);
+ f_foff = build_decl (BUILTINS_LOCATION,
+ FIELD_DECL, get_identifier ("__fpr_offset"),
+ unsigned_char_type_node);
+ /* Explicitly pad to the size of a pointer, so that -Wpadded won't
+ warn on every user file. */
+ index = build_int_cst (NULL_TREE, GET_MODE_SIZE (ptr_mode) - 2 - 1);
+ array = build_array_type (unsigned_char_type_node,
+ build_index_type (index));
+ f_res = build_decl (BUILTINS_LOCATION,
+ FIELD_DECL, get_identifier ("__reserved"), array);
+
+ DECL_FIELD_CONTEXT (f_ovfl) = record;
+ DECL_FIELD_CONTEXT (f_gtop) = record;
+ DECL_FIELD_CONTEXT (f_ftop) = record;
+ DECL_FIELD_CONTEXT (f_goff) = record;
+ DECL_FIELD_CONTEXT (f_foff) = record;
+ DECL_FIELD_CONTEXT (f_res) = record;
+
+ /* Chain the fields in declaration order and lay out the record. */
+ TYPE_FIELDS (record) = f_ovfl;
+ DECL_CHAIN (f_ovfl) = f_gtop;
+ DECL_CHAIN (f_gtop) = f_ftop;
+ DECL_CHAIN (f_ftop) = f_goff;
+ DECL_CHAIN (f_goff) = f_foff;
+ DECL_CHAIN (f_foff) = f_res;
+
+ layout_type (record);
+ return record;
+ }
+ else
+ /* Otherwise, we use 'void *'. */
+ return ptr_type_node;
+}
+
+/* Implement TARGET_EXPAND_BUILTIN_VA_START. Initialize the va_list
+ object VALIST; NEXTARG is the default next-argument address computed
+ by the middle end. */
+
+static void
+mips_va_start (tree valist, rtx nextarg)
+{
+ if (EABI_FLOAT_VARARGS_P)
+ {
+ const CUMULATIVE_ARGS *cum;
+ tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
+ tree ovfl, gtop, ftop, goff, foff;
+ tree t;
+ int gpr_save_area_size;
+ int fpr_save_area_size;
+ int fpr_offset;
+
+ cum = &crtl->args.info;
+ gpr_save_area_size
+ = (MAX_ARGS_IN_REGISTERS - cum->num_gprs) * UNITS_PER_WORD;
+ fpr_save_area_size
+ = (MAX_ARGS_IN_REGISTERS - cum->num_fprs) * UNITS_PER_FPREG;
+
+ /* Field order must match mips_build_builtin_va_list. */
+ f_ovfl = TYPE_FIELDS (va_list_type_node);
+ f_gtop = DECL_CHAIN (f_ovfl);
+ f_ftop = DECL_CHAIN (f_gtop);
+ f_goff = DECL_CHAIN (f_ftop);
+ f_foff = DECL_CHAIN (f_goff);
+
+ ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
+ NULL_TREE);
+ gtop = build3 (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
+ NULL_TREE);
+ ftop = build3 (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
+ NULL_TREE);
+ goff = build3 (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
+ NULL_TREE);
+ foff = build3 (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
+ NULL_TREE);
+
+ /* Emit code to initialize OVFL, which points to the next varargs
+ stack argument. CUM->STACK_WORDS gives the number of stack
+ words used by named arguments. */
+ t = make_tree (TREE_TYPE (ovfl), virtual_incoming_args_rtx);
+ if (cum->stack_words > 0)
+ t = fold_build_pointer_plus_hwi (t, cum->stack_words * UNITS_PER_WORD);
+ t = build2 (MODIFY_EXPR, TREE_TYPE (ovfl), ovfl, t);
+ expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
+
+ /* Emit code to initialize GTOP, the top of the GPR save area. */
+ t = make_tree (TREE_TYPE (gtop), virtual_incoming_args_rtx);
+ t = build2 (MODIFY_EXPR, TREE_TYPE (gtop), gtop, t);
+ expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
+
+ /* Emit code to initialize FTOP, the top of the FPR save area.
+ This address is gpr_save_area_bytes below GTOP, rounded
+ down to the next fp-aligned boundary. */
+ t = make_tree (TREE_TYPE (ftop), virtual_incoming_args_rtx);
+ fpr_offset = gpr_save_area_size + UNITS_PER_FPVALUE - 1;
+ fpr_offset &= -UNITS_PER_FPVALUE;
+ if (fpr_offset)
+ t = fold_build_pointer_plus_hwi (t, -fpr_offset);
+ t = build2 (MODIFY_EXPR, TREE_TYPE (ftop), ftop, t);
+ expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
+
+ /* Emit code to initialize GOFF, the offset from GTOP of the
+ next GPR argument. */
+ t = build2 (MODIFY_EXPR, TREE_TYPE (goff), goff,
+ build_int_cst (TREE_TYPE (goff), gpr_save_area_size));
+ expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
+
+ /* Likewise emit code to initialize FOFF, the offset from FTOP
+ of the next FPR argument. */
+ t = build2 (MODIFY_EXPR, TREE_TYPE (foff), foff,
+ build_int_cst (TREE_TYPE (foff), fpr_save_area_size));
+ expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
+ }
+ else
+ {
+ /* Point back past the register save area laid out by
+ mips_setup_incoming_varargs, then use the standard expansion. */
+ nextarg = plus_constant (Pmode, nextarg, -cfun->machine->varargs_size);
+ std_expand_builtin_va_start (valist, nextarg);
+ }
+}
+
+/* Like std_gimplify_va_arg_expr, but apply alignment to zero-sized
+ types as well. Gimplifies a VA_ARG_EXPR for TYPE read from VALIST,
+ appending setup code to PRE_P and cleanup to POST_P, and returns a
+ dereference of the computed argument address. */
+
+static tree
+mips_std_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
+ gimple_seq *post_p)
+{
+ tree addr, t, type_size, rounded_size, valist_tmp;
+ unsigned HOST_WIDE_INT align, boundary;
+ bool indirect;
+
+ /* By-reference arguments are fetched as a pointer and dereferenced
+ at the end. */
+ indirect = pass_by_reference (NULL, TYPE_MODE (type), type, false);
+ if (indirect)
+ type = build_pointer_type (type);
+
+ align = PARM_BOUNDARY / BITS_PER_UNIT;
+ boundary = targetm.calls.function_arg_boundary (TYPE_MODE (type), type);
+
+ /* When we align parameter on stack for caller, if the parameter
+ alignment is beyond MAX_SUPPORTED_STACK_ALIGNMENT, it will be
+ aligned at MAX_SUPPORTED_STACK_ALIGNMENT. We will match callee
+ here with caller. */
+ if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
+ boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
+
+ boundary /= BITS_PER_UNIT;
+
+ /* Hoist the valist value into a temporary for the moment. */
+ valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);
+
+ /* va_list pointer is aligned to PARM_BOUNDARY. If argument actually
+ requires greater alignment, we must perform dynamic alignment. */
+ if (boundary > align)
+ {
+ /* valist_tmp = (valist_tmp + boundary - 1) & -boundary. */
+ t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
+ fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
+ gimplify_and_add (t, pre_p);
+
+ t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
+ fold_build2 (BIT_AND_EXPR, TREE_TYPE (valist),
+ valist_tmp,
+ build_int_cst (TREE_TYPE (valist), -boundary)));
+ gimplify_and_add (t, pre_p);
+ }
+ else
+ boundary = align;
+
+ /* If the actual alignment is less than the alignment of the type,
+ adjust the type accordingly so that we don't assume strict alignment
+ when dereferencing the pointer. */
+ boundary *= BITS_PER_UNIT;
+ if (boundary < TYPE_ALIGN (type))
+ {
+ type = build_variant_type_copy (type);
+ TYPE_ALIGN (type) = boundary;
+ }
+
+ /* Compute the rounded size of the type. */
+ type_size = size_in_bytes (type);
+ rounded_size = round_up (type_size, align);
+
+ /* Reduce rounded_size so it's sharable with the postqueue. */
+ gimplify_expr (&rounded_size, pre_p, post_p, is_gimple_val, fb_rvalue);
+
+ /* Get AP. */
+ addr = valist_tmp;
+ if (PAD_VARARGS_DOWN && !integer_zerop (rounded_size))
+ {
+ /* Small args are padded downward. */
+ t = fold_build2_loc (input_location, GT_EXPR, sizetype,
+ rounded_size, size_int (align));
+ t = fold_build3 (COND_EXPR, sizetype, t, size_zero_node,
+ size_binop (MINUS_EXPR, rounded_size, type_size));
+ addr = fold_build_pointer_plus (addr, t);
+ }
+
+ /* Compute new value for AP. */
+ t = fold_build_pointer_plus (valist_tmp, rounded_size);
+ t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
+ gimplify_and_add (t, pre_p);
+
+ addr = fold_convert (build_pointer_type (type), addr);
+
+ if (indirect)
+ addr = build_va_arg_indirect_ref (addr);
+
+ return build_va_arg_indirect_ref (addr);
+}
+
+/* Implement TARGET_GIMPLIFY_VA_ARG_EXPR.  VALIST is the va_list lvalue,
+ TYPE the argument type; PRE_P and POST_P collect gimplified statements.
+ For non-EABI-float targets this defers to the std-style expansion;
+ otherwise it walks the five-field EABI va_list structure.  */
+
+static tree
+mips_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
+ gimple_seq *post_p)
+{
+ tree addr;
+ bool indirect_p;
+
+ indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
+ if (indirect_p)
+ type = build_pointer_type (type);
+
+ if (!EABI_FLOAT_VARARGS_P)
+ addr = mips_std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
+ else
+ {
+ tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
+ tree ovfl, top, off, align;
+ HOST_WIDE_INT size, rsize, osize;
+ tree t, u;
+
+ /* The five fields of the EABI va_list, in declaration order:
+ overflow area pointer, GPR/FPR save-area tops, GPR/FPR offsets.  */
+ f_ovfl = TYPE_FIELDS (va_list_type_node);
+ f_gtop = DECL_CHAIN (f_ovfl);
+ f_ftop = DECL_CHAIN (f_gtop);
+ f_goff = DECL_CHAIN (f_ftop);
+ f_foff = DECL_CHAIN (f_goff);
+
+ /* Let:
+
+ TOP be the top of the GPR or FPR save area;
+ OFF be the offset from TOP of the next register;
+ ADDR_RTX be the address of the argument;
+ SIZE be the number of bytes in the argument type;
+ RSIZE be the number of bytes used to store the argument
+ when it's in the register save area; and
+ OSIZE be the number of bytes used to store it when it's
+ in the stack overflow area.
+
+ The code we want is:
+
+ 1: off &= -rsize; // round down
+ 2: if (off != 0)
+ 3: {
+ 4: addr_rtx = top - off + (BYTES_BIG_ENDIAN ? RSIZE - SIZE : 0);
+ 5: off -= rsize;
+ 6: }
+ 7: else
+ 8: {
+ 9: ovfl = ((intptr_t) ovfl + osize - 1) & -osize;
+ 10: addr_rtx = ovfl + (BYTES_BIG_ENDIAN ? OSIZE - SIZE : 0);
+ 11: ovfl += osize;
+ 12: }
+
+ [1] and [9] can sometimes be optimized away. */
+
+ ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
+ NULL_TREE);
+ size = int_size_in_bytes (type);
+
+ if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT
+ && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_FPVALUE)
+ {
+ top = build3 (COMPONENT_REF, TREE_TYPE (f_ftop),
+ unshare_expr (valist), f_ftop, NULL_TREE);
+ off = build3 (COMPONENT_REF, TREE_TYPE (f_foff),
+ unshare_expr (valist), f_foff, NULL_TREE);
+
+ /* When va_start saves FPR arguments to the stack, each slot
+ takes up UNITS_PER_HWFPVALUE bytes, regardless of the
+ argument's precision. */
+ rsize = UNITS_PER_HWFPVALUE;
+
+ /* Overflow arguments are padded to UNITS_PER_WORD bytes
+ (= PARM_BOUNDARY bits). This can be different from RSIZE
+ in two cases:
+
+ (1) On 32-bit targets when TYPE is a structure such as:
+
+ struct s { float f; };
+
+ Such structures are passed in paired FPRs, so RSIZE
+ will be 8 bytes. However, the structure only takes
+ up 4 bytes of memory, so OSIZE will only be 4.
+
+ (2) In combinations such as -mgp64 -msingle-float
+ -fshort-double. Doubles passed in registers will then take
+ up 4 (UNITS_PER_HWFPVALUE) bytes, but those passed on the
+ stack take up UNITS_PER_WORD bytes. */
+ osize = MAX (GET_MODE_SIZE (TYPE_MODE (type)), UNITS_PER_WORD);
+ }
+ else
+ {
+ /* Integer/aggregate arguments come from the GPR save area.  */
+ top = build3 (COMPONENT_REF, TREE_TYPE (f_gtop),
+ unshare_expr (valist), f_gtop, NULL_TREE);
+ off = build3 (COMPONENT_REF, TREE_TYPE (f_goff),
+ unshare_expr (valist), f_goff, NULL_TREE);
+ rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
+ if (rsize > UNITS_PER_WORD)
+ {
+ /* [1] Emit code for: off &= -rsize. */
+ t = build2 (BIT_AND_EXPR, TREE_TYPE (off), unshare_expr (off),
+ build_int_cst (TREE_TYPE (off), -rsize));
+ gimplify_assign (unshare_expr (off), t, pre_p);
+ }
+ osize = rsize;
+ }
+
+ /* [2] Emit code to branch if off == 0. */
+ t = build2 (NE_EXPR, boolean_type_node, unshare_expr (off),
+ build_int_cst (TREE_TYPE (off), 0));
+ addr = build3 (COND_EXPR, ptr_type_node, t, NULL_TREE, NULL_TREE);
+
+ /* [5] Emit code for: off -= rsize. We do this as a form of
+ post-decrement not available to C. */
+ t = fold_convert (TREE_TYPE (off), build_int_cst (NULL_TREE, rsize));
+ t = build2 (POSTDECREMENT_EXPR, TREE_TYPE (off), off, t);
+
+ /* [4] Emit code for:
+ addr_rtx = top - off + (BYTES_BIG_ENDIAN ? RSIZE - SIZE : 0). */
+ t = fold_convert (sizetype, t);
+ t = fold_build1 (NEGATE_EXPR, sizetype, t);
+ t = fold_build_pointer_plus (top, t);
+ if (BYTES_BIG_ENDIAN && rsize > size)
+ t = fold_build_pointer_plus_hwi (t, rsize - size);
+ COND_EXPR_THEN (addr) = t;
+
+ if (osize > UNITS_PER_WORD)
+ {
+ /* [9] Emit: ovfl = ((intptr_t) ovfl + osize - 1) & -osize. */
+ t = fold_build_pointer_plus_hwi (unshare_expr (ovfl), osize - 1);
+ u = build_int_cst (TREE_TYPE (t), -osize);
+ t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t, u);
+ align = build2 (MODIFY_EXPR, TREE_TYPE (ovfl),
+ unshare_expr (ovfl), t);
+ }
+ else
+ align = NULL;
+
+ /* [10, 11] Emit code for:
+ addr_rtx = ovfl + (BYTES_BIG_ENDIAN ? OSIZE - SIZE : 0)
+ ovfl += osize. */
+ u = fold_convert (TREE_TYPE (ovfl), build_int_cst (NULL_TREE, osize));
+ t = build2 (POSTINCREMENT_EXPR, TREE_TYPE (ovfl), ovfl, u);
+ if (BYTES_BIG_ENDIAN && osize > size)
+ t = fold_build_pointer_plus_hwi (t, osize - size);
+
+ /* Stitch [9] and [10, 11] together. */
+ if (align)
+ t = build2 (COMPOUND_EXPR, TREE_TYPE (t), align, t);
+ COND_EXPR_ELSE (addr) = t;
+
+ addr = fold_convert (build_pointer_type (type), addr);
+ addr = build_va_arg_indirect_ref (addr);
+ }
+
+ /* For by-reference arguments, dereference once more to reach
+ the actual object.  */
+ if (indirect_p)
+ addr = build_va_arg_indirect_ref (addr);
+
+ return addr;
+}
+
+/* Declare a unique, locally-binding function called NAME, then start
+ its definition.  The function is placed in its own comdat group and
+ marked ".hidden" so duplicate copies from other translation units
+ merge at link time without creating a dynamic symbol.  */
+
+static void
+mips_start_unique_function (const char *name)
+{
+ tree decl;
+
+ /* Build a bare void(void) FUNCTION_DECL for the assembler output
+ machinery; no GIMPLE body is ever attached.  */
+ decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
+ get_identifier (name),
+ build_function_type_list (void_type_node, NULL_TREE));
+ DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
+ NULL_TREE, void_type_node);
+ TREE_PUBLIC (decl) = 1;
+ TREE_STATIC (decl) = 1;
+
+ /* One comdat group per function, keyed by its own name.  */
+ DECL_COMDAT_GROUP (decl) = DECL_ASSEMBLER_NAME (decl);
+
+ targetm.asm_out.unique_section (decl, 0);
+ switch_to_section (get_named_section (decl, NULL, 0));
+
+ /* Global so comdat merging works, hidden so it binds locally.  */
+ targetm.asm_out.globalize_label (asm_out_file, name);
+ fputs ("\t.hidden\t", asm_out_file);
+ assemble_name (asm_out_file, name);
+ putc ('\n', asm_out_file);
+}
+
+/* Start a definition of function NAME. MIPS16_P indicates whether the
+ function contains MIPS16 code.  Emits the mode directives, the ".ent"
+ and ".type" markers, and finally the label itself.  */
+
+static void
+mips_start_function_definition (const char *name, bool mips16_p)
+{
+ /* Tell the assembler which ISA mode the body uses.  */
+ if (mips16_p)
+ fprintf (asm_out_file, "\t.set\tmips16\n");
+ else
+ fprintf (asm_out_file, "\t.set\tnomips16\n");
+
+ /* ".set nomicromips" is only emitted when the assembler is known to
+ understand it; ".set micromips" is required regardless.  */
+ if (TARGET_MICROMIPS)
+ fprintf (asm_out_file, "\t.set\tmicromips\n");
+#ifdef HAVE_GAS_MICROMIPS
+ else
+ fprintf (asm_out_file, "\t.set\tnomicromips\n");
+#endif
+
+ if (!flag_inhibit_size_directive)
+ {
+ fputs ("\t.ent\t", asm_out_file);
+ assemble_name (asm_out_file, name);
+ fputs ("\n", asm_out_file);
+ }
+
+ ASM_OUTPUT_TYPE_DIRECTIVE (asm_out_file, name, "function");
+
+ /* Start the definition proper. */
+ assemble_name (asm_out_file, name);
+ fputs (":\n", asm_out_file);
+}
+
+/* End a function definition started by mips_start_function_definition.
+ Emits the matching ".end NAME" directive unless size directives are
+ suppressed.  */
+
+static void
+mips_end_function_definition (const char *name)
+{
+ if (!flag_inhibit_size_directive)
+ {
+ fputs ("\t.end\t", asm_out_file);
+ assemble_name (asm_out_file, name);
+ fputs ("\n", asm_out_file);
+ }
+}
+
+/* If *STUB_PTR points to a stub, output a comdat-style definition for it,
+ then free *STUB_PTR.  A no-op when no stub was requested.  */
+
+static void
+mips_finish_stub (mips_one_only_stub **stub_ptr)
+{
+ mips_one_only_stub *stub = *stub_ptr;
+ if (!stub)
+ return;
+
+ /* Emit the stub as a unique, hidden, non-MIPS16 function.  */
+ const char *name = stub->get_name ();
+ mips_start_unique_function (name);
+ mips_start_function_definition (name, false);
+ stub->output_body ();
+ mips_end_function_definition (name);
+ /* Clear the pointer so the stub is only ever emitted once.  */
+ delete stub;
+ *stub_ptr = 0;
+}
+
+/* Return true if calls to X can use R_MIPS_CALL* relocations.
+ Lazy binding requires GOT-based addressing and a SYMBOL_REF that
+ is neither bind-now nor known to bind locally.  */
+
+static bool
+mips_ok_for_lazy_binding_p (rtx x)
+{
+ if (!TARGET_USE_GOT || GET_CODE (x) != SYMBOL_REF)
+ return false;
+
+ return !SYMBOL_REF_BIND_NOW_P (x) && !mips_symbol_binds_local_p (x);
+}
+
+/* Load function address ADDR into register DEST. TYPE is as for
+ mips_expand_call. Return true if we used an explicit lazy-binding
+ sequence (i.e. a GOT call load that the linker may bind lazily).  */
+
+static bool
+mips_load_call_address (enum mips_call_type type, rtx dest, rtx addr)
+{
+ /* If we're generating PIC, and this call is to a global function,
+ try to allow its address to be resolved lazily. This isn't
+ possible for sibcalls when $gp is call-saved because the value
+ of $gp on entry to the stub would be our caller's gp, not ours. */
+ if (TARGET_EXPLICIT_RELOCS
+ && !(type == MIPS_CALL_SIBCALL && TARGET_CALL_SAVED_GP)
+ && mips_ok_for_lazy_binding_p (addr))
+ {
+ /* Load DEST from the GOT using a call-specific relocation.  */
+ addr = mips_got_load (dest, addr, SYMBOL_GOTOFF_CALL);
+ emit_insn (gen_rtx_SET (VOIDmode, dest, addr));
+ return true;
+ }
+ else
+ {
+ /* Otherwise a plain move (possibly split later) is sufficient.  */
+ mips_emit_move (dest, addr);
+ return false;
+ }
+}
+
+/* Each locally-defined hard-float MIPS16 function has a local symbol
+ associated with it. This hash table maps the function symbol (FUNC)
+ to the local symbol (LOCAL). */
+struct GTY(()) mips16_local_alias {
+ /* The function's own SYMBOL_REF; also the hash key.  */
+ rtx func;
+ /* The "__fn_local_*" SYMBOL_REF that aliases FUNC.  */
+ rtx local;
+};
+/* Lazily-created GC-managed map from FUNC to its alias entry.  */
+static GTY ((param_is (struct mips16_local_alias))) htab_t mips16_local_aliases;
+
+/* Hash table callbacks for mips16_local_aliases. */
+
+/* Hash an entry by the assembler name of its function symbol.  */
+
+static hashval_t
+mips16_local_aliases_hash (const void *entry)
+{
+ const struct mips16_local_alias *alias
+ = (const struct mips16_local_alias *) entry;
+
+ return htab_hash_string (XSTR (alias->func, 0));
+}
+
+/* Return nonzero if two mips16_local_alias entries refer to the same
+ function symbol.  */
+
+static int
+mips16_local_aliases_eq (const void *entry1, const void *entry2)
+{
+ const struct mips16_local_alias *a1
+ = (const struct mips16_local_alias *) entry1;
+ const struct mips16_local_alias *a2
+ = (const struct mips16_local_alias *) entry2;
+
+ return rtx_equal_p (a1->func, a2->func);
+}
+
+/* FUNC is the symbol for a locally-defined hard-float MIPS16 function.
+ Return a local alias for it, creating a new one if necessary.
+ The alias is cached in mips16_local_aliases, so repeated calls for
+ the same FUNC return the same SYMBOL_REF.  */
+
+static rtx
+mips16_local_alias (rtx func)
+{
+ struct mips16_local_alias *alias, tmp_alias;
+ void **slot;
+
+ /* Create the hash table if this is the first call. */
+ if (mips16_local_aliases == NULL)
+ mips16_local_aliases = htab_create_ggc (37, mips16_local_aliases_hash,
+ mips16_local_aliases_eq, NULL);
+
+ /* Look up the function symbol, creating a new entry if need be. */
+ tmp_alias.func = func;
+ slot = htab_find_slot (mips16_local_aliases, &tmp_alias, INSERT);
+ gcc_assert (slot != NULL);
+
+ alias = (struct mips16_local_alias *) *slot;
+ if (alias == NULL)
+ {
+ const char *func_name, *local_name;
+ rtx local;
+
+ /* Create a new SYMBOL_REF for the local symbol. The choice of
+ __fn_local_* is based on the __fn_stub_* names that we've
+ traditionally used for the non-MIPS16 stub. */
+ func_name = targetm.strip_name_encoding (XSTR (func, 0));
+ local_name = ACONCAT (("__fn_local_", func_name, NULL));
+ local = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (local_name));
+ /* Inherit FUNC's flags but force local binding.  */
+ SYMBOL_REF_FLAGS (local) = SYMBOL_REF_FLAGS (func) | SYMBOL_FLAG_LOCAL;
+
+ /* Create a new structure to represent the mapping. */
+ alias = ggc_alloc_mips16_local_alias ();
+ alias->func = func;
+ alias->local = local;
+ *slot = alias;
+ }
+ return alias->local;
+}
+
+/* A chained list of functions for which mips16_build_call_stub has already
+ generated a stub. NAME is the name of the function and FP_RET_P is true
+ if the function returns a value in floating-point registers. */
+struct mips16_stub {
+ /* Next stub in the chain, or NULL.  */
+ struct mips16_stub *next;
+ /* xstrdup'd, stripped name of the target function.  */
+ char *name;
+ /* True if the stub handles a floating-point return value.  */
+ bool fp_ret_p;
+};
+/* Head of the list of stubs emitted so far in this translation unit.  */
+static struct mips16_stub *mips16_stubs;
+
+/* Return the two-character string that identifies floating-point
+ return mode MODE in the name of a MIPS16 function stub.
+ Note that V2SFmode deliberately shares the "df" suffix with DFmode:
+ both are 64-bit floating-point values for stub purposes.  */
+
+static const char *
+mips16_call_stub_mode_suffix (enum machine_mode mode)
+{
+ if (mode == SFmode)
+ return "sf";
+ else if (mode == DFmode)
+ return "df";
+ else if (mode == SCmode)
+ return "sc";
+ else if (mode == DCmode)
+ return "dc";
+ else if (mode == V2SFmode)
+ return "df";
+ else
+ /* Callers only pass modes that mips_return_mode_in_fpr_p accepts.  */
+ gcc_unreachable ();
+}
+
+/* Write instructions to move a 32-bit value between general register
+ GPREG and floating-point register FPREG. DIRECTION is 't' to move
+ from GPREG to FPREG and 'f' to move in the opposite direction,
+ giving an "mtc1" or "mfc1" mnemonic respectively.  */
+
+static void
+mips_output_32bit_xfer (char direction, unsigned int gpreg, unsigned int fpreg)
+{
+ fprintf (asm_out_file, "\tm%cc1\t%s,%s\n", direction,
+ reg_names[gpreg], reg_names[fpreg]);
+}
+
+/* Likewise for 64-bit values.  Three encodings are possible:
+ a single doubleword move on 64-bit targets, a low/high pair of
+ 32-bit moves into one 64-bit FPR when TARGET_FLOAT64, or two
+ 32-bit moves into an even/odd FPR pair otherwise.  */
+
+static void
+mips_output_64bit_xfer (char direction, unsigned int gpreg, unsigned int fpreg)
+{
+ if (TARGET_64BIT)
+ fprintf (asm_out_file, "\tdm%cc1\t%s,%s\n", direction,
+ reg_names[gpreg], reg_names[fpreg]);
+ else if (TARGET_FLOAT64)
+ {
+ /* 32-bit GPRs, 64-bit FPRs: move the low then the high half,
+ picking the GPR that holds each half by endianness.  */
+ fprintf (asm_out_file, "\tm%cc1\t%s,%s\n", direction,
+ reg_names[gpreg + TARGET_BIG_ENDIAN], reg_names[fpreg]);
+ fprintf (asm_out_file, "\tm%chc1\t%s,%s\n", direction,
+ reg_names[gpreg + TARGET_LITTLE_ENDIAN], reg_names[fpreg]);
+ }
+ else
+ {
+ /* Move the least-significant word. */
+ fprintf (asm_out_file, "\tm%cc1\t%s,%s\n", direction,
+ reg_names[gpreg + TARGET_BIG_ENDIAN], reg_names[fpreg]);
+ /* ...then the most significant word. */
+ fprintf (asm_out_file, "\tm%cc1\t%s,%s\n", direction,
+ reg_names[gpreg + TARGET_LITTLE_ENDIAN], reg_names[fpreg + 1]);
+ }
+}
+
+/* Write out code to move floating-point arguments into or out of
+ general registers. FP_CODE is the code describing which arguments
+ are present (see the comment above the definition of CUMULATIVE_ARGS
+ in mips.h). DIRECTION is as for mips_output_32bit_xfer. */
+
+static void
+mips_output_args_xfer (int fp_code, char direction)
+{
+ unsigned int gparg, fparg, f;
+ CUMULATIVE_ARGS cum;
+
+ /* This code only works for o32 and o64. */
+ gcc_assert (TARGET_OLDABI);
+
+ mips_init_cumulative_args (&cum, NULL);
+
+ /* FP_CODE packs one two-bit code per argument: 1 = float, 2 = double.
+ Walk the codes, advancing the argument cursor as we go.  */
+ for (f = (unsigned int) fp_code; f != 0; f >>= 2)
+ {
+ enum machine_mode mode;
+ struct mips_arg_info info;
+
+ if ((f & 3) == 1)
+ mode = SFmode;
+ else if ((f & 3) == 2)
+ mode = DFmode;
+ else
+ gcc_unreachable ();
+
+ /* Find the GPR and FPR that hold this argument.  */
+ mips_get_arg_info (&info, &cum, mode, NULL, true);
+ gparg = mips_arg_regno (&info, false);
+ fparg = mips_arg_regno (&info, true);
+
+ if (mode == SFmode)
+ mips_output_32bit_xfer (direction, gparg, fparg);
+ else
+ mips_output_64bit_xfer (direction, gparg, fparg);
+
+ mips_function_arg_advance (pack_cumulative_args (&cum), mode, NULL, true);
+ }
+}
+
+/* Write a MIPS16 stub for the current function. This stub is used
+ for functions which take arguments in the floating-point registers.
+ It is normal-mode code that moves the floating-point arguments
+ into the general registers and then jumps to the MIPS16 code.
+ The stub lives in a ".mips16.fn.NAME" section and is paired with a
+ "__fn_local_NAME" alias for the MIPS16 entry point.  */
+
+static void
+mips16_build_function_stub (void)
+{
+ const char *fnname, *alias_name, *separator;
+ char *secname, *stubname;
+ tree stubdecl;
+ unsigned int f;
+ rtx symbol, alias;
+
+ /* Create the name of the stub, and its unique section. */
+ symbol = XEXP (DECL_RTL (current_function_decl), 0);
+ alias = mips16_local_alias (symbol);
+
+ fnname = targetm.strip_name_encoding (XSTR (symbol, 0));
+ alias_name = targetm.strip_name_encoding (XSTR (alias, 0));
+ secname = ACONCAT ((".mips16.fn.", fnname, NULL));
+ stubname = ACONCAT (("__fn_stub_", fnname, NULL));
+
+ /* Build a decl for the stub. */
+ stubdecl = build_decl (BUILTINS_LOCATION,
+ FUNCTION_DECL, get_identifier (stubname),
+ build_function_type_list (void_type_node, NULL_TREE));
+ DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
+ DECL_RESULT (stubdecl) = build_decl (BUILTINS_LOCATION,
+ RESULT_DECL, NULL_TREE, void_type_node);
+
+ /* Output a comment listing the FP argument types, e.g.
+ "# Stub function for foo (float, double)".  */
+ fprintf (asm_out_file, "\t# Stub function for %s (",
+ current_function_name ());
+ separator = "";
+ for (f = (unsigned int) crtl->args.info.fp_code; f != 0; f >>= 2)
+ {
+ fprintf (asm_out_file, "%s%s", separator,
+ (f & 3) == 1 ? "float" : "double");
+ separator = ", ";
+ }
+ fprintf (asm_out_file, ")\n");
+
+ /* Start the function definition. */
+ assemble_start_function (stubdecl, stubname);
+ mips_start_function_definition (stubname, false);
+
+ /* If generating pic2 code, either set up the global pointer or
+ switch to pic0. */
+ if (TARGET_ABICALLS_PIC2)
+ {
+ if (TARGET_ABSOLUTE_ABICALLS)
+ fprintf (asm_out_file, "\t.option\tpic0\n");
+ else
+ {
+ output_asm_insn ("%(.cpload\t%^%)", NULL);
+ /* Emit an R_MIPS_NONE relocation to tell the linker what the
+ target function is. Use a local GOT access when loading the
+ symbol, to cut down on the number of unnecessary GOT entries
+ for stubs that aren't needed. */
+ output_asm_insn (".reloc\t0,R_MIPS_NONE,%0", &symbol);
+ symbol = alias;
+ }
+ }
+
+ /* Load the address of the MIPS16 function into $25. Do this first so
+ that targets with coprocessor interlocks can use an MFC1 to fill the
+ delay slot. */
+ output_asm_insn ("la\t%^,%0", &symbol);
+
+ /* Move the arguments from floating-point registers to general registers. */
+ mips_output_args_xfer (crtl->args.info.fp_code, 'f');
+
+ /* Jump to the MIPS16 function. */
+ output_asm_insn ("jr\t%^", NULL);
+
+ /* Restore pic2 mode if we switched it off above.  */
+ if (TARGET_ABICALLS_PIC2 && TARGET_ABSOLUTE_ABICALLS)
+ fprintf (asm_out_file, "\t.option\tpic2\n");
+
+ mips_end_function_definition (stubname);
+
+ /* If the linker needs to create a dynamic symbol for the target
+ function, it will associate the symbol with the stub (which,
+ unlike the target function, follows the proper calling conventions).
+ It is therefore useful to have a local alias for the target function,
+ so that it can still be identified as MIPS16 code. As an optimization,
+ this symbol can also be used for indirect MIPS16 references from
+ within this file. */
+ ASM_OUTPUT_DEF (asm_out_file, alias_name, fnname);
+
+ switch_to_section (function_section (current_function_decl));
+}
+
+/* The current function is a MIPS16 function that returns a value in an FPR.
+ Copy the return value from its soft-float to its hard-float location.
+ libgcc2 has special non-MIPS16 helper functions for each case
+ (__mips16_ret_sf, __mips16_ret_df, etc.).  */
+
+static void
+mips16_copy_fpr_return_value (void)
+{
+ rtx fn, insn, retval;
+ tree return_type;
+ enum machine_mode return_mode;
+ const char *name;
+
+ return_type = DECL_RESULT (current_function_decl);
+ return_mode = DECL_MODE (return_type);
+
+ /* Pick the helper that matches the return mode's suffix.  */
+ name = ACONCAT (("__mips16_ret_",
+ mips16_call_stub_mode_suffix (return_mode),
+ NULL));
+ fn = mips16_stub_function (name);
+
+ /* The function takes arguments in $2 (and possibly $3), so calls
+ to it cannot be lazily bound. */
+ SYMBOL_REF_FLAGS (fn) |= SYMBOL_FLAG_BIND_NOW;
+
+ /* Model the call as something that takes the GPR return value as
+ argument and returns an "updated" value. */
+ retval = gen_rtx_REG (return_mode, GP_RETURN);
+ insn = mips_expand_call (MIPS_CALL_EPILOGUE, retval, fn,
+ const0_rtx, NULL_RTX, false);
+ /* Record that the call reads the soft-float return registers.  */
+ use_reg (&CALL_INSN_FUNCTION_USAGE (insn), retval);
+}
+
+/* Consider building a stub for a MIPS16 call to function *FN_PTR.
+ RETVAL is the location of the return value, or null if this is
+ a "call" rather than a "call_value". ARGS_SIZE is the size of the
+ arguments and FP_CODE is the code built by mips_function_arg;
+ see the comment before the fp_code field in CUMULATIVE_ARGS for details.
+
+ There are three alternatives:
+
+ - If a stub was needed, emit the call and return the call insn itself.
+
+ - If we can avoid using a stub by redirecting the call, set *FN_PTR
+ to the new target and return null.
+
+ - If *FN_PTR doesn't need a stub, return null and leave *FN_PTR
+ unmodified.
+
+ A stub is needed for calls to functions that, in normal mode,
+ receive arguments in FPRs or return values in FPRs. The stub
+ copies the arguments from their soft-float positions to their
+ hard-float positions, calls the real function, then copies the
+ return value from its hard-float position to its soft-float
+ position.
+
+ We can emit a JAL to *FN_PTR even when *FN_PTR might need a stub.
+ If *FN_PTR turns out to be to a non-MIPS16 function, the linker
+ automatically redirects the JAL to the stub, otherwise the JAL
+ continues to call FN directly. */
+
+static rtx
+mips16_build_call_stub (rtx retval, rtx *fn_ptr, rtx args_size, int fp_code)
+{
+ const char *fnname;
+ bool fp_ret_p;
+ struct mips16_stub *l;
+ rtx insn, fn;
+
+ /* We don't need to do anything if we aren't in MIPS16 mode, or if
+ we were invoked with the -msoft-float option. */
+ if (!TARGET_MIPS16 || TARGET_SOFT_FLOAT_ABI)
+ return NULL_RTX;
+
+ /* Figure out whether the value might come back in a floating-point
+ register. */
+ fp_ret_p = retval && mips_return_mode_in_fpr_p (GET_MODE (retval));
+
+ /* We don't need to do anything if there were no floating-point
+ arguments and the value will not be returned in a floating-point
+ register. */
+ if (fp_code == 0 && !fp_ret_p)
+ return NULL_RTX;
+
+ /* We don't need to do anything if this is a call to a special
+ MIPS16 support function. */
+ fn = *fn_ptr;
+ if (mips16_stub_function_p (fn))
+ return NULL_RTX;
+
+ /* If we're calling a locally-defined MIPS16 function, we know that
+ it will return values in both the "soft-float" and "hard-float"
+ registers. There is no need to use a stub to move the latter
+ to the former. */
+ if (fp_code == 0 && mips16_local_function_p (fn))
+ return NULL_RTX;
+
+ /* This code will only work for o32 and o64 abis. The other ABI's
+ require more sophisticated support. */
+ gcc_assert (TARGET_OLDABI);
+
+ /* If we're calling via a function pointer, use one of the magic
+ libgcc.a stubs provided for each (FP_CODE, FP_RET_P) combination.
+ Each stub expects the function address to arrive in register $2. */
+ if (GET_CODE (fn) != SYMBOL_REF
+ || !call_insn_operand (fn, VOIDmode))
+ {
+ char buf[30];
+ rtx stub_fn, insn, addr;
+ bool lazy_p;
+
+ /* If this is a locally-defined and locally-binding function,
+ avoid the stub by calling the local alias directly. */
+ if (mips16_local_function_p (fn))
+ {
+ *fn_ptr = mips16_local_alias (fn);
+ return NULL_RTX;
+ }
+
+ /* Create a SYMBOL_REF for the libgcc.a function. */
+ if (fp_ret_p)
+ sprintf (buf, "__mips16_call_stub_%s_%d",
+ mips16_call_stub_mode_suffix (GET_MODE (retval)),
+ fp_code);
+ else
+ sprintf (buf, "__mips16_call_stub_%d", fp_code);
+ stub_fn = mips16_stub_function (buf);
+
+ /* The function uses $2 as an argument, so calls to it
+ cannot be lazily bound. */
+ SYMBOL_REF_FLAGS (stub_fn) |= SYMBOL_FLAG_BIND_NOW;
+
+ /* Load the target function into $2. */
+ addr = gen_rtx_REG (Pmode, GP_REG_FIRST + 2);
+ lazy_p = mips_load_call_address (MIPS_CALL_NORMAL, addr, fn);
+
+ /* Emit the call. */
+ insn = mips_expand_call (MIPS_CALL_NORMAL, retval, stub_fn,
+ args_size, NULL_RTX, lazy_p);
+
+ /* Tell GCC that this call does indeed use the value of $2. */
+ use_reg (&CALL_INSN_FUNCTION_USAGE (insn), addr);
+
+ /* If we are handling a floating-point return value, we need to
+ save $18 in the function prologue. Putting a note on the
+ call will mean that df_regs_ever_live_p ($18) will be true if the
+ call is not eliminated, and we can check that in the prologue
+ code. */
+ if (fp_ret_p)
+ CALL_INSN_FUNCTION_USAGE (insn) =
+ gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_CLOBBER (VOIDmode,
+ gen_rtx_REG (word_mode, 18)),
+ CALL_INSN_FUNCTION_USAGE (insn));
+
+ return insn;
+ }
+
+ /* We know the function we are going to call. If we have already
+ built a stub, we don't need to do anything further. */
+ fnname = targetm.strip_name_encoding (XSTR (fn, 0));
+ for (l = mips16_stubs; l != NULL; l = l->next)
+ if (strcmp (l->name, fnname) == 0)
+ break;
+
+ if (l == NULL)
+ {
+ const char *separator;
+ char *secname, *stubname;
+ tree stubid, stubdecl;
+ unsigned int f;
+
+ /* If the function does not return in FPRs, the special stub
+ section is named
+ .mips16.call.FNNAME
+
+ If the function does return in FPRs, the stub section is named
+ .mips16.call.fp.FNNAME
+
+ Build a decl for the stub. */
+ secname = ACONCAT ((".mips16.call.", fp_ret_p ? "fp." : "",
+ fnname, NULL));
+ stubname = ACONCAT (("__call_stub_", fp_ret_p ? "fp_" : "",
+ fnname, NULL));
+ stubid = get_identifier (stubname);
+ stubdecl = build_decl (BUILTINS_LOCATION,
+ FUNCTION_DECL, stubid,
+ build_function_type_list (void_type_node,
+ NULL_TREE));
+ DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
+ DECL_RESULT (stubdecl) = build_decl (BUILTINS_LOCATION,
+ RESULT_DECL, NULL_TREE,
+ void_type_node);
+
+ /* Output a comment. */
+ fprintf (asm_out_file, "\t# Stub function to call %s%s (",
+ (fp_ret_p
+ ? (GET_MODE (retval) == SFmode ? "float " : "double ")
+ : ""),
+ fnname);
+ separator = "";
+ for (f = (unsigned int) fp_code; f != 0; f >>= 2)
+ {
+ fprintf (asm_out_file, "%s%s", separator,
+ (f & 3) == 1 ? "float" : "double");
+ separator = ", ";
+ }
+ fprintf (asm_out_file, ")\n");
+
+ /* Start the function definition. */
+ assemble_start_function (stubdecl, stubname);
+ mips_start_function_definition (stubname, false);
+
+ if (fp_ret_p)
+ {
+ fprintf (asm_out_file, "\t.cfi_startproc\n");
+
+ /* Create a fake CFA 4 bytes below the stack pointer.
+ This works around unwinders (like libgcc's) that expect
+ the CFA for non-signal frames to be unique. */
+ fprintf (asm_out_file, "\t.cfi_def_cfa 29,-4\n");
+
+ /* "Save" $sp in itself so we don't use the fake CFA.
+ This is: DW_CFA_val_expression r29, { DW_OP_reg29 }. */
+ fprintf (asm_out_file, "\t.cfi_escape 0x16,29,1,0x6d\n");
+ }
+ else
+ {
+ /* Load the address of the MIPS16 function into $25. Do this
+ first so that targets with coprocessor interlocks can use
+ an MFC1 to fill the delay slot. */
+ if (TARGET_EXPLICIT_RELOCS)
+ {
+ output_asm_insn ("lui\t%^,%%hi(%0)", &fn);
+ output_asm_insn ("addiu\t%^,%^,%%lo(%0)", &fn);
+ }
+ else
+ output_asm_insn ("la\t%^,%0", &fn);
+ }
+
+ /* Move the arguments from general registers to floating-point
+ registers. */
+ mips_output_args_xfer (fp_code, 't');
+
+ if (fp_ret_p)
+ {
+ /* Save the return address in $18 and call the non-MIPS16 function.
+ The stub's caller knows that $18 might be clobbered, even though
+ $18 is usually a call-saved register. */
+ fprintf (asm_out_file, "\tmove\t%s,%s\n",
+ reg_names[GP_REG_FIRST + 18], reg_names[RETURN_ADDR_REGNUM]);
+ output_asm_insn (MIPS_CALL ("jal", &fn, 0, -1), &fn);
+ fprintf (asm_out_file, "\t.cfi_register 31,18\n");
+
+ /* Move the result from floating-point registers to
+ general registers. */
+ switch (GET_MODE (retval))
+ {
+ case SCmode:
+ mips_output_32bit_xfer ('f', GP_RETURN + TARGET_BIG_ENDIAN,
+ TARGET_BIG_ENDIAN
+ ? FP_REG_FIRST + MAX_FPRS_PER_FMT
+ : FP_REG_FIRST);
+ mips_output_32bit_xfer ('f', GP_RETURN + TARGET_LITTLE_ENDIAN,
+ TARGET_LITTLE_ENDIAN
+ ? FP_REG_FIRST + MAX_FPRS_PER_FMT
+ : FP_REG_FIRST);
+ if (GET_MODE (retval) == SCmode && TARGET_64BIT)
+ {
+ /* On 64-bit targets, complex floats are returned in
+ a single GPR, such that "sd" on a suitably-aligned
+ target would store the value correctly. */
+ fprintf (asm_out_file, "\tdsll\t%s,%s,32\n",
+ reg_names[GP_RETURN + TARGET_BIG_ENDIAN],
+ reg_names[GP_RETURN + TARGET_BIG_ENDIAN]);
+ fprintf (asm_out_file, "\tdsll\t%s,%s,32\n",
+ reg_names[GP_RETURN + TARGET_LITTLE_ENDIAN],
+ reg_names[GP_RETURN + TARGET_LITTLE_ENDIAN]);
+ fprintf (asm_out_file, "\tdsrl\t%s,%s,32\n",
+ reg_names[GP_RETURN + TARGET_BIG_ENDIAN],
+ reg_names[GP_RETURN + TARGET_BIG_ENDIAN]);
+ fprintf (asm_out_file, "\tor\t%s,%s,%s\n",
+ reg_names[GP_RETURN],
+ reg_names[GP_RETURN],
+ reg_names[GP_RETURN + 1]);
+ }
+ break;
+
+ case SFmode:
+ mips_output_32bit_xfer ('f', GP_RETURN, FP_REG_FIRST);
+ break;
+
+ case DCmode:
+ mips_output_64bit_xfer ('f', GP_RETURN + (8 / UNITS_PER_WORD),
+ FP_REG_FIRST + MAX_FPRS_PER_FMT);
+ /* Fall through. */
+ case DFmode:
+ case V2SFmode:
+ mips_output_64bit_xfer ('f', GP_RETURN, FP_REG_FIRST);
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 18]);
+ fprintf (asm_out_file, "\t.cfi_endproc\n");
+ }
+ else
+ {
+ /* Jump to the previously-loaded address. */
+ output_asm_insn ("jr\t%^", NULL);
+ }
+
+#ifdef ASM_DECLARE_FUNCTION_SIZE
+ ASM_DECLARE_FUNCTION_SIZE (asm_out_file, stubname, stubdecl);
+#endif
+
+ mips_end_function_definition (stubname);
+
+ /* Record this stub. */
+ l = XNEW (struct mips16_stub);
+ l->name = xstrdup (fnname);
+ l->fp_ret_p = fp_ret_p;
+ l->next = mips16_stubs;
+ mips16_stubs = l;
+ }
+
+ /* If we expect a floating-point return value, but we've built a
+ stub which does not expect one, then we're in trouble. We can't
+ use the existing stub, because it won't handle the floating-point
+ value. We can't build a new stub, because the linker won't know
+ which stub to use for the various calls in this object file.
+ Fortunately, this case is illegal, since it means that a function
+ was declared in two different ways in a single compilation. */
+ if (fp_ret_p && !l->fp_ret_p)
+ error ("cannot handle inconsistent calls to %qs", fnname);
+
+ if (retval == NULL_RTX)
+ insn = gen_call_internal_direct (fn, args_size);
+ else
+ insn = gen_call_value_internal_direct (retval, fn, args_size);
+ insn = mips_emit_call_insn (insn, fn, fn, false);
+
+ /* If we are calling a stub which handles a floating-point return
+ value, we need to arrange to save $18 in the prologue. We do this
+ by marking the function call as using the register. The prologue
+ will later see that it is used, and emit code to save it. */
+ if (fp_ret_p)
+ CALL_INSN_FUNCTION_USAGE (insn) =
+ gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_CLOBBER (VOIDmode,
+ gen_rtx_REG (word_mode, 18)),
+ CALL_INSN_FUNCTION_USAGE (insn));
+
+ return insn;
+}
+
+/* Expand a call of type TYPE. RESULT is where the result will go (null
+ for "call"s and "sibcall"s), ADDR is the address of the function,
+ ARGS_SIZE is the size of the arguments and AUX is the value passed
+ to us by mips_function_arg. LAZY_P is true if this call already
+ involves a lazily-bound function address (such as when calling
+ functions through a MIPS16 hard-float stub).
+
+ Return the call itself. */
+
+rtx
+mips_expand_call (enum mips_call_type type, rtx result, rtx addr,
+ rtx args_size, rtx aux, bool lazy_p)
+{
+ rtx orig_addr, pattern, insn;
+ int fp_code;
+
+ /* AUX carries the fp_code in its machine mode; a null AUX means no
+ floating-point arguments.  */
+ fp_code = aux == 0 ? 0 : (int) GET_MODE (aux);
+ insn = mips16_build_call_stub (result, &addr, args_size, fp_code);
+ if (insn)
+ {
+ /* The stub emitted a direct call; nothing more to do.  */
+ gcc_assert (!lazy_p && type == MIPS_CALL_NORMAL);
+ return insn;
+ }
+
+ /* Legitimize ADDR if it isn't already a valid call operand.  */
+ orig_addr = addr;
+ if (!call_insn_operand (addr, VOIDmode))
+ {
+ if (type == MIPS_CALL_EPILOGUE)
+ addr = MIPS_EPILOGUE_TEMP (Pmode);
+ else
+ addr = gen_reg_rtx (Pmode);
+ lazy_p |= mips_load_call_address (type, addr, orig_addr);
+ }
+
+ if (result == 0)
+ {
+ /* Plain call with no return value.  */
+ rtx (*fn) (rtx, rtx);
+
+ if (type == MIPS_CALL_SIBCALL)
+ fn = gen_sibcall_internal;
+ else
+ fn = gen_call_internal;
+
+ pattern = fn (addr, args_size);
+ }
+ else if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 2)
+ {
+ /* Handle return values created by mips_return_fpr_pair. */
+ rtx (*fn) (rtx, rtx, rtx, rtx);
+ rtx reg1, reg2;
+
+ if (type == MIPS_CALL_SIBCALL)
+ fn = gen_sibcall_value_multiple_internal;
+ else
+ fn = gen_call_value_multiple_internal;
+
+ reg1 = XEXP (XVECEXP (result, 0, 0), 0);
+ reg2 = XEXP (XVECEXP (result, 0, 1), 0);
+ pattern = fn (reg1, addr, args_size, reg2);
+ }
+ else
+ {
+ /* Single-register return value.  */
+ rtx (*fn) (rtx, rtx, rtx);
+
+ if (type == MIPS_CALL_SIBCALL)
+ fn = gen_sibcall_value_internal;
+ else
+ fn = gen_call_value_internal;
+
+ /* Handle return values created by mips_return_fpr_single. */
+ if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 1)
+ result = XEXP (XVECEXP (result, 0, 0), 0);
+ pattern = fn (result, addr, args_size);
+ }
+
+ return mips_emit_call_insn (pattern, orig_addr, addr, lazy_p);
+}
+
+/* Split call instruction INSN into a $gp-clobbering call and
+   (where necessary) an instruction to restore $gp from its save slot.
+   CALL_PATTERN is the pattern of the new call.  */
+
+void
+mips_split_call (rtx insn, rtx call_pattern)
+{
+  emit_call_insn (call_pattern);
+  /* If the call cannot return, $gp is never used again and need not
+     be restored.  */
+  if (!find_reg_note (insn, REG_NORETURN, 0))
+    /* Pick a temporary register that is suitable for both MIPS16 and
+       non-MIPS16 code.  $4 and $5 are used for returning complex double
+       values in soft-float code, so $6 is the first suitable candidate.  */
+    mips_restore_gp_from_cprestore_slot (gen_rtx_REG (Pmode, GP_ARG_FIRST + 2));
+}
+
+/* Return true if a call to DECL may need to use JALX.  */
+
+static bool
+mips_call_may_need_jalx_p (tree decl)
+{
+  /* If the current translation unit would use a different mode for DECL,
+     assume that the call needs JALX.  */
+  if (mips_get_compress_mode (decl) != TARGET_COMPRESSION)
+    return true;
+
+  /* mips_get_compress_mode is always accurate for locally-binding
+     functions in the current translation unit.  */
+  if (!DECL_EXTERNAL (decl) && targetm.binds_local_p (decl))
+    return false;
+
+  /* When -minterlink-compressed is in effect, assume that functions
+     could use a different encoding mode unless an attribute explicitly
+     tells us otherwise.  */
+  if (TARGET_INTERLINK_COMPRESSED)
+    {
+      if (!TARGET_COMPRESSION
+	  && mips_get_compress_off_flags (DECL_ATTRIBUTES (decl)) == 0)
+	return true;
+      if (TARGET_COMPRESSION
+	  && mips_get_compress_on_flags (DECL_ATTRIBUTES (decl)) == 0)
+	return true;
+    }
+
+  return false;
+}
+
+/* Implement TARGET_FUNCTION_OK_FOR_SIBCALL.  */
+
+static bool
+mips_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
+{
+  if (!TARGET_SIBCALLS)
+    return false;
+
+  /* Interrupt handlers need special epilogue code and therefore can't
+     use sibcalls.  */
+  if (mips_interrupt_type_p (TREE_TYPE (current_function_decl)))
+    return false;
+
+  /* Direct jumps are only possible to functions that use the same ISA
+     encoding.  There is no JX counterpart of JALX.  */
+  if (decl
+      && const_call_insn_operand (XEXP (DECL_RTL (decl), 0), VOIDmode)
+      && mips_call_may_need_jalx_p (decl))
+    return false;
+
+  /* Sibling calls should not prevent lazy binding.  Lazy-binding stubs
+     require $gp to be valid on entry, so sibcalls can only use stubs
+     if $gp is call-clobbered.  */
+  if (decl
+      && TARGET_CALL_SAVED_GP
+      && !TARGET_ABICALLS_PIC0
+      && !targetm.binds_local_p (decl))
+    return false;
+
+  /* Otherwise OK.  */
+  return true;
+}
+
+/* Emit code to move general operand SRC into condition-code
+   register DEST given that SCRATCH is a scratch TFmode FPR.
+   The sequence is:
+
+	FP1 = SRC
+	FP2 = 0.0f
+	DEST = FP2 < FP1
+
+   where FP1 and FP2 are single-precision FPRs taken from SCRATCH.  */
+
+void
+mips_expand_fcc_reload (rtx dest, rtx src, rtx scratch)
+{
+  rtx fp1, fp2;
+
+  /* Change the source to SFmode.  */
+  if (MEM_P (src))
+    src = adjust_address (src, SFmode, 0);
+  else if (REG_P (src) || GET_CODE (src) == SUBREG)
+    src = gen_rtx_REG (SFmode, true_regnum (src));
+
+  /* Carve two single-precision FPRs out of SCRATCH; the second is
+     MAX_FPRS_PER_FMT registers on from the first.  */
+  fp1 = gen_rtx_REG (SFmode, REGNO (scratch));
+  fp2 = gen_rtx_REG (SFmode, REGNO (scratch) + MAX_FPRS_PER_FMT);
+
+  /* Load the operands and emit the 0.0f < SRC comparison into DEST.  */
+  mips_emit_move (copy_rtx (fp1), src);
+  mips_emit_move (copy_rtx (fp2), CONST0_RTX (SFmode));
+  emit_insn (gen_slt_sf (dest, fp2, fp1));
+}
+
+/* Implement MOVE_BY_PIECES_P.  SIZE is the number of bytes to move and
+   ALIGN is the alignment of the operands in bits.  */
+
+bool
+mips_move_by_pieces_p (unsigned HOST_WIDE_INT size, unsigned int align)
+{
+  if (HAVE_movmemsi)
+    {
+      /* movmemsi is meant to generate code that is at least as good as
+	 move_by_pieces.  However, movmemsi effectively uses a by-pieces
+	 implementation both for moves smaller than a word and for
+	 word-aligned moves of no more than MIPS_MAX_MOVE_BYTES_STRAIGHT
+	 bytes.  We should allow the tree-level optimisers to do such
+	 moves by pieces, as it often exposes other optimization
+	 opportunities.  We might as well continue to use movmemsi at
+	 the rtl level though, as it produces better code when
+	 scheduling is disabled (such as at -O).  */
+      if (currently_expanding_to_rtl)
+	return false;
+      /* Sub-word alignment: only sub-word moves go by pieces.  */
+      if (align < BITS_PER_WORD)
+	return size < UNITS_PER_WORD;
+      return size <= MIPS_MAX_MOVE_BYTES_STRAIGHT;
+    }
+  /* The default value.  If this becomes a target hook, we should
+     call the default definition instead.  */
+  return (move_by_pieces_ninsns (size, align, MOVE_MAX_PIECES + 1)
+	  < (unsigned int) MOVE_RATIO (optimize_insn_for_speed_p ()));
+}
+
+/* Implement STORE_BY_PIECES_P.  SIZE is the number of bytes to store
+   and ALIGN is the alignment of the destination in bits.  */
+
+bool
+mips_store_by_pieces_p (unsigned HOST_WIDE_INT size, unsigned int align)
+{
+  /* Storing by pieces involves moving constants into registers
+     of size MIN (ALIGN, BITS_PER_WORD), then storing them.
+     We need to decide whether it is cheaper to load the address of
+     constant data into a register and use a block move instead.  */
+
+  /* If the data is only byte aligned, then:
+
+     (a1) A block move of less than 4 bytes would involve 3 LBs and
+	  3 SBs.  We might as well use 3 single-instruction LIs and 3 SBs
+	  instead.
+
+     (a2) A block move of 4 bytes from aligned source data can use an
+	  LW/SWL/SWR sequence.  This is often better than the 4 LIs and
+	  4 SBs that we would generate when storing by pieces.  */
+  if (align <= BITS_PER_UNIT)
+    return size < 4;
+
+  /* If the data is 2-byte aligned, then:
+
+     (b1) A block move of less than 4 bytes would use a combination of LBs,
+	  LHs, SBs and SHs.  We get better code by using single-instruction
+	  LIs, SBs and SHs instead.
+
+     (b2) A block move of 4 bytes from aligned source data would again use
+	  an LW/SWL/SWR sequence.  In most cases, loading the address of
+	  the source data would require at least one extra instruction.
+	  It is often more efficient to use 2 single-instruction LIs and
+	  2 SHs instead.
+
+     (b3) A block move of up to 3 additional bytes would be like (b1).
+
+     (b4) A block move of 8 bytes from aligned source data can use two
+	  LW/SWL/SWR sequences or a single LD/SDL/SDR sequence.  Both
+	  sequences are better than the 4 LIs and 4 SHs that we'd generate
+	  when storing by pieces.
+
+     The reasoning for higher alignments is similar:
+
+     (c1) A block move of less than 4 bytes would be the same as (b1).
+
+     (c2) A block move of 4 bytes would use an LW/SW sequence.  Again,
+	  loading the address of the source data would typically require
+	  at least one extra instruction.  It is generally better to use
+	  LUI/ORI/SW instead.
+
+     (c3) A block move of up to 3 additional bytes would be like (b1).
+
+     (c4) A block move of 8 bytes can use two LW/SW sequences or a single
+	  LD/SD sequence, and in these cases we've traditionally preferred
+	  the memory copy over the more bulky constant moves.  */
+  return size < 8;
+}
+
+/* Emit straight-line code to move LENGTH bytes from SRC to DEST.
+   Assume that the areas do not overlap.  */
+
+static void
+mips_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length)
+{
+  HOST_WIDE_INT offset, delta;
+  unsigned HOST_WIDE_INT bits;
+  int i;
+  enum machine_mode mode;
+  rtx *regs;
+
+  /* Work out how many bits to move at a time.  If both operands have
+     half-word alignment, it is usually better to move in half words.
+     For instance, lh/lh/sh/sh is usually better than lwl/lwr/swl/swr
+     and lw/lw/sw/sw is usually better than ldl/ldr/sdl/sdr.
+     Otherwise move word-sized chunks.  */
+  if (MEM_ALIGN (src) == BITS_PER_WORD / 2
+      && MEM_ALIGN (dest) == BITS_PER_WORD / 2)
+    bits = BITS_PER_WORD / 2;
+  else
+    bits = BITS_PER_WORD;
+
+  mode = mode_for_size (bits, MODE_INT, 0);
+  delta = bits / BITS_PER_UNIT;
+
+  /* Allocate a buffer for the temporary registers.  */
+  regs = XALLOCAVEC (rtx, length / delta);
+
+  /* Load as many BITS-sized chunks as possible.  Use a normal load if
+     the source has enough alignment, otherwise use left/right pairs.  */
+  for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
+    {
+      regs[i] = gen_reg_rtx (mode);
+      if (MEM_ALIGN (src) >= bits)
+	mips_emit_move (regs[i], adjust_address (src, mode, offset));
+      else
+	{
+	  rtx part = adjust_address (src, BLKmode, offset);
+	  set_mem_size (part, delta);
+	  if (!mips_expand_ext_as_unaligned_load (regs[i], part, bits, 0, 0))
+	    gcc_unreachable ();
+	}
+    }
+
+  /* Copy the chunks to the destination.  All loads are emitted before
+     any store so the sequence is correct even if SRC and DEST alias
+     register-wise.  */
+  for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
+    if (MEM_ALIGN (dest) >= bits)
+      mips_emit_move (adjust_address (dest, mode, offset), regs[i]);
+    else
+      {
+	rtx part = adjust_address (dest, BLKmode, offset);
+	set_mem_size (part, delta);
+	if (!mips_expand_ins_as_unaligned_store (part, regs[i], bits, 0))
+	  gcc_unreachable ();
+      }
+
+  /* Mop up any left-over bytes.  */
+  if (offset < length)
+    {
+      src = adjust_address (src, BLKmode, offset);
+      dest = adjust_address (dest, BLKmode, offset);
+      move_by_pieces (dest, src, length - offset,
+		      MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), 0);
+    }
+}
+
+/* Helper function for doing a loop-based block operation on memory
+   reference MEM.  Each iteration of the loop will operate on LENGTH
+   bytes of MEM.
+
+   Create a new base register for use within the loop and point it to
+   the start of MEM.  Create a new memory reference that uses this
+   register.  Store them in *LOOP_REG and *LOOP_MEM respectively.  */
+
+static void
+mips_adjust_block_mem (rtx mem, HOST_WIDE_INT length,
+		       rtx *loop_reg, rtx *loop_mem)
+{
+  *loop_reg = copy_addr_to_reg (XEXP (mem, 0));
+
+  /* Although the new mem does not refer to a known location,
+     it does keep up to LENGTH bytes of alignment.  */
+  *loop_mem = change_address (mem, BLKmode, *loop_reg);
+  set_mem_align (*loop_mem, MIN (MEM_ALIGN (mem), length * BITS_PER_UNIT));
+}
+
+/* Move LENGTH bytes from SRC to DEST using a loop that moves BYTES_PER_ITER
+   bytes at a time.  LENGTH must be at least BYTES_PER_ITER.  Assume that
+   the memory regions do not overlap.  */
+
+static void
+mips_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length,
+		      HOST_WIDE_INT bytes_per_iter)
+{
+  rtx label, src_reg, dest_reg, final_src, test;
+  HOST_WIDE_INT leftover;
+
+  /* The loop handles whole iterations only; the remainder is copied
+     straight-line afterwards.  */
+  leftover = length % bytes_per_iter;
+  length -= leftover;
+
+  /* Create registers and memory references for use within the loop.  */
+  mips_adjust_block_mem (src, bytes_per_iter, &src_reg, &src);
+  mips_adjust_block_mem (dest, bytes_per_iter, &dest_reg, &dest);
+
+  /* Calculate the value that SRC_REG should have after the last iteration
+     of the loop.  */
+  final_src = expand_simple_binop (Pmode, PLUS, src_reg, GEN_INT (length),
+				   0, 0, OPTAB_WIDEN);
+
+  /* Emit the start of the loop.  */
+  label = gen_label_rtx ();
+  emit_label (label);
+
+  /* Emit the loop body.  */
+  mips_block_move_straight (dest, src, bytes_per_iter);
+
+  /* Move on to the next block.  */
+  mips_emit_move (src_reg, plus_constant (Pmode, src_reg, bytes_per_iter));
+  mips_emit_move (dest_reg, plus_constant (Pmode, dest_reg, bytes_per_iter));
+
+  /* Emit the loop condition: branch back while SRC_REG has not reached
+     its final value.  */
+  test = gen_rtx_NE (VOIDmode, src_reg, final_src);
+  if (Pmode == DImode)
+    emit_jump_insn (gen_cbranchdi4 (test, src_reg, final_src, label));
+  else
+    emit_jump_insn (gen_cbranchsi4 (test, src_reg, final_src, label));
+
+  /* Mop up any left-over bytes.  */
+  if (leftover)
+    mips_block_move_straight (dest, src, leftover);
+}
+
+/* Expand a movmemsi instruction, which copies LENGTH bytes from
+   memory reference SRC to memory reference DEST.  Return true if
+   the copy was expanded inline, false to fall back on a libcall.  */
+
+bool
+mips_expand_block_move (rtx dest, rtx src, rtx length)
+{
+  HOST_WIDE_INT bytes;
+
+  /* Only constant-length copies are open-coded.  */
+  if (!CONST_INT_P (length))
+    return false;
+
+  bytes = INTVAL (length);
+  if (bytes <= MIPS_MAX_MOVE_BYTES_STRAIGHT)
+    {
+      /* Short enough for a straight-line sequence.  */
+      mips_block_move_straight (dest, src, bytes);
+      return true;
+    }
+  if (optimize)
+    {
+      /* Longer copies get a loop, but only when optimizing.  */
+      mips_block_move_loop (dest, src, bytes,
+			    MIPS_MAX_MOVE_BYTES_PER_LOOP_ITER);
+      return true;
+    }
+  return false;
+}
+
+/* Expand a loop of synci insns for the address range [BEGIN, END).  */
+
+void
+mips_expand_synci_loop (rtx begin, rtx end)
+{
+  rtx inc, label, end_label, cmp_result, mask, length;
+
+  /* Create end_label.  */
+  end_label = gen_label_rtx ();
+
+  /* Check if begin equals end; an empty range needs no synci at all.  */
+  cmp_result = gen_rtx_EQ (VOIDmode, begin, end);
+  emit_jump_insn (gen_condjump (cmp_result, end_label));
+
+  /* Load INC with the cache line size (rdhwr INC,$1).  */
+  inc = gen_reg_rtx (Pmode);
+  emit_insn (PMODE_INSN (gen_rdhwr_synci_step, (inc)));
+
+  /* Check if inc is 0; a zero step means synci is not needed.  */
+  cmp_result = gen_rtx_EQ (VOIDmode, inc, const0_rtx);
+  emit_jump_insn (gen_condjump (cmp_result, end_label));
+
+  /* Calculate mask: -INC gives a mask of the line-size alignment.  */
+  mask = mips_force_unary (Pmode, NEG, inc);
+
+  /* Mask out begin by mask, aligning it down to a cache-line boundary.  */
+  begin = mips_force_binary (Pmode, AND, begin, mask);
+
+  /* Calculate length.  */
+  length = mips_force_binary (Pmode, MINUS, end, begin);
+
+  /* Loop back to here.  */
+  label = gen_label_rtx ();
+  emit_label (label);
+
+  emit_insn (gen_synci (begin));
+
+  /* Update length.  */
+  mips_emit_binary (MINUS, length, length, inc);
+
+  /* Update begin.  */
+  mips_emit_binary (PLUS, begin, begin, inc);
+
+  /* Check if length is greater than 0.  */
+  cmp_result = gen_rtx_GT (VOIDmode, length, const0_rtx);
+  emit_jump_insn (gen_condjump (cmp_result, label));
+
+  emit_label (end_label);
+}
+
+/* Expand a QI or HI mode atomic memory operation.
+
+   GENERATOR contains a pointer to the gen_* function that generates
+   the SI mode underlying atomic operation using masks that we
+   calculate.
+
+   RESULT is the return register for the operation.  Its value is NULL
+   if unused.
+
+   MEM is the location of the atomic access.
+
+   OLDVAL is the first operand for the operation.
+
+   NEWVAL is the optional second operand for the operation.  Its value
+   is NULL if unused.  */
+
+void
+mips_expand_atomic_qihi (union mips_gen_fn_ptrs generator,
+			 rtx result, rtx mem, rtx oldval, rtx newval)
+{
+  rtx orig_addr, memsi_addr, memsi, shift, shiftsi, unshifted_mask;
+  rtx unshifted_mask_reg, mask, inverted_mask, si_op;
+  rtx res = NULL;
+  enum machine_mode mode;
+
+  mode = GET_MODE (mem);
+
+  /* Compute the address of the containing SImode value by aligning
+     the original address down to a multiple of 4.  */
+  orig_addr = force_reg (Pmode, XEXP (mem, 0));
+  memsi_addr = mips_force_binary (Pmode, AND, orig_addr,
+				  force_reg (Pmode, GEN_INT (-4)));
+
+  /* Create a memory reference for it.  */
+  memsi = gen_rtx_MEM (SImode, memsi_addr);
+  set_mem_alias_set (memsi, ALIAS_SET_MEMORY_BARRIER);
+  MEM_VOLATILE_P (memsi) = MEM_VOLATILE_P (mem);
+
+  /* Work out the byte offset of the QImode or HImode value,
+     counting from the least significant byte.  */
+  shift = mips_force_binary (Pmode, AND, orig_addr, GEN_INT (3));
+  if (TARGET_BIG_ENDIAN)
+    mips_emit_binary (XOR, shift, shift, GEN_INT (mode == QImode ? 3 : 2));
+
+  /* Multiply by eight to convert the shift value from bytes to bits.  */
+  mips_emit_binary (ASHIFT, shift, shift, GEN_INT (3));
+
+  /* Make the final shift an SImode value, so that it can be used in
+     SImode operations.  */
+  shiftsi = force_reg (SImode, gen_lowpart (SImode, shift));
+
+  /* Set MASK to an inclusive mask of the QImode or HImode value.  */
+  unshifted_mask = GEN_INT (GET_MODE_MASK (mode));
+  unshifted_mask_reg = force_reg (SImode, unshifted_mask);
+  mask = mips_force_binary (SImode, ASHIFT, unshifted_mask_reg, shiftsi);
+
+  /* Compute the equivalent exclusive mask.  */
+  inverted_mask = gen_reg_rtx (SImode);
+  emit_insn (gen_rtx_SET (VOIDmode, inverted_mask,
+			  gen_rtx_NOT (SImode, mask)));
+
+  /* Shift the old value into place.  A zero OLDVAL needs no shifting.  */
+  if (oldval != const0_rtx)
+    {
+      oldval = convert_modes (SImode, mode, oldval, true);
+      oldval = force_reg (SImode, oldval);
+      oldval = mips_force_binary (SImode, ASHIFT, oldval, shiftsi);
+    }
+
+  /* Do the same for the new value.  */
+  if (newval && newval != const0_rtx)
+    {
+      newval = convert_modes (SImode, mode, newval, true);
+      newval = force_reg (SImode, newval);
+      newval = mips_force_binary (SImode, ASHIFT, newval, shiftsi);
+    }
+
+  /* Do the SImode atomic access, picking the generator variant that
+     matches the operand count.  */
+  if (result)
+    res = gen_reg_rtx (SImode);
+  if (newval)
+    si_op = generator.fn_6 (res, memsi, mask, inverted_mask, oldval, newval);
+  else if (result)
+    si_op = generator.fn_5 (res, memsi, mask, inverted_mask, oldval);
+  else
+    si_op = generator.fn_4 (memsi, mask, inverted_mask, oldval);
+
+  emit_insn (si_op);
+
+  if (result)
+    {
+      /* Shift and convert the result.  */
+      mips_emit_binary (AND, res, res, mask);
+      mips_emit_binary (LSHIFTRT, res, res, shiftsi);
+      mips_emit_move (result, gen_lowpart (GET_MODE (result), res));
+    }
+}
+
+/* Return true if it is possible to use left/right accesses for a
+   bitfield of WIDTH bits starting BITPOS bits into BLKmode memory OP.
+   When returning true, update *LEFT and *RIGHT as follows:
+
+   *LEFT is a QImode reference to the first byte if big endian or
+   the last byte if little endian.  This address can be used in the
+   left-side instructions (LWL, SWL, LDL, SDL).
+
+   *RIGHT is a QImode reference to the opposite end of the field and
+   can be used in the patterning right-side instruction.  */
+
+static bool
+mips_get_unaligned_mem (rtx op, HOST_WIDE_INT width, HOST_WIDE_INT bitpos,
+			rtx *left, rtx *right)
+{
+  rtx first, last;
+
+  /* Check that the size is valid: 32 bits always, 64 bits on 64-bit
+     targets only.  */
+  if (width != 32 && (!TARGET_64BIT || width != 64))
+    return false;
+
+  /* We can only access byte-aligned values.  Since we are always passed
+     a reference to the first byte of the field, it is not necessary to
+     do anything with BITPOS after this check.  */
+  if (bitpos % BITS_PER_UNIT != 0)
+    return false;
+
+  /* Reject aligned bitfields: we want to use a normal load or store
+     instead of a left/right pair.  */
+  if (MEM_ALIGN (op) >= width)
+    return false;
+
+  /* Get references to both ends of the field.  */
+  first = adjust_address (op, QImode, 0);
+  last = adjust_address (op, QImode, width / BITS_PER_UNIT - 1);
+
+  /* Allocate to LEFT and RIGHT according to endianness.  LEFT should
+     correspond to the MSB and RIGHT to the LSB.  */
+  if (TARGET_BIG_ENDIAN)
+    *left = first, *right = last;
+  else
+    *left = last, *right = first;
+
+  return true;
+}
+
+/* Try to use left/right loads to expand an "extv" or "extzv" pattern.
+   DEST, SRC, WIDTH and BITPOS are the operands passed to the expander;
+   the operation is the equivalent of:
+
+   (set DEST (*_extract SRC WIDTH BITPOS))
+
+   Return true on success.  */
+
+bool
+mips_expand_ext_as_unaligned_load (rtx dest, rtx src, HOST_WIDE_INT width,
+				   HOST_WIDE_INT bitpos, bool unsigned_p)
+{
+  rtx left, right, temp;
+  rtx dest1 = NULL_RTX;
+
+  /* If TARGET_64BIT, the destination of a 32-bit "extz" or "extzv" will
+     be a DImode, create a new temp and emit a zero extend at the end.  */
+  if (GET_MODE (dest) == DImode
+      && REG_P (dest)
+      && GET_MODE_BITSIZE (SImode) == width)
+    {
+      dest1 = dest;
+      dest = gen_reg_rtx (SImode);
+    }
+
+  if (!mips_get_unaligned_mem (src, width, bitpos, &left, &right))
+    return false;
+
+  /* Emit the left-side load followed by the right-side load, which
+     merges into the partial result produced by the left-side load.  */
+  temp = gen_reg_rtx (GET_MODE (dest));
+  if (GET_MODE (dest) == DImode)
+    {
+      emit_insn (gen_mov_ldl (temp, src, left));
+      emit_insn (gen_mov_ldr (dest, copy_rtx (src), right, temp));
+    }
+  else
+    {
+      emit_insn (gen_mov_lwl (temp, src, left));
+      emit_insn (gen_mov_lwr (dest, copy_rtx (src), right, temp));
+    }
+
+  /* If we were loading 32 bits and the original register was DI then
+     sign/zero extend into the original dest.  */
+  if (dest1)
+    {
+      if (unsigned_p)
+	emit_insn (gen_zero_extendsidi2 (dest1, dest));
+      else
+	emit_insn (gen_extendsidi2 (dest1, dest));
+    }
+  return true;
+}
+
+/* Try to use left/right stores to expand an "ins" pattern.  DEST, WIDTH,
+   BITPOS and SRC are the operands passed to the expander; the operation
+   is the equivalent of:
+
+       (set (zero_extract DEST WIDTH BITPOS) SRC)
+
+   Return true on success.  */
+
+bool
+mips_expand_ins_as_unaligned_store (rtx dest, rtx src, HOST_WIDE_INT width,
+				    HOST_WIDE_INT bitpos)
+{
+  rtx left, right;
+  enum machine_mode mode;
+
+  if (!mips_get_unaligned_mem (dest, width, bitpos, &left, &right))
+    return false;
+
+  /* Narrow SRC to the width being stored, then emit the matching
+     left/right store pair.  */
+  mode = mode_for_size (width, MODE_INT, 0);
+  src = gen_lowpart (mode, src);
+  if (mode == DImode)
+    {
+      emit_insn (gen_mov_sdl (dest, src, left));
+      emit_insn (gen_mov_sdr (copy_rtx (dest), copy_rtx (src), right));
+    }
+  else
+    {
+      emit_insn (gen_mov_swl (dest, src, left));
+      emit_insn (gen_mov_swr (copy_rtx (dest), copy_rtx (src), right));
+    }
+  return true;
+}
+
+/* Return true if X is a MEM whose size is known and matches the
+   size of MODE.  */
+
+bool
+mips_mem_fits_mode_p (enum machine_mode mode, rtx x)
+{
+  if (!MEM_P (x) || !MEM_SIZE_KNOWN_P (x))
+    return false;
+  return MEM_SIZE (x) == GET_MODE_SIZE (mode);
+}
+
+/* Return true if (zero_extract OP WIDTH BITPOS) can be used as the
+   source of an "ext" instruction or the destination of an "ins"
+   instruction.  OP must be a register operand and the following
+   conditions must hold:
+
+     0 <= BITPOS < GET_MODE_BITSIZE (GET_MODE (op))
+     0 < WIDTH <= GET_MODE_BITSIZE (GET_MODE (op))
+     0 < BITPOS + WIDTH <= GET_MODE_BITSIZE (GET_MODE (op))
+
+   Also reject lengths equal to a word as they are better handled
+   by the move patterns.  */
+
+bool
+mips_use_ins_ext_p (rtx op, HOST_WIDE_INT width, HOST_WIDE_INT bitpos)
+{
+  HOST_WIDE_INT opsize;
+
+  /* The ISA must provide ext/ins and OP must be a register no wider
+     than a word.  */
+  if (!ISA_HAS_EXT_INS || !register_operand (op, VOIDmode))
+    return false;
+
+  opsize = GET_MODE_BITSIZE (GET_MODE (op));
+  if (opsize > BITS_PER_WORD)
+    return false;
+
+  /* Reject empty and full-width fields, and fields that overrun OP.  */
+  return (IN_RANGE (width, 1, opsize - 1)
+	  && bitpos >= 0
+	  && bitpos + width <= opsize);
+}
+
+/* Check if MASK and SHIFT are valid in mask-low-and-shift-left
+   operation if MAXLEN is the maximum length of consecutive bits that
+   can make up MASK.  MODE is the mode of the operation.  See
+   mask_low_and_shift_len for the actual definition.  */
+
+bool
+mask_low_and_shift_p (enum machine_mode mode, rtx mask, rtx shift, int maxlen)
+{
+  return IN_RANGE (mask_low_and_shift_len (mode, mask, shift), 1, maxlen);
+}
+
+/* Return true iff OP1 and OP2 are valid operands together for the
+   *and<MODE>3 and *and<MODE>3_mips16 patterns.  For the cases to
+   consider, see the table in the comment before the pattern.  */
+
+bool
+and_operands_ok (enum machine_mode mode, rtx op1, rtx op2)
+{
+  /* A memory first operand restricts what the second operand may be.  */
+  if (memory_operand (op1, mode))
+    return and_load_operand (op2, mode);
+  return and_reg_operand (op2, mode);
+}
+
+/* The canonical form of a mask-low-and-shift-left operation is
+   (and (ashift X SHIFT) MASK) where MASK has the lower SHIFT number of bits
+   cleared.  Thus we need to shift MASK to the right before checking if it
+   is a valid mask value.  MODE is the mode of the operation.  If true
+   return the length of the mask, otherwise return -1.  */
+
+int
+mask_low_and_shift_len (enum machine_mode mode, rtx mask, rtx shift)
+{
+  HOST_WIDE_INT shval;
+
+  /* Reduce the shift count modulo the mode's bitsize, then check that
+     the shifted-down mask is a contiguous run of low-order ones.  */
+  shval = INTVAL (shift) & (GET_MODE_BITSIZE (mode) - 1);
+  return exact_log2 ((UINTVAL (mask) >> shval) + 1);
+}
+
+/* Return true if -msplit-addresses is selected and should be honored.
+
+   -msplit-addresses is a half-way house between explicit relocations
+   and the traditional assembler macros.  It can split absolute 32-bit
+   symbolic constants into a high/lo_sum pair but uses macros for other
+   sorts of access.
+
+   Like explicit relocation support for REL targets, it relies
+   on GNU extensions in the assembler and the linker.
+
+   Although this code should work for -O0, it has traditionally
+   been treated as an optimization.  */
+
+static bool
+mips_split_addresses_p (void)
+{
+  /* The option must be on and we must be optimizing.  */
+  if (!TARGET_SPLIT_ADDRESSES || !optimize)
+    return false;
+  /* MIPS16, PIC and 64-bit-symbol configurations are excluded.  */
+  return !TARGET_MIPS16 && !flag_pic && !ABI_HAS_64BIT_SYMBOLS;
+}
+
+/* (Re-)Initialize mips_split_p, mips_lo_relocs and mips_hi_relocs.  */
+
+static void
+mips_init_relocs (void)
+{
+  /* Reset all relocation tables before repopulating them.  */
+  memset (mips_split_p, '\0', sizeof (mips_split_p));
+  memset (mips_split_hi_p, '\0', sizeof (mips_split_hi_p));
+  memset (mips_use_pcrel_pool_p, '\0', sizeof (mips_use_pcrel_pool_p));
+  memset (mips_hi_relocs, '\0', sizeof (mips_hi_relocs));
+  memset (mips_lo_relocs, '\0', sizeof (mips_lo_relocs));
+
+  if (TARGET_MIPS16_PCREL_LOADS)
+    mips_use_pcrel_pool_p[SYMBOL_ABSOLUTE] = true;
+  else
+    {
+      if (ABI_HAS_64BIT_SYMBOLS)
+	{
+	  if (TARGET_EXPLICIT_RELOCS)
+	    {
+	      /* 64-bit addresses are built from four 16-bit pieces;
+		 each SYMBOL_64_* entry covers one HIGH/LO_SUM step.  */
+	      mips_split_p[SYMBOL_64_HIGH] = true;
+	      mips_hi_relocs[SYMBOL_64_HIGH] = "%highest(";
+	      mips_lo_relocs[SYMBOL_64_HIGH] = "%higher(";
+
+	      mips_split_p[SYMBOL_64_MID] = true;
+	      mips_hi_relocs[SYMBOL_64_MID] = "%higher(";
+	      mips_lo_relocs[SYMBOL_64_MID] = "%hi(";
+
+	      mips_split_p[SYMBOL_64_LOW] = true;
+	      mips_hi_relocs[SYMBOL_64_LOW] = "%hi(";
+	      mips_lo_relocs[SYMBOL_64_LOW] = "%lo(";
+
+	      mips_split_p[SYMBOL_ABSOLUTE] = true;
+	      mips_lo_relocs[SYMBOL_ABSOLUTE] = "%lo(";
+	    }
+	}
+      else
+	{
+	  if (TARGET_EXPLICIT_RELOCS
+	      || mips_split_addresses_p ()
+	      || TARGET_MIPS16)
+	    {
+	      mips_split_p[SYMBOL_ABSOLUTE] = true;
+	      mips_hi_relocs[SYMBOL_ABSOLUTE] = "%hi(";
+	      mips_lo_relocs[SYMBOL_ABSOLUTE] = "%lo(";
+	    }
+	}
+    }
+
+  if (TARGET_MIPS16)
+    {
+      /* The high part is provided by a pseudo copy of $gp.  */
+      mips_split_p[SYMBOL_GP_RELATIVE] = true;
+      mips_lo_relocs[SYMBOL_GP_RELATIVE] = "%gprel(";
+    }
+  else if (TARGET_EXPLICIT_RELOCS)
+    /* Small data constants are kept whole until after reload,
+       then lowered by mips_rewrite_small_data.  */
+    mips_lo_relocs[SYMBOL_GP_RELATIVE] = "%gp_rel(";
+
+  if (TARGET_EXPLICIT_RELOCS)
+    {
+      mips_split_p[SYMBOL_GOT_PAGE_OFST] = true;
+      if (TARGET_NEWABI)
+	{
+	  mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got_page(";
+	  mips_lo_relocs[SYMBOL_GOT_PAGE_OFST] = "%got_ofst(";
+	}
+      else
+	{
+	  mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got(";
+	  mips_lo_relocs[SYMBOL_GOT_PAGE_OFST] = "%lo(";
+	}
+      if (TARGET_MIPS16)
+	/* Expose the use of $28 as soon as possible.  */
+	mips_split_hi_p[SYMBOL_GOT_PAGE_OFST] = true;
+
+      if (TARGET_XGOT)
+	{
+	  /* The HIGH and LO_SUM are matched by special .md patterns.  */
+	  mips_split_p[SYMBOL_GOT_DISP] = true;
+
+	  mips_split_p[SYMBOL_GOTOFF_DISP] = true;
+	  mips_hi_relocs[SYMBOL_GOTOFF_DISP] = "%got_hi(";
+	  mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got_lo(";
+
+	  mips_split_p[SYMBOL_GOTOFF_CALL] = true;
+	  mips_hi_relocs[SYMBOL_GOTOFF_CALL] = "%call_hi(";
+	  mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call_lo(";
+	}
+      else
+	{
+	  if (TARGET_NEWABI)
+	    mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got_disp(";
+	  else
+	    mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got(";
+	  mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call16(";
+	  if (TARGET_MIPS16)
+	    /* Expose the use of $28 as soon as possible.  */
+	    mips_split_p[SYMBOL_GOT_DISP] = true;
+	}
+    }
+
+  if (TARGET_NEWABI)
+    {
+      mips_split_p[SYMBOL_GOTOFF_LOADGP] = true;
+      mips_hi_relocs[SYMBOL_GOTOFF_LOADGP] = "%hi(%neg(%gp_rel(";
+      mips_lo_relocs[SYMBOL_GOTOFF_LOADGP] = "%lo(%neg(%gp_rel(";
+    }
+
+  /* Thread-local storage relocations.  */
+  mips_lo_relocs[SYMBOL_TLSGD] = "%tlsgd(";
+  mips_lo_relocs[SYMBOL_TLSLDM] = "%tlsldm(";
+
+  if (TARGET_MIPS16_PCREL_LOADS)
+    {
+      mips_use_pcrel_pool_p[SYMBOL_DTPREL] = true;
+      mips_use_pcrel_pool_p[SYMBOL_TPREL] = true;
+    }
+  else
+    {
+      mips_split_p[SYMBOL_DTPREL] = true;
+      mips_hi_relocs[SYMBOL_DTPREL] = "%dtprel_hi(";
+      mips_lo_relocs[SYMBOL_DTPREL] = "%dtprel_lo(";
+
+      mips_split_p[SYMBOL_TPREL] = true;
+      mips_hi_relocs[SYMBOL_TPREL] = "%tprel_hi(";
+      mips_lo_relocs[SYMBOL_TPREL] = "%tprel_lo(";
+    }
+
+  mips_lo_relocs[SYMBOL_GOTTPREL] = "%gottprel(";
+  mips_lo_relocs[SYMBOL_HALF] = "%half(";
+}
+
+/* Print symbolic operand OP, which is part of a HIGH or LO_SUM
+   in context CONTEXT.  RELOCS is the array of relocations to use.  */
+
+static void
+mips_print_operand_reloc (FILE *file, rtx op, enum mips_symbol_context context,
+			  const char **relocs)
+{
+  enum mips_symbol_type symbol_type;
+  const char *p;
+
+  symbol_type = mips_classify_symbolic_expression (op, context);
+  gcc_assert (relocs[symbol_type]);
+
+  fputs (relocs[symbol_type], file);
+  output_addr_const (file, mips_strip_unspec_address (op));
+  /* Emit one closing ')' for every '(' in the relocation prefix,
+     since prefixes such as "%hi(%neg(%gp_rel(" open several.  */
+  for (p = relocs[symbol_type]; *p != 0; p++)
+    if (*p == '(')
+      fputc (')', file);
+}
+
+/* Start a new block with the given asm switch enabled.  If we need
+   to print a directive, emit PREFIX before it and SUFFIX after it.
+   Nested pushes only print the directive at the outermost level.  */
+
+static void
+mips_push_asm_switch_1 (struct mips_asm_switch *asm_switch,
+			const char *prefix, const char *suffix)
+{
+  if (asm_switch->nesting_level == 0)
+    fprintf (asm_out_file, "%s.set\tno%s%s", prefix, asm_switch->name, suffix);
+  asm_switch->nesting_level++;
+}
+
+/* Likewise, but end a block.  The directive to re-enable the feature
+   is only printed when the outermost block is closed.  */
+
+static void
+mips_pop_asm_switch_1 (struct mips_asm_switch *asm_switch,
+		       const char *prefix, const char *suffix)
+{
+  /* A pop must always match an earlier push.  */
+  gcc_assert (asm_switch->nesting_level);
+  asm_switch->nesting_level--;
+  if (asm_switch->nesting_level == 0)
+    fprintf (asm_out_file, "%s.set\t%s%s", prefix, asm_switch->name, suffix);
+}
+
+/* Wrappers around mips_push_asm_switch_1 and mips_pop_asm_switch_1
+   that either print a complete line or print nothing.  */
+
+void
+mips_push_asm_switch (struct mips_asm_switch *asm_switch)
+{
+  mips_push_asm_switch_1 (asm_switch, "\t", "\n");
+}
+
+/* Likewise for ending a block; prints a complete line or nothing.  */
+void
+mips_pop_asm_switch (struct mips_asm_switch *asm_switch)
+{
+  mips_pop_asm_switch_1 (asm_switch, "\t", "\n");
+}
+
+/* Print the text for PRINT_OPERAND punctuation character CH to FILE.
+   The punctuation characters are:
+
+   '('	Start a nested ".set noreorder" block.
+   ')'	End a nested ".set noreorder" block.
+   '['	Start a nested ".set noat" block.
+   ']'	End a nested ".set noat" block.
+   '<'	Start a nested ".set nomacro" block.
+   '>'	End a nested ".set nomacro" block.
+   '*'	Behave like %(%< if generating a delayed-branch sequence.
+   '#'	Print a nop if in a ".set noreorder" block.
+   '/'	Like '#', but do nothing within a delayed-branch sequence.
+   '?'	Print "l" if mips_branch_likely is true
+   '~'	Print a nop if mips_branch_likely is true
+   '.'	Print the name of the register with a hard-wired zero (zero or $0).
+   '@'	Print the name of the assembler temporary register (at or $1).
+   '^'	Print the name of the pic call-through register (t9 or $25).
+   '+'	Print the name of the gp register (usually gp or $28).
+   '$'	Print the name of the stack pointer register (sp or $29).
+   ':'  Print "c" to use the compact version if the delay slot is a nop.
+   '!'  Print "s" to use the short version if the delay slot contains a
+	16-bit instruction.
+
+   See also mips_init_print_operand_punct.  */
+
+static void
+mips_print_operand_punctuation (FILE *file, int ch)
+{
+  switch (ch)
+    {
+    case '(':
+      mips_push_asm_switch_1 (&mips_noreorder, "", "\n\t");
+      break;
+
+    case ')':
+      mips_pop_asm_switch_1 (&mips_noreorder, "\n\t", "");
+      break;
+
+    case '[':
+      mips_push_asm_switch_1 (&mips_noat, "", "\n\t");
+      break;
+
+    case ']':
+      mips_pop_asm_switch_1 (&mips_noat, "\n\t", "");
+      break;
+
+    case '<':
+      mips_push_asm_switch_1 (&mips_nomacro, "", "\n\t");
+      break;
+
+    case '>':
+      mips_pop_asm_switch_1 (&mips_nomacro, "\n\t", "");
+      break;
+
+    case '*':
+      /* Only applies inside a delayed-branch sequence.  */
+      if (final_sequence != 0)
+	{
+	  mips_print_operand_punctuation (file, '(');
+	  mips_print_operand_punctuation (file, '<');
+	}
+      break;
+
+    case '#':
+      if (mips_noreorder.nesting_level > 0)
+	fputs ("\n\tnop", file);
+      break;
+
+    case '/':
+      /* Print an extra newline so that the delayed insn is separated
+	 from the following ones.  This looks neater and is consistent
+	 with non-nop delayed sequences.  */
+      if (mips_noreorder.nesting_level > 0 && final_sequence == 0)
+	fputs ("\n\tnop\n", file);
+      break;
+
+    case '?':
+      if (mips_branch_likely)
+	putc ('l', file);
+      break;
+
+    case '~':
+      if (mips_branch_likely)
+	fputs ("\n\tnop", file);
+      break;
+
+    case '.':
+      fputs (reg_names[GP_REG_FIRST + 0], file);
+      break;
+
+    case '@':
+      fputs (reg_names[AT_REGNUM], file);
+      break;
+
+    case '^':
+      fputs (reg_names[PIC_FUNCTION_ADDR_REGNUM], file);
+      break;
+
+    case '+':
+      fputs (reg_names[PIC_OFFSET_TABLE_REGNUM], file);
+      break;
+
+    case '$':
+      fputs (reg_names[STACK_POINTER_REGNUM], file);
+      break;
+
+    case ':':
+      /* When final_sequence is 0, the delay slot will be a nop.  We can
+	 use the compact version for microMIPS.  */
+      if (final_sequence == 0)
+	putc ('c', file);
+      break;
+
+    case '!':
+      /* If the delay slot instruction is short, then use the
+	 compact version.  */
+      if (final_sequence == 0
+	  || get_attr_length (XVECEXP (final_sequence, 0, 1)) == 2)
+	putc ('s', file);
+      break;
+
+    default:
+      gcc_unreachable ();
+      break;
+    }
+}
+
+/* Initialize mips_print_operand_punct. Mark every character handled
+ by mips_print_operand_punctuation as a valid punctuation operand;
+ the list here must stay in sync with that function's switch. */
+
+static void
+mips_init_print_operand_punct (void)
+{
+ const char *p;
+
+ for (p = "()[]<>*#/?~.@^+$:!"; *p; p++)
+ mips_print_operand_punct[(unsigned char) *p] = true;
+}
+
+/* PRINT_OPERAND prefix LETTER refers to the integer branch instruction
+ associated with condition CODE. Print the condition part of the
+ opcode to FILE. LETTER is only used for the diagnostic issued when
+ CODE is not a supported comparison. */
+
+static void
+mips_print_int_branch_condition (FILE *file, enum rtx_code code, int letter)
+{
+ switch (code)
+ {
+ case EQ:
+ case NE:
+ case GT:
+ case GE:
+ case LT:
+ case LE:
+ case GTU:
+ case GEU:
+ case LTU:
+ case LEU:
+ /* Conveniently, the MIPS names for these conditions are the same
+ as their RTL equivalents. */
+ fputs (GET_RTX_NAME (code), file);
+ break;
+
+ default:
+ output_operand_lossage ("'%%%c' is not a valid operand prefix", letter);
+ break;
+ }
+}
+
+/* Likewise floating-point branches: print the condition part of the
+ opcode for an FPU branch on condition CODE to FILE. Only EQ and NE
+ are valid, mapping to the coprocessor-1 false/true branch forms. */
+
+static void
+mips_print_float_branch_condition (FILE *file, enum rtx_code code, int letter)
+{
+ switch (code)
+ {
+ case EQ:
+ fputs ("c1f", file);
+ break;
+
+ case NE:
+ fputs ("c1t", file);
+ break;
+
+ default:
+ output_operand_lossage ("'%%%c' is not a valid operand prefix", letter);
+ break;
+ }
+}
+
+/* Implement TARGET_PRINT_OPERAND_PUNCT_VALID_P. Return true if CODE
+ was registered by mips_init_print_operand_punct. */
+
+static bool
+mips_print_operand_punct_valid_p (unsigned char code)
+{
+ return mips_print_operand_punct[code];
+}
+
+/* Implement TARGET_PRINT_OPERAND. The MIPS-specific operand codes are:
+
+ 'X' Print CONST_INT OP in hexadecimal format.
+ 'x' Print the low 16 bits of CONST_INT OP in hexadecimal format.
+ 'd' Print CONST_INT OP in decimal.
+ 'm' Print one less than CONST_INT OP in decimal.
+ 'h' Print the high-part relocation associated with OP, after stripping
+ any outermost HIGH.
+ 'R' Print the low-part relocation associated with OP.
+ 'C' Print the integer branch condition for comparison OP.
+ 'N' Print the inverse of the integer branch condition for comparison OP.
+ 'F' Print the FPU branch condition for comparison OP.
+ 'W' Print the inverse of the FPU branch condition for comparison OP.
+ 'T' Print 'f' for (eq:CC ...), 't' for (ne:CC ...),
+ 'z' for (eq:?I ...), 'n' for (ne:?I ...).
+ 't' Like 'T', but with the EQ/NE cases reversed
+ 'Y' Print mips_fp_conditions[INTVAL (OP)]
+ 'Z' Print OP and a comma for ISA_HAS_8CC, otherwise print nothing.
+ 'q' Print a DSP accumulator register.
+ 'D' Print the second part of a double-word register or memory operand.
+ 'L' Print the low-order register in a double-word register operand.
+ 'M' Print high-order register in a double-word register operand.
+ 'z' Print $0 if OP is zero, otherwise print OP normally.
+ 'b' Print the address of a memory operand, without offset. */
+
+static void
+mips_print_operand (FILE *file, rtx op, int letter)
+{
+ enum rtx_code code;
+
+ /* Punctuation codes take no operand; handle them first. */
+ if (mips_print_operand_punct_valid_p (letter))
+ {
+ mips_print_operand_punctuation (file, letter);
+ return;
+ }
+
+ gcc_assert (op);
+ code = GET_CODE (op);
+
+ switch (letter)
+ {
+ case 'X':
+ if (CONST_INT_P (op))
+ fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op));
+ else
+ output_operand_lossage ("invalid use of '%%%c'", letter);
+ break;
+
+ case 'x':
+ if (CONST_INT_P (op))
+ fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op) & 0xffff);
+ else
+ output_operand_lossage ("invalid use of '%%%c'", letter);
+ break;
+
+ case 'd':
+ if (CONST_INT_P (op))
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (op));
+ else
+ output_operand_lossage ("invalid use of '%%%c'", letter);
+ break;
+
+ case 'm':
+ if (CONST_INT_P (op))
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (op) - 1);
+ else
+ output_operand_lossage ("invalid use of '%%%c'", letter);
+ break;
+
+ case 'h':
+ if (code == HIGH)
+ op = XEXP (op, 0);
+ mips_print_operand_reloc (file, op, SYMBOL_CONTEXT_LEA, mips_hi_relocs);
+ break;
+
+ case 'R':
+ mips_print_operand_reloc (file, op, SYMBOL_CONTEXT_LEA, mips_lo_relocs);
+ break;
+
+ case 'C':
+ mips_print_int_branch_condition (file, code, letter);
+ break;
+
+ case 'N':
+ mips_print_int_branch_condition (file, reverse_condition (code), letter);
+ break;
+
+ case 'F':
+ mips_print_float_branch_condition (file, code, letter);
+ break;
+
+ case 'W':
+ mips_print_float_branch_condition (file, reverse_condition (code),
+ letter);
+ break;
+
+ case 'T':
+ case 't':
+ {
+ /* Index into "zfnt": bit 1 is the truth value of the comparison,
+ bit 0 distinguishes CC registers ('f'/'t') from GPRs ('z'/'n'). */
+ int truth = (code == NE) == (letter == 'T');
+ fputc ("zfnt"[truth * 2 + ST_REG_P (REGNO (XEXP (op, 0)))], file);
+ }
+ break;
+
+ case 'Y':
+ if (code == CONST_INT && UINTVAL (op) < ARRAY_SIZE (mips_fp_conditions))
+ fputs (mips_fp_conditions[UINTVAL (op)], file);
+ else
+ output_operand_lossage ("'%%%c' is not a valid operand prefix",
+ letter);
+ break;
+
+ case 'Z':
+ if (ISA_HAS_8CC)
+ {
+ mips_print_operand (file, op, 0);
+ fputc (',', file);
+ }
+ break;
+
+ case 'q':
+ /* The MD (hi/lo) pair doubles as DSP accumulator 0. */
+ if (code == REG && MD_REG_P (REGNO (op)))
+ fprintf (file, "$ac0");
+ else if (code == REG && DSP_ACC_REG_P (REGNO (op)))
+ fprintf (file, "$ac%c", reg_names[REGNO (op)][3]);
+ else
+ output_operand_lossage ("invalid use of '%%%c'", letter);
+ break;
+
+ default:
+ switch (code)
+ {
+ case REG:
+ {
+ unsigned int regno = REGNO (op);
+ /* 'D' always selects the second register of the pair; 'M'/'L'
+ select the half that holds the high/low-order word, which
+ depends on endianness. */
+ if ((letter == 'M' && TARGET_LITTLE_ENDIAN)
+ || (letter == 'L' && TARGET_BIG_ENDIAN)
+ || letter == 'D')
+ regno++;
+ else if (letter && letter != 'z' && letter != 'M' && letter != 'L')
+ output_operand_lossage ("invalid use of '%%%c'", letter);
+ /* We need to print $0 .. $31 for COP0 registers. */
+ if (COP0_REG_P (regno))
+ fprintf (file, "$%s", &reg_names[regno][4]);
+ else
+ fprintf (file, "%s", reg_names[regno]);
+ }
+ break;
+
+ case MEM:
+ if (letter == 'D')
+ output_address (plus_constant (Pmode, XEXP (op, 0), 4));
+ else if (letter == 'b')
+ {
+ gcc_assert (REG_P (XEXP (op, 0)));
+ mips_print_operand (file, XEXP (op, 0), 0);
+ }
+ else if (letter && letter != 'z')
+ output_operand_lossage ("invalid use of '%%%c'", letter);
+ else
+ output_address (XEXP (op, 0));
+ break;
+
+ default:
+ if (letter == 'z' && op == CONST0_RTX (GET_MODE (op)))
+ fputs (reg_names[GP_REG_FIRST], file);
+ else if (letter && letter != 'z')
+ output_operand_lossage ("invalid use of '%%%c'", letter);
+ else if (CONST_GP_P (op))
+ fputs (reg_names[GLOBAL_POINTER_REGNUM], file);
+ else
+ output_addr_const (file, mips_strip_unspec_address (op));
+ break;
+ }
+ }
+}
+
+/* Implement TARGET_PRINT_OPERAND_ADDRESS. Print memory address X to
+ FILE in the syntax expected by the assembler. X must be a legitimate
+ address, otherwise we abort. */
+
+static void
+mips_print_operand_address (FILE *file, rtx x)
+{
+ struct mips_address_info addr;
+
+ if (mips_classify_address (&addr, x, word_mode, true))
+ switch (addr.type)
+ {
+ case ADDRESS_REG:
+ mips_print_operand (file, addr.offset, 0);
+ fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
+ return;
+
+ case ADDRESS_LO_SUM:
+ mips_print_operand_reloc (file, addr.offset, SYMBOL_CONTEXT_MEM,
+ mips_lo_relocs);
+ fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
+ return;
+
+ case ADDRESS_CONST_INT:
+ /* Constant addresses are printed relative to $0. */
+ output_addr_const (file, x);
+ fprintf (file, "(%s)", reg_names[GP_REG_FIRST]);
+ return;
+
+ case ADDRESS_SYMBOLIC:
+ output_addr_const (file, mips_strip_unspec_address (x));
+ return;
+ }
+ gcc_unreachable ();
+}
+
+/* Implement TARGET_ENCODE_SECTION_INFO. In addition to the default
+ handling, record in the symbol flags whether a function must be
+ called with a long (full-address) call sequence. */
+
+static void
+mips_encode_section_info (tree decl, rtx rtl, int first)
+{
+ default_encode_section_info (decl, rtl, first);
+
+ if (TREE_CODE (decl) == FUNCTION_DECL)
+ {
+ rtx symbol = XEXP (rtl, 0);
+ tree type = TREE_TYPE (decl);
+
+ /* Encode whether the symbol is short or long. */
+ if ((TARGET_LONG_CALLS && !mips_near_type_p (type))
+ || mips_far_type_p (type))
+ SYMBOL_REF_FLAGS (symbol) |= SYMBOL_FLAG_LONG_CALL;
+ }
+}
+
+/* Implement TARGET_SELECT_RTX_SECTION. Put small-data-eligible
+ constants in .sdata so they can be accessed via the GP register;
+ fall back to the generic ELF choice otherwise. */
+
+static section *
+mips_select_rtx_section (enum machine_mode mode, rtx x,
+ unsigned HOST_WIDE_INT align)
+{
+ /* ??? Consider using mergeable small data sections. */
+ if (mips_rtx_constant_in_small_data_p (mode))
+ return get_named_section (NULL, ".sdata", 0);
+
+ return default_elf_select_rtx_section (mode, x, align);
+}
+
+/* Implement TARGET_ASM_FUNCTION_RODATA_SECTION.
+
+ The complication here is that, with the combination TARGET_ABICALLS
+ && !TARGET_ABSOLUTE_ABICALLS && !TARGET_GPWORD, jump tables will use
+ absolute addresses, and should therefore not be included in the
+ read-only part of a DSO. Handle such cases by selecting a normal
+ data section instead of a read-only one. The logic apes that in
+ default_function_rodata_section. */
+
+static section *
+mips_function_rodata_section (tree decl)
+{
+ if (!TARGET_ABICALLS || TARGET_ABSOLUTE_ABICALLS || TARGET_GPWORD)
+ return default_function_rodata_section (decl);
+
+ if (decl && DECL_SECTION_NAME (decl))
+ {
+ const char *name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
+ if (DECL_ONE_ONLY (decl) && strncmp (name, ".gnu.linkonce.t.", 16) == 0)
+ {
+ /* Rewrite ".gnu.linkonce.t." to ".gnu.linkonce.d.";
+ index 14 is the 't' in the prefix. */
+ char *rname = ASTRDUP (name);
+ rname[14] = 'd';
+ return get_section (rname, SECTION_LINKONCE | SECTION_WRITE, decl);
+ }
+ else if (flag_function_sections
+ && flag_data_sections
+ && strncmp (name, ".text.", 6) == 0)
+ {
+ /* Rewrite ".text.FOO" to ".data.FOO". */
+ char *rname = ASTRDUP (name);
+ memcpy (rname + 1, "data", 4);
+ return get_section (rname, SECTION_WRITE, decl);
+ }
+ }
+ return data_section;
+}
+
+/* Implement TARGET_IN_SMALL_DATA_P. Return true if DECL should be
+ placed in the small data area, i.e. it is small enough (and allowed
+ by the -G / -mlocal-sdata / -mextern-sdata options) to be accessed
+ via a 16-bit offset from the GP register. */
+
+static bool
+mips_in_small_data_p (const_tree decl)
+{
+ unsigned HOST_WIDE_INT size;
+
+ if (TREE_CODE (decl) == STRING_CST || TREE_CODE (decl) == FUNCTION_DECL)
+ return false;
+
+ /* We don't yet generate small-data references for -mabicalls
+ or VxWorks RTP code. See the related -G handling in
+ mips_option_override. */
+ if (TARGET_ABICALLS || TARGET_VXWORKS_RTP)
+ return false;
+
+ if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl) != 0)
+ {
+ const char *name;
+
+ /* Reject anything that isn't in a known small-data section. */
+ name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
+ if (strcmp (name, ".sdata") != 0 && strcmp (name, ".sbss") != 0)
+ return false;
+
+ /* If a symbol is defined externally, the assembler will use the
+ usual -G rules when deciding how to implement macros. */
+ if (mips_lo_relocs[SYMBOL_GP_RELATIVE] || !DECL_EXTERNAL (decl))
+ return true;
+ }
+ else if (TARGET_EMBEDDED_DATA)
+ {
+ /* Don't put constants into the small data section: we want them
+ to be in ROM rather than RAM. */
+ if (TREE_CODE (decl) != VAR_DECL)
+ return false;
+
+ if (TREE_READONLY (decl)
+ && !TREE_SIDE_EFFECTS (decl)
+ && (!DECL_INITIAL (decl) || TREE_CONSTANT (DECL_INITIAL (decl))))
+ return false;
+ }
+
+ /* Enforce -mlocal-sdata. */
+ if (!TARGET_LOCAL_SDATA && !TREE_PUBLIC (decl))
+ return false;
+
+ /* Enforce -mextern-sdata. */
+ if (!TARGET_EXTERN_SDATA && DECL_P (decl))
+ {
+ if (DECL_EXTERNAL (decl))
+ return false;
+ if (DECL_COMMON (decl) && DECL_INITIAL (decl) == NULL)
+ return false;
+ }
+
+ /* We have traditionally not treated zero-sized objects as small data,
+ so this is now effectively part of the ABI. */
+ size = int_size_in_bytes (TREE_TYPE (decl));
+ return size > 0 && size <= mips_small_data_threshold;
+}
+
+/* Implement TARGET_USE_ANCHORS_FOR_SYMBOL_P. We don't want to use
+ anchors for small data: the GP register acts as an anchor in that
+ case. We also don't want to use them for PC-relative accesses,
+ where the PC acts as an anchor. */
+
+static bool
+mips_use_anchors_for_symbol_p (const_rtx symbol)
+{
+ switch (mips_classify_symbol (symbol, SYMBOL_CONTEXT_MEM))
+ {
+ case SYMBOL_PC_RELATIVE:
+ case SYMBOL_GP_RELATIVE:
+ return false;
+
+ default:
+ return default_use_anchors_for_symbol_p (symbol);
+ }
+}
+
+/* The MIPS debug format wants all automatic variables and arguments
+ to be in terms of the virtual frame pointer (stack pointer before
+ any adjustment in the function), while the MIPS 3.0 linker wants
+ the frame pointer to be the stack pointer after the initial
+ adjustment. So, we do the adjustment here. The arg pointer (which
+ is eliminated) points to the virtual frame pointer, while the frame
+ pointer (which may be eliminated) points to the stack pointer after
+ the initial adjustments.
+
+ ADDR is a frame address of the form REG or REG + CONST_INT;
+ OFFSET, if nonzero, overrides the constant term. Return the
+ debugger-visible offset. */
+
+HOST_WIDE_INT
+mips_debugger_offset (rtx addr, HOST_WIDE_INT offset)
+{
+ rtx offset2 = const0_rtx;
+ rtx reg = eliminate_constant_term (addr, &offset2);
+
+ if (offset == 0)
+ offset = INTVAL (offset2);
+
+ if (reg == stack_pointer_rtx
+ || reg == frame_pointer_rtx
+ || reg == hard_frame_pointer_rtx)
+ {
+ offset -= cfun->machine->frame.total_size;
+ if (reg == hard_frame_pointer_rtx)
+ offset += cfun->machine->frame.hard_frame_pointer_offset;
+ }
+
+ return offset;
+}
+
+/* Implement ASM_OUTPUT_EXTERNAL. Output to FILE any directives needed
+ for external symbol NAME, declared by DECL. */
+
+void
+mips_output_external (FILE *file, tree decl, const char *name)
+{
+ default_elf_asm_output_external (file, decl, name);
+
+ /* We output the name if and only if TREE_SYMBOL_REFERENCED is
+ set in order to avoid putting out names that are never really
+ used. */
+ if (TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)))
+ {
+ if (!TARGET_EXPLICIT_RELOCS && mips_in_small_data_p (decl))
+ {
+ /* When using assembler macros, emit .extern directives for
+ all small-data externs so that the assembler knows how
+ big they are.
+
+ In most cases it would be safe (though pointless) to emit
+ .externs for other symbols too. One exception is when an
+ object is within the -G limit but declared by the user to
+ be in a section other than .sbss or .sdata. */
+ fputs ("\t.extern\t", file);
+ assemble_name (file, name);
+ fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC "\n",
+ int_size_in_bytes (TREE_TYPE (decl)));
+ }
+ }
+}
+
+/* Implement TARGET_ASM_OUTPUT_SOURCE_FILENAME. Emit a ".file" directive
+ for source file NAME on STREAM, numbering each distinct file, unless
+ the debug-info writer is responsible for doing so. */
+
+static void
+mips_output_filename (FILE *stream, const char *name)
+{
+ /* If we are emitting DWARF-2, let dwarf2out handle the ".file"
+ directives. */
+ if (write_symbols == DWARF2_DEBUG)
+ return;
+ else if (mips_output_filename_first_time)
+ {
+ mips_output_filename_first_time = 0;
+ num_source_filenames += 1;
+ current_function_file = name;
+ fprintf (stream, "\t.file\t%d ", num_source_filenames);
+ output_quoted_string (stream, name);
+ putc ('\n', stream);
+ }
+ /* If we are emitting stabs, let dbxout.c handle this (except for
+ the mips_output_filename_first_time case). */
+ else if (write_symbols == DBX_DEBUG)
+ return;
+ else if (name != current_function_file
+ && strcmp (name, current_function_file) != 0)
+ {
+ /* A new file: assign it the next file number. */
+ num_source_filenames += 1;
+ current_function_file = name;
+ fprintf (stream, "\t.file\t%d ", num_source_filenames);
+ output_quoted_string (stream, name);
+ putc ('\n', stream);
+ }
+}
+
+/* Implement TARGET_ASM_OUTPUT_DWARF_DTPREL. Output a SIZE-byte
+ DTP-relative relocation for expression X to FILE. Only 4- and
+ 8-byte sizes are supported. */
+
+static void ATTRIBUTE_UNUSED
+mips_output_dwarf_dtprel (FILE *file, int size, rtx x)
+{
+ switch (size)
+ {
+ case 4:
+ fputs ("\t.dtprelword\t", file);
+ break;
+
+ case 8:
+ fputs ("\t.dtpreldword\t", file);
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ output_addr_const (file, x);
+ /* The assembler directives are biased by 0x8000; compensate here. */
+ fputs ("+0x8000", file);
+}
+
+/* Implement TARGET_DWARF_REGISTER_SPAN. Return a PARALLEL describing
+ the pieces of REG when it occupies a pair of FPRs, or NULL_RTX when
+ the default mapping is correct. */
+
+static rtx
+mips_dwarf_register_span (rtx reg)
+{
+ rtx high, low;
+ enum machine_mode mode;
+
+ /* By default, GCC maps increasing register numbers to increasing
+ memory locations, but paired FPRs are always little-endian,
+ regardless of the prevailing endianness. */
+ mode = GET_MODE (reg);
+ if (FP_REG_P (REGNO (reg))
+ && TARGET_BIG_ENDIAN
+ && MAX_FPRS_PER_FMT > 1
+ && GET_MODE_SIZE (mode) > UNITS_PER_FPREG)
+ {
+ gcc_assert (GET_MODE_SIZE (mode) == UNITS_PER_HWFPVALUE);
+ high = mips_subword (reg, true);
+ low = mips_subword (reg, false);
+ return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, high, low));
+ }
+
+ return NULL_RTX;
+}
+
+/* DSP ALU can bypass data with no delays for the following pairs.
+ Each row is a (producer, consumer) insn-code pair; see
+ mips_dspalu_bypass_p. */
+enum insn_code dspalu_bypass_table[][2] =
+{
+ {CODE_FOR_mips_addsc, CODE_FOR_mips_addwc},
+ {CODE_FOR_mips_cmpu_eq_qb, CODE_FOR_mips_pick_qb},
+ {CODE_FOR_mips_cmpu_lt_qb, CODE_FOR_mips_pick_qb},
+ {CODE_FOR_mips_cmpu_le_qb, CODE_FOR_mips_pick_qb},
+ {CODE_FOR_mips_cmp_eq_ph, CODE_FOR_mips_pick_ph},
+ {CODE_FOR_mips_cmp_lt_ph, CODE_FOR_mips_pick_ph},
+ {CODE_FOR_mips_cmp_le_ph, CODE_FOR_mips_pick_ph},
+ {CODE_FOR_mips_wrdsp, CODE_FOR_mips_insv}
+};
+
+/* Return true if the (OUT_INSN, IN_INSN) pair can use the DSP ALU
+ bypass, i.e. it appears in dspalu_bypass_table. */
+int
+mips_dspalu_bypass_p (rtx out_insn, rtx in_insn)
+{
+ int i;
+ int num_bypass = ARRAY_SIZE (dspalu_bypass_table);
+ enum insn_code out_icode = (enum insn_code) INSN_CODE (out_insn);
+ enum insn_code in_icode = (enum insn_code) INSN_CODE (in_insn);
+
+ for (i = 0; i < num_bypass; i++)
+ {
+ if (out_icode == dspalu_bypass_table[i][0]
+ && in_icode == dspalu_bypass_table[i][1])
+ return true;
+ }
+
+ return false;
+}
+/* Implement ASM_OUTPUT_ASCII. Emit the LEN bytes of STRING to STREAM
+ as one or more ".ascii" directives, escaping non-printable characters
+ and wrapping long lines for readability. */
+
+void
+mips_output_ascii (FILE *stream, const char *string, size_t len)
+{
+ size_t i;
+ int cur_pos;
+
+ /* 17 is the column at which the string data starts after
+ "\t.ascii\t\"". */
+ cur_pos = 17;
+ fprintf (stream, "\t.ascii\t\"");
+ for (i = 0; i < len; i++)
+ {
+ int c;
+
+ c = (unsigned char) string[i];
+ if (ISPRINT (c))
+ {
+ if (c == '\\' || c == '\"')
+ {
+ putc ('\\', stream);
+ cur_pos++;
+ }
+ putc (c, stream);
+ cur_pos++;
+ }
+ else
+ {
+ /* Non-printable bytes are emitted as 3-digit octal escapes. */
+ fprintf (stream, "\\%03o", c);
+ cur_pos += 4;
+ }
+
+ if (cur_pos > 72 && i+1 < len)
+ {
+ cur_pos = 17;
+ fprintf (stream, "\"\n\t.ascii\t\"");
+ }
+ }
+ fprintf (stream, "\"\n");
+}
+
+/* Return the pseudo-op for full SYMBOL_(D)TPREL address *ADDR.
+ Update *ADDR with the operand that should be printed. The word
+ vs. doubleword form is chosen by the size of Pmode. */
+
+const char *
+mips_output_tls_reloc_directive (rtx *addr)
+{
+ enum mips_symbol_type type;
+
+ type = mips_classify_symbolic_expression (*addr, SYMBOL_CONTEXT_LEA);
+ *addr = mips_strip_unspec_address (*addr);
+ switch (type)
+ {
+ case SYMBOL_DTPREL:
+ return Pmode == SImode ? ".dtprelword\t%0" : ".dtpreldword\t%0";
+
+ case SYMBOL_TPREL:
+ return Pmode == SImode ? ".tprelword\t%0" : ".tpreldword\t%0";
+
+ default:
+ gcc_unreachable ();
+ }
+}
+
+/* Emit either a label, .comm, or .lcomm directive. When using assembler
+ macros, mark the symbol as written so that mips_asm_output_external
+ won't emit an .extern for it. STREAM is the output file, NAME is the
+ name of the symbol, INIT_STRING is the string that should be written
+ before the symbol and FINAL_STRING is the string that should be
+ written after it. FINAL_STRING is a printf format that consumes the
+ remaining arguments. */
+
+void
+mips_declare_object (FILE *stream, const char *name, const char *init_string,
+ const char *final_string, ...)
+{
+ va_list ap;
+
+ fputs (init_string, stream);
+ assemble_name (stream, name);
+ va_start (ap, final_string);
+ vfprintf (stream, final_string, ap);
+ va_end (ap);
+
+ if (!TARGET_EXPLICIT_RELOCS)
+ {
+ /* Setting TREE_ASM_WRITTEN suppresses a later .extern;
+ see mips_output_external. */
+ tree name_tree = get_identifier (name);
+ TREE_ASM_WRITTEN (name_tree) = 1;
+ }
+}
+
+/* Declare a common object of SIZE bytes using asm directive INIT_STRING.
+ NAME is the name of the object and ALIGN is the required alignment
+ in bytes. TAKES_ALIGNMENT_P is true if the directive takes a third
+ alignment argument. */
+
+void
+mips_declare_common_object (FILE *stream, const char *name,
+ const char *init_string,
+ unsigned HOST_WIDE_INT size,
+ unsigned int align, bool takes_alignment_p)
+{
+ if (!takes_alignment_p)
+ {
+ /* The directive cannot express the alignment, so round SIZE up
+ to a multiple of the alignment instead. */
+ size += (align / BITS_PER_UNIT) - 1;
+ size -= size % (align / BITS_PER_UNIT);
+ mips_declare_object (stream, name, init_string,
+ "," HOST_WIDE_INT_PRINT_UNSIGNED "\n", size);
+ }
+ else
+ mips_declare_object (stream, name, init_string,
+ "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n",
+ size, align / BITS_PER_UNIT);
+}
+
+/* Implement ASM_OUTPUT_ALIGNED_DECL_COMMON. This is usually the same as the
+ elfos.h version, but we also need to handle -muninit-const-in-rodata. */
+
+void
+mips_output_aligned_decl_common (FILE *stream, tree decl, const char *name,
+ unsigned HOST_WIDE_INT size,
+ unsigned int align)
+{
+ /* If the target wants uninitialized const declarations in
+ .rdata then don't put them in .comm. */
+ if (TARGET_EMBEDDED_DATA
+ && TARGET_UNINIT_CONST_IN_RODATA
+ && TREE_CODE (decl) == VAR_DECL
+ && TREE_READONLY (decl)
+ && (DECL_INITIAL (decl) == 0 || DECL_INITIAL (decl) == error_mark_node))
+ {
+ if (TREE_PUBLIC (decl) && DECL_NAME (decl))
+ targetm.asm_out.globalize_label (stream, name);
+
+ /* Reserve the space with ".space" in .rodata instead of
+ emitting a common symbol. */
+ switch_to_section (readonly_data_section);
+ ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
+ mips_declare_object (stream, name, "",
+ ":\n\t.space\t" HOST_WIDE_INT_PRINT_UNSIGNED "\n",
+ size);
+ }
+ else
+ mips_declare_common_object (stream, name, "\n\t.comm\t",
+ size, align, true);
+}
+
+#ifdef ASM_OUTPUT_SIZE_DIRECTIVE
+extern int size_directive_output;
+
+/* Implement ASM_DECLARE_OBJECT_NAME. This is like most of the standard ELF
+ definitions except that it uses mips_declare_object to emit the label. */
+
+void
+mips_declare_object_name (FILE *stream, const char *name,
+ tree decl ATTRIBUTE_UNUSED)
+{
+#ifdef ASM_OUTPUT_TYPE_DIRECTIVE
+ ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
+#endif
+
+ /* Note whether we emitted .size here, so that
+ mips_finish_declare_object does not emit it twice. */
+ size_directive_output = 0;
+ if (!flag_inhibit_size_directive && DECL_SIZE (decl))
+ {
+ HOST_WIDE_INT size;
+
+ size_directive_output = 1;
+ size = int_size_in_bytes (TREE_TYPE (decl));
+ ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
+ }
+
+ mips_declare_object (stream, name, "", ":\n");
+}
+
+/* Implement ASM_FINISH_DECLARE_OBJECT. This is generic ELF stuff:
+ emit a late .size directive for tentative top-level definitions
+ whose size was not known when the name was declared. */
+
+void
+mips_finish_declare_object (FILE *stream, tree decl, int top_level, int at_end)
+{
+ const char *name;
+
+ name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
+ if (!flag_inhibit_size_directive
+ && DECL_SIZE (decl) != 0
+ && !at_end
+ && top_level
+ && DECL_INITIAL (decl) == error_mark_node
+ && !size_directive_output)
+ {
+ HOST_WIDE_INT size;
+
+ size_directive_output = 1;
+ size = int_size_in_bytes (TREE_TYPE (decl));
+ ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
+ }
+}
+#endif
+
+/* Return the FOO in the name of the ".mdebug.FOO" section associated
+ with the current ABI. Used by mips_file_start. */
+
+static const char *
+mips_mdebug_abi_name (void)
+{
+ switch (mips_abi)
+ {
+ case ABI_32:
+ return "abi32";
+ case ABI_O64:
+ return "abiO64";
+ case ABI_N32:
+ return "abiN32";
+ case ABI_64:
+ return "abi64";
+ case ABI_EABI:
+ return TARGET_64BIT ? "eabi64" : "eabi32";
+ default:
+ gcc_unreachable ();
+ }
+}
+
+/* Implement TARGET_ASM_FILE_START. Emit the per-file directives that
+ describe the ABI, NaN encoding, FP attributes and PIC mode of the
+ resulting object. */
+
+static void
+mips_file_start (void)
+{
+ default_file_start ();
+
+ /* Generate a special section to describe the ABI switches used to
+ produce the resultant binary. */
+
+ /* Record the ABI itself. Modern versions of binutils encode
+ this information in the ELF header flags, but GDB needs the
+ information in order to correctly debug binaries produced by
+ older binutils. See the function mips_gdbarch_init in
+ gdb/mips-tdep.c. */
+ fprintf (asm_out_file, "\t.section .mdebug.%s\n\t.previous\n",
+ mips_mdebug_abi_name ());
+
+ /* There is no ELF header flag to distinguish long32 forms of the
+ EABI from long64 forms. Emit a special section to help tools
+ such as GDB. Do the same for o64, which is sometimes used with
+ -mlong64. */
+ if (mips_abi == ABI_EABI || mips_abi == ABI_O64)
+ fprintf (asm_out_file, "\t.section .gcc_compiled_long%d\n"
+ "\t.previous\n", TARGET_LONG64 ? 64 : 32);
+
+ /* Record the NaN encoding. */
+ if (HAVE_AS_NAN || mips_nan != MIPS_IEEE_754_DEFAULT)
+ fprintf (asm_out_file, "\t.nan\t%s\n",
+ mips_nan == MIPS_IEEE_754_2008 ? "2008" : "legacy");
+
+#ifdef HAVE_AS_GNU_ATTRIBUTE
+ {
+ int attr;
+
+ /* No floating-point operations, -mno-float. */
+ if (TARGET_NO_FLOAT)
+ attr = 0;
+ /* Soft-float code, -msoft-float. */
+ else if (!TARGET_HARD_FLOAT_ABI)
+ attr = 3;
+ /* Single-float code, -msingle-float. */
+ else if (!TARGET_DOUBLE_FLOAT)
+ attr = 2;
+ /* 64-bit FP registers on a 32-bit target, -mips32r2 -mfp64. */
+ else if (!TARGET_64BIT && TARGET_FLOAT64)
+ attr = 4;
+ /* Regular FP code, FP regs same size as GP regs, -mdouble-float. */
+ else
+ attr = 1;
+
+ /* Tag 4 is the object's floating-point ABI. */
+ fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n", attr);
+ }
+#endif
+
+ /* If TARGET_ABICALLS, tell GAS to generate -KPIC code. */
+ if (TARGET_ABICALLS)
+ {
+ fprintf (asm_out_file, "\t.abicalls\n");
+ if (TARGET_ABICALLS_PIC0)
+ fprintf (asm_out_file, "\t.option\tpic0\n");
+ }
+
+ if (flag_verbose_asm)
+ fprintf (asm_out_file, "\n%s -G value = %d, Arch = %s, ISA = %d\n",
+ ASM_COMMENT_START,
+ mips_small_data_threshold, mips_arch_info->name, mips_isa);
+}
+
+/* Implement TARGET_ASM_CODE_END. Emit any MIPS16 helper stubs that
+ were requested during code generation. */
+
+static void
+mips_code_end (void)
+{
+ mips_finish_stub (&mips16_rdhwr_stub);
+ mips_finish_stub (&mips16_get_fcsr_stub);
+ mips_finish_stub (&mips16_set_fcsr_stub);
+}
+
+/* Make the last instruction frame-related and note that it performs
+ the operation described by FRAME_PATTERN. The pattern is attached
+ as a REG_FRAME_RELATED_EXPR note for the CFI machinery. */
+
+static void
+mips_set_frame_expr (rtx frame_pattern)
+{
+ rtx insn;
+
+ insn = get_last_insn ();
+ RTX_FRAME_RELATED_P (insn) = 1;
+ REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR,
+ frame_pattern,
+ REG_NOTES (insn));
+}
+
+/* Return a frame-related rtx that stores REG at MEM.
+ REG must be a single register. The SET itself is marked
+ RTX_FRAME_RELATED_P so the CFI machinery records the save. */
+
+static rtx
+mips_frame_set (rtx mem, rtx reg)
+{
+ rtx set;
+
+ set = gen_rtx_SET (VOIDmode, mem, reg);
+ RTX_FRAME_RELATED_P (set) = 1;
+
+ return set;
+}
+
+/* Record that the epilogue has restored call-saved register REG.
+ The pending notes are collected in mips_epilogue.cfa_restores. */
+
+static void
+mips_add_cfa_restore (rtx reg)
+{
+ mips_epilogue.cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
+ mips_epilogue.cfa_restores);
+}
+
+/* If a MIPS16e SAVE or RESTORE instruction saves or restores register
+ mips16e_s2_s8_regs[X], it must also save the registers in indexes
+ X + 1 onwards. Likewise mips16e_a0_a3_regs. */
+static const unsigned char mips16e_s2_s8_regs[] = {
+ 30, 23, 22, 21, 20, 19, 18
+};
+static const unsigned char mips16e_a0_a3_regs[] = {
+ 4, 5, 6, 7
+};
+
+/* A list of the registers that can be saved by the MIPS16e SAVE instruction,
+ ordered from the uppermost in memory to the lowest in memory. */
+static const unsigned char mips16e_save_restore_regs[] = {
+ 31, 30, 23, 22, 21, 20, 19, 18, 17, 16, 7, 6, 5, 4
+};
+
+/* Return the index of the lowest X in the range [0, SIZE) for which
+ bit REGS[X] is set in MASK. Return SIZE if there is no such X. */
+
+static unsigned int
+mips16e_find_first_register (unsigned int mask, const unsigned char *regs,
+ unsigned int size)
+{
+ unsigned int i;
+
+ for (i = 0; i < size; i++)
+ if (BITSET_P (mask, regs[i]))
+ break;
+
+ return i;
+}
+
+/* *MASK_PTR is a mask of general-purpose registers and *NUM_REGS_PTR
+ is the number of set bits. If *MASK_PTR contains REGS[X] for some X
+ in [0, SIZE), adjust *MASK_PTR and *NUM_REGS_PTR so that the same
+ is true for all indexes (X, SIZE). This enforces the MIPS16e
+ SAVE/RESTORE constraint described above mips16e_s2_s8_regs. */
+
+static void
+mips16e_mask_registers (unsigned int *mask_ptr, const unsigned char *regs,
+ unsigned int size, unsigned int *num_regs_ptr)
+{
+ unsigned int i;
+
+ /* Everything after the first present register must also be saved. */
+ i = mips16e_find_first_register (*mask_ptr, regs, size);
+ for (i++; i < size; i++)
+ if (!BITSET_P (*mask_ptr, regs[i]))
+ {
+ *num_regs_ptr += 1;
+ *mask_ptr |= 1 << regs[i];
+ }
+}
+
+/* Return a simplified form of X using the register values in REG_VALUES.
+ REG_VALUES[R] is the last value assigned to hard register R, or null
+ if R has not been modified.
+
+ This function is rather limited, but is good enough for our purposes.
+ It recurses through unary and binary operations and substitutes
+ stable known register values. */
+
+static rtx
+mips16e_collect_propagate_value (rtx x, rtx *reg_values)
+{
+ x = avoid_constant_pool_reference (x);
+
+ if (UNARY_P (x))
+ {
+ rtx x0 = mips16e_collect_propagate_value (XEXP (x, 0), reg_values);
+ return simplify_gen_unary (GET_CODE (x), GET_MODE (x),
+ x0, GET_MODE (XEXP (x, 0)));
+ }
+
+ if (ARITHMETIC_P (x))
+ {
+ rtx x0 = mips16e_collect_propagate_value (XEXP (x, 0), reg_values);
+ rtx x1 = mips16e_collect_propagate_value (XEXP (x, 1), reg_values);
+ return simplify_gen_binary (GET_CODE (x), GET_MODE (x), x0, x1);
+ }
+
+ if (REG_P (x)
+ && reg_values[REGNO (x)]
+ && !rtx_unstable_p (reg_values[REGNO (x)]))
+ return reg_values[REGNO (x)];
+
+ return x;
+}
+
+/* Return true if (set DEST SRC) stores an argument register into its
+ caller-allocated save slot, storing the number of that argument
+ register in *REGNO_PTR if so. REG_VALUES is as for
+ mips16e_collect_propagate_value. */
+
+static bool
+mips16e_collect_argument_save_p (rtx dest, rtx src, rtx *reg_values,
+ unsigned int *regno_ptr)
+{
+ unsigned int argno, regno;
+ HOST_WIDE_INT offset, required_offset;
+ rtx addr, base;
+
+ /* Check that this is a word-mode store. */
+ if (!MEM_P (dest) || !REG_P (src) || GET_MODE (dest) != word_mode)
+ return false;
+
+ /* Check that the register being saved is an unmodified argument
+ register. */
+ regno = REGNO (src);
+ if (!IN_RANGE (regno, GP_ARG_FIRST, GP_ARG_LAST) || reg_values[regno])
+ return false;
+ argno = regno - GP_ARG_FIRST;
+
+ /* Check whether the address is an appropriate stack-pointer or
+ frame-pointer access. The slot for argument N lives N words
+ above the allocated frame. */
+ addr = mips16e_collect_propagate_value (XEXP (dest, 0), reg_values);
+ mips_split_plus (addr, &base, &offset);
+ required_offset = cfun->machine->frame.total_size + argno * UNITS_PER_WORD;
+ if (base == hard_frame_pointer_rtx)
+ required_offset -= cfun->machine->frame.hard_frame_pointer_offset;
+ else if (base != stack_pointer_rtx)
+ return false;
+ if (offset != required_offset)
+ return false;
+
+ *regno_ptr = regno;
+ return true;
+}
+
+/* A subroutine of mips_expand_prologue, called only when generating
+ MIPS16e SAVE instructions. Search the start of the function for any
+ instructions that save argument registers into their caller-allocated
+ save slots. Delete such instructions and return a value N such that
+ saving [GP_ARG_FIRST, GP_ARG_FIRST + N) would make all the deleted
+ instructions redundant. */
+
+static unsigned int
+mips16e_collect_argument_saves (void)
+{
+ rtx reg_values[FIRST_PSEUDO_REGISTER];
+ rtx insn, next, set, dest, src;
+ unsigned int nargs, regno;
+
+ push_topmost_sequence ();
+ nargs = 0;
+ memset (reg_values, 0, sizeof (reg_values));
+ for (insn = get_insns (); insn; insn = next)
+ {
+ next = NEXT_INSN (insn);
+ if (NOTE_P (insn) || DEBUG_INSN_P (insn))
+ continue;
+
+ /* Stop scanning at the first real insn that is not a simple SET;
+ beyond that we can no longer track register values reliably. */
+ if (!INSN_P (insn))
+ break;
+
+ set = PATTERN (insn);
+ if (GET_CODE (set) != SET)
+ break;
+
+ dest = SET_DEST (set);
+ src = SET_SRC (set);
+ if (mips16e_collect_argument_save_p (dest, src, reg_values, &regno))
+ {
+ /* Only delete the save if the register is not also saved as
+ part of the frame's register mask. */
+ if (!BITSET_P (cfun->machine->frame.mask, regno))
+ {
+ delete_insn (insn);
+ nargs = MAX (nargs, (regno - GP_ARG_FIRST) + 1);
+ }
+ }
+ else if (REG_P (dest) && GET_MODE (dest) == word_mode)
+ reg_values[REGNO (dest)]
+ = mips16e_collect_propagate_value (src, reg_values);
+ else
+ break;
+ }
+ pop_topmost_sequence ();
+
+ return nargs;
+}
+
+/* Return a move between register REGNO and memory location SP + OFFSET.
+ REG_PARM_P is true if SP + OFFSET belongs to REG_PARM_STACK_SPACE.
+ Make the move a load if RESTORE_P, otherwise make it a store.
+ Stores are marked frame-related unless they target the caller's
+ argument area; restores queue a REG_CFA_RESTORE note instead. */
+
+static rtx
+mips16e_save_restore_reg (bool restore_p, bool reg_parm_p,
+ HOST_WIDE_INT offset, unsigned int regno)
+{
+ rtx reg, mem;
+
+ mem = gen_frame_mem (SImode, plus_constant (Pmode, stack_pointer_rtx,
+ offset));
+ reg = gen_rtx_REG (SImode, regno);
+ if (restore_p)
+ {
+ mips_add_cfa_restore (reg);
+ return gen_rtx_SET (VOIDmode, reg, mem);
+ }
+ if (reg_parm_p)
+ return gen_rtx_SET (VOIDmode, mem, reg);
+ return mips_frame_set (mem, reg);
+}
+
+/* Return RTL for a MIPS16e SAVE or RESTORE instruction; RESTORE_P says which.
+ The instruction must:
+
+ - Allocate or deallocate SIZE bytes in total; SIZE is known
+ to be nonzero.
+
+ - Save or restore as many registers in *MASK_PTR as possible.
+ The instruction saves the first registers at the top of the
+ allocated area, with the other registers below it.
+
+ - Save NARGS argument registers above the allocated area.
+
+ (NARGS is always zero if RESTORE_P.)
+
+ The SAVE and RESTORE instructions cannot save and restore all general
+ registers, so there may be some registers left over for the caller to
+ handle. Destructively modify *MASK_PTR so that it contains the registers
+ that still need to be saved or restored. The caller can save these
+ registers in the memory immediately below *OFFSET_PTR, which is a
+ byte offset from the bottom of the allocated stack area. */
+
+static rtx
+mips16e_build_save_restore (bool restore_p, unsigned int *mask_ptr,
+                            HOST_WIDE_INT *offset_ptr, unsigned int nargs,
+                            HOST_WIDE_INT size)
+{
+  rtx pattern, set;
+  HOST_WIDE_INT offset, top_offset;
+  unsigned int i, regno;
+  int n;
+
+  /* MIPS16e SAVE and RESTORE handle GPRs only; the caller must have
+     arranged for FPR saves to be done some other way.  */
+  gcc_assert (cfun->machine->frame.num_fp == 0);
+
+  /* Calculate the number of elements in the PARALLEL.  We need one element
+     for the stack adjustment, one for each argument register save, and one
+     for each additional register move.  */
+  n = 1 + nargs;
+  for (i = 0; i < ARRAY_SIZE (mips16e_save_restore_regs); i++)
+    if (BITSET_P (*mask_ptr, mips16e_save_restore_regs[i]))
+      n++;
+
+  /* Create the final PARALLEL.  */
+  pattern = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (n));
+  n = 0;
+
+  /* Add the stack pointer adjustment.  */
+  set = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
+                     plus_constant (Pmode, stack_pointer_rtx,
+                                    restore_p ? size : -size));
+  RTX_FRAME_RELATED_P (set) = 1;
+  XVECEXP (pattern, 0, n++) = set;
+
+  /* Stack offsets in the PARALLEL are relative to the old stack pointer.  */
+  top_offset = restore_p ? size : 0;
+
+  /* Save the arguments, which live above the allocated area in the
+     caller's register-argument save slots.  */
+  for (i = 0; i < nargs; i++)
+    {
+      offset = top_offset + i * UNITS_PER_WORD;
+      set = mips16e_save_restore_reg (restore_p, true, offset,
+                                      GP_ARG_FIRST + i);
+      XVECEXP (pattern, 0, n++) = set;
+    }
+
+  /* Then fill in the other register moves, working downwards from the
+     top of the allocated area.  */
+  offset = top_offset;
+  for (i = 0; i < ARRAY_SIZE (mips16e_save_restore_regs); i++)
+    {
+      regno = mips16e_save_restore_regs[i];
+      if (BITSET_P (*mask_ptr, regno))
+        {
+          offset -= UNITS_PER_WORD;
+          set = mips16e_save_restore_reg (restore_p, false, offset, regno);
+          XVECEXP (pattern, 0, n++) = set;
+          /* REGNO is now handled by the SAVE/RESTORE instruction itself,
+             so remove it from the caller's mask.  */
+          *mask_ptr &= ~(1 << regno);
+        }
+    }
+
+  /* Tell the caller what offset it should use for the remaining registers.  */
+  *offset_ptr = size + (offset - top_offset);
+
+  gcc_assert (n == XVECLEN (pattern, 0));
+
+  return pattern;
+}
+
+/* PATTERN is a PARALLEL whose first element adds ADJUST to the stack
+ pointer. Return true if PATTERN matches the kind of instruction
+ generated by mips16e_build_save_restore. If INFO is nonnull,
+ initialize it when returning true. */
+
+bool
+mips16e_save_restore_pattern_p (rtx pattern, HOST_WIDE_INT adjust,
+                                struct mips16e_save_restore_info *info)
+{
+  unsigned int i, nargs, mask, extra;
+  HOST_WIDE_INT top_offset, save_offset, offset;
+  rtx set, reg, mem, base;
+  int n;
+
+  if (!GENERATE_MIPS16E_SAVE_RESTORE)
+    return false;
+
+  /* Stack offsets in the PARALLEL are relative to the old stack pointer.
+     A positive ADJUST means a RESTORE (deallocation), in which case the
+     old stack pointer is ADJUST bytes below the top of the area.  */
+  top_offset = adjust > 0 ? adjust : 0;
+
+  /* Interpret all other members of the PARALLEL.  SAVE_OFFSET tracks the
+     slot that the next non-argument register must occupy; I tracks our
+     position in mips16e_save_restore_regs, enforcing its ordering.  */
+  save_offset = top_offset - UNITS_PER_WORD;
+  mask = 0;
+  nargs = 0;
+  i = 0;
+  for (n = 1; n < XVECLEN (pattern, 0); n++)
+    {
+      /* Check that we have a SET.  */
+      set = XVECEXP (pattern, 0, n);
+      if (GET_CODE (set) != SET)
+        return false;
+
+      /* Check that the SET is a load (if restoring) or a store
+         (if saving).  */
+      mem = adjust > 0 ? SET_SRC (set) : SET_DEST (set);
+      if (!MEM_P (mem))
+        return false;
+
+      /* Check that the address is the sum of the stack pointer and a
+         possibly-zero constant offset.  */
+      mips_split_plus (XEXP (mem, 0), &base, &offset);
+      if (base != stack_pointer_rtx)
+        return false;
+
+      /* Check that SET's other operand is a register.  */
+      reg = adjust > 0 ? SET_DEST (set) : SET_SRC (set);
+      if (!REG_P (reg))
+        return false;
+
+      /* Check for argument saves.  These sit above TOP_OFFSET and must
+         use consecutive argument registers starting at GP_ARG_FIRST.  */
+      if (offset == top_offset + nargs * UNITS_PER_WORD
+          && REGNO (reg) == GP_ARG_FIRST + nargs)
+        nargs++;
+      else if (offset == save_offset)
+        {
+          /* A register save; it must appear in the same order as
+             mips16e_save_restore_regs lists the registers.  */
+          while (mips16e_save_restore_regs[i++] != REGNO (reg))
+            if (i == ARRAY_SIZE (mips16e_save_restore_regs))
+              return false;
+
+          mask |= 1 << REGNO (reg);
+          save_offset -= UNITS_PER_WORD;
+        }
+      else
+        return false;
+    }
+
+  /* Check that the restrictions on register ranges are met: saving one
+     register in the $s2-$s8 or $a0-$a3 range requires saving all later
+     registers in that range too.  */
+  extra = 0;
+  mips16e_mask_registers (&mask, mips16e_s2_s8_regs,
+                          ARRAY_SIZE (mips16e_s2_s8_regs), &extra);
+  mips16e_mask_registers (&mask, mips16e_a0_a3_regs,
+                          ARRAY_SIZE (mips16e_a0_a3_regs), &extra);
+  if (extra != 0)
+    return false;
+
+  /* Make sure that the topmost argument register is not saved twice.
+     The checks above ensure that the same is then true for the other
+     argument registers.  */
+  if (nargs > 0 && BITSET_P (mask, GP_ARG_FIRST + nargs - 1))
+    return false;
+
+  /* Pass back information, if requested.  */
+  if (info)
+    {
+      info->nargs = nargs;
+      info->mask = mask;
+      info->size = (adjust > 0 ? adjust : -adjust);
+    }
+
+  return true;
+}
+
+/* Add a MIPS16e SAVE or RESTORE register-range argument to string S
+ for the register range [MIN_REG, MAX_REG]. Return a pointer to
+ the null terminator. */
+
+/* Append a MIPS16e SAVE/RESTORE register-range operand to the string S,
+   covering registers MIN_REG through MAX_REG inclusive.  Return the
+   updated end-of-string pointer (the null terminator).  */
+
+static char *
+mips16e_add_register_range (char *s, unsigned int min_reg,
+                            unsigned int max_reg)
+{
+  /* A single-register range omits the "-MAX" suffix.  */
+  if (min_reg == max_reg)
+    return s + sprintf (s, ",%s", reg_names[min_reg]);
+  return s + sprintf (s, ",%s-%s", reg_names[min_reg], reg_names[max_reg]);
+}
+
+/* Return the assembly instruction for a MIPS16e SAVE or RESTORE instruction.
+ PATTERN and ADJUST are as for mips16e_save_restore_pattern_p. */
+
+const char *
+mips16e_output_save_restore (rtx pattern, HOST_WIDE_INT adjust)
+{
+  /* Static buffer: the returned string is only valid until the next call,
+     which is fine for the single-threaded output pass.  */
+  static char buffer[300];
+
+  struct mips16e_save_restore_info info;
+  unsigned int i, end;
+  char *s;
+
+  /* Parse the pattern.  It was generated by mips16e_build_save_restore,
+     so it must match.  */
+  if (!mips16e_save_restore_pattern_p (pattern, adjust, &info))
+    gcc_unreachable ();
+
+  /* Add the mnemonic.  A positive adjustment deallocates stack, so it
+     must be a RESTORE.  */
+  s = strcpy (buffer, adjust > 0 ? "restore\t" : "save\t");
+  s += strlen (s);
+
+  /* Save the arguments.  */
+  if (info.nargs > 1)
+    s += sprintf (s, "%s-%s,", reg_names[GP_ARG_FIRST],
+                  reg_names[GP_ARG_FIRST + info.nargs - 1]);
+  else if (info.nargs == 1)
+    s += sprintf (s, "%s,", reg_names[GP_ARG_FIRST]);
+
+  /* Emit the amount of stack space to allocate or deallocate.  */
+  s += sprintf (s, "%d", (int) info.size);
+
+  /* Save or restore $16.  */
+  if (BITSET_P (info.mask, 16))
+    s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 16]);
+
+  /* Save or restore $17.  */
+  if (BITSET_P (info.mask, 17))
+    s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 17]);
+
+  /* Save or restore registers in the range $s2...$s8, which
+     mips16e_s2_s8_regs lists in decreasing order.  Note that this
+     is a software register range; the hardware registers are not
+     numbered consecutively.  */
+  end = ARRAY_SIZE (mips16e_s2_s8_regs);
+  i = mips16e_find_first_register (info.mask, mips16e_s2_s8_regs, end);
+  if (i < end)
+    s = mips16e_add_register_range (s, mips16e_s2_s8_regs[end - 1],
+                                    mips16e_s2_s8_regs[i]);
+
+  /* Save or restore registers in the range $a0...$a3.  */
+  end = ARRAY_SIZE (mips16e_a0_a3_regs);
+  i = mips16e_find_first_register (info.mask, mips16e_a0_a3_regs, end);
+  if (i < end)
+    s = mips16e_add_register_range (s, mips16e_a0_a3_regs[i],
+                                    mips16e_a0_a3_regs[end - 1]);
+
+  /* Save or restore $31.  */
+  if (BITSET_P (info.mask, RETURN_ADDR_REGNUM))
+    s += sprintf (s, ",%s", reg_names[RETURN_ADDR_REGNUM]);
+
+  return buffer;
+}
+
+/* Return true if the current function returns its value in a floating-point
+ register in MIPS16 mode. */
+
+/* Return true if the current function is a MIPS16 function that returns
+   its value in a floating-point register under a hard-float ABI.  */
+
+static bool
+mips16_cfun_returns_in_fpr_p (void)
+{
+  tree result;
+
+  if (!TARGET_MIPS16 || !TARGET_HARD_FLOAT_ABI)
+    return false;
+
+  /* Aggregates are returned in memory, never in FPRs.  */
+  result = DECL_RESULT (current_function_decl);
+  if (aggregate_value_p (result, current_function_decl))
+    return false;
+
+  return mips_return_mode_in_fpr_p (DECL_MODE (result));
+}
+
+/* Return true if predicate PRED is true for at least one instruction.
+ Cache the result in *CACHE, and assume that the result is true
+ if *CACHE is already true. */
+
+static bool
+mips_find_gp_ref (bool *cache, bool (*pred) (rtx))
+{
+  rtx insn;
+
+  /* A true cache is sticky: once a $gp reference has been seen, later
+     passes cannot make it go away, so never rescan.  */
+  if (!*cache)
+    {
+      /* Scan the whole function, including any pushed sequences.  */
+      push_topmost_sequence ();
+      for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+        if (USEFUL_INSN_P (insn) && pred (insn))
+          {
+            *cache = true;
+            break;
+          }
+      pop_topmost_sequence ();
+    }
+  return *cache;
+}
+
+/* Return true if INSN refers to the global pointer in an "inflexible" way.
+ See mips_cfun_has_inflexible_gp_ref_p for details. */
+
+static bool
+mips_insn_has_inflexible_gp_ref_p (rtx insn)
+{
+  /* Uses of pic_offset_table_rtx in CALL_INSN_FUNCTION_USAGE
+     indicate that the target could be a traditional MIPS
+     lazily-binding stub.  Such stubs require $28 to hold the
+     standard global pointer, so the reference is inflexible.  */
+  return find_reg_fusage (insn, USE, pic_offset_table_rtx);
+}
+
+/* Return true if the current function refers to the global pointer
+ in a way that forces $28 to be valid. This means that we can't
+ change the choice of global pointer, even for NewABI code.
+
+ One example of this (and one which needs several checks) is that
+ $28 must be valid when calling traditional MIPS lazy-binding stubs.
+ (This restriction does not apply to PLTs.) */
+
+static bool
+mips_cfun_has_inflexible_gp_ref_p (void)
+{
+  /* If the function has a nonlocal goto, $28 must hold the correct
+     global pointer for the target function.  That is, the target
+     of the goto implicitly uses $28.  */
+  if (crtl->has_nonlocal_goto)
+    return true;
+
+  if (TARGET_ABICALLS_PIC2)
+    {
+      /* Symbolic accesses implicitly use the global pointer unless
+         -mexplicit-relocs is in effect.  JAL macros to symbolic addresses
+         might go to traditional MIPS lazy-binding stubs.  */
+      if (!TARGET_EXPLICIT_RELOCS)
+        return true;
+
+      /* FUNCTION_PROFILER includes a JAL to _mcount, which again
+         can be lazily-bound.  */
+      if (crtl->profile)
+        return true;
+
+      /* MIPS16 functions that return in FPRs need to call an
+         external libgcc routine.  This call is only made explicit
+         during mips_expand_epilogue, and it too might be lazily bound.  */
+      if (mips16_cfun_returns_in_fpr_p ())
+        return true;
+    }
+
+  /* Fall back to scanning the insn stream; the result is cached in
+     cfun->machine.  */
+  return mips_find_gp_ref (&cfun->machine->has_inflexible_gp_insn_p,
+                           mips_insn_has_inflexible_gp_ref_p);
+}
+
+/* Return true if INSN refers to the global pointer in a "flexible" way.
+ See mips_cfun_has_flexible_gp_ref_p for details. */
+
+static bool
+mips_insn_has_flexible_gp_ref_p (rtx insn)
+{
+  /* A GOT access, a small-data access, or any other mention of the
+     global pointer counts as a flexible reference.  */
+  return (get_attr_got (insn) != GOT_UNSET
+          || mips_small_data_pattern_p (PATTERN (insn))
+          || reg_overlap_mentioned_p (pic_offset_table_rtx, PATTERN (insn)));
+}
+
+/* Return true if the current function references the global pointer,
+ but if those references do not inherently require the global pointer
+ to be $28. Assume !mips_cfun_has_inflexible_gp_ref_p (). */
+
+static bool
+mips_cfun_has_flexible_gp_ref_p (void)
+{
+  /* Reload can sometimes introduce constant pool references
+     into a function that otherwise didn't need them.  For example,
+     suppose we have an instruction like:
+
+        (set (reg:DF R1) (float:DF (reg:SI R2)))
+
+     If R2 turns out to be a constant such as 1, the instruction may
+     have a REG_EQUAL note saying that R1 == 1.0.  Reload then has
+     the option of using this constant if R2 doesn't get allocated
+     to a register.
+
+     In cases like these, reload will have added the constant to the
+     pool but no instruction will yet refer to it.  Until reload has
+     finished we therefore conservatively assume a reference exists.  */
+  if (TARGET_ABICALLS_PIC2 && !reload_completed && crtl->uses_const_pool)
+    return true;
+
+  /* Otherwise scan the insn stream; the result is cached in
+     cfun->machine.  */
+  return mips_find_gp_ref (&cfun->machine->has_flexible_gp_insn_p,
+                           mips_insn_has_flexible_gp_ref_p);
+}
+
+/* Return the register that should be used as the global pointer
+ within this function. Return INVALID_REGNUM if the function
+ doesn't need a global pointer. */
+
+static unsigned int
+mips_global_pointer (void)
+{
+  unsigned int regno;
+
+  /* $gp is always available unless we're using a GOT.  */
+  if (!TARGET_USE_GOT)
+    return GLOBAL_POINTER_REGNUM;
+
+  /* If there are inflexible references to $gp, we must use the
+     standard register.  */
+  if (mips_cfun_has_inflexible_gp_ref_p ())
+    return GLOBAL_POINTER_REGNUM;
+
+  /* If there are no current references to $gp, then the only uses
+     we can introduce later are those involved in long branches.
+     With absolute jumps, long branches never need the GOT, so no
+     global pointer is needed at all.  */
+  if (TARGET_ABSOLUTE_JUMPS && !mips_cfun_has_flexible_gp_ref_p ())
+    return INVALID_REGNUM;
+
+  /* If the global pointer is call-saved, try to use a call-clobbered
+     alternative, so that leaf functions need not save and restore it.
+     PIC_FUNCTION_ADDR_REGNUM is excluded because it has a dedicated
+     role in abicalls code.  */
+  if (TARGET_CALL_SAVED_GP && crtl->is_leaf)
+    for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
+      if (!df_regs_ever_live_p (regno)
+          && call_really_used_regs[regno]
+          && !fixed_regs[regno]
+          && regno != PIC_FUNCTION_ADDR_REGNUM)
+        return regno;
+
+  return GLOBAL_POINTER_REGNUM;
+}
+
+/* Return true if the current function's prologue must load the global
+ pointer value into pic_offset_table_rtx and store the same value in
+ the function's cprestore slot (if any).
+
+ One problem we have to deal with is that, when emitting GOT-based
+ position independent code, long-branch sequences will need to load
+ the address of the branch target from the GOT. We don't know until
+ the very end of compilation whether (and where) the function needs
+ long branches, so we must ensure that _any_ branch can access the
+ global pointer in some form. However, we do not want to pessimize
+ the usual case in which all branches are short.
+
+ We handle this as follows:
+
+ (1) During reload, we set cfun->machine->global_pointer to
+ INVALID_REGNUM if we _know_ that the current function
+ doesn't need a global pointer. This is only valid if
+ long branches don't need the GOT.
+
+ Otherwise, we assume that we might need a global pointer
+ and pick an appropriate register.
+
+ (2) If cfun->machine->global_pointer != INVALID_REGNUM,
+ we ensure that the global pointer is available at every
+ block boundary bar entry and exit. We do this in one of two ways:
+
+ - If the function has a cprestore slot, we ensure that this
+ slot is valid at every branch. However, as explained in
+ point (6) below, there is no guarantee that pic_offset_table_rtx
+ itself is valid if new uses of the global pointer are introduced
+ after the first post-epilogue split.
+
+ We guarantee that the cprestore slot is valid by loading it
+ into a fake register, CPRESTORE_SLOT_REGNUM. We then make
+ this register live at every block boundary bar function entry
+ and exit. It is then invalid to move the load (and thus the
+ preceding store) across a block boundary.
+
+ - If the function has no cprestore slot, we guarantee that
+ pic_offset_table_rtx itself is valid at every branch.
+
+ See mips_eh_uses for the handling of the register liveness.
+
+ (3) During prologue and epilogue generation, we emit "ghost"
+ placeholder instructions to manipulate the global pointer.
+
+ (4) During prologue generation, we set cfun->machine->must_initialize_gp_p
+ and cfun->machine->must_restore_gp_when_clobbered_p if we already know
+ that the function needs a global pointer. (There is no need to set
+ them earlier than this, and doing it as late as possible leads to
+ fewer false positives.)
+
+ (5) If cfun->machine->must_initialize_gp_p is true during a
+ split_insns pass, we split the ghost instructions into real
+ instructions. These split instructions can then be optimized in
+ the usual way. Otherwise, we keep the ghost instructions intact,
+ and optimize for the case where they aren't needed. We still
+ have the option of splitting them later, if we need to introduce
+ new uses of the global pointer.
+
+ For example, the scheduler ignores a ghost instruction that
+ stores $28 to the stack, but it handles the split form of
+ the ghost instruction as an ordinary store.
+
+ (6) [OldABI only.] If cfun->machine->must_restore_gp_when_clobbered_p
+ is true during the first post-epilogue split_insns pass, we split
+ calls and restore_gp patterns into instructions that explicitly
+ load pic_offset_table_rtx from the cprestore slot. Otherwise,
+ we split these patterns into instructions that _don't_ load from
+ the cprestore slot.
+
+ If cfun->machine->must_restore_gp_when_clobbered_p is true at the
+ time of the split, then any instructions that exist at that time
+ can make free use of pic_offset_table_rtx. However, if we want
+ to introduce new uses of the global pointer after the split,
+ we must explicitly load the value from the cprestore slot, since
+ pic_offset_table_rtx itself might not be valid at a given point
+ in the function.
+
+ The idea is that we want to be able to delete redundant
+ loads from the cprestore slot in the usual case where no
+ long branches are needed.
+
+ (7) If cfun->machine->must_initialize_gp_p is still false at the end
+ of md_reorg, we decide whether the global pointer is needed for
+ long branches. If so, we set cfun->machine->must_initialize_gp_p
+ to true and split the ghost instructions into real instructions
+ at that stage.
+
+ Note that the ghost instructions must have a zero length for three reasons:
+
+ - Giving the length of the underlying $gp sequence might cause
+ us to use long branches in cases where they aren't really needed.
+
+ - They would perturb things like alignment calculations.
+
+ - More importantly, the hazard detection in md_reorg relies on
+ empty instructions having a zero length.
+
+ If we find a long branch and split the ghost instructions at the
+ end of md_reorg, the split could introduce more long branches.
+ That isn't a problem though, because we still do the split before
+ the final shorten_branches pass.
+
+ This is extremely ugly, but it seems like the best compromise between
+ correctness and efficiency. */
+
+/* Return true if the current function's prologue must initialize the
+   global pointer; see the long commentary above for how and when
+   cfun->machine->must_initialize_gp_p is set.  */
+
+bool
+mips_must_initialize_gp_p (void)
+{
+  return cfun->machine->must_initialize_gp_p;
+}
+
+/* Return true if REGNO is a register that is ordinarily call-clobbered
+ but must nevertheless be preserved by an interrupt handler. */
+
+static bool
+mips_interrupt_extra_call_saved_reg_p (unsigned int regno)
+{
+  /* HI and LO must survive the interrupted code's multiply state.  */
+  if (MD_REG_P (regno))
+    return true;
+
+  /* Likewise the DSP accumulators when the DSP ASE is in use.  */
+  if (TARGET_DSP && DSP_ACC_REG_P (regno))
+    return true;
+
+  if (GP_REG_P (regno) && !cfun->machine->use_shadow_register_set_p)
+    {
+      /* $0 is hard-wired.  */
+      if (regno == GP_REG_FIRST)
+        return false;
+
+      /* The interrupt handler can treat kernel registers as
+         scratch registers.  */
+      if (KERNEL_REG_P (regno))
+        return false;
+
+      /* The function will return the stack pointer to its original value
+         anyway.  */
+      if (regno == STACK_POINTER_REGNUM)
+        return false;
+
+      /* Otherwise, return true for registers that aren't ordinarily
+         call-clobbered.  */
+      return call_really_used_regs[regno];
+    }
+
+  return false;
+}
+
+/* Return true if the current function should treat register REGNO
+ as call-saved. */
+
+static bool
+mips_cfun_call_saved_reg_p (unsigned int regno)
+{
+  /* If the user makes an ordinarily-call-saved register global,
+     that register is no longer call-saved.  */
+  if (global_regs[regno])
+    return false;
+
+  /* Interrupt handlers need to save extra registers; see
+     mips_interrupt_extra_call_saved_reg_p.  */
+  if (cfun->machine->interrupt_handler_p
+      && mips_interrupt_extra_call_saved_reg_p (regno))
+    return true;
+
+  /* call_insns preserve $28 unless they explicitly say otherwise,
+     so call_really_used_regs[] treats $28 as call-saved.  However,
+     we want the ABI property rather than the default call_insn
+     property here.  */
+  return (regno == GLOBAL_POINTER_REGNUM
+          ? TARGET_CALL_SAVED_GP
+          : !call_really_used_regs[regno]);
+}
+
+/* Return true if the function body might clobber register REGNO.
+ We know that REGNO is call-saved. */
+
+static bool
+mips_cfun_might_clobber_call_saved_reg_p (unsigned int regno)
+{
+  /* Some functions should be treated as clobbering all call-saved
+     registers (e.g. those that call __builtin_setjmp).  */
+  if (crtl->saves_all_registers)
+    return true;
+
+  /* DF handles cases where a register is explicitly referenced in
+     the rtl.  Incoming values are passed in call-clobbered registers,
+     so we can assume that any live call-saved register is set within
+     the function.  */
+  if (df_regs_ever_live_p (regno))
+    return true;
+
+  /* Check for registers that are clobbered by FUNCTION_PROFILER.
+     These clobbers are not explicit in the rtl.  */
+  if (crtl->profile && MIPS_SAVE_REG_FOR_PROFILING_P (regno))
+    return true;
+
+  /* If we're using a call-saved global pointer, the function's
+     prologue will need to set it up.  */
+  if (cfun->machine->global_pointer == regno)
+    return true;
+
+  /* The function's prologue will need to set the frame pointer if
+     frame_pointer_needed.  */
+  if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
+    return true;
+
+  /* If a MIPS16 function returns a value in FPRs, its epilogue
+     will need to call an external libgcc routine.  This yet-to-be
+     generated call_insn will clobber $31.  */
+  if (regno == RETURN_ADDR_REGNUM && mips16_cfun_returns_in_fpr_p ())
+    return true;
+
+  /* If REGNO is ordinarily call-clobbered, we must assume that any
+     called function could modify it.  Interrupt handlers must preserve
+     such registers if the function makes any calls.  */
+  if (cfun->machine->interrupt_handler_p
+      && !crtl->is_leaf
+      && mips_interrupt_extra_call_saved_reg_p (regno))
+    return true;
+
+  return false;
+}
+
+/* Return true if the current function must save register REGNO. */
+
+static bool
+mips_save_reg_p (unsigned int regno)
+{
+  if (mips_cfun_call_saved_reg_p (regno))
+    {
+      if (mips_cfun_might_clobber_call_saved_reg_p (regno))
+        return true;
+
+      /* Save both registers in an FPR pair if either one is used.  This is
+         needed for the case when MIN_FPRS_PER_FMT == 1, which allows the odd
+         register to be used without the even register.  */
+      if (FP_REG_P (regno)
+          && MAX_FPRS_PER_FMT == 2
+          && mips_cfun_might_clobber_call_saved_reg_p (regno + 1))
+        return true;
+    }
+
+  /* We need to save the incoming return address if __builtin_eh_return
+     is being used to set a different return address.  */
+  if (regno == RETURN_ADDR_REGNUM && crtl->calls_eh_return)
+    return true;
+
+  return false;
+}
+
+/* Populate the current function's mips_frame_info structure.
+
+ MIPS stack frames look like:
+
+ +-------------------------------+
+ | |
+ | incoming stack arguments |
+ | |
+ +-------------------------------+
+ | |
+ | caller-allocated save area |
+ A | for register arguments |
+ | |
+ +-------------------------------+ <-- incoming stack pointer
+ | |
+ | callee-allocated save area |
+ B | for arguments that are |
+ | split between registers and |
+ | the stack |
+ | |
+ +-------------------------------+ <-- arg_pointer_rtx
+ | |
+ C | callee-allocated save area |
+ | for register varargs |
+ | |
+ +-------------------------------+ <-- frame_pointer_rtx
+ | | + cop0_sp_offset
+ | COP0 reg save area | + UNITS_PER_WORD
+ | |
+ +-------------------------------+ <-- frame_pointer_rtx + acc_sp_offset
+ | | + UNITS_PER_WORD
+ | accumulator save area |
+ | |
+ +-------------------------------+ <-- stack_pointer_rtx + fp_sp_offset
+ | | + UNITS_PER_HWFPVALUE
+ | FPR save area |
+ | |
+ +-------------------------------+ <-- stack_pointer_rtx + gp_sp_offset
+ | | + UNITS_PER_WORD
+ | GPR save area |
+ | |
+ +-------------------------------+ <-- frame_pointer_rtx with
+ | | \ -fstack-protector
+ | local variables | | var_size
+ | | /
+ +-------------------------------+
+ | | \
+ | $gp save area | | cprestore_size
+ | | /
+ P +-------------------------------+ <-- hard_frame_pointer_rtx for
+ | | \ MIPS16 code
+ | outgoing stack arguments | |
+ | | |
+ +-------------------------------+ | args_size
+ | | |
+ | caller-allocated save area | |
+ | for register arguments | |
+ | | /
+ +-------------------------------+ <-- stack_pointer_rtx
+ frame_pointer_rtx without
+ -fstack-protector
+ hard_frame_pointer_rtx for
+ non-MIPS16 code.
+
+ At least two of A, B and C will be empty.
+
+ Dynamic stack allocations such as alloca insert data at point P.
+ They decrease stack_pointer_rtx but leave frame_pointer_rtx and
+ hard_frame_pointer_rtx unchanged. */
+
+static void
+mips_compute_frame_info (void)
+{
+  struct mips_frame_info *frame;
+  HOST_WIDE_INT offset, size;
+  unsigned int regno, i;
+
+  /* Set this function's interrupt properties, diagnosing combinations
+     that the interrupt support cannot handle.  */
+  if (mips_interrupt_type_p (TREE_TYPE (current_function_decl)))
+    {
+      if (!ISA_MIPS32R2)
+        error ("the %<interrupt%> attribute requires a MIPS32r2 processor");
+      else if (TARGET_HARD_FLOAT)
+        error ("the %<interrupt%> attribute requires %<-msoft-float%>");
+      else if (TARGET_MIPS16)
+        error ("interrupt handlers cannot be MIPS16 functions");
+      else
+        {
+          cfun->machine->interrupt_handler_p = true;
+          cfun->machine->use_shadow_register_set_p =
+            mips_use_shadow_register_set_p (TREE_TYPE (current_function_decl));
+          cfun->machine->keep_interrupts_masked_p =
+            mips_keep_interrupts_masked_p (TREE_TYPE (current_function_decl));
+          cfun->machine->use_debug_exception_return_p =
+            mips_use_debug_exception_return_p (TREE_TYPE
+                                               (current_function_decl));
+        }
+    }
+
+  /* Start from a zeroed frame; OFFSET accumulates the frame size from
+     the bottom (stack pointer) upwards.  */
+  frame = &cfun->machine->frame;
+  memset (frame, 0, sizeof (*frame));
+  size = get_frame_size ();
+
+  cfun->machine->global_pointer = mips_global_pointer ();
+
+  /* The first two blocks contain the outgoing argument area and the $gp save
+     slot.  This area isn't needed in leaf functions, but if the
+     target-independent frame size is nonzero, we have already committed to
+     allocating these in STARTING_FRAME_OFFSET for !FRAME_GROWS_DOWNWARD.  */
+  if ((size == 0 || FRAME_GROWS_DOWNWARD) && crtl->is_leaf)
+    {
+      /* The MIPS 3.0 linker does not like functions that dynamically
+         allocate the stack and have 0 for STACK_DYNAMIC_OFFSET, since it
+         looks like we are trying to create a second frame pointer to the
+         function, so allocate some stack space to make it happy.  */
+      if (cfun->calls_alloca)
+        frame->args_size = REG_PARM_STACK_SPACE (cfun->decl);
+      else
+        frame->args_size = 0;
+      frame->cprestore_size = 0;
+    }
+  else
+    {
+      frame->args_size = crtl->outgoing_args_size;
+      frame->cprestore_size = MIPS_GP_SAVE_AREA_SIZE;
+    }
+  offset = frame->args_size + frame->cprestore_size;
+
+  /* Move above the local variables.  */
+  frame->var_size = MIPS_STACK_ALIGN (size);
+  offset += frame->var_size;
+
+  /* Find out which GPRs we need to save.  */
+  for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
+    if (mips_save_reg_p (regno))
+      {
+        frame->num_gp++;
+        frame->mask |= 1 << (regno - GP_REG_FIRST);
+      }
+
+  /* If this function calls eh_return, we must also save and restore the
+     EH data registers.  */
+  if (crtl->calls_eh_return)
+    for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; i++)
+      {
+        frame->num_gp++;
+        frame->mask |= 1 << (EH_RETURN_DATA_REGNO (i) - GP_REG_FIRST);
+      }
+
+  /* The MIPS16e SAVE and RESTORE instructions have two ranges of registers:
+     $a3-$a0 and $s2-$s8.  If we save one register in the range, we must
+     save all later registers too.  */
+  if (GENERATE_MIPS16E_SAVE_RESTORE)
+    {
+      mips16e_mask_registers (&frame->mask, mips16e_s2_s8_regs,
+                              ARRAY_SIZE (mips16e_s2_s8_regs), &frame->num_gp);
+      mips16e_mask_registers (&frame->mask, mips16e_a0_a3_regs,
+                              ARRAY_SIZE (mips16e_a0_a3_regs), &frame->num_gp);
+    }
+
+  /* Move above the GPR save area.  gp_sp_offset is the offset of the
+     highest GPR save slot from the stack pointer.  */
+  if (frame->num_gp > 0)
+    {
+      offset += MIPS_STACK_ALIGN (frame->num_gp * UNITS_PER_WORD);
+      frame->gp_sp_offset = offset - UNITS_PER_WORD;
+    }
+
+  /* Find out which FPRs we need to save.  This loop must iterate over
+     the same space as its companion in mips_for_each_saved_gpr_and_fpr.  */
+  if (TARGET_HARD_FLOAT)
+    for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno += MAX_FPRS_PER_FMT)
+      if (mips_save_reg_p (regno))
+        {
+          frame->num_fp += MAX_FPRS_PER_FMT;
+          frame->fmask |= ~(~0 << MAX_FPRS_PER_FMT) << (regno - FP_REG_FIRST);
+        }
+
+  /* Move above the FPR save area.  */
+  if (frame->num_fp > 0)
+    {
+      offset += MIPS_STACK_ALIGN (frame->num_fp * UNITS_PER_FPREG);
+      frame->fp_sp_offset = offset - UNITS_PER_HWFPVALUE;
+    }
+
+  /* Add in space for the interrupt context information.  */
+  if (cfun->machine->interrupt_handler_p)
+    {
+      /* Check HI/LO.  */
+      if (mips_save_reg_p (LO_REGNUM) || mips_save_reg_p (HI_REGNUM))
+        {
+          frame->num_acc++;
+          frame->acc_mask |= (1 << 0);
+        }
+
+      /* Check accumulators 1, 2, 3.  Each DSP accumulator is a HI/LO
+         register pair, hence the stride of 2.  */
+      for (i = DSP_ACC_REG_FIRST; i <= DSP_ACC_REG_LAST; i += 2)
+        if (mips_save_reg_p (i) || mips_save_reg_p (i + 1))
+          {
+            frame->num_acc++;
+            frame->acc_mask |= 1 << (((i - DSP_ACC_REG_FIRST) / 2) + 1);
+          }
+
+      /* All interrupt context functions need space to preserve STATUS.  */
+      frame->num_cop0_regs++;
+
+      /* If we don't keep interrupts masked, we need to save EPC.  */
+      if (!cfun->machine->keep_interrupts_masked_p)
+        frame->num_cop0_regs++;
+    }
+
+  /* Move above the accumulator save area.  */
+  if (frame->num_acc > 0)
+    {
+      /* Each accumulator needs 2 words.  */
+      offset += frame->num_acc * 2 * UNITS_PER_WORD;
+      frame->acc_sp_offset = offset - UNITS_PER_WORD;
+    }
+
+  /* Move above the COP0 register save area.  */
+  if (frame->num_cop0_regs > 0)
+    {
+      offset += frame->num_cop0_regs * UNITS_PER_WORD;
+      frame->cop0_sp_offset = offset - UNITS_PER_WORD;
+    }
+
+  /* Move above the callee-allocated varargs save area.  */
+  offset += MIPS_STACK_ALIGN (cfun->machine->varargs_size);
+  frame->arg_pointer_offset = offset;
+
+  /* Move above the callee-allocated area for pretend stack arguments.  */
+  offset += crtl->args.pretend_args_size;
+  frame->total_size = offset;
+
+  /* Work out the offsets of the save areas from the top of the frame.
+     These are negative or zero, since OFFSET is now the total size.  */
+  if (frame->gp_sp_offset > 0)
+    frame->gp_save_offset = frame->gp_sp_offset - offset;
+  if (frame->fp_sp_offset > 0)
+    frame->fp_save_offset = frame->fp_sp_offset - offset;
+  if (frame->acc_sp_offset > 0)
+    frame->acc_save_offset = frame->acc_sp_offset - offset;
+  if (frame->num_cop0_regs > 0)
+    frame->cop0_save_offset = frame->cop0_sp_offset - offset;
+
+  /* MIPS16 code offsets the frame pointer by the size of the outgoing
+     arguments.  This tends to increase the chances of using unextended
+     instructions for local variables and incoming arguments.  */
+  if (TARGET_MIPS16)
+    frame->hard_frame_pointer_offset = frame->args_size;
+}
+
+/* Return the style of GP load sequence that is being used for the
+ current function. */
+
+/* Classify the $gp load sequence used by the current function.  */
+
+enum mips_loadgp_style
+mips_current_loadgp_style (void)
+{
+  /* Functions without a chosen global pointer, or without a GOT at all,
+     load nothing.  */
+  if (cfun->machine->global_pointer == INVALID_REGNUM || !TARGET_USE_GOT)
+    return LOADGP_NONE;
+
+  if (TARGET_RTP_PIC)
+    return LOADGP_RTP;
+
+  if (TARGET_ABSOLUTE_ABICALLS)
+    return LOADGP_ABSOLUTE;
+
+  if (TARGET_NEWABI)
+    return LOADGP_NEWABI;
+
+  return LOADGP_OLDABI;
+}
+
+/* Implement TARGET_FRAME_POINTER_REQUIRED. */
+
+static bool
+mips_frame_pointer_required (void)
+{
+  /* If the function contains dynamic stack allocations, we need to
+     use the frame pointer to access the static parts of the frame.  */
+  if (cfun->calls_alloca)
+    return true;
+
+  /* In MIPS16 mode, we need a frame pointer for a large frame; otherwise,
+     reload may be unable to compute the address of a local variable,
+     since there is no way to add a large constant to the stack pointer
+     without using a second temporary register.  Note that calling
+     mips_compute_frame_info here refreshes cfun->machine->frame.  */
+  if (TARGET_MIPS16)
+    {
+      mips_compute_frame_info ();
+      if (!SMALL_OPERAND (cfun->machine->frame.total_size))
+        return true;
+    }
+
+  return false;
+}
+
+/* Make sure that we're not trying to eliminate to the wrong hard frame
+ pointer. */
+
+/* Implement TARGET_CAN_ELIMINATE.  Eliminations from any register are
+   fine, but only the stack pointer and hard frame pointer are valid
+   destinations.  */
+
+static bool
+mips_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
+{
+  if (to == STACK_POINTER_REGNUM)
+    return true;
+  return to == HARD_FRAME_POINTER_REGNUM;
+}
+
+/* Implement INITIAL_ELIMINATION_OFFSET. FROM is either the frame pointer
+ or argument pointer. TO is either the stack pointer or hard frame
+ pointer. */
+
+HOST_WIDE_INT
+mips_initial_elimination_offset (int from, int to)
+{
+  HOST_WIDE_INT offset;
+
+  /* Make sure cfun->machine->frame is up to date.  */
+  mips_compute_frame_info ();
+
+  /* Set OFFSET to the offset from the end-of-prologue stack pointer.  */
+  switch (from)
+    {
+    case FRAME_POINTER_REGNUM:
+      if (FRAME_GROWS_DOWNWARD)
+        offset = (cfun->machine->frame.args_size
+                  + cfun->machine->frame.cprestore_size
+                  + cfun->machine->frame.var_size);
+      else
+        offset = 0;
+      break;
+
+    case ARG_POINTER_REGNUM:
+      offset = cfun->machine->frame.arg_pointer_offset;
+      break;
+
+    default:
+      gcc_unreachable ();
+    }
+
+  /* The hard frame pointer itself sits hard_frame_pointer_offset bytes
+     above the end-of-prologue stack pointer.  */
+  if (to == HARD_FRAME_POINTER_REGNUM)
+    offset -= cfun->machine->frame.hard_frame_pointer_offset;
+
+  return offset;
+}
+
+/* Implement TARGET_EXTRA_LIVE_ON_ENTRY.  */
+
+static void
+mips_extra_live_on_entry (bitmap regs)
+{
+  /* Nothing extra is live on entry unless the function uses a GOT.  */
+  if (!TARGET_USE_GOT)
+    return;
+
+  /* PIC_FUNCTION_ADDR_REGNUM is live if we need it to set up
+     the global pointer.  */
+  if (!TARGET_ABSOLUTE_ABICALLS)
+    bitmap_set_bit (regs, PIC_FUNCTION_ADDR_REGNUM);
+
+  /* The prologue may set MIPS16_PIC_TEMP_REGNUM to the value of
+     the global pointer.  */
+  if (TARGET_MIPS16)
+    bitmap_set_bit (regs, MIPS16_PIC_TEMP_REGNUM);
+
+  /* See the comment above load_call<mode> for details.  */
+  bitmap_set_bit (regs, GOT_VERSION_REGNUM);
+}
+
+/* Implement RETURN_ADDR_RTX.  We do not support moving back to a
+   previous frame.  */
+
+rtx
+mips_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
+{
+  /* Only the current frame's return address is available.  */
+  return (count == 0
+	  ? get_hard_reg_initial_val (Pmode, RETURN_ADDR_REGNUM)
+	  : const0_rtx);
+}
+
+/* Emit code to change the current function's return address to
+   ADDRESS.  SCRATCH is available as a scratch register, if needed.
+   ADDRESS and SCRATCH are both word-mode GPRs.  */
+
+void
+mips_set_return_address (rtx address, rtx scratch)
+{
+  rtx slot;
+
+  /* The return address must actually have a save slot in this frame.  */
+  gcc_assert (BITSET_P (cfun->machine->frame.mask, RETURN_ADDR_REGNUM));
+
+  /* Form the slot's address and store ADDRESS into it.  */
+  slot = mips_add_offset (scratch, stack_pointer_rtx,
+			  cfun->machine->frame.gp_sp_offset);
+  mips_emit_move (gen_frame_mem (GET_MODE (address), slot), address);
+}
+
+/* Return true if the current function has a cprestore slot.  */
+
+bool
+mips_cfun_has_cprestore_slot_p (void)
+{
+  /* A slot exists only when the function uses a global pointer and
+     the frame actually reserves cprestore space.  */
+  if (cfun->machine->global_pointer == INVALID_REGNUM)
+    return false;
+  return cfun->machine->frame.cprestore_size > 0;
+}
+
+/* Fill *BASE and *OFFSET such that *BASE + *OFFSET refers to the
+ cprestore slot. LOAD_P is true if the caller wants to load from
+ the cprestore slot; it is false if the caller wants to store to
+ the slot. */
+
+static void
+mips_get_cprestore_base_and_offset (rtx *base, HOST_WIDE_INT *offset,
+ bool load_p)
+{
+ const struct mips_frame_info *frame;
+
+ frame = &cfun->machine->frame;
+ /* .cprestore always uses the stack pointer instead of the frame pointer.
+ We have a free choice for direct stores for non-MIPS16 functions,
+ and for MIPS16 functions whose cprestore slot is in range of the
+ stack pointer. Using the stack pointer would sometimes give more
+ (early) scheduling freedom, but using the frame pointer would
+ sometimes give more (late) scheduling freedom. It's hard to
+ predict which applies to a given function, so let's keep things
+ simple.
+
+ Loads must always use the frame pointer in functions that call
+ alloca, and there's little benefit to using the stack pointer
+ otherwise. */
+ /* The extra condition below forces stores emitted via the .cprestore
+ directive (TARGET_CPRESTORE_DIRECTIVE && !LOAD_P) to be stack-pointer
+ based, as required by the directive. */
+ if (frame_pointer_needed && !(TARGET_CPRESTORE_DIRECTIVE && !load_p))
+ {
+ /* The frame pointer sits frame->hard_frame_pointer_offset bytes
+ above the end-of-prologue stack pointer; compensate for that. */
+ *base = hard_frame_pointer_rtx;
+ *offset = frame->args_size - frame->hard_frame_pointer_offset;
+ }
+ else
+ {
+ /* The slot is frame->args_size bytes above the stack pointer. */
+ *base = stack_pointer_rtx;
+ *offset = frame->args_size;
+ }
+}
+
+/* Return true if X is the load or store address of the cprestore slot;
+   LOAD_P says which.  */
+
+bool
+mips_cprestore_address_p (rtx x, bool load_p)
+{
+  rtx base, want_base;
+  HOST_WIDE_INT offset, want_offset;
+
+  /* Split X into base + offset form and compare it against the
+     canonical cprestore address for this kind of access.  */
+  mips_split_plus (x, &base, &offset);
+  mips_get_cprestore_base_and_offset (&want_base, &want_offset, load_p);
+  return base == want_base && offset == want_offset;
+}
+
+/* Return a MEM rtx for the cprestore slot.  LOAD_P is true if we are
+   going to load from it, false if we are going to store to it.
+   Use TEMP as a temporary register if need be.  */
+
+static rtx
+mips_cprestore_slot (rtx temp, bool load_p)
+{
+  HOST_WIDE_INT offset;
+  rtx base, addr;
+
+  mips_get_cprestore_base_and_offset (&base, &offset, load_p);
+  addr = mips_add_offset (temp, base, offset);
+  return gen_frame_mem (Pmode, addr);
+}
+
+/* Emit instructions to save global pointer value GP into cprestore
+ slot MEM. OFFSET is the offset that MEM applies to the base register.
+
+ MEM may not be a legitimate address. If it isn't, TEMP is a
+ temporary register that can be used, otherwise it is a SCRATCH. */
+
+void
+mips_save_gp_to_cprestore_slot (rtx mem, rtx offset, rtx gp, rtx temp)
+{
+ if (TARGET_CPRESTORE_DIRECTIVE)
+ {
+ /* The .cprestore directive implicitly stores $gp, so GP must be
+ the global pointer register itself. */
+ gcc_assert (gp == pic_offset_table_rtx);
+ emit_insn (PMODE_INSN (gen_cprestore, (mem, offset)));
+ }
+ else
+ /* No directive available: emit an explicit store to the slot. */
+ mips_emit_move (mips_cprestore_slot (temp, false), gp);
+}
+
+/* Restore $gp from its save slot, using TEMP as a temporary base register
+ if need be. This function is for o32 and o64 abicalls only.
+
+ See mips_must_initialize_gp_p for details about how we manage the
+ global pointer. */
+
+void
+mips_restore_gp_from_cprestore_slot (rtx temp)
+{
+ gcc_assert (TARGET_ABICALLS && TARGET_OLDABI && epilogue_completed);
+
+ if (!cfun->machine->must_restore_gp_when_clobbered_p)
+ {
+ /* No restore is needed; emit a placeholder note so that the
+ caller still gets an instruction back. */
+ emit_note (NOTE_INSN_DELETED);
+ return;
+ }
+
+ if (TARGET_MIPS16)
+ {
+ /* Go through TEMP first; MIPS16 presumably cannot load $gp
+ directly from memory -- confirm. */
+ mips_emit_move (temp, mips_cprestore_slot (temp, true));
+ mips_emit_move (pic_offset_table_rtx, temp);
+ }
+ else
+ mips_emit_move (pic_offset_table_rtx, mips_cprestore_slot (temp, true));
+ /* NOTE(review): the blockage presumably stops later $gp uses from
+ being scheduled before the restore when assembler macros may
+ reference $gp -- confirm. */
+ if (!TARGET_EXPLICIT_RELOCS)
+ emit_insn (gen_blockage ());
+}
+
+/* A function to save or store a register.  The first argument is the
+   register and the second is the stack slot.  */
+typedef void (*mips_save_restore_fn) (rtx, rtx);
+
+/* Use FN to save or restore register REGNO.  MODE is the register's
+   mode and OFFSET is the offset of its save slot from the current
+   stack pointer.  */
+
+static void
+mips_save_restore_reg (enum machine_mode mode, int regno,
+		       HOST_WIDE_INT offset, mips_save_restore_fn fn)
+{
+  rtx addr, slot;
+
+  /* The save slot lives at a fixed offset from the stack pointer.  */
+  addr = plus_constant (Pmode, stack_pointer_rtx, offset);
+  slot = gen_frame_mem (mode, addr);
+  fn (gen_rtx_REG (mode, regno), slot);
+}
+
+/* Call FN for each accumulator that is saved by the current function.
+ SP_OFFSET is the offset of the current stack pointer from the start
+ of the frame. */
+
+static void
+mips_for_each_saved_acc (HOST_WIDE_INT sp_offset, mips_save_restore_fn fn)
+{
+ HOST_WIDE_INT offset;
+ int regno;
+
+ offset = cfun->machine->frame.acc_sp_offset - sp_offset;
+ /* Bit 0 of acc_mask stands for the LO/HI pair. */
+ if (BITSET_P (cfun->machine->frame.acc_mask, 0))
+ {
+ mips_save_restore_reg (word_mode, LO_REGNUM, offset, fn);
+ offset -= UNITS_PER_WORD;
+ mips_save_restore_reg (word_mode, HI_REGNUM, offset, fn);
+ offset -= UNITS_PER_WORD;
+ }
+
+ /* Bits 1 upwards each stand for a pair of DSP accumulator
+ registers, hence the /2 in the bit index below. */
+ for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno++)
+ if (BITSET_P (cfun->machine->frame.acc_mask,
+ ((regno - DSP_ACC_REG_FIRST) / 2) + 1))
+ {
+ mips_save_restore_reg (word_mode, regno, offset, fn);
+ offset -= UNITS_PER_WORD;
+ }
+}
+
+/* Save register REG to MEM. Make the instruction frame-related. */
+
+static void
+mips_save_reg (rtx reg, rtx mem)
+{
+ if (GET_MODE (reg) == DFmode && !TARGET_FLOAT64)
+ {
+ rtx x1, x2;
+
+ /* Without 64-bit FPRs the DFmode move may have to be split into
+ two word moves. */
+ mips_emit_move_or_split (mem, reg, SPLIT_IF_NECESSARY);
+
+ /* Describe the CFI effect as two subword stores so that the
+ unwind information is accurate whether or not the move was
+ actually split. */
+ x1 = mips_frame_set (mips_subword (mem, false),
+ mips_subword (reg, false));
+ x2 = mips_frame_set (mips_subword (mem, true),
+ mips_subword (reg, true));
+ mips_set_frame_expr (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, x1, x2)));
+ }
+ else
+ mips_emit_save_slot_move (mem, reg, MIPS_PROLOGUE_TEMP (GET_MODE (reg)));
+}
+
+/* Capture the register combinations that are allowed in a SWM or LWM
+ instruction. The entries are ordered by number of registers set in
+ the mask. We also ignore the single register encodings because a
+ normal SW/LW is preferred. */
+
+static const unsigned int umips_swm_mask[17] = {
+ 0xc0ff0000, 0x80ff0000, 0x40ff0000, 0x807f0000,
+ 0x00ff0000, 0x803f0000, 0x007f0000, 0x801f0000,
+ 0x003f0000, 0x800f0000, 0x001f0000, 0x80070000,
+ 0x000f0000, 0x80030000, 0x00070000, 0x80010000,
+ 0x00030000
+};
+
+/* For each mask above, the matching encoding: the low four bits give
+ the number of registers saved from the $16-$23/$30 range and bit 4
+ says whether $31 is included as well. See how
+ umips_build_save_restore decodes these values. */
+
+static const unsigned int umips_swm_encoding[17] = {
+ 25, 24, 9, 23, 8, 22, 7, 21, 6, 20, 5, 19, 4, 18, 3, 17, 2
+};
+
+/* Try to use a microMIPS LWM or SWM instruction to save or restore
+ as many GPRs in *MASK as possible. *OFFSET is the offset from the
+ stack pointer of the topmost save slot.
+
+ Remove from *MASK all registers that were handled using LWM and SWM.
+ Update *OFFSET so that it points to the first unused save slot. */
+
+static bool
+umips_build_save_restore (mips_save_restore_fn fn,
+ unsigned *mask, HOST_WIDE_INT *offset)
+{
+ int nregs;
+ unsigned int i, j;
+ rtx pattern, set, reg, mem;
+ HOST_WIDE_INT this_offset;
+ rtx this_base;
+
+ /* Try matching $16 to $31 (s0 to ra). */
+ for (i = 0; i < ARRAY_SIZE (umips_swm_mask); i++)
+ if ((*mask & 0xffff0000) == umips_swm_mask[i])
+ break;
+
+ if (i == ARRAY_SIZE (umips_swm_mask))
+ return false;
+
+ /* Get the offset of the lowest save slot. */
+ /* Decode the encoding: low nibble counts registers in the
+ $16-$23/$30 range, bit 4 covers $31. */
+ nregs = (umips_swm_encoding[i] & 0xf) + (umips_swm_encoding[i] >> 4);
+ this_offset = *offset - UNITS_PER_WORD * (nregs - 1);
+
+ /* LWM/SWM can only support offsets from -2048 to 2047. */
+ if (!UMIPS_12BIT_OFFSET_P (this_offset))
+ return false;
+
+ /* Create the final PARALLEL. */
+ pattern = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nregs));
+ this_base = stack_pointer_rtx;
+
+ /* For registers $16-$23 and $30. */
+ for (j = 0; j < (umips_swm_encoding[i] & 0xf); j++)
+ {
+ HOST_WIDE_INT offset = this_offset + j * UNITS_PER_WORD;
+ mem = gen_frame_mem (SImode, plus_constant (Pmode, this_base, offset));
+ /* The ninth register of the range is $30, not $24. */
+ unsigned int regno = (j != 8) ? 16 + j : 30;
+ *mask &= ~(1 << regno);
+ reg = gen_rtx_REG (SImode, regno);
+ if (fn == mips_save_reg)
+ set = mips_frame_set (mem, reg);
+ else
+ {
+ set = gen_rtx_SET (VOIDmode, reg, mem);
+ mips_add_cfa_restore (reg);
+ }
+ XVECEXP (pattern, 0, j) = set;
+ }
+
+ /* For register $31. J is left by the loop above pointing at the
+ next free PARALLEL element and save slot. */
+ if (umips_swm_encoding[i] >> 4)
+ {
+ HOST_WIDE_INT offset = this_offset + j * UNITS_PER_WORD;
+ *mask &= ~(1 << 31);
+ mem = gen_frame_mem (SImode, plus_constant (Pmode, this_base, offset));
+ reg = gen_rtx_REG (SImode, 31);
+ if (fn == mips_save_reg)
+ set = mips_frame_set (mem, reg);
+ else
+ {
+ set = gen_rtx_SET (VOIDmode, reg, mem);
+ mips_add_cfa_restore (reg);
+ }
+ XVECEXP (pattern, 0, j) = set;
+ }
+
+ pattern = emit_insn (pattern);
+ if (fn == mips_save_reg)
+ RTX_FRAME_RELATED_P (pattern) = 1;
+
+ /* Adjust the last offset. */
+ *offset -= UNITS_PER_WORD * nregs;
+
+ return true;
+}
+
+/* Call FN for each register that is saved by the current function.
+ SP_OFFSET is the offset of the current stack pointer from the start
+ of the frame. */
+
+static void
+mips_for_each_saved_gpr_and_fpr (HOST_WIDE_INT sp_offset,
+ mips_save_restore_fn fn)
+{
+ enum machine_mode fpr_mode;
+ int regno;
+ const struct mips_frame_info *frame = &cfun->machine->frame;
+ HOST_WIDE_INT offset;
+ unsigned int mask;
+
+ /* Save registers starting from high to low. The debuggers prefer at least
+ the return register be stored at func+4, and also it allows us not to
+ need a nop in the epilogue if at least one register is reloaded in
+ addition to return address. */
+ offset = frame->gp_sp_offset - sp_offset;
+ mask = frame->mask;
+
+ /* On microMIPS, cover as many registers as possible with one
+ LWM/SWM first; the registers it handles are removed from MASK
+ and OFFSET is moved past their slots. */
+ if (TARGET_MICROMIPS)
+ umips_build_save_restore (fn, &mask, &offset);
+
+ for (regno = GP_REG_LAST; regno >= GP_REG_FIRST; regno--)
+ if (BITSET_P (mask, regno - GP_REG_FIRST))
+ {
+ /* Record the ra offset for use by mips_function_profiler. */
+ if (regno == RETURN_ADDR_REGNUM)
+ cfun->machine->frame.ra_fp_offset = offset + sp_offset;
+ mips_save_restore_reg (word_mode, regno, offset, fn);
+ offset -= UNITS_PER_WORD;
+ }
+
+ /* This loop must iterate over the same space as its companion in
+ mips_compute_frame_info. */
+ offset = cfun->machine->frame.fp_sp_offset - sp_offset;
+ fpr_mode = (TARGET_SINGLE_FLOAT ? SFmode : DFmode);
+ /* Step by MAX_FPRS_PER_FMT: each fmask bit covers one full-format
+ FPR group. */
+ for (regno = FP_REG_LAST - MAX_FPRS_PER_FMT + 1;
+ regno >= FP_REG_FIRST;
+ regno -= MAX_FPRS_PER_FMT)
+ if (BITSET_P (cfun->machine->frame.fmask, regno - FP_REG_FIRST))
+ {
+ mips_save_restore_reg (fpr_mode, regno, offset, fn);
+ offset -= GET_MODE_SIZE (fpr_mode);
+ }
+}
+
+/* Return true if a move between register REGNO and its save slot (MEM)
+   can be done in a single move.  LOAD_P is true if we are loading
+   from the slot, false if we are storing to it.  */
+
+static bool
+mips_direct_save_slot_move_p (unsigned int regno, rtx mem, bool load_p)
+{
+  /* There is a specific MIPS16 instruction for saving $31 to the stack,
+     so never treat that store as a direct move.  */
+  if (TARGET_MIPS16 && regno == RETURN_ADDR_REGNUM && !load_p)
+    return false;
+
+  /* Otherwise the move is direct exactly when no secondary reload is
+     needed between the register's class and the slot.  */
+  return (mips_secondary_reload_class (REGNO_REG_CLASS (regno),
+				       GET_MODE (mem), mem, load_p)
+	  == NO_REGS);
+}
+
+/* Emit a move from SRC to DEST, given that one of them is a register
+ save slot and that the other is a register. TEMP is a temporary
+ GPR of the same mode that is available if need be. */
+
+void
+mips_emit_save_slot_move (rtx dest, rtx src, rtx temp)
+{
+ unsigned int regno;
+ rtx mem;
+
+ /* Work out which operand is the register and which is the slot. */
+ if (REG_P (src))
+ {
+ regno = REGNO (src);
+ mem = dest;
+ }
+ else
+ {
+ regno = REGNO (dest);
+ mem = src;
+ }
+
+ if (regno == cfun->machine->global_pointer && !mips_must_initialize_gp_p ())
+ {
+ /* We don't yet know whether we'll need this instruction or not.
+ Postpone the decision by emitting a ghost move. This move
+ is specifically not frame-related; only the split version is. */
+ if (TARGET_64BIT)
+ emit_insn (gen_move_gpdi (dest, src));
+ else
+ emit_insn (gen_move_gpsi (dest, src));
+ return;
+ }
+
+ /* HI cannot be moved to or from memory directly; go through TEMP
+ using the mthi/mfhi patterns. The MD register pair is viewed as
+ TImode on 64-bit targets and DImode otherwise. */
+ if (regno == HI_REGNUM)
+ {
+ if (REG_P (dest))
+ {
+ mips_emit_move (temp, src);
+ if (TARGET_64BIT)
+ emit_insn (gen_mthisi_di (gen_rtx_REG (TImode, MD_REG_FIRST),
+ temp, gen_rtx_REG (DImode, LO_REGNUM)));
+ else
+ emit_insn (gen_mthisi_di (gen_rtx_REG (DImode, MD_REG_FIRST),
+ temp, gen_rtx_REG (SImode, LO_REGNUM)));
+ }
+ else
+ {
+ if (TARGET_64BIT)
+ emit_insn (gen_mfhidi_ti (temp,
+ gen_rtx_REG (TImode, MD_REG_FIRST)));
+ else
+ emit_insn (gen_mfhisi_di (temp,
+ gen_rtx_REG (DImode, MD_REG_FIRST)));
+ mips_emit_move (dest, temp);
+ }
+ }
+ else if (mips_direct_save_slot_move_p (regno, mem, mem == src))
+ mips_emit_move (dest, src);
+ else
+ {
+ /* An indirect move: DEST and TEMP must not overlap, since TEMP
+ carries the value between the two moves. */
+ gcc_assert (!reg_overlap_mentioned_p (dest, temp));
+ mips_emit_move (temp, src);
+ mips_emit_move (dest, temp);
+ }
+ /* For stores, record the save in the frame-related expression. */
+ if (MEM_P (dest))
+ mips_set_frame_expr (mips_frame_set (dest, src));
+}
+
+/* If we're generating n32 or n64 abicalls, and the current function
+   does not use $28 as its global pointer, emit a cplocal directive.
+   Use pic_offset_table_rtx as the argument to the directive.  */
+
+static void
+mips_output_cplocal (void)
+{
+  /* Explicit relocs do not use assembler macros, so no directive is
+     needed.  */
+  if (TARGET_EXPLICIT_RELOCS)
+    return;
+  if (!mips_must_initialize_gp_p ())
+    return;
+  if (cfun->machine->global_pointer == GLOBAL_POINTER_REGNUM)
+    return;
+  output_asm_insn (".cplocal %+", 0);
+}
+
+/* Implement TARGET_OUTPUT_FUNCTION_PROLOGUE. */
+
+static void
+mips_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
+{
+ const char *fnname;
+
+ /* In MIPS16 mode, we may need to generate a non-MIPS16 stub to handle
+ floating-point arguments. */
+ if (TARGET_MIPS16
+ && TARGET_HARD_FLOAT_ABI
+ && crtl->args.info.fp_code != 0)
+ mips16_build_function_stub ();
+
+ /* Get the function name the same way that toplev.c does before calling
+ assemble_start_function. This is needed so that the name used here
+ exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
+ fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
+ mips_start_function_definition (fnname, TARGET_MIPS16);
+
+ /* Output MIPS-specific frame information. */
+ if (!flag_inhibit_size_directive)
+ {
+ const struct mips_frame_info *frame;
+
+ frame = &cfun->machine->frame;
+
+ /* .frame FRAMEREG, FRAMESIZE, RETREG. */
+ /* When a frame pointer is used, the reported size is measured
+ from the frame pointer rather than the incoming stack pointer. */
+ fprintf (file,
+ "\t.frame\t%s," HOST_WIDE_INT_PRINT_DEC ",%s\t\t"
+ "# vars= " HOST_WIDE_INT_PRINT_DEC
+ ", regs= %d/%d"
+ ", args= " HOST_WIDE_INT_PRINT_DEC
+ ", gp= " HOST_WIDE_INT_PRINT_DEC "\n",
+ reg_names[frame_pointer_needed
+ ? HARD_FRAME_POINTER_REGNUM
+ : STACK_POINTER_REGNUM],
+ (frame_pointer_needed
+ ? frame->total_size - frame->hard_frame_pointer_offset
+ : frame->total_size),
+ reg_names[RETURN_ADDR_REGNUM],
+ frame->var_size,
+ frame->num_gp, frame->num_fp,
+ frame->args_size,
+ frame->cprestore_size);
+
+ /* .mask MASK, OFFSET. */
+ fprintf (file, "\t.mask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
+ frame->mask, frame->gp_save_offset);
+
+ /* .fmask MASK, OFFSET. */
+ fprintf (file, "\t.fmask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
+ frame->fmask, frame->fp_save_offset);
+ }
+
+ /* Handle the initialization of $gp for SVR4 PIC, if applicable.
+ Also emit the ".set noreorder; .set nomacro" sequence for functions
+ that need it. */
+ if (mips_must_initialize_gp_p ()
+ && mips_current_loadgp_style () == LOADGP_OLDABI)
+ {
+ if (TARGET_MIPS16)
+ {
+ /* This is a fixed-form sequence. The position of the
+ first two instructions is important because of the
+ way _gp_disp is defined. */
+ output_asm_insn ("li\t$2,%%hi(_gp_disp)", 0);
+ output_asm_insn ("addiu\t$3,$pc,%%lo(_gp_disp)", 0);
+ output_asm_insn ("sll\t$2,16", 0);
+ output_asm_insn ("addu\t$2,$3", 0);
+ }
+ else
+ {
+ /* .cpload must be in a .set noreorder but not a
+ .set nomacro block. */
+ mips_push_asm_switch (&mips_noreorder);
+ output_asm_insn (".cpload\t%^", 0);
+ if (!cfun->machine->all_noreorder_p)
+ mips_pop_asm_switch (&mips_noreorder);
+ else
+ mips_push_asm_switch (&mips_nomacro);
+ }
+ }
+ else if (cfun->machine->all_noreorder_p)
+ {
+ /* The whole function body is a noreorder/nomacro block; the
+ epilogue pops these switches again. */
+ mips_push_asm_switch (&mips_noreorder);
+ mips_push_asm_switch (&mips_nomacro);
+ }
+
+ /* Tell the assembler which register we're using as the global
+ pointer. This is needed for thunks, since they can use either
+ explicit relocs or assembler macros. */
+ mips_output_cplocal ();
+}
+
+/* Implement TARGET_OUTPUT_FUNCTION_EPILOGUE. */
+
+static void
+mips_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
+ HOST_WIDE_INT size ATTRIBUTE_UNUSED)
+{
+ const char *fnname;
+
+ /* Reinstate the normal $gp. */
+ SET_REGNO (pic_offset_table_rtx, GLOBAL_POINTER_REGNUM);
+ mips_output_cplocal ();
+
+ if (cfun->machine->all_noreorder_p)
+ {
+ /* Pop the noreorder/nomacro switches pushed when the prologue
+ was output. */
+ mips_pop_asm_switch (&mips_nomacro);
+ mips_pop_asm_switch (&mips_noreorder);
+ }
+
+ /* Get the function name the same way that toplev.c does before calling
+ assemble_start_function. This is needed so that the name used here
+ exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
+ fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
+ mips_end_function_definition (fnname);
+}
+
+/* Emit an optimisation barrier for accesses to the current frame.  */
+
+static void
+mips_frame_barrier (void)
+{
+  /* A BLKmode clobber of the whole frame keeps frame accesses from
+     being moved across this point.  */
+  rtx frame = gen_frame_mem (BLKmode, stack_pointer_rtx);
+  emit_clobber (frame);
+}
+
+
+/* The __gnu_local_gp symbol. */
+
+static GTY(()) rtx mips_gnu_local_gp;
+
+/* If we're generating n32 or n64 abicalls, emit instructions
+ to set up the global pointer. */
+
+static void
+mips_emit_loadgp (void)
+{
+ rtx addr, offset, incoming_address, base, index, pic_reg;
+
+ /* In MIPS16 mode the value is built in MIPS16_PIC_TEMP and copied
+ into $gp by the copygp_mips16 instruction emitted below. */
+ pic_reg = TARGET_MIPS16 ? MIPS16_PIC_TEMP : pic_offset_table_rtx;
+ switch (mips_current_loadgp_style ())
+ {
+ case LOADGP_ABSOLUTE:
+ /* Load $gp from the lazily-created __gnu_local_gp symbol. */
+ if (mips_gnu_local_gp == NULL)
+ {
+ mips_gnu_local_gp = gen_rtx_SYMBOL_REF (Pmode, "__gnu_local_gp");
+ SYMBOL_REF_FLAGS (mips_gnu_local_gp) |= SYMBOL_FLAG_LOCAL;
+ }
+ emit_insn (PMODE_INSN (gen_loadgp_absolute,
+ (pic_reg, mips_gnu_local_gp)));
+ break;
+
+ case LOADGP_OLDABI:
+ /* Added by mips_output_function_prologue. */
+ break;
+
+ case LOADGP_NEWABI:
+ /* Compute $gp from the incoming function address ($25). */
+ addr = XEXP (DECL_RTL (current_function_decl), 0);
+ offset = mips_unspec_address (addr, SYMBOL_GOTOFF_LOADGP);
+ incoming_address = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
+ emit_insn (PMODE_INSN (gen_loadgp_newabi,
+ (pic_reg, offset, incoming_address)));
+ break;
+
+ case LOADGP_RTP:
+ /* VxWorks RTP mode: use the GOTT base and index symbols. */
+ base = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (VXWORKS_GOTT_BASE));
+ index = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (VXWORKS_GOTT_INDEX));
+ emit_insn (PMODE_INSN (gen_loadgp_rtp, (pic_reg, base, index)));
+ break;
+
+ default:
+ return;
+ }
+
+ if (TARGET_MIPS16)
+ emit_insn (PMODE_INSN (gen_copygp_mips16,
+ (pic_offset_table_rtx, pic_reg)));
+
+ /* Emit a blockage if there are implicit uses of the GP register.
+ This includes profiled functions, because FUNCTION_PROFILE uses
+ a jal macro. */
+ if (!TARGET_EXPLICIT_RELOCS || crtl->profile)
+ emit_insn (gen_loadgp_blockage ());
+}
+
+#define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
+
+/* The probe instructions below address slots at sp - (first + i) with
+ a 16-bit displacement, hence this limit. */
+#if PROBE_INTERVAL > 32768
+#error Cannot use indexed addressing mode for stack probing
+#endif
+
+/* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
+ inclusive. These are offsets from the current stack pointer. */
+
+static void
+mips_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
+{
+ if (TARGET_MIPS16)
+ sorry ("-fstack-check=specific not implemented for MIPS16");
+
+ /* See if we have a constant small number of probes to generate. If so,
+ that's the easy case. */
+ if (first + size <= 32768)
+ {
+ HOST_WIDE_INT i;
+
+ /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
+ it exceeds SIZE. If only one probe is needed, this will not
+ generate any code. Then probe at FIRST + SIZE. */
+ for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
+ emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
+ -(first + i)));
+
+ emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
+ -(first + size)));
+ }
+
+ /* Otherwise, do the same as above, but in a loop. Note that we must be
+ extra careful with variables wrapping around because we might be at
+ the very top (or the very bottom) of the address space and we have
+ to be able to handle this case properly; in particular, we use an
+ equality test for the loop condition. */
+ else
+ {
+ HOST_WIDE_INT rounded_size;
+ /* Both prologue temporaries are used as the loop registers. */
+ rtx r3 = MIPS_PROLOGUE_TEMP (Pmode);
+ rtx r12 = MIPS_PROLOGUE_TEMP2 (Pmode);
+
+ /* Sanity check for the addressing mode we're going to use. */
+ gcc_assert (first <= 32768);
+
+
+ /* Step 1: round SIZE to the previous multiple of the interval. */
+
+ rounded_size = size & -PROBE_INTERVAL;
+
+
+ /* Step 2: compute initial and final value of the loop counter. */
+
+ /* TEST_ADDR = SP + FIRST. */
+ emit_insn (gen_rtx_SET (VOIDmode, r3,
+ plus_constant (Pmode, stack_pointer_rtx,
+ -first)));
+
+ /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
+ if (rounded_size > 32768)
+ {
+ emit_move_insn (r12, GEN_INT (rounded_size));
+ emit_insn (gen_rtx_SET (VOIDmode, r12,
+ gen_rtx_MINUS (Pmode, r3, r12)));
+ }
+ else
+ emit_insn (gen_rtx_SET (VOIDmode, r12,
+ plus_constant (Pmode, r3, -rounded_size)));
+
+
+ /* Step 3: the loop
+
+ while (TEST_ADDR != LAST_ADDR)
+ {
+ TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
+ probe at TEST_ADDR
+ }
+
+ probes at FIRST + N * PROBE_INTERVAL for values of N from 1
+ until it is equal to ROUNDED_SIZE. */
+
+ emit_insn (PMODE_INSN (gen_probe_stack_range, (r3, r3, r12)));
+
+
+ /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
+ that SIZE is equal to ROUNDED_SIZE. */
+
+ if (size != rounded_size)
+ emit_stack_probe (plus_constant (Pmode, r12, rounded_size - size));
+ }
+
+ /* Make sure nothing is scheduled before we are done. */
+ emit_insn (gen_blockage ());
+}
+
+/* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
+ absolute addresses. */
+
+const char *
+mips_output_probe_stack_range (rtx reg1, rtx reg2)
+{
+ static int labelno = 0;
+ char loop_lab[32], end_lab[32], tmp[64];
+ rtx xops[2];
+
+ ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno);
+ ASM_GENERATE_INTERNAL_LABEL (end_lab, "LPSRE", labelno++);
+
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
+
+ /* Jump to END_LAB if TEST_ADDR == LAST_ADDR. */
+ /* NOTE(review): the %(%< ... %) punctuation appears to bracket the
+ sequence in .set noreorder/.set nomacro so that the final store
+ lands in the branch delay slot -- confirm against
+ mips_print_operand_punctuation. */
+ xops[0] = reg1;
+ xops[1] = reg2;
+ strcpy (tmp, "%(%<beq\t%0,%1,");
+ output_asm_insn (strcat (tmp, &end_lab[1]), xops);
+
+ /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
+ xops[1] = GEN_INT (-PROBE_INTERVAL);
+ /* Use a doubleword add when pointers are 64 bits wide. */
+ if (TARGET_64BIT && TARGET_LONG64)
+ output_asm_insn ("daddiu\t%0,%0,%1", xops);
+ else
+ output_asm_insn ("addiu\t%0,%0,%1", xops);
+
+ /* Probe at TEST_ADDR and branch. */
+ fprintf (asm_out_file, "\tb\t");
+ assemble_name_raw (asm_out_file, loop_lab);
+ fputc ('\n', asm_out_file);
+ if (TARGET_64BIT)
+ output_asm_insn ("sd\t$0,0(%0)%)", xops);
+ else
+ output_asm_insn ("sw\t$0,0(%0)%)", xops);
+
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, end_lab);
+
+ return "";
+}
+
+/* A for_each_rtx callback.  Stop the search if *X is a kernel register.  */
+
+static int
+mips_kernel_reg_p (rtx *x, void *data ATTRIBUTE_UNUSED)
+{
+  /* Report a hit as soon as a kernel register is seen.  */
+  if (!REG_P (*x))
+    return 0;
+  return KERNEL_REG_P (REGNO (*x)) ? 1 : 0;
+}
+
+/* Expand the "prologue" pattern. */
+
+void
+mips_expand_prologue (void)
+{
+ const struct mips_frame_info *frame;
+ HOST_WIDE_INT size;
+ unsigned int nargs;
+ rtx insn;
+
+ if (cfun->machine->global_pointer != INVALID_REGNUM)
+ {
+ /* Check whether an insn uses pic_offset_table_rtx, either explicitly
+ or implicitly. If so, we can commit to using a global pointer
+ straight away, otherwise we need to defer the decision. */
+ if (mips_cfun_has_inflexible_gp_ref_p ()
+ || mips_cfun_has_flexible_gp_ref_p ())
+ {
+ cfun->machine->must_initialize_gp_p = true;
+ cfun->machine->must_restore_gp_when_clobbered_p = true;
+ }
+
+ SET_REGNO (pic_offset_table_rtx, cfun->machine->global_pointer);
+ }
+
+ frame = &cfun->machine->frame;
+ size = frame->total_size;
+
+ if (flag_stack_usage_info)
+ current_function_static_stack_size = size;
+
+ /* Emit -fstack-check=specific probes before the frame is touched.
+ Leaf functions that do not call alloca only probe beyond the
+ STACK_CHECK_PROTECT guard area. */
+ if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
+ {
+ if (crtl->is_leaf && !cfun->calls_alloca)
+ {
+ if (size > PROBE_INTERVAL && size > STACK_CHECK_PROTECT)
+ mips_emit_probe_stack_range (STACK_CHECK_PROTECT,
+ size - STACK_CHECK_PROTECT);
+ }
+ else if (size > 0)
+ mips_emit_probe_stack_range (STACK_CHECK_PROTECT, size);
+ }
+
+ /* Save the registers. Allocate up to MIPS_MAX_FIRST_STACK_STEP
+ bytes beforehand; this is enough to cover the register save area
+ without going out of range. */
+ if (((frame->mask | frame->fmask | frame->acc_mask) != 0)
+ || frame->num_cop0_regs > 0)
+ {
+ HOST_WIDE_INT step1;
+
+ step1 = MIN (size, MIPS_MAX_FIRST_STACK_STEP);
+ if (GENERATE_MIPS16E_SAVE_RESTORE)
+ {
+ HOST_WIDE_INT offset;
+ unsigned int mask, regno;
+
+ /* Try to merge argument stores into the save instruction. */
+ nargs = mips16e_collect_argument_saves ();
+
+ /* Build the save instruction. */
+ mask = frame->mask;
+ insn = mips16e_build_save_restore (false, &mask, &offset,
+ nargs, step1);
+ RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
+ mips_frame_barrier ();
+ size -= step1;
+
+ /* Check if we need to save other registers. */
+ for (regno = GP_REG_FIRST; regno < GP_REG_LAST; regno++)
+ if (BITSET_P (mask, regno - GP_REG_FIRST))
+ {
+ offset -= UNITS_PER_WORD;
+ mips_save_restore_reg (word_mode, regno,
+ offset, mips_save_reg);
+ }
+ }
+ else
+ {
+ if (cfun->machine->interrupt_handler_p)
+ {
+ HOST_WIDE_INT offset;
+ rtx mem;
+
+ /* If this interrupt is using a shadow register set, we need to
+ get the stack pointer from the previous register set. */
+ if (cfun->machine->use_shadow_register_set_p)
+ emit_insn (gen_mips_rdpgpr (stack_pointer_rtx,
+ stack_pointer_rtx));
+
+ if (!cfun->machine->keep_interrupts_masked_p)
+ {
+ /* Move from COP0 Cause to K0. */
+ emit_insn (gen_cop0_move (gen_rtx_REG (SImode, K0_REG_NUM),
+ gen_rtx_REG (SImode,
+ COP0_CAUSE_REG_NUM)));
+ /* Move from COP0 EPC to K1. */
+ emit_insn (gen_cop0_move (gen_rtx_REG (SImode, K1_REG_NUM),
+ gen_rtx_REG (SImode,
+ COP0_EPC_REG_NUM)));
+ }
+
+ /* Allocate the first part of the frame. */
+ insn = gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx,
+ GEN_INT (-step1));
+ RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
+ mips_frame_barrier ();
+ size -= step1;
+
+ /* Start at the uppermost location for saving. */
+ offset = frame->cop0_sp_offset - size;
+ if (!cfun->machine->keep_interrupts_masked_p)
+ {
+ /* Push EPC into its stack slot. */
+ mem = gen_frame_mem (word_mode,
+ plus_constant (Pmode, stack_pointer_rtx,
+ offset));
+ mips_emit_move (mem, gen_rtx_REG (word_mode, K1_REG_NUM));
+ offset -= UNITS_PER_WORD;
+ }
+
+ /* Move from COP0 Status to K1. */
+ emit_insn (gen_cop0_move (gen_rtx_REG (SImode, K1_REG_NUM),
+ gen_rtx_REG (SImode,
+ COP0_STATUS_REG_NUM)));
+
+ /* Right justify the RIPL in k0. */
+ if (!cfun->machine->keep_interrupts_masked_p)
+ emit_insn (gen_lshrsi3 (gen_rtx_REG (SImode, K0_REG_NUM),
+ gen_rtx_REG (SImode, K0_REG_NUM),
+ GEN_INT (CAUSE_IPL)));
+
+ /* Push Status into its stack slot. */
+ mem = gen_frame_mem (word_mode,
+ plus_constant (Pmode, stack_pointer_rtx,
+ offset));
+ mips_emit_move (mem, gen_rtx_REG (word_mode, K1_REG_NUM));
+ offset -= UNITS_PER_WORD;
+
+ /* Insert the RIPL into our copy of SR (k1) as the new IPL. */
+ if (!cfun->machine->keep_interrupts_masked_p)
+ emit_insn (gen_insvsi (gen_rtx_REG (SImode, K1_REG_NUM),
+ GEN_INT (6),
+ GEN_INT (SR_IPL),
+ gen_rtx_REG (SImode, K0_REG_NUM)));
+
+ if (!cfun->machine->keep_interrupts_masked_p)
+ /* Enable interrupts by clearing the KSU ERL and EXL bits.
+ IE is already the correct value, so we don't have to do
+ anything explicit. */
+ emit_insn (gen_insvsi (gen_rtx_REG (SImode, K1_REG_NUM),
+ GEN_INT (4),
+ GEN_INT (SR_EXL),
+ gen_rtx_REG (SImode, GP_REG_FIRST)));
+ else
+ /* Disable interrupts by clearing the KSU, ERL, EXL,
+ and IE bits. */
+ emit_insn (gen_insvsi (gen_rtx_REG (SImode, K1_REG_NUM),
+ GEN_INT (5),
+ GEN_INT (SR_IE),
+ gen_rtx_REG (SImode, GP_REG_FIRST)));
+ }
+ else
+ {
+ /* Ordinary function: just allocate the first frame step. */
+ insn = gen_add3_insn (stack_pointer_rtx,
+ stack_pointer_rtx,
+ GEN_INT (-step1));
+ RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
+ mips_frame_barrier ();
+ size -= step1;
+ }
+ mips_for_each_saved_acc (size, mips_save_reg);
+ mips_for_each_saved_gpr_and_fpr (size, mips_save_reg);
+ }
+ }
+
+ /* Allocate the rest of the frame. */
+ if (size > 0)
+ {
+ if (SMALL_OPERAND (-size))
+ RTX_FRAME_RELATED_P (emit_insn (gen_add3_insn (stack_pointer_rtx,
+ stack_pointer_rtx,
+ GEN_INT (-size)))) = 1;
+ else
+ {
+ mips_emit_move (MIPS_PROLOGUE_TEMP (Pmode), GEN_INT (size));
+ if (TARGET_MIPS16)
+ {
+ /* There are no instructions to add or subtract registers
+ from the stack pointer, so use the frame pointer as a
+ temporary. We should always be using a frame pointer
+ in this case anyway. */
+ gcc_assert (frame_pointer_needed);
+ mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
+ emit_insn (gen_sub3_insn (hard_frame_pointer_rtx,
+ hard_frame_pointer_rtx,
+ MIPS_PROLOGUE_TEMP (Pmode)));
+ mips_emit_move (stack_pointer_rtx, hard_frame_pointer_rtx);
+ }
+ else
+ emit_insn (gen_sub3_insn (stack_pointer_rtx,
+ stack_pointer_rtx,
+ MIPS_PROLOGUE_TEMP (Pmode)));
+
+ /* Describe the combined effect of the previous instructions. */
+ mips_set_frame_expr
+ (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
+ plus_constant (Pmode, stack_pointer_rtx, -size)));
+ }
+ mips_frame_barrier ();
+ }
+
+ /* Set up the frame pointer, if we're using one. */
+ if (frame_pointer_needed)
+ {
+ HOST_WIDE_INT offset;
+
+ offset = frame->hard_frame_pointer_offset;
+ if (offset == 0)
+ {
+ insn = mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+ else if (SMALL_OPERAND (offset))
+ {
+ insn = gen_add3_insn (hard_frame_pointer_rtx,
+ stack_pointer_rtx, GEN_INT (offset));
+ RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
+ }
+ else
+ {
+ /* The offset does not fit an immediate add; build it in the
+ prologue temporary and describe the net effect for CFI. */
+ mips_emit_move (MIPS_PROLOGUE_TEMP (Pmode), GEN_INT (offset));
+ mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
+ emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
+ hard_frame_pointer_rtx,
+ MIPS_PROLOGUE_TEMP (Pmode)));
+ mips_set_frame_expr
+ (gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
+ plus_constant (Pmode, stack_pointer_rtx, offset)));
+ }
+ }
+
+ /* Emit the $gp load sequence, if one is needed. */
+ mips_emit_loadgp ();
+
+ /* Initialize the $gp save slot. */
+ if (mips_cfun_has_cprestore_slot_p ())
+ {
+ rtx base, mem, gp, temp;
+ HOST_WIDE_INT offset;
+
+ mips_get_cprestore_base_and_offset (&base, &offset, false);
+ mem = gen_frame_mem (Pmode, plus_constant (Pmode, base, offset));
+ gp = TARGET_MIPS16 ? MIPS16_PIC_TEMP : pic_offset_table_rtx;
+ temp = (SMALL_OPERAND (offset)
+ ? gen_rtx_SCRATCH (Pmode)
+ : MIPS_PROLOGUE_TEMP (Pmode));
+ emit_insn (PMODE_INSN (gen_potential_cprestore,
+ (mem, GEN_INT (offset), gp, temp)));
+
+ mips_get_cprestore_base_and_offset (&base, &offset, true);
+ mem = gen_frame_mem (Pmode, plus_constant (Pmode, base, offset));
+ emit_insn (PMODE_INSN (gen_use_cprestore, (mem)));
+ }
+
+ /* We need to search back to the last use of K0 or K1. */
+ if (cfun->machine->interrupt_handler_p)
+ {
+ for (insn = get_last_insn (); insn != NULL_RTX; insn = PREV_INSN (insn))
+ if (INSN_P (insn)
+ && for_each_rtx (&PATTERN (insn), mips_kernel_reg_p, NULL))
+ break;
+ /* Emit a move from K1 to COP0 Status after insn. */
+ gcc_assert (insn != NULL_RTX);
+ emit_insn_after (gen_cop0_move (gen_rtx_REG (SImode, COP0_STATUS_REG_NUM),
+ gen_rtx_REG (SImode, K1_REG_NUM)),
+ insn);
+ }
+
+ /* If we are profiling, make sure no instructions are scheduled before
+ the call to mcount. */
+ if (crtl->profile)
+ emit_insn (gen_blockage ());
+}
+
+/* Attach all pending register saves to the previous instruction.
+ Return that instruction. */
+
static rtx
mips_epilogue_emit_cfa_restores (void)
{
  rtx insn;

  /* The notes are attached to the last instruction emitted so far;
     the caller guarantees that it exists and has no notes yet.  */
  insn = get_last_insn ();
  gcc_assert (insn && !REG_NOTES (insn));
  if (mips_epilogue.cfa_restores)
    {
      /* CFA notes are only processed on frame-related instructions.  */
      RTX_FRAME_RELATED_P (insn) = 1;
      REG_NOTES (insn) = mips_epilogue.cfa_restores;
      mips_epilogue.cfa_restores = 0;
    }
  return insn;
}
+
+/* Like mips_epilogue_emit_cfa_restores, but also record that the CFA is
+ now at REG + OFFSET. */
+
static void
mips_epilogue_set_cfa (rtx reg, HOST_WIDE_INT offset)
{
  rtx insn;

  insn = mips_epilogue_emit_cfa_restores ();
  /* Only emit a REG_CFA_DEF_CFA note when the CFA definition actually
     changes; a redundant note would just bloat the unwind info.  */
  if (reg != mips_epilogue.cfa_reg || offset != mips_epilogue.cfa_offset)
    {
      RTX_FRAME_RELATED_P (insn) = 1;
      REG_NOTES (insn) = alloc_reg_note (REG_CFA_DEF_CFA,
					 plus_constant (Pmode, reg, offset),
					 REG_NOTES (insn));
      mips_epilogue.cfa_reg = reg;
      mips_epilogue.cfa_offset = offset;
    }
}
+
+/* Emit instructions to restore register REG from slot MEM. Also update
+ the cfa_restores list. */
+
static void
mips_restore_reg (rtx reg, rtx mem)
{
  /* There's no MIPS16 instruction to load $31 directly.  Load into
     $7 instead and adjust the return insn appropriately.  */
  if (TARGET_MIPS16 && REGNO (reg) == RETURN_ADDR_REGNUM)
    reg = gen_rtx_REG (GET_MODE (reg), GP_REG_FIRST + 7);
  else if (GET_MODE (reg) == DFmode && !TARGET_FLOAT64)
    {
      /* Without 64-bit FPRs the DFmode value occupies two registers,
	 so queue a restore note for each half.  */
      mips_add_cfa_restore (mips_subword (reg, true));
      mips_add_cfa_restore (mips_subword (reg, false));
    }
  else
    mips_add_cfa_restore (reg);

  mips_emit_save_slot_move (reg, mem, MIPS_EPILOGUE_TEMP (GET_MODE (reg)));
  if (REGNO (reg) == REGNO (mips_epilogue.cfa_reg))
    /* The CFA is currently defined in terms of the register whose
       value we have just restored.  Redefine the CFA in terms of
       the stack pointer.  */
    mips_epilogue_set_cfa (stack_pointer_rtx,
			   mips_epilogue.cfa_restore_sp_offset);
}
+
+/* Emit code to set the stack pointer to BASE + OFFSET, given that
+ BASE + OFFSET is NEW_FRAME_SIZE bytes below the top of the frame.
+ BASE, if not the stack pointer, is available as a temporary. */
+
static void
mips_deallocate_stack (rtx base, rtx offset, HOST_WIDE_INT new_frame_size)
{
  /* Nothing to do if the stack pointer already has the right value.  */
  if (base == stack_pointer_rtx && offset == const0_rtx)
    return;

  mips_frame_barrier ();
  if (offset == const0_rtx)
    {
      emit_move_insn (stack_pointer_rtx, base);
      mips_epilogue_set_cfa (stack_pointer_rtx, new_frame_size);
    }
  else if (TARGET_MIPS16 && base != stack_pointer_rtx)
    {
      /* MIPS16: do the addition in BASE (available as a temporary, per
	 the contract above) and then copy the result to $sp.  */
      emit_insn (gen_add3_insn (base, base, offset));
      mips_epilogue_set_cfa (base, new_frame_size);
      emit_move_insn (stack_pointer_rtx, base);
    }
  else
    {
      emit_insn (gen_add3_insn (stack_pointer_rtx, base, offset));
      mips_epilogue_set_cfa (stack_pointer_rtx, new_frame_size);
    }
}
+
+/* Emit any instructions needed before a return. */
+
+void
+mips_expand_before_return (void)
+{
+ /* When using a call-clobbered gp, we start out with unified call
+ insns that include instructions to restore the gp. We then split
+ these unified calls after reload. These split calls explicitly
+ clobber gp, so there is no need to define
+ PIC_OFFSET_TABLE_REG_CALL_CLOBBERED.
+
+ For consistency, we should also insert an explicit clobber of $28
+ before return insns, so that the post-reload optimizers know that
+ the register is not live on exit. */
+ if (TARGET_CALL_CLOBBERED_GP)
+ emit_clobber (pic_offset_table_rtx);
+}
+
+/* Expand an "epilogue" or "sibcall_epilogue" pattern; SIBCALL_P
+ says which. */
+
void
mips_expand_epilogue (bool sibcall_p)
{
  const struct mips_frame_info *frame;
  HOST_WIDE_INT step1, step2;
  rtx base, adjust, insn;
  bool use_jraddiusp_p = false;

  /* If a bare "return" pattern suffices (zero frame, no special cases),
     emit it and stop.  */
  if (!sibcall_p && mips_can_use_return_insn ())
    {
      emit_jump_insn (gen_return ());
      return;
    }

  /* In MIPS16 mode, if the return value should go into a floating-point
     register, we need to call a helper routine to copy it over.  */
  if (mips16_cfun_returns_in_fpr_p ())
    mips16_copy_fpr_return_value ();

  /* Split the frame into two.  STEP1 is the amount of stack we should
     deallocate before restoring the registers.  STEP2 is the amount we
     should deallocate afterwards.

     Start off by assuming that no registers need to be restored.  */
  frame = &cfun->machine->frame;
  step1 = frame->total_size;
  step2 = 0;

  /* Work out which register holds the frame address.  */
  if (!frame_pointer_needed)
    base = stack_pointer_rtx;
  else
    {
      base = hard_frame_pointer_rtx;
      step1 -= frame->hard_frame_pointer_offset;
    }
  mips_epilogue.cfa_reg = base;
  mips_epilogue.cfa_offset = step1;
  mips_epilogue.cfa_restores = NULL_RTX;

  /* If we need to restore registers, deallocate as much stack as
     possible in the second step without going out of range.  */
  if ((frame->mask | frame->fmask | frame->acc_mask) != 0
      || frame->num_cop0_regs > 0)
    {
      step2 = MIN (step1, MIPS_MAX_FIRST_STACK_STEP);
      step1 -= step2;
    }

  /* Get an rtx for STEP1 that we can add to BASE.  */
  adjust = GEN_INT (step1);
  if (!SMALL_OPERAND (step1))
    {
      mips_emit_move (MIPS_EPILOGUE_TEMP (Pmode), adjust);
      adjust = MIPS_EPILOGUE_TEMP (Pmode);
    }
  mips_deallocate_stack (base, adjust, step2);

  /* If we're using addressing macros, $gp is implicitly used by all
     SYMBOL_REFs.  We must emit a blockage insn before restoring $gp
     from the stack.  */
  if (TARGET_CALL_SAVED_GP && !TARGET_EXPLICIT_RELOCS)
    emit_insn (gen_blockage ());

  mips_epilogue.cfa_restore_sp_offset = step2;
  if (GENERATE_MIPS16E_SAVE_RESTORE && frame->mask != 0)
    {
      unsigned int regno, mask;
      HOST_WIDE_INT offset;
      rtx restore;

      /* Generate the restore instruction.  */
      mask = frame->mask;
      restore = mips16e_build_save_restore (true, &mask, &offset, 0, step2);

      /* Restore any other registers manually.  */
      for (regno = GP_REG_FIRST; regno < GP_REG_LAST; regno++)
	if (BITSET_P (mask, regno - GP_REG_FIRST))
	  {
	    offset -= UNITS_PER_WORD;
	    mips_save_restore_reg (word_mode, regno, offset, mips_restore_reg);
	  }

      /* Restore the remaining registers and deallocate the final bit
	 of the frame.  */
      mips_frame_barrier ();
      emit_insn (restore);
      mips_epilogue_set_cfa (stack_pointer_rtx, 0);
    }
  else
    {
      /* Restore the registers.  */
      mips_for_each_saved_acc (frame->total_size - step2, mips_restore_reg);
      mips_for_each_saved_gpr_and_fpr (frame->total_size - step2,
				       mips_restore_reg);

      if (cfun->machine->interrupt_handler_p)
	{
	  HOST_WIDE_INT offset;
	  rtx mem;

	  offset = frame->cop0_sp_offset - (frame->total_size - step2);
	  if (!cfun->machine->keep_interrupts_masked_p)
	    {
	      /* Restore the original EPC.  */
	      mem = gen_frame_mem (word_mode,
				   plus_constant (Pmode, stack_pointer_rtx,
						  offset));
	      mips_emit_move (gen_rtx_REG (word_mode, K0_REG_NUM), mem);
	      offset -= UNITS_PER_WORD;

	      /* Move to COP0 EPC.  */
	      emit_insn (gen_cop0_move (gen_rtx_REG (SImode, COP0_EPC_REG_NUM),
					gen_rtx_REG (SImode, K0_REG_NUM)));
	    }

	  /* Restore the original Status.  */
	  mem = gen_frame_mem (word_mode,
			       plus_constant (Pmode, stack_pointer_rtx,
					      offset));
	  mips_emit_move (gen_rtx_REG (word_mode, K0_REG_NUM), mem);
	  offset -= UNITS_PER_WORD;

	  /* If we don't use shadow register set, we need to update SP.  */
	  if (!cfun->machine->use_shadow_register_set_p)
	    mips_deallocate_stack (stack_pointer_rtx, GEN_INT (step2), 0);
	  else
	    /* The choice of position is somewhat arbitrary in this case.  */
	    mips_epilogue_emit_cfa_restores ();

	  /* Move to COP0 Status.  */
	  emit_insn (gen_cop0_move (gen_rtx_REG (SImode, COP0_STATUS_REG_NUM),
				    gen_rtx_REG (SImode, K0_REG_NUM)));
	}
      else if (TARGET_MICROMIPS
	       && !crtl->calls_eh_return
	       && !sibcall_p
	       && step2 > 0
	       && mips_unsigned_immediate_p (step2, 5, 2))
	/* microMIPS JRADDIUSP can deallocate STEP2 bytes as part of the
	   return itself; defer the deallocation to the return insn.  */
	use_jraddiusp_p = true;
      else
	/* Deallocate the final bit of the frame.  */
	mips_deallocate_stack (stack_pointer_rtx, GEN_INT (step2), 0);
    }

  /* All queued CFA restores should have been attached by now, unless
     JRADDIUSP defers the final CFA update to the return insn below.  */
  if (!use_jraddiusp_p)
    gcc_assert (!mips_epilogue.cfa_restores);

  /* Add in the __builtin_eh_return stack adjustment.  We need to
     use a temporary in MIPS16 code.  */
  if (crtl->calls_eh_return)
    {
      if (TARGET_MIPS16)
	{
	  mips_emit_move (MIPS_EPILOGUE_TEMP (Pmode), stack_pointer_rtx);
	  emit_insn (gen_add3_insn (MIPS_EPILOGUE_TEMP (Pmode),
				    MIPS_EPILOGUE_TEMP (Pmode),
				    EH_RETURN_STACKADJ_RTX));
	  mips_emit_move (stack_pointer_rtx, MIPS_EPILOGUE_TEMP (Pmode));
	}
      else
	emit_insn (gen_add3_insn (stack_pointer_rtx,
				  stack_pointer_rtx,
				  EH_RETURN_STACKADJ_RTX));
    }

  if (!sibcall_p)
    {
      mips_expand_before_return ();
      if (cfun->machine->interrupt_handler_p)
	{
	  /* Interrupt handlers generate eret or deret.  */
	  if (cfun->machine->use_debug_exception_return_p)
	    emit_jump_insn (gen_mips_deret ());
	  else
	    emit_jump_insn (gen_mips_eret ());
	}
      else
	{
	  rtx pat;

	  /* When generating MIPS16 code, the normal
	     mips_for_each_saved_gpr_and_fpr path will restore the return
	     address into $7 rather than $31.  */
	  if (TARGET_MIPS16
	      && !GENERATE_MIPS16E_SAVE_RESTORE
	      && BITSET_P (frame->mask, RETURN_ADDR_REGNUM))
	    {
	      /* simple_returns cannot rely on values that are only available
		 on paths through the epilogue (because return paths that do
		 not pass through the epilogue may nevertheless reuse a
		 simple_return that occurs at the end of the epilogue).
		 Use a normal return here instead.  */
	      rtx reg = gen_rtx_REG (Pmode, GP_REG_FIRST + 7);
	      pat = gen_return_internal (reg);
	    }
	  else if (use_jraddiusp_p)
	    pat = gen_jraddiusp (GEN_INT (step2));
	  else
	    {
	      rtx reg = gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM);
	      pat = gen_simple_return_internal (reg);
	    }
	  emit_jump_insn (pat);
	  if (use_jraddiusp_p)
	    mips_epilogue_set_cfa (stack_pointer_rtx, step2);
	}
    }

  /* Search from the beginning to the first use of K0 or K1.  */
  if (cfun->machine->interrupt_handler_p
      && !cfun->machine->keep_interrupts_masked_p)
    {
      for (insn = get_insns (); insn != NULL_RTX; insn = NEXT_INSN (insn))
	if (INSN_P (insn)
	    && for_each_rtx (&PATTERN(insn), mips_kernel_reg_p, NULL))
	  break;
      gcc_assert (insn != NULL_RTX);
      /* Insert disable interrupts before the first use of K0 or K1.  */
      emit_insn_before (gen_mips_di (), insn);
      emit_insn_before (gen_mips_ehb (), insn);
    }
}
+
+/* Return nonzero if this function is known to have a null epilogue.
+ This allows the optimizer to omit jumps to jumps if no stack
+ was created. */
+
+bool
+mips_can_use_return_insn (void)
+{
+ /* Interrupt handlers need to go through the epilogue. */
+ if (cfun->machine->interrupt_handler_p)
+ return false;
+
+ if (!reload_completed)
+ return false;
+
+ if (crtl->profile)
+ return false;
+
+ /* In MIPS16 mode, a function that returns a floating-point value
+ needs to arrange to copy the return value into the floating-point
+ registers. */
+ if (mips16_cfun_returns_in_fpr_p ())
+ return false;
+
+ return cfun->machine->frame.total_size == 0;
+}
+
+/* Return true if register REGNO can store a value of mode MODE.
+ The result of this function is cached in mips_hard_regno_mode_ok. */
+
static bool
mips_hard_regno_mode_ok_p (unsigned int regno, enum machine_mode mode)
{
  unsigned int size;
  enum mode_class mclass;

  /* CCV2 pairs and CCV4 quads need the eight-condition-code ISA and
     must start on a suitably-aligned condition-code register.  */
  if (mode == CCV2mode)
    return (ISA_HAS_8CC
	    && ST_REG_P (regno)
	    && (regno - ST_REG_FIRST) % 2 == 0);

  if (mode == CCV4mode)
    return (ISA_HAS_8CC
	    && ST_REG_P (regno)
	    && (regno - ST_REG_FIRST) % 4 == 0);

  /* Without ISA_HAS_8CC, the only FP condition register is FPSW.  */
  if (mode == CCmode)
    return ISA_HAS_8CC ? ST_REG_P (regno) : regno == FPSW_REGNUM;

  size = GET_MODE_SIZE (mode);
  mclass = GET_MODE_CLASS (mode);

  /* GPRs can hold anything word-sized or smaller; wider values must
     start in an even-numbered register.  */
  if (GP_REG_P (regno))
    return ((regno - GP_REG_FIRST) & 1) == 0 || size <= UNITS_PER_WORD;

  if (FP_REG_P (regno)
      && (((regno - FP_REG_FIRST) % MAX_FPRS_PER_FMT) == 0
	  || (MIN_FPRS_PER_FMT == 1 && size <= UNITS_PER_FPREG)))
    {
      /* Allow 64-bit vector modes for Loongson-2E/2F.  */
      if (TARGET_LOONGSON_VECTORS
	  && (mode == V2SImode
	      || mode == V4HImode
	      || mode == V8QImode
	      || mode == DImode))
	return true;

      if (mclass == MODE_FLOAT
	  || mclass == MODE_COMPLEX_FLOAT
	  || mclass == MODE_VECTOR_FLOAT)
	return size <= UNITS_PER_FPVALUE;

      /* Allow integer modes that fit into a single register.  We need
	 to put integers into FPRs when using instructions like CVT
	 and TRUNC.  There's no point allowing sizes smaller than a word,
	 because the FPU has no appropriate load/store instructions.  */
      if (mclass == MODE_INT)
	return size >= MIN_UNITS_PER_WORD && size <= UNITS_PER_FPREG;
    }

  if (ACC_REG_P (regno)
      && (INTEGRAL_MODE_P (mode) || ALL_FIXED_POINT_MODE_P (mode)))
    {
      if (MD_REG_P (regno))
	{
	  /* After a multiplication or division, clobbering HI makes
	     the value of LO unpredictable, and vice versa.  This means
	     that, for all interesting cases, HI and LO are effectively
	     a single register.

	     We model this by requiring that any value that uses HI
	     also uses LO.  */
	  if (size <= UNITS_PER_WORD * 2)
	    return regno == (size <= UNITS_PER_WORD ? LO_REGNUM : MD_REG_FIRST);
	}
      else
	{
	  /* DSP accumulators do not have the same restrictions as
	     HI and LO, so we can treat them as normal doubleword
	     registers.  */
	  if (size <= UNITS_PER_WORD)
	    return true;

	  if (size <= UNITS_PER_WORD * 2
	      && ((regno - DSP_ACC_REG_FIRST) & 1) == 0)
	    return true;
	}
    }

  /* Coprocessor registers only accept word-sized integers.  */
  if (ALL_COP_REG_P (regno))
    return mclass == MODE_INT && size <= UNITS_PER_WORD;

  /* The GOT-version register is only ever used in SImode.  */
  if (regno == GOT_VERSION_REGNUM)
    return mode == SImode;

  return false;
}
+
+/* Implement HARD_REGNO_NREGS. */
+
+unsigned int
+mips_hard_regno_nregs (int regno, enum machine_mode mode)
+{
+ if (ST_REG_P (regno))
+ /* The size of FP status registers is always 4, because they only hold
+ CCmode values, and CCmode is always considered to be 4 bytes wide. */
+ return (GET_MODE_SIZE (mode) + 3) / 4;
+
+ if (FP_REG_P (regno))
+ return (GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG;
+
+ /* All other registers are word-sized. */
+ return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
+}
+
+/* Implement CLASS_MAX_NREGS, taking the maximum of the cases
+ in mips_hard_regno_nregs. */
+
int
mips_class_max_nregs (enum reg_class rclass, enum machine_mode mode)
{
  int size;
  HARD_REG_SET left;

  /* SIZE becomes the smallest per-register byte size of any register
     in RCLASS that can hold MODE; start with a value larger than any
     real register so MIN picks the first genuine candidate.  */
  size = 0x8000;
  COPY_HARD_REG_SET (left, reg_class_contents[(int) rclass]);
  if (hard_reg_set_intersect_p (left, reg_class_contents[(int) ST_REGS]))
    {
      if (HARD_REGNO_MODE_OK (ST_REG_FIRST, mode))
	size = MIN (size, 4);
      AND_COMPL_HARD_REG_SET (left, reg_class_contents[(int) ST_REGS]);
    }
  if (hard_reg_set_intersect_p (left, reg_class_contents[(int) FP_REGS]))
    {
      if (HARD_REGNO_MODE_OK (FP_REG_FIRST, mode))
	size = MIN (size, UNITS_PER_FPREG);
      AND_COMPL_HARD_REG_SET (left, reg_class_contents[(int) FP_REGS]);
    }
  /* Any remaining registers are word-sized.  */
  if (!hard_reg_set_empty_p (left))
    size = MIN (size, UNITS_PER_WORD);
  return (GET_MODE_SIZE (mode) + size - 1) / size;
}
+
+/* Implement CANNOT_CHANGE_MODE_CLASS. */
+
+bool
+mips_cannot_change_mode_class (enum machine_mode from,
+ enum machine_mode to,
+ enum reg_class rclass)
+{
+ /* Allow conversions between different Loongson integer vectors,
+ and between those vectors and DImode. */
+ if (GET_MODE_SIZE (from) == 8 && GET_MODE_SIZE (to) == 8
+ && INTEGRAL_MODE_P (from) && INTEGRAL_MODE_P (to))
+ return false;
+
+ /* Otherwise, there are several problems with changing the modes of
+ values in floating-point registers:
+
+ - When a multi-word value is stored in paired floating-point
+ registers, the first register always holds the low word. We
+ therefore can't allow FPRs to change between single-word and
+ multi-word modes on big-endian targets.
+
+ - GCC assumes that each word of a multiword register can be
+ accessed individually using SUBREGs. This is not true for
+ floating-point registers if they are bigger than a word.
+
+ - Loading a 32-bit value into a 64-bit floating-point register
+ will not sign-extend the value, despite what LOAD_EXTEND_OP
+ says. We can't allow FPRs to change from SImode to a wider
+ mode on 64-bit targets.
+
+ - If the FPU has already interpreted a value in one format, we
+ must not ask it to treat the value as having a different
+ format.
+
+ We therefore disallow all mode changes involving FPRs. */
+
+ return reg_classes_intersect_p (FP_REGS, rclass);
+}
+
+/* Implement target hook small_register_classes_for_mode_p. */
+
+static bool
+mips_small_register_classes_for_mode_p (enum machine_mode mode
+ ATTRIBUTE_UNUSED)
+{
+ return TARGET_MIPS16;
+}
+
+/* Return true if moves in mode MODE can use the FPU's mov.fmt instruction. */
+
+static bool
+mips_mode_ok_for_mov_fmt_p (enum machine_mode mode)
+{
+ switch (mode)
+ {
+ case SFmode:
+ return TARGET_HARD_FLOAT;
+
+ case DFmode:
+ return TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT;
+
+ case V2SFmode:
+ return TARGET_HARD_FLOAT && TARGET_PAIRED_SINGLE_FLOAT;
+
+ default:
+ return false;
+ }
+}
+
+/* Implement MODES_TIEABLE_P. */
+
+bool
+mips_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
+{
+ /* FPRs allow no mode punning, so it's not worth tying modes if we'd
+ prefer to put one of them in FPRs. */
+ return (mode1 == mode2
+ || (!mips_mode_ok_for_mov_fmt_p (mode1)
+ && !mips_mode_ok_for_mov_fmt_p (mode2)));
+}
+
+/* Implement TARGET_PREFERRED_RELOAD_CLASS. */
+
static reg_class_t
mips_preferred_reload_class (rtx x, reg_class_t rclass)
{
  /* Highest priority: constants that are dangerous to load into $25
     must go into some other general register.  */
  if (mips_dangerous_for_la25_p (x) && reg_class_subset_p (LEA_REGS, rclass))
    return LEA_REGS;

  /* Values that mov.fmt can handle are best kept in FPRs.  */
  if (reg_class_subset_p (FP_REGS, rclass)
      && mips_mode_ok_for_mov_fmt_p (GET_MODE (x)))
    return FP_REGS;

  /* Otherwise narrow the class to general registers, and further to
     M16_REGS when compiling MIPS16 code.  */
  if (reg_class_subset_p (GR_REGS, rclass))
    rclass = GR_REGS;

  if (TARGET_MIPS16 && reg_class_subset_p (M16_REGS, rclass))
    rclass = M16_REGS;

  return rclass;
}
+
+/* RCLASS is a class involved in a REGISTER_MOVE_COST calculation.
+ Return a "canonical" class to represent it in later calculations. */
+
+static reg_class_t
+mips_canonicalize_move_class (reg_class_t rclass)
+{
+ /* All moves involving accumulator registers have the same cost. */
+ if (reg_class_subset_p (rclass, ACC_REGS))
+ rclass = ACC_REGS;
+
+ /* Likewise promote subclasses of general registers to the most
+ interesting containing class. */
+ if (TARGET_MIPS16 && reg_class_subset_p (rclass, M16_REGS))
+ rclass = M16_REGS;
+ else if (reg_class_subset_p (rclass, GENERAL_REGS))
+ rclass = GENERAL_REGS;
+
+ return rclass;
+}
+
+/* Return the cost of moving a value of mode MODE from a register of
+ class FROM to a GPR. Return 0 for classes that are unions of other
+ classes handled by this function. */
+
+static int
+mips_move_to_gpr_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
+ reg_class_t from)
+{
+ switch (from)
+ {
+ case M16_REGS:
+ case GENERAL_REGS:
+ /* A MIPS16 MOVE instruction, or a non-MIPS16 MOVE macro. */
+ return 2;
+
+ case ACC_REGS:
+ /* MFLO and MFHI. */
+ return 6;
+
+ case FP_REGS:
+ /* MFC1, etc. */
+ return 4;
+
+ case ST_REGS:
+ /* LUI followed by MOVF. */
+ return 4;
+
+ case COP0_REGS:
+ case COP2_REGS:
+ case COP3_REGS:
+ /* This choice of value is historical. */
+ return 5;
+
+ default:
+ return 0;
+ }
+}
+
+/* Return the cost of moving a value of mode MODE from a GPR to a
+ register of class TO. Return 0 for classes that are unions of
+ other classes handled by this function. */
+
+static int
+mips_move_from_gpr_cost (enum machine_mode mode, reg_class_t to)
+{
+ switch (to)
+ {
+ case M16_REGS:
+ case GENERAL_REGS:
+ /* A MIPS16 MOVE instruction, or a non-MIPS16 MOVE macro. */
+ return 2;
+
+ case ACC_REGS:
+ /* MTLO and MTHI. */
+ return 6;
+
+ case FP_REGS:
+ /* MTC1, etc. */
+ return 4;
+
+ case ST_REGS:
+ /* A secondary reload through an FPR scratch. */
+ return (mips_register_move_cost (mode, GENERAL_REGS, FP_REGS)
+ + mips_register_move_cost (mode, FP_REGS, ST_REGS));
+
+ case COP0_REGS:
+ case COP2_REGS:
+ case COP3_REGS:
+ /* This choice of value is historical. */
+ return 5;
+
+ default:
+ return 0;
+ }
+}
+
+/* Implement TARGET_REGISTER_MOVE_COST. Return 0 for classes that are the
+ maximum of the move costs for subclasses; regclass will work out
+ the maximum for us. */
+
static int
mips_register_move_cost (enum machine_mode mode,
			 reg_class_t from, reg_class_t to)
{
  reg_class_t dregs;
  int cost1, cost2;

  from = mips_canonicalize_move_class (from);
  to = mips_canonicalize_move_class (to);

  /* Handle moves that can be done without using general-purpose registers.  */
  if (from == FP_REGS)
    {
      if (to == FP_REGS && mips_mode_ok_for_mov_fmt_p (mode))
	/* MOV.FMT.  */
	return 4;
      if (to == ST_REGS)
	/* The sequence generated by mips_expand_fcc_reload.  */
	return 8;
    }

  /* Handle cases in which only one class deviates from the ideal.  */
  dregs = TARGET_MIPS16 ? M16_REGS : GENERAL_REGS;
  if (from == dregs)
    return mips_move_from_gpr_cost (mode, to);
  if (to == dregs)
    return mips_move_to_gpr_cost (mode, from);

  /* Handles cases that require a GPR temporary.  */
  cost1 = mips_move_to_gpr_cost (mode, from);
  if (cost1 != 0)
    {
      cost2 = mips_move_from_gpr_cost (mode, to);
      if (cost2 != 0)
	return cost1 + cost2;
    }

  /* Zero tells the caller (regclass) to take the maximum over
     subclasses instead.  */
  return 0;
}
+
+/* Implement TARGET_MEMORY_MOVE_COST. */
+
+static int
+mips_memory_move_cost (enum machine_mode mode, reg_class_t rclass, bool in)
+{
+ return (mips_cost->memory_latency
+ + memory_move_secondary_cost (mode, rclass, in));
+}
+
+/* Return the register class required for a secondary register when
+ copying between one of the registers in RCLASS and value X, which
+ has mode MODE. X is the source of the move if IN_P, otherwise it
+ is the destination. Return NO_REGS if no secondary register is
+ needed. */
+
enum reg_class
mips_secondary_reload_class (enum reg_class rclass,
			     enum machine_mode mode, rtx x, bool in_p)
{
  int regno;

  /* If X is a constant that cannot be loaded into $25, it must be loaded
     into some other GPR.  No other register class allows a direct move.  */
  if (mips_dangerous_for_la25_p (x))
    return reg_class_subset_p (rclass, LEA_REGS) ? NO_REGS : LEA_REGS;

  /* REGNO is the hard register X will end up in, or -1 if unknown.  */
  regno = true_regnum (x);
  if (TARGET_MIPS16)
    {
      /* In MIPS16 mode, every move must involve a member of M16_REGS.  */
      if (!reg_class_subset_p (rclass, M16_REGS) && !M16_REG_P (regno))
	return M16_REGS;

      return NO_REGS;
    }

  /* Copying from accumulator registers to anywhere other than a general
     register requires a temporary general register.  */
  if (reg_class_subset_p (rclass, ACC_REGS))
    return GP_REG_P (regno) ? NO_REGS : GR_REGS;
  if (ACC_REG_P (regno))
    return reg_class_subset_p (rclass, GR_REGS) ? NO_REGS : GR_REGS;

  /* We can only copy a value to a condition code register from a
     floating-point register, and even then we require a scratch
     floating-point register.  We can only copy a value out of a
     condition-code register into a general register.  */
  if (reg_class_subset_p (rclass, ST_REGS))
    {
      if (in_p)
	return FP_REGS;
      return GP_REG_P (regno) ? NO_REGS : GR_REGS;
    }
  if (ST_REG_P (regno))
    {
      if (!in_p)
	return FP_REGS;
      return reg_class_subset_p (rclass, GR_REGS) ? NO_REGS : GR_REGS;
    }

  if (reg_class_subset_p (rclass, FP_REGS))
    {
      if (MEM_P (x)
	  && (GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8))
	/* In this case we can use lwc1, swc1, ldc1 or sdc1.  We'll use
	   pairs of lwc1s and swc1s if ldc1 and sdc1 are not supported.  */
	return NO_REGS;

      if (GP_REG_P (regno) || x == CONST0_RTX (mode))
	/* In this case we can use mtc1, mfc1, dmtc1 or dmfc1.  */
	return NO_REGS;

      if (CONSTANT_P (x) && !targetm.cannot_force_const_mem (mode, x))
	/* We can force the constant to memory and use lwc1
	   and ldc1.  As above, we will use pairs of lwc1s if
	   ldc1 is not supported.  */
	return NO_REGS;

      if (FP_REG_P (regno) && mips_mode_ok_for_mov_fmt_p (mode))
	/* In this case we can use mov.fmt.  */
	return NO_REGS;

      /* Otherwise, we need to reload through an integer register.  */
      return GR_REGS;
    }
  if (FP_REG_P (regno))
    return reg_class_subset_p (rclass, GR_REGS) ? NO_REGS : GR_REGS;

  return NO_REGS;
}
+
+/* Implement TARGET_MODE_REP_EXTENDED. */
+
+static int
+mips_mode_rep_extended (enum machine_mode mode, enum machine_mode mode_rep)
+{
+ /* On 64-bit targets, SImode register values are sign-extended to DImode. */
+ if (TARGET_64BIT && mode == SImode && mode_rep == DImode)
+ return SIGN_EXTEND;
+
+ return UNKNOWN;
+}
+
+/* Implement TARGET_VALID_POINTER_MODE. */
+
+static bool
+mips_valid_pointer_mode (enum machine_mode mode)
+{
+ return mode == SImode || (TARGET_64BIT && mode == DImode);
+}
+
+/* Implement TARGET_VECTOR_MODE_SUPPORTED_P. */
+
+static bool
+mips_vector_mode_supported_p (enum machine_mode mode)
+{
+ switch (mode)
+ {
+ case V2SFmode:
+ return TARGET_PAIRED_SINGLE_FLOAT;
+
+ case V2HImode:
+ case V4QImode:
+ case V2HQmode:
+ case V2UHQmode:
+ case V2HAmode:
+ case V2UHAmode:
+ case V4QQmode:
+ case V4UQQmode:
+ return TARGET_DSP;
+
+ case V2SImode:
+ case V4HImode:
+ case V8QImode:
+ return TARGET_LOONGSON_VECTORS;
+
+ default:
+ return false;
+ }
+}
+
+/* Implement TARGET_SCALAR_MODE_SUPPORTED_P. */
+
+static bool
+mips_scalar_mode_supported_p (enum machine_mode mode)
+{
+ if (ALL_FIXED_POINT_MODE_P (mode)
+ && GET_MODE_PRECISION (mode) <= 2 * BITS_PER_WORD)
+ return true;
+
+ return default_scalar_mode_supported_p (mode);
+}
+
+/* Implement TARGET_VECTORIZE_PREFERRED_SIMD_MODE. */
+
+static enum machine_mode
+mips_preferred_simd_mode (enum machine_mode mode ATTRIBUTE_UNUSED)
+{
+ if (TARGET_PAIRED_SINGLE_FLOAT
+ && mode == SFmode)
+ return V2SFmode;
+ return word_mode;
+}
+
+/* Implement TARGET_INIT_LIBFUNCS. */
+
static void
mips_init_libfuncs (void)
{
  if (TARGET_FIX_VR4120)
    {
      /* Register the special divsi3 and modsi3 functions needed to work
	 around VR4120 division errata.  */
      set_optab_libfunc (sdiv_optab, SImode, "__vr4120_divsi3");
      set_optab_libfunc (smod_optab, SImode, "__vr4120_modsi3");
    }

  if (TARGET_MIPS16 && TARGET_HARD_FLOAT_ABI)
    {
      /* Register the MIPS16 -mhard-float stubs.  */
      /* Single-precision arithmetic.  */
      set_optab_libfunc (add_optab, SFmode, "__mips16_addsf3");
      set_optab_libfunc (sub_optab, SFmode, "__mips16_subsf3");
      set_optab_libfunc (smul_optab, SFmode, "__mips16_mulsf3");
      set_optab_libfunc (sdiv_optab, SFmode, "__mips16_divsf3");

      /* Single-precision comparisons.  */
      set_optab_libfunc (eq_optab, SFmode, "__mips16_eqsf2");
      set_optab_libfunc (ne_optab, SFmode, "__mips16_nesf2");
      set_optab_libfunc (gt_optab, SFmode, "__mips16_gtsf2");
      set_optab_libfunc (ge_optab, SFmode, "__mips16_gesf2");
      set_optab_libfunc (lt_optab, SFmode, "__mips16_ltsf2");
      set_optab_libfunc (le_optab, SFmode, "__mips16_lesf2");
      set_optab_libfunc (unord_optab, SFmode, "__mips16_unordsf2");

      /* Single-precision conversions.  */
      set_conv_libfunc (sfix_optab, SImode, SFmode, "__mips16_fix_truncsfsi");
      set_conv_libfunc (sfloat_optab, SFmode, SImode, "__mips16_floatsisf");
      set_conv_libfunc (ufloat_optab, SFmode, SImode, "__mips16_floatunsisf");

      if (TARGET_DOUBLE_FLOAT)
	{
	  /* Double-precision arithmetic.  */
	  set_optab_libfunc (add_optab, DFmode, "__mips16_adddf3");
	  set_optab_libfunc (sub_optab, DFmode, "__mips16_subdf3");
	  set_optab_libfunc (smul_optab, DFmode, "__mips16_muldf3");
	  set_optab_libfunc (sdiv_optab, DFmode, "__mips16_divdf3");

	  /* Double-precision comparisons.  */
	  set_optab_libfunc (eq_optab, DFmode, "__mips16_eqdf2");
	  set_optab_libfunc (ne_optab, DFmode, "__mips16_nedf2");
	  set_optab_libfunc (gt_optab, DFmode, "__mips16_gtdf2");
	  set_optab_libfunc (ge_optab, DFmode, "__mips16_gedf2");
	  set_optab_libfunc (lt_optab, DFmode, "__mips16_ltdf2");
	  set_optab_libfunc (le_optab, DFmode, "__mips16_ledf2");
	  set_optab_libfunc (unord_optab, DFmode, "__mips16_unorddf2");

	  /* Double-precision conversions.  */
	  set_conv_libfunc (sext_optab, DFmode, SFmode,
			    "__mips16_extendsfdf2");
	  set_conv_libfunc (trunc_optab, SFmode, DFmode,
			    "__mips16_truncdfsf2");
	  set_conv_libfunc (sfix_optab, SImode, DFmode,
			    "__mips16_fix_truncdfsi");
	  set_conv_libfunc (sfloat_optab, DFmode, SImode,
			    "__mips16_floatsidf");
	  set_conv_libfunc (ufloat_optab, DFmode, SImode,
			    "__mips16_floatunsidf");
	}
    }

  /* The MIPS16 ISA does not have an encoding for "sync", so we rely
     on an external non-MIPS16 routine to implement __sync_synchronize.
     Similarly for the rest of the ll/sc libfuncs.  */
  if (TARGET_MIPS16)
    {
      synchronize_libfunc = init_one_libfunc ("__sync_synchronize");
      init_sync_libfuncs (UNITS_PER_WORD);
    }
}
+
+/* Build up a multi-insn sequence that loads label TARGET into $AT. */
+
static void
mips_process_load_label (rtx target)
{
  rtx base, gp, intop;
  HOST_WIDE_INT offset;

  mips_multi_start ();
  switch (mips_abi)
    {
    case ABI_N32:
      /* Load the GOT page entry for TARGET and add the page offset.  */
      mips_multi_add_insn ("lw\t%@,%%got_page(%0)(%+)", target, 0);
      mips_multi_add_insn ("addiu\t%@,%@,%%got_ofst(%0)", target, 0);
      break;

    case ABI_64:
      /* As for n32, but with 64-bit loads and adds.  */
      mips_multi_add_insn ("ld\t%@,%%got_page(%0)(%+)", target, 0);
      mips_multi_add_insn ("daddiu\t%@,%@,%%got_ofst(%0)", target, 0);
      break;

    default:
      gp = pic_offset_table_rtx;
      if (mips_cfun_has_cprestore_slot_p ())
	{
	  /* Reload $gp from the cprestore slot into $AT first; a large
	     slot offset must be split into high and low parts.  */
	  gp = gen_rtx_REG (Pmode, AT_REGNUM);
	  mips_get_cprestore_base_and_offset (&base, &offset, true);
	  if (!SMALL_OPERAND (offset))
	    {
	      intop = GEN_INT (CONST_HIGH_PART (offset));
	      mips_multi_add_insn ("lui\t%0,%1", gp, intop, 0);
	      mips_multi_add_insn ("addu\t%0,%0,%1", gp, base, 0);

	      base = gp;
	      offset = CONST_LOW_PART (offset);
	    }
	  intop = GEN_INT (offset);
	  /* Use the "%#" form when the ISA has a load delay slot.  */
	  if (ISA_HAS_LOAD_DELAY)
	    mips_multi_add_insn ("lw\t%0,%1(%2)%#", gp, intop, base, 0);
	  else
	    mips_multi_add_insn ("lw\t%0,%1(%2)", gp, intop, base, 0);
	}
      if (ISA_HAS_LOAD_DELAY)
	mips_multi_add_insn ("lw\t%@,%%got(%0)(%1)%#", target, gp, 0);
      else
	mips_multi_add_insn ("lw\t%@,%%got(%0)(%1)", target, gp, 0);
      mips_multi_add_insn ("addiu\t%@,%@,%%lo(%0)", target, 0);
      break;
    }
}
+
+/* Return the number of instructions needed to load a label into $AT. */
+
+static unsigned int
+mips_load_label_num_insns (void)
+{
+ if (cfun->machine->load_label_num_insns == 0)
+ {
+ mips_process_load_label (pc_rtx);
+ cfun->machine->load_label_num_insns = mips_multi_num_insns;
+ }
+ return cfun->machine->load_label_num_insns;
+}
+
+/* Emit an asm sequence to start a noat block and load the address
+ of a label into $1. */
+
+void
+mips_output_load_label (rtx target)
+{
+ mips_push_asm_switch (&mips_noat);
+ if (TARGET_EXPLICIT_RELOCS)
+ {
+ mips_process_load_label (target);
+ mips_multi_write ();
+ }
+ else
+ {
+ if (Pmode == DImode)
+ output_asm_insn ("dla\t%@,%0", &target);
+ else
+ output_asm_insn ("la\t%@,%0", &target);
+ }
+}
+
+/* Return the length of INSN. LENGTH is the initial length computed by
+ attributes in the machine-description file. */
+
int
mips_adjust_insn_length (rtx insn, int length)
{
  /* mips.md uses MAX_PIC_BRANCH_LENGTH as a placeholder for the length
     of a PIC long-branch sequence.  Substitute the correct value.  */
  if (length == MAX_PIC_BRANCH_LENGTH
      && JUMP_P (insn)
      && INSN_CODE (insn) >= 0
      && get_attr_type (insn) == TYPE_BRANCH)
    {
      /* Add the branch-over instruction and its delay slot, if this
	 is a conditional branch.  */
      length = simplejump_p (insn) ? 0 : 8;

      /* Add the size of a load into $AT.  */
      length += BASE_INSN_LENGTH * mips_load_label_num_insns ();

      /* Add the length of an indirect jump, ignoring the delay slot.  */
      length += TARGET_COMPRESSION ? 2 : 4;
    }

  /* A unconditional jump has an unfilled delay slot if it is not part
     of a sequence.  A conditional jump normally has a delay slot, but
     does not on MIPS16.  */
  if (CALL_P (insn) || (TARGET_MIPS16 ? simplejump_p (insn) : JUMP_P (insn)))
    length += TARGET_MIPS16 ? 2 : 4;

  /* See how many nops might be needed to avoid hardware hazards.  */
  if (!cfun->machine->ignore_hazard_length_p
      && INSN_P (insn)
      && INSN_CODE (insn) >= 0)
    switch (get_attr_hazard (insn))
      {
      case HAZARD_NONE:
	break;

      case HAZARD_DELAY:
	/* One nop.  */
	length += NOP_INSN_LENGTH;
	break;

      case HAZARD_HILO:
	/* Two nops.  */
	length += NOP_INSN_LENGTH * 2;
	break;
      }

  return length;
}
+
+/* Return the assembly code for INSN, which has the operands given by
+ OPERANDS, and which branches to OPERANDS[0] if some condition is true.
+ BRANCH_IF_TRUE is the asm template that should be used if OPERANDS[0]
+ is in range of a direct branch. BRANCH_IF_FALSE is an inverted
+ version of BRANCH_IF_TRUE. */
+
+const char *
+mips_output_conditional_branch (rtx insn, rtx *operands,
+ const char *branch_if_true,
+ const char *branch_if_false)
+{
+ unsigned int length;
+ rtx taken, not_taken;
+
+ gcc_assert (LABEL_P (operands[0]));
+
+ length = get_attr_length (insn);
+ if (length <= 8)
+ {
+ /* Just a simple conditional branch. */
+ mips_branch_likely = (final_sequence && INSN_ANNULLED_BRANCH_P (insn));
+ return branch_if_true;
+ }
+
+ /* Generate a reversed branch around a direct jump. This fallback does
+ not use branch-likely instructions. */
+ mips_branch_likely = false;
+ not_taken = gen_label_rtx ();
+ taken = operands[0];
+
+ /* Generate the reversed branch to NOT_TAKEN. */
+ operands[0] = not_taken;
+ output_asm_insn (branch_if_false, operands);
+
+ /* If INSN has a delay slot, we must provide delay slots for both the
+ branch to NOT_TAKEN and the conditional jump. We must also ensure
+ that INSN's delay slot is executed in the appropriate cases. */
+ if (final_sequence)
+ {
+ /* This first delay slot will always be executed, so use INSN's
+ delay slot if it is not annulled. */
+ if (!INSN_ANNULLED_BRANCH_P (insn))
+ {
+ final_scan_insn (XVECEXP (final_sequence, 0, 1),
+ asm_out_file, optimize, 1, NULL);
+ INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
+ }
+ else
+ output_asm_insn ("nop", 0);
+ fprintf (asm_out_file, "\n");
+ }
+
+ /* Output the unconditional branch to TAKEN. */
+ if (TARGET_ABSOLUTE_JUMPS)
+ output_asm_insn (MIPS_ABSOLUTE_JUMP ("j\t%0%/"), &taken);
+ else
+ {
+ /* PIC long branch: load the target address into $AT and jump
+ through it. */
+ mips_output_load_label (taken);
+ output_asm_insn ("jr\t%@%]%/", 0);
+ }
+
+ /* Now deal with its delay slot; see above. */
+ if (final_sequence)
+ {
+ /* This delay slot will only be executed if the branch is taken.
+ Use INSN's delay slot if it is annulled. */
+ if (INSN_ANNULLED_BRANCH_P (insn))
+ {
+ final_scan_insn (XVECEXP (final_sequence, 0, 1),
+ asm_out_file, optimize, 1, NULL);
+ INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
+ }
+ else
+ output_asm_insn ("nop", 0);
+ fprintf (asm_out_file, "\n");
+ }
+
+ /* Output NOT_TAKEN. */
+ targetm.asm_out.internal_label (asm_out_file, "L",
+ CODE_LABEL_NUMBER (not_taken));
+ return "";
+}
+
+/* Return the assembly code for INSN, which branches to OPERANDS[0]
+ if some ordering condition is true. The condition is given by
+ OPERANDS[1] if !INVERTED_P, otherwise it is the inverse of
+ OPERANDS[1]. OPERANDS[2] is the comparison's first operand;
+ its second is always zero. */
+
+const char *
+mips_output_order_conditional_branch (rtx insn, rtx *operands, bool inverted_p)
+{
+ const char *branch[2];
+
+ /* Make BRANCH[1] branch to OPERANDS[0] when the condition is true.
+ Make BRANCH[0] branch on the inverse condition. */
+ switch (GET_CODE (operands[1]))
+ {
+ /* These cases are equivalent to comparisons against zero. */
+ case LEU:
+ /* X <=u 0 is the same as X == 0; invert and share GTU's code. */
+ inverted_p = !inverted_p;
+ /* Fall through. */
+ case GTU:
+ branch[!inverted_p] = MIPS_BRANCH ("bne", "%2,%.,%0");
+ branch[inverted_p] = MIPS_BRANCH ("beq", "%2,%.,%0");
+ break;
+
+ /* These cases are always true or always false. */
+ case LTU:
+ /* X <u 0 is always false; invert and share GEU's code. */
+ inverted_p = !inverted_p;
+ /* Fall through. */
+ case GEU:
+ /* X >=u 0 is always true, so emit a branch on $0 == $0 (or the
+ never-taken $0 != $0 for the inverse). */
+ branch[!inverted_p] = MIPS_BRANCH ("beq", "%.,%.,%0");
+ branch[inverted_p] = MIPS_BRANCH ("bne", "%.,%.,%0");
+ break;
+
+ default:
+ /* Signed orderings against zero map directly to b<cond>z forms. */
+ branch[!inverted_p] = MIPS_BRANCH ("b%C1z", "%2,%0");
+ branch[inverted_p] = MIPS_BRANCH ("b%N1z", "%2,%0");
+ break;
+ }
+ return mips_output_conditional_branch (insn, operands, branch[1], branch[0]);
+}
+
+/* Start a block of code that needs access to the LL, SC and SYNC
+ instructions. */
+
+static void
+mips_start_ll_sc_sync_block (void)
+{
+ if (!ISA_HAS_LL_SC)
+ {
+ /* Save the assembler state, then temporarily raise the ISA level
+ to one that provides LL/SC (MIPS III for 64-bit, MIPS II
+ otherwise). */
+ output_asm_insn (".set\tpush", 0);
+ if (TARGET_64BIT)
+ output_asm_insn (".set\tmips3", 0);
+ else
+ output_asm_insn (".set\tmips2", 0);
+ }
+}
+
+/* End a block started by mips_start_ll_sc_sync_block. */
+
+static void
+mips_end_ll_sc_sync_block (void)
+{
+ /* Nothing was pushed if the native ISA already has LL/SC. */
+ if (ISA_HAS_LL_SC)
+ return;
+
+ /* Undo the ".set push" emitted by mips_start_ll_sc_sync_block. */
+ output_asm_insn (".set\tpop", 0);
+}
+
+/* Output and/or return the asm template for a sync instruction. */
+
+const char *
+mips_output_sync (void)
+{
+ /* SYNC may need the temporary ISA override; see
+ mips_start_ll_sc_sync_block. */
+ mips_start_ll_sc_sync_block ();
+ output_asm_insn ("sync", 0);
+ mips_end_ll_sc_sync_block ();
+ return "";
+}
+
+/* Return the asm template associated with sync_insn1 value TYPE.
+ IS_64BIT_P is true if we want a 64-bit rather than 32-bit operation;
+ the doubleword (d-prefixed) forms are used where they exist. */
+
+static const char *
+mips_sync_insn1_template (enum attr_sync_insn1 type, bool is_64bit_p)
+{
+ switch (type)
+ {
+ case SYNC_INSN1_MOVE:
+ return "move\t%0,%z2";
+ case SYNC_INSN1_LI:
+ return "li\t%0,%2";
+ case SYNC_INSN1_ADDU:
+ return is_64bit_p ? "daddu\t%0,%1,%z2" : "addu\t%0,%1,%z2";
+ case SYNC_INSN1_ADDIU:
+ return is_64bit_p ? "daddiu\t%0,%1,%2" : "addiu\t%0,%1,%2";
+ case SYNC_INSN1_SUBU:
+ return is_64bit_p ? "dsubu\t%0,%1,%z2" : "subu\t%0,%1,%z2";
+ /* The logical operations below are width-agnostic, so there are
+ no separate 64-bit forms. */
+ case SYNC_INSN1_AND:
+ return "and\t%0,%1,%z2";
+ case SYNC_INSN1_ANDI:
+ return "andi\t%0,%1,%2";
+ case SYNC_INSN1_OR:
+ return "or\t%0,%1,%z2";
+ case SYNC_INSN1_ORI:
+ return "ori\t%0,%1,%2";
+ case SYNC_INSN1_XOR:
+ return "xor\t%0,%1,%z2";
+ case SYNC_INSN1_XORI:
+ return "xori\t%0,%1,%2";
+ }
+ gcc_unreachable ();
+}
+
+/* Return the asm template associated with sync_insn2 value TYPE.
+ SYNC_INSN2_NOP has no template; callers must not request one. */
+
+static const char *
+mips_sync_insn2_template (enum attr_sync_insn2 type)
+{
+ switch (type)
+ {
+ case SYNC_INSN2_NOP:
+ gcc_unreachable ();
+ case SYNC_INSN2_AND:
+ return "and\t%0,%1,%z2";
+ case SYNC_INSN2_XOR:
+ return "xor\t%0,%1,%z2";
+ case SYNC_INSN2_NOT:
+ /* NOT is implemented as NOR with $0. */
+ return "nor\t%0,%1,%.";
+ }
+ gcc_unreachable ();
+}
+
+/* OPERANDS are the operands to a sync loop instruction and INDEX is
+ the value of the one of the sync_* attributes. Return the operand
+ referred to by the attribute, or DEFAULT_VALUE if the insn doesn't
+ have the associated attribute (i.e. INDEX is zero). */
+
+static rtx
+mips_get_sync_operand (rtx *operands, int index, rtx default_value)
+{
+ /* Attribute values are 1-based operand numbers; 0 means "absent". */
+ return index > 0 ? operands[index - 1] : default_value;
+}
+
+/* INSN is a sync loop with operands OPERANDS. Build up a multi-insn
+ sequence for it. */
+
+static void
+mips_process_sync_loop (rtx insn, rtx *operands)
+{
+ rtx at, mem, oldval, newval, inclusive_mask, exclusive_mask;
+ rtx required_oldval, insn1_op2, tmp1, tmp2, tmp3, cmp;
+ unsigned int tmp3_insn;
+ enum attr_sync_insn1 insn1;
+ enum attr_sync_insn2 insn2;
+ bool is_64bit_p;
+ int memmodel_attr;
+ enum memmodel model;
+
+ /* Read an operand from the sync_WHAT attribute and store it in
+ variable WHAT. DEFAULT is the default value if no attribute
+ is specified. */
+#define READ_OPERAND(WHAT, DEFAULT) \
+ WHAT = mips_get_sync_operand (operands, (int) get_attr_sync_##WHAT (insn), \
+ DEFAULT)
+
+ /* Read the memory. */
+ READ_OPERAND (mem, 0);
+ gcc_assert (mem);
+ is_64bit_p = (GET_MODE_BITSIZE (GET_MODE (mem)) == 64);
+
+ /* Read the other attributes. */
+ at = gen_rtx_REG (GET_MODE (mem), AT_REGNUM);
+ READ_OPERAND (oldval, at);
+ READ_OPERAND (cmp, 0);
+ READ_OPERAND (newval, at);
+ READ_OPERAND (inclusive_mask, 0);
+ READ_OPERAND (exclusive_mask, 0);
+ READ_OPERAND (required_oldval, 0);
+ READ_OPERAND (insn1_op2, 0);
+ insn1 = get_attr_sync_insn1 (insn);
+ insn2 = get_attr_sync_insn2 (insn);
+
+ /* Don't bother setting CMP result that is never used. */
+ if (cmp && find_reg_note (insn, REG_UNUSED, cmp))
+ cmp = 0;
+
+ /* Values 10 and 11 are special encodings of fixed memory models;
+ any other value is the index of the operand holding the model. */
+ memmodel_attr = get_attr_sync_memmodel (insn);
+ switch (memmodel_attr)
+ {
+ case 10:
+ model = MEMMODEL_ACQ_REL;
+ break;
+ case 11:
+ model = MEMMODEL_ACQUIRE;
+ break;
+ default:
+ model = (enum memmodel) INTVAL (operands[memmodel_attr]);
+ }
+
+ mips_multi_start ();
+
+ /* Output the release side of the memory barrier. */
+ if (need_atomic_barrier_p (model, true))
+ {
+ if (required_oldval == 0 && TARGET_OCTEON)
+ {
+ /* Octeon doesn't reorder reads, so a full barrier can be
+ created by using SYNCW to order writes combined with the
+ write from the following SC. When the SC successfully
+ completes, we know that all preceding writes are also
+ committed to the coherent memory system. It is possible
+ for a single SYNCW to fail, but a pair of them will never
+ fail, so we use two. */
+ mips_multi_add_insn ("syncw", NULL);
+ mips_multi_add_insn ("syncw", NULL);
+ }
+ else
+ mips_multi_add_insn ("sync", NULL);
+ }
+
+ /* Output the branch-back label. */
+ mips_multi_add_label ("1:");
+
+ /* OLDVAL = *MEM. */
+ mips_multi_add_insn (is_64bit_p ? "lld\t%0,%1" : "ll\t%0,%1",
+ oldval, mem, NULL);
+
+ /* if ((OLDVAL & INCLUSIVE_MASK) != REQUIRED_OLDVAL) goto 2. */
+ if (required_oldval)
+ {
+ if (inclusive_mask == 0)
+ tmp1 = oldval;
+ else
+ {
+ gcc_assert (oldval != at);
+ mips_multi_add_insn ("and\t%0,%1,%2",
+ at, oldval, inclusive_mask, NULL);
+ tmp1 = at;
+ }
+ mips_multi_add_insn ("bne\t%0,%z1,2f", tmp1, required_oldval, NULL);
+
+ /* CMP = 0 [delay slot]. */
+ if (cmp)
+ mips_multi_add_insn ("li\t%0,0", cmp, NULL);
+ }
+
+ /* $TMP1 = OLDVAL & EXCLUSIVE_MASK. */
+ if (exclusive_mask == 0)
+ tmp1 = const0_rtx;
+ else
+ {
+ gcc_assert (oldval != at);
+ mips_multi_add_insn ("and\t%0,%1,%z2",
+ at, oldval, exclusive_mask, NULL);
+ tmp1 = at;
+ }
+
+ /* $TMP2 = INSN1 (OLDVAL, INSN1_OP2).
+
+ We can ignore moves if $TMP1 != INSN1_OP2, since we'll still emit
+ at least one instruction in that case. */
+ if (insn1 == SYNC_INSN1_MOVE
+ && (tmp1 != const0_rtx || insn2 != SYNC_INSN2_NOP))
+ tmp2 = insn1_op2;
+ else
+ {
+ mips_multi_add_insn (mips_sync_insn1_template (insn1, is_64bit_p),
+ newval, oldval, insn1_op2, NULL);
+ tmp2 = newval;
+ }
+
+ /* $TMP3 = INSN2 ($TMP2, INCLUSIVE_MASK). */
+ if (insn2 == SYNC_INSN2_NOP)
+ tmp3 = tmp2;
+ else
+ {
+ mips_multi_add_insn (mips_sync_insn2_template (insn2),
+ newval, tmp2, inclusive_mask, NULL);
+ tmp3 = newval;
+ }
+ /* Remember this slot; it may be retargeted to $AT or copied below. */
+ tmp3_insn = mips_multi_last_index ();
+
+ /* $AT = $TMP1 | $TMP3. */
+ if (tmp1 == const0_rtx || tmp3 == const0_rtx)
+ {
+ mips_multi_set_operand (tmp3_insn, 0, at);
+ tmp3 = at;
+ }
+ else
+ {
+ gcc_assert (tmp1 != tmp3);
+ mips_multi_add_insn ("or\t%0,%1,%2", at, tmp1, tmp3, NULL);
+ }
+
+ /* if (!commit (*MEM = $AT)) goto 1.
+
+ This will sometimes be a delayed branch; see the write code below
+ for details. */
+ mips_multi_add_insn (is_64bit_p ? "scd\t%0,%1" : "sc\t%0,%1", at, mem, NULL);
+ mips_multi_add_insn ("beq%?\t%0,%.,1b", at, NULL);
+
+ /* if (INSN1 != MOVE && INSN1 != LI) NEWVAL = $TMP3 [delay slot]. */
+ if (insn1 != SYNC_INSN1_MOVE && insn1 != SYNC_INSN1_LI && tmp3 != newval)
+ {
+ mips_multi_copy_insn (tmp3_insn);
+ mips_multi_set_operand (mips_multi_last_index (), 0, newval);
+ }
+ else if (!(required_oldval && cmp))
+ mips_multi_add_insn ("nop", NULL);
+
+ /* CMP = 1 -- either standalone or in a delay slot. */
+ if (required_oldval && cmp)
+ mips_multi_add_insn ("li\t%0,1", cmp, NULL);
+
+ /* Output the acquire side of the memory barrier. */
+ if (TARGET_SYNC_AFTER_SC && need_atomic_barrier_p (model, false))
+ mips_multi_add_insn ("sync", NULL);
+
+ /* Output the exit label, if needed. */
+ if (required_oldval)
+ mips_multi_add_label ("2:");
+
+#undef READ_OPERAND
+}
+
+/* Output and/or return the asm template for sync loop INSN, which has
+ the operands given by OPERANDS. */
+
+const char *
+mips_output_sync_loop (rtx insn, rtx *operands)
+{
+ mips_process_sync_loop (insn, operands);
+
+ /* Use branch-likely instructions to work around the LL/SC R10000
+ errata. */
+ mips_branch_likely = TARGET_FIX_R10000;
+
+ /* The loop must run with reordering, macro expansion and $at use
+ disabled; everything is restored in reverse order afterwards. */
+ mips_push_asm_switch (&mips_noreorder);
+ mips_push_asm_switch (&mips_nomacro);
+ mips_push_asm_switch (&mips_noat);
+ mips_start_ll_sc_sync_block ();
+
+ mips_multi_write ();
+
+ mips_end_ll_sc_sync_block ();
+ mips_pop_asm_switch (&mips_noat);
+ mips_pop_asm_switch (&mips_nomacro);
+ mips_pop_asm_switch (&mips_noreorder);
+
+ return "";
+}
+
+/* Return the number of individual instructions in sync loop INSN,
+ which has the operands given by OPERANDS. */
+
+unsigned int
+mips_sync_loop_insns (rtx insn, rtx *operands)
+{
+ /* Rebuild the sequence; mips_multi_num_insns then holds its length. */
+ mips_process_sync_loop (insn, operands);
+ return mips_multi_num_insns;
+}
+
+/* Return the assembly code for DIV or DDIV instruction DIVISION, which has
+ the operands given by OPERANDS. Add in a divide-by-zero check if needed.
+
+ When working around R4000 and R4400 errata, we need to make sure that
+ the division is not immediately followed by a shift[1][2]. We also
+ need to stop the division from being put into a branch delay slot[3].
+ The easiest way to avoid both problems is to add a nop after the
+ division. When a divide-by-zero check is needed, this nop can be
+ used to fill the branch delay slot.
+
+ [1] If a double-word or a variable shift executes immediately
+ after starting an integer division, the shift may give an
+ incorrect result. See quotations of errata #16 and #28 from
+ "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
+ in mips.md for details.
+
+ [2] A similar bug to [1] exists for all revisions of the
+ R4000 and the R4400 when run in an MC configuration.
+ From "MIPS R4000MC Errata, Processor Revision 2.2 and 3.0":
+
+ "19. In this following sequence:
+
+ ddiv (or ddivu or div or divu)
+ dsll32 (or dsrl32, dsra32)
+
+ if an MPT stall occurs, while the divide is slipping the cpu
+ pipeline, then the following double shift would end up with an
+ incorrect result.
+
+ Workaround: The compiler needs to avoid generating any
+ sequence with divide followed by extended double shift."
+
+ This erratum is also present in "MIPS R4400MC Errata, Processor
+ Revision 1.0" and "MIPS R4400MC Errata, Processor Revision 2.0
+ & 3.0" as errata #10 and #4, respectively.
+
+ [3] From "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
+ (also valid for MIPS R4000MC processors):
+
+ "52. R4000SC: This bug does not apply for the R4000PC.
+
+ There are two flavors of this bug:
+
+ 1) If the instruction just after divide takes an RF exception
+ (tlb-refill, tlb-invalid) and gets an instruction cache
+ miss (both primary and secondary) and the line which is
+ currently in secondary cache at this index had the first
+ data word, where the bits 5..2 are set, then R4000 would
+ get a wrong result for the div.
+
+ ##1
+ nop
+ div r8, r9
+ ------------------- # end-of page. -tlb-refill
+ nop
+ ##2
+ nop
+ div r8, r9
+ ------------------- # end-of page. -tlb-invalid
+ nop
+
+ 2) If the divide is in the taken branch delay slot, where the
+ target takes RF exception and gets an I-cache miss for the
+ exception vector or where I-cache miss occurs for the
+ target address, under the above mentioned scenarios, the
+ div would get wrong results.
+
+ ##1
+ j r2 # to next page mapped or unmapped
+ div r8,r9 # this bug would be there as long
+ # as there is an ICache miss and
+ nop # the "data pattern" is present
+
+ ##2
+ beq r0, r0, NextPage # to Next page
+ div r8,r9
+ nop
+
+ This bug is present for div, divu, ddiv, and ddivu
+ instructions.
+
+ Workaround: For item 1), OS could make sure that the next page
+ after the divide instruction is also mapped. For item 2), the
+ compiler could make sure that the divide instruction is not in
+ the branch delay slot."
+
+ These processors have PRId values of 0x00004220 and 0x00004300 for
+ the R4000 and 0x00004400, 0x00004500 and 0x00004600 for the R4400. */
+
+const char *
+mips_output_division (const char *division, rtx *operands)
+{
+ const char *s;
+
+ s = division;
+ if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
+ {
+ /* Emit the division now and return a trailing nop, per the
+ errata workaround described in the comment above. */
+ output_asm_insn (s, operands);
+ s = "nop";
+ }
+ if (TARGET_CHECK_ZERO_DIV)
+ {
+ if (TARGET_MIPS16)
+ {
+ /* MIPS16 has no trap instruction; use an explicit branch
+ around a break. */
+ output_asm_insn (s, operands);
+ s = "bnez\t%2,1f\n\tbreak\t7\n1:";
+ }
+ else if (GENERATE_DIVIDE_TRAPS)
+ {
+ /* Avoid long replay penalty on load miss by putting the trap before
+ the divide. */
+ if (TUNE_74K)
+ output_asm_insn ("teq\t%2,%.,7", operands);
+ else
+ {
+ output_asm_insn (s, operands);
+ s = "teq\t%2,%.,7";
+ }
+ }
+ else
+ {
+ /* No trap support: branch over an explicit break on a
+ nonzero divisor. */
+ output_asm_insn ("%(bne\t%2,%.,1f", operands);
+ output_asm_insn (s, operands);
+ s = "break\t7%)\n1:";
+ }
+ }
+ return s;
+}
+
+/* Return true if IN_INSN is a multiply-add or multiply-subtract
+ instruction and if OUT_INSN assigns to the accumulator operand. */
+
+bool
+mips_linked_madd_p (rtx out_insn, rtx in_insn)
+{
+ enum attr_accum_in accum_in;
+ int accum_in_opnum;
+ rtx accum_in_op;
+
+ if (recog_memoized (in_insn) < 0)
+ return false;
+
+ accum_in = get_attr_accum_in (in_insn);
+ if (accum_in == ACCUM_IN_NONE)
+ return false;
+
+ /* The accum_in attribute value encodes the operand number of the
+ accumulator input. */
+ accum_in_opnum = accum_in - ACCUM_IN_0;
+
+ extract_insn (in_insn);
+ gcc_assert (accum_in_opnum < recog_data.n_operands);
+ accum_in_op = recog_data.operand[accum_in_opnum];
+
+ return reg_set_p (accum_in_op, out_insn);
+}
+
+/* True if the dependency between OUT_INSN and IN_INSN is on the store
+ data rather than the address. We need this because the cprestore
+ pattern is type "store", but is defined using an UNSPEC_VOLATILE,
+ which causes the default routine to abort. We just return false
+ for that case. */
+
+bool
+mips_store_data_bypass_p (rtx out_insn, rtx in_insn)
+{
+ if (GET_CODE (PATTERN (in_insn)) == UNSPEC_VOLATILE)
+ return false;
+
+ /* NOTE(review): store_data_bypass_p's result is deliberately
+ inverted here -- confirm against its definition in recog.c. */
+ return !store_data_bypass_p (out_insn, in_insn);
+}
+
+
+/* Variables and flags used in scheduler hooks when tuning for
+ Loongson 2E/2F. */
+static struct
+{
+ /* Variables to support Loongson 2E/2F round-robin [F]ALU1/2 dispatch
+ strategy. */
+
+ /* If true, then next ALU1/2 instruction will go to ALU1. */
+ bool alu1_turn_p;
+
+ /* If true, then next FALU1/2 instruction will go to FALU1. */
+ bool falu1_turn_p;
+
+ /* Codes to query if [f]alu{1,2}_core units are subscribed or not. */
+ int alu1_core_unit_code;
+ int alu2_core_unit_code;
+ int falu1_core_unit_code;
+ int falu2_core_unit_code;
+
+ /* True if current cycle has a multi instruction.
+ This flag is used in mips_ls2_dfa_post_advance_cycle. */
+ bool cycle_has_multi_p;
+
+ /* Instructions to subscribe ls2_[f]alu{1,2}_turn_enabled units.
+ These are used in mips_ls2_dfa_post_advance_cycle to initialize
+ DFA state.
+ E.g., when alu1_turn_enabled_insn is issued it makes next ALU1/2
+ instruction to go ALU1. */
+ rtx alu1_turn_enabled_insn;
+ rtx alu2_turn_enabled_insn;
+ rtx falu1_turn_enabled_insn;
+ rtx falu2_turn_enabled_insn;
+} mips_ls2;
+
+/* Implement TARGET_SCHED_ADJUST_COST. We assume that anti and output
+ dependencies have no cost, except on the 20Kc where output-dependence
+ is treated like input-dependence. */
+
+static int
+mips_adjust_cost (rtx insn ATTRIBUTE_UNUSED, rtx link,
+ rtx dep ATTRIBUTE_UNUSED, int cost)
+{
+ /* Keep the full cost for output dependencies on the 20Kc. */
+ if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT
+ && TUNE_20KC)
+ return cost;
+ /* Any other non-true dependence (nonzero note kind) is free. */
+ if (REG_NOTE_KIND (link) != 0)
+ return 0;
+ return cost;
+}
+
+/* Return the number of instructions that can be issued per cycle. */
+
+static int
+mips_issue_rate (void)
+{
+ switch (mips_tune)
+ {
+ case PROCESSOR_74KC:
+ case PROCESSOR_74KF2_1:
+ case PROCESSOR_74KF1_1:
+ case PROCESSOR_74KF3_2:
+ /* The 74k is not strictly quad-issue cpu, but can be seen as one
+ by the scheduler. It can issue 1 ALU, 1 AGEN and 2 FPU insns,
+ but in reality only a maximum of 3 insns can be issued as
+ floating-point loads and stores also require a slot in the
+ AGEN pipe. */
+ /* Fall through. */
+ case PROCESSOR_R10000:
+ /* All R10K Processors are quad-issue (being the first MIPS
+ processors to support this feature). */
+ return 4;
+
+ case PROCESSOR_20KC:
+ case PROCESSOR_R4130:
+ case PROCESSOR_R5400:
+ case PROCESSOR_R5500:
+ case PROCESSOR_R5900:
+ case PROCESSOR_R7000:
+ case PROCESSOR_R9000:
+ case PROCESSOR_OCTEON:
+ case PROCESSOR_OCTEON2:
+ return 2;
+
+ case PROCESSOR_SB1:
+ case PROCESSOR_SB1A:
+ /* This is actually 4, but we get better performance if we claim 3.
+ This is partly because of unwanted speculative code motion with the
+ larger number, and partly because in most common cases we can't
+ reach the theoretical max of 4. */
+ return 3;
+
+ case PROCESSOR_LOONGSON_2E:
+ case PROCESSOR_LOONGSON_2F:
+ case PROCESSOR_LOONGSON_3A:
+ return 4;
+
+ case PROCESSOR_XLP:
+ return (reload_completed ? 4 : 3);
+
+ default:
+ /* Conservatively assume single issue for unlisted processors. */
+ return 1;
+ }
+}
+
+/* Implement TARGET_SCHED_INIT_DFA_POST_CYCLE_INSN hook for Loongson2.
+ Pre-build the dummy insns that subscribe the *_turn_enabled DFA units
+ and cache the DFA unit codes for the [f]alu cores in mips_ls2. */
+
+static void
+mips_ls2_init_dfa_post_cycle_insn (void)
+{
+ start_sequence ();
+ emit_insn (gen_ls2_alu1_turn_enabled_insn ());
+ mips_ls2.alu1_turn_enabled_insn = get_insns ();
+ end_sequence ();
+
+ start_sequence ();
+ emit_insn (gen_ls2_alu2_turn_enabled_insn ());
+ mips_ls2.alu2_turn_enabled_insn = get_insns ();
+ end_sequence ();
+
+ start_sequence ();
+ emit_insn (gen_ls2_falu1_turn_enabled_insn ());
+ mips_ls2.falu1_turn_enabled_insn = get_insns ();
+ end_sequence ();
+
+ start_sequence ();
+ emit_insn (gen_ls2_falu2_turn_enabled_insn ());
+ mips_ls2.falu2_turn_enabled_insn = get_insns ();
+ end_sequence ();
+
+ mips_ls2.alu1_core_unit_code = get_cpu_unit_code ("ls2_alu1_core");
+ mips_ls2.alu2_core_unit_code = get_cpu_unit_code ("ls2_alu2_core");
+ mips_ls2.falu1_core_unit_code = get_cpu_unit_code ("ls2_falu1_core");
+ mips_ls2.falu2_core_unit_code = get_cpu_unit_code ("ls2_falu2_core");
+}
+
+/* Implement TARGET_SCHED_INIT_DFA_POST_CYCLE_INSN hook.
+ Init data used in mips_dfa_post_advance_cycle.
+ Only Loongson 2E/2F needs this machinery. */
+
+static void
+mips_init_dfa_post_cycle_insn (void)
+{
+ if (TUNE_LOONGSON_2EF)
+ mips_ls2_init_dfa_post_cycle_insn ();
+}
+
+/* Initialize STATE when scheduling for Loongson 2E/2F.
+ Support round-robin dispatch scheme by enabling only one of
+ ALU1/ALU2 and one of FALU1/FALU2 units for ALU1/2 and FALU1/2 instructions
+ respectively. */
+
+static void
+mips_ls2_dfa_post_advance_cycle (state_t state)
+{
+ if (cpu_unit_reservation_p (state, mips_ls2.alu1_core_unit_code))
+ {
+ /* Though there are no non-pipelined ALU1 insns,
+ we can get an instruction of type 'multi' before reload. */
+ gcc_assert (mips_ls2.cycle_has_multi_p);
+ mips_ls2.alu1_turn_p = false;
+ }
+
+ mips_ls2.cycle_has_multi_p = false;
+
+ if (cpu_unit_reservation_p (state, mips_ls2.alu2_core_unit_code))
+ /* We have a non-pipelined alu instruction in the core,
+ adjust round-robin counter. */
+ mips_ls2.alu1_turn_p = true;
+
+ /* Subscribe the turn-enabled unit for whichever ALU is next; the
+ state transition must always be accepted. */
+ if (mips_ls2.alu1_turn_p)
+ {
+ if (state_transition (state, mips_ls2.alu1_turn_enabled_insn) >= 0)
+ gcc_unreachable ();
+ }
+ else
+ {
+ if (state_transition (state, mips_ls2.alu2_turn_enabled_insn) >= 0)
+ gcc_unreachable ();
+ }
+
+ if (cpu_unit_reservation_p (state, mips_ls2.falu1_core_unit_code))
+ {
+ /* There are no non-pipelined FALU1 insns. */
+ gcc_unreachable ();
+ /* Never executed; kept to mirror the ALU1 case above. */
+ mips_ls2.falu1_turn_p = false;
+ }
+
+ if (cpu_unit_reservation_p (state, mips_ls2.falu2_core_unit_code))
+ /* We have a non-pipelined falu instruction in the core,
+ adjust round-robin counter. */
+ mips_ls2.falu1_turn_p = true;
+
+ if (mips_ls2.falu1_turn_p)
+ {
+ if (state_transition (state, mips_ls2.falu1_turn_enabled_insn) >= 0)
+ gcc_unreachable ();
+ }
+ else
+ {
+ if (state_transition (state, mips_ls2.falu2_turn_enabled_insn) >= 0)
+ gcc_unreachable ();
+ }
+}
+
+/* Implement TARGET_SCHED_DFA_POST_ADVANCE_CYCLE.
+ This hook is being called at the start of each cycle.
+ Only Loongson 2E/2F needs per-cycle DFA adjustment. */
+
+static void
+mips_dfa_post_advance_cycle (void)
+{
+ if (TUNE_LOONGSON_2EF)
+ mips_ls2_dfa_post_advance_cycle (curr_state);
+}
+
+/* Implement TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD. This should
+ be as wide as the scheduling freedom in the DFA. */
+
+static int
+mips_multipass_dfa_lookahead (void)
+{
+ /* The SB-1 can schedule up to 4 of its 6 function units in any one
+ cycle; the Loongson cores are also four-wide. */
+ if (TUNE_SB1 || TUNE_LOONGSON_2EF || TUNE_LOONGSON_3A)
+ return 4;
+
+ if (TUNE_OCTEON)
+ return 2;
+
+ /* No multipass lookahead for anything else. */
+ return 0;
+}
+
+/* Remove the instruction at index LOWER from ready queue READY and
+ reinsert it in front of the instruction at index HIGHER. LOWER must
+ be <= HIGHER. */
+
+static void
+mips_promote_ready (rtx *ready, int lower, int higher)
+{
+ rtx promoted;
+ int pos;
+
+ /* Shift everything between LOWER and HIGHER down one slot, then
+ drop the promoted instruction into the vacated top position. */
+ promoted = ready[lower];
+ for (pos = lower; pos < higher; pos++)
+ ready[pos] = ready[pos + 1];
+ ready[higher] = promoted;
+}
+
+/* If the priority of the instruction at POS2 in the ready queue READY
+ is within LIMIT units of that of the instruction at POS1, swap the
+ instructions if POS2 is not already less than POS1. */
+
+static void
+mips_maybe_swap_ready (rtx *ready, int pos1, int pos2, int limit)
+{
+ rtx swap_tmp;
+
+ /* Nothing to do unless POS1 precedes POS2 and the two priorities
+ are within LIMIT of each other. */
+ if (pos1 >= pos2
+ || INSN_PRIORITY (ready[pos1]) + limit < INSN_PRIORITY (ready[pos2]))
+ return;
+
+ swap_tmp = ready[pos1];
+ ready[pos1] = ready[pos2];
+ ready[pos2] = swap_tmp;
+}
+
+/* Used by TUNE_MACC_CHAINS to record the last scheduled instruction
+ that may clobber hi or lo. */
+static rtx mips_macc_chains_last_hilo;
+
+/* A TUNE_MACC_CHAINS helper function. Record that instruction INSN has
+ been scheduled, updating mips_macc_chains_last_hilo appropriately. */
+
+static void
+mips_macc_chains_record (rtx insn)
+{
+ /* Only instructions that may clobber hi/lo are remembered. */
+ if (get_attr_may_clobber_hilo (insn))
+ mips_macc_chains_last_hilo = insn;
+}
+
+/* A TUNE_MACC_CHAINS helper function. Search ready queue READY, which
+ has NREADY elements, looking for a multiply-add or multiply-subtract
+ instruction that is cumulative with mips_macc_chains_last_hilo.
+ If there is one, promote it ahead of anything else that might
+ clobber hi or lo. */
+
+static void
+mips_macc_chains_reorder (rtx *ready, int nready)
+{
+ int i, j;
+
+ if (mips_macc_chains_last_hilo != 0)
+ for (i = nready - 1; i >= 0; i--)
+ if (mips_linked_madd_p (mips_macc_chains_last_hilo, ready[i]))
+ {
+ /* Promote READY[I] ahead of the highest-placed insn that
+ may clobber hi/lo, if there is one. */
+ for (j = nready - 1; j > i; j--)
+ if (recog_memoized (ready[j]) >= 0
+ && get_attr_may_clobber_hilo (ready[j]))
+ {
+ mips_promote_ready (ready, i, j);
+ break;
+ }
+ /* Only the first linked instruction found is considered. */
+ break;
+ }
+}
+
+/* The last instruction to be scheduled. */
+static rtx vr4130_last_insn;
+
+/* A note_stores callback used by vr4130_true_reg_dependence_p. DATA
+ points to an rtx that is initially an instruction. Nullify the rtx
+ if the instruction uses the value of register X. */
+
+static void
+vr4130_true_reg_dependence_p_1 (rtx x, const_rtx pat ATTRIBUTE_UNUSED,
+ void *data)
+{
+ rtx *insn_ptr;
+
+ insn_ptr = (rtx *) data;
+ /* Once nullified, stay nullified: a single dependence suffices. */
+ if (REG_P (x)
+ && *insn_ptr != 0
+ && reg_referenced_p (x, PATTERN (*insn_ptr)))
+ *insn_ptr = 0;
+}
+
+/* Return true if there is true register dependence between vr4130_last_insn
+ and INSN. */
+
+static bool
+vr4130_true_reg_dependence_p (rtx insn)
+{
+ /* The callback clears INSN if any register stored by
+ vr4130_last_insn is used by INSN's pattern. */
+ note_stores (PATTERN (vr4130_last_insn),
+ vr4130_true_reg_dependence_p_1, &insn);
+ return insn == 0;
+}
+
+/* A TUNE_MIPS4130 helper function. Given that INSN1 is at the head of
+ the ready queue and that INSN2 is the instruction after it, return
+ true if it is worth promoting INSN2 ahead of INSN1. Look for cases
+ in which INSN1 and INSN2 can probably issue in parallel, but for
+ which (INSN2, INSN1) should be less sensitive to instruction
+ alignment than (INSN1, INSN2). See 4130.md for more details. */
+
+static bool
+vr4130_swap_insns_p (rtx insn1, rtx insn2)
+{
+ sd_iterator_def sd_it;
+ dep_t dep;
+
+ /* Check for the following case:
+
+ 1) there is some other instruction X with an anti dependence on INSN1;
+ 2) X has a higher priority than INSN2; and
+ 3) X is an arithmetic instruction (and thus has no unit restrictions).
+
+ If INSN1 is the last instruction blocking X, it would better to
+ choose (INSN1, X) over (INSN2, INSN1). */
+ FOR_EACH_DEP (insn1, SD_LIST_FORW, sd_it, dep)
+ if (DEP_TYPE (dep) == REG_DEP_ANTI
+ && INSN_PRIORITY (DEP_CON (dep)) > INSN_PRIORITY (insn2)
+ && recog_memoized (DEP_CON (dep)) >= 0
+ && get_attr_vr4130_class (DEP_CON (dep)) == VR4130_CLASS_ALU)
+ return false;
+
+ if (vr4130_last_insn != 0
+ && recog_memoized (insn1) >= 0
+ && recog_memoized (insn2) >= 0)
+ {
+ /* See whether INSN1 and INSN2 use different execution units,
+ or if they are both ALU-type instructions. If so, they can
+ probably execute in parallel. */
+ enum attr_vr4130_class class1 = get_attr_vr4130_class (insn1);
+ enum attr_vr4130_class class2 = get_attr_vr4130_class (insn2);
+ if (class1 != class2 || class1 == VR4130_CLASS_ALU)
+ {
+ /* If only one of the instructions has a dependence on
+ vr4130_last_insn, prefer to schedule the other one first. */
+ bool dep1_p = vr4130_true_reg_dependence_p (insn1);
+ bool dep2_p = vr4130_true_reg_dependence_p (insn2);
+ if (dep1_p != dep2_p)
+ return dep1_p;
+
+ /* Prefer to schedule INSN2 ahead of INSN1 if vr4130_last_insn
+ is not an ALU-type instruction and if INSN1 uses the same
+ execution unit. (Note that if this condition holds, we already
+ know that INSN2 uses a different execution unit.) */
+ if (class1 != VR4130_CLASS_ALU
+ && recog_memoized (vr4130_last_insn) >= 0
+ && class1 == get_attr_vr4130_class (vr4130_last_insn))
+ return true;
+ }
+ }
+ /* Default: keep the existing order. */
+ return false;
+}
+
+/* A TUNE_MIPS4130 helper function. (READY, NREADY) describes a ready
+ queue with at least two instructions. Swap the first two if
+ vr4130_swap_insns_p says that it could be worthwhile. */
+
+static void
+vr4130_reorder (rtx *ready, int nready)
+{
+ /* The head of the queue is the highest-indexed element. */
+ if (vr4130_swap_insns_p (ready[nready - 1], ready[nready - 2]))
+ mips_promote_ready (ready, nready - 2, nready - 1);
+}
+
+/* Record whether last 74k AGEN instruction was a load or store. */
+static enum attr_type mips_last_74k_agen_insn = TYPE_UNKNOWN;
+
+/* Initialize mips_last_74k_agen_insn from INSN. A null argument
+ resets to TYPE_UNKNOWN state. */
+
+static void
+mips_74k_agen_init (rtx insn)
+{
+ /* Calls and jumps end the current load/store run. */
+ if (!insn || CALL_P (insn) || JUMP_P (insn))
+ mips_last_74k_agen_insn = TYPE_UNKNOWN;
+ else
+ {
+ enum attr_type type = get_attr_type (insn);
+ if (type == TYPE_LOAD || type == TYPE_STORE)
+ mips_last_74k_agen_insn = type;
+ }
+}
+
+/* A TUNE_74K helper function. The 74K AGEN pipeline likes multiple
+ loads to be grouped together, and multiple stores to be grouped
+ together. Swap things around in the ready queue to make this happen. */
+
+static void
+mips_74k_agen_reorder (rtx *ready, int nready)
+{
+ int i;
+ int store_pos, load_pos;
+
+ store_pos = -1;
+ load_pos = -1;
+
+ /* Find the highest-placed load and store in the ready queue. */
+ for (i = nready - 1; i >= 0; i--)
+ {
+ rtx insn = ready[i];
+ if (USEFUL_INSN_P (insn))
+ switch (get_attr_type (insn))
+ {
+ case TYPE_STORE:
+ if (store_pos == -1)
+ store_pos = i;
+ break;
+
+ case TYPE_LOAD:
+ if (load_pos == -1)
+ load_pos = i;
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ if (load_pos == -1 || store_pos == -1)
+ return;
+
+ switch (mips_last_74k_agen_insn)
+ {
+ case TYPE_UNKNOWN:
+ /* Prefer to schedule loads since they have a higher latency. */
+ /* Fall through. */
+ case TYPE_LOAD:
+ /* Swap loads to the front of the queue. */
+ mips_maybe_swap_ready (ready, load_pos, store_pos, 4);
+ break;
+ case TYPE_STORE:
+ /* Swap stores to the front of the queue. */
+ mips_maybe_swap_ready (ready, store_pos, load_pos, 4);
+ break;
+ default:
+ break;
+ }
+}
+
+/* Implement TARGET_SCHED_INIT. Reset all per-region scheduling state. */
+
+static void
+mips_sched_init (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
+ int max_ready ATTRIBUTE_UNUSED)
+{
+ mips_macc_chains_last_hilo = 0;
+ vr4130_last_insn = 0;
+ mips_74k_agen_init (NULL_RTX);
+
+ /* When scheduling for Loongson2, branch instructions go to ALU1,
+ therefore basic block is most likely to start with round-robin counter
+ pointed to ALU2. */
+ mips_ls2.alu1_turn_p = false;
+ mips_ls2.falu1_turn_p = true;
+}
+
+/* Subroutine used by TARGET_SCHED_REORDER and TARGET_SCHED_REORDER2.
+ Applies whichever tuning-specific ready-queue reorderings are
+ enabled for the current target. */
+
+static void
+mips_sched_reorder_1 (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
+ rtx *ready, int *nreadyp, int cycle ATTRIBUTE_UNUSED)
+{
+ if (!reload_completed
+ && TUNE_MACC_CHAINS
+ && *nreadyp > 0)
+ mips_macc_chains_reorder (ready, *nreadyp);
+
+ if (reload_completed
+ && TUNE_MIPS4130
+ && !TARGET_VR4130_ALIGN
+ && *nreadyp > 1)
+ vr4130_reorder (ready, *nreadyp);
+
+ if (TUNE_74K)
+ mips_74k_agen_reorder (ready, *nreadyp);
+}
+
/* Implement TARGET_SCHED_REORDER.  Reorder the ready queue and return
   the number of instructions that may be issued in the new cycle.  */

static int
mips_sched_reorder (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
		    rtx *ready, int *nreadyp, int cycle ATTRIBUTE_UNUSED)
{
  mips_sched_reorder_1 (file, verbose, ready, nreadyp, cycle);
  /* At the start of a cycle the full issue width is available.  */
  return mips_issue_rate ();
}
+
/* Implement TARGET_SCHED_REORDER2.  Like TARGET_SCHED_REORDER, but
   called within a cycle, after some instructions have already been
   issued.  */

static int
mips_sched_reorder2 (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
		     rtx *ready, int *nreadyp, int cycle ATTRIBUTE_UNUSED)
{
  mips_sched_reorder_1 (file, verbose, ready, nreadyp, cycle);
  /* Return the remaining issue count recorded by mips_variable_issue.  */
  return cached_can_issue_more;
}
+
+/* Update round-robin counters for ALU1/2 and FALU1/2. */
+
+static void
+mips_ls2_variable_issue (rtx insn)
+{
+ if (mips_ls2.alu1_turn_p)
+ {
+ if (cpu_unit_reservation_p (curr_state, mips_ls2.alu1_core_unit_code))
+ mips_ls2.alu1_turn_p = false;
+ }
+ else
+ {
+ if (cpu_unit_reservation_p (curr_state, mips_ls2.alu2_core_unit_code))
+ mips_ls2.alu1_turn_p = true;
+ }
+
+ if (mips_ls2.falu1_turn_p)
+ {
+ if (cpu_unit_reservation_p (curr_state, mips_ls2.falu1_core_unit_code))
+ mips_ls2.falu1_turn_p = false;
+ }
+ else
+ {
+ if (cpu_unit_reservation_p (curr_state, mips_ls2.falu2_core_unit_code))
+ mips_ls2.falu1_turn_p = true;
+ }
+
+ if (recog_memoized (insn) >= 0)
+ mips_ls2.cycle_has_multi_p |= (get_attr_type (insn) == TYPE_MULTI);
+}
+
/* Implement TARGET_SCHED_VARIABLE_ISSUE.  Update per-tune scheduling
   state now that INSN has been issued, and return the number of
   instructions that can still be issued this cycle.  */

static int
mips_variable_issue (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
		     rtx insn, int more)
{
  /* Ignore USEs and CLOBBERs; don't count them against the issue rate.  */
  if (USEFUL_INSN_P (insn))
    {
      /* TYPE_GHOST instructions do not consume an issue slot.  */
      if (get_attr_type (insn) != TYPE_GHOST)
	more--;
      if (!reload_completed && TUNE_MACC_CHAINS)
	mips_macc_chains_record (insn);
      vr4130_last_insn = insn;
      if (TUNE_74K)
	mips_74k_agen_init (insn);
      else if (TUNE_LOONGSON_2EF)
	mips_ls2_variable_issue (insn);
    }

  /* Instructions of type 'multi' should all be split before
     the second scheduling pass.  */
  gcc_assert (!reload_completed
	      || recog_memoized (insn) < 0
	      || get_attr_type (insn) != TYPE_MULTI);

  /* Save MORE so that mips_sched_reorder2 can return it.  */
  cached_can_issue_more = more;
  return more;
}
+
+/* Given that we have an rtx of the form (prefetch ... WRITE LOCALITY),
+ return the first operand of the associated PREF or PREFX insn. */
+
+rtx
+mips_prefetch_cookie (rtx write, rtx locality)
+{
+ /* store_streamed / load_streamed. */
+ if (INTVAL (locality) <= 0)
+ return GEN_INT (INTVAL (write) + 4);
+
+ /* store / load. */
+ if (INTVAL (locality) <= 2)
+ return write;
+
+ /* store_retained / load_retained. */
+ return GEN_INT (INTVAL (write) + 6);
+}
+
/* Flags that indicate when a built-in function is available.  The
   mips_builtin_avail_* predicates below return a mask of these flags
   (or zero if the function is not available at all).

   BUILTIN_AVAIL_NON_MIPS16
	The function is available on the current target if !TARGET_MIPS16.

   BUILTIN_AVAIL_MIPS16
	The function is available on the current target if TARGET_MIPS16.  */
#define BUILTIN_AVAIL_NON_MIPS16 1
#define BUILTIN_AVAIL_MIPS16 2

/* Declare an availability predicate for built-in functions that
   require non-MIPS16 mode and also require COND to be true.
   NAME is the main part of the predicate's name.  */
#define AVAIL_NON_MIPS16(NAME, COND)					\
  static unsigned int							\
  mips_builtin_avail_##NAME (void)					\
  {									\
    return (COND) ? BUILTIN_AVAIL_NON_MIPS16 : 0;			\
  }

/* Declare an availability predicate for built-in functions that
   support both MIPS16 and non-MIPS16 code and also require COND
   to be true.  NAME is the main part of the predicate's name.  */
#define AVAIL_ALL(NAME, COND)						\
  static unsigned int							\
  mips_builtin_avail_##NAME (void)					\
  {									\
    return (COND) ? BUILTIN_AVAIL_NON_MIPS16 | BUILTIN_AVAIL_MIPS16 : 0; \
  }
+
/* This structure describes a single built-in function.  */
struct mips_builtin_description {
  /* The code of the main .md file instruction.  See mips_builtin_type
     for more information.  */
  enum insn_code icode;

  /* The floating-point comparison code to use with ICODE, if any.  */
  enum mips_fp_condition cond;

  /* The name of the built-in function.  */
  const char *name;

  /* Specifies how the function should be expanded.  */
  enum mips_builtin_type builtin_type;

  /* The function's prototype.  */
  enum mips_function_type function_type;

  /* Whether the function is available; returns a mask of the
     BUILTIN_AVAIL_* flags above, or zero if unavailable.  */
  unsigned int (*avail) (void);
};
+
/* Availability predicates referenced by the mips_builtins table below.  */
AVAIL_ALL (hard_float, TARGET_HARD_FLOAT_ABI)
AVAIL_NON_MIPS16 (paired_single, TARGET_PAIRED_SINGLE_FLOAT)
AVAIL_NON_MIPS16 (sb1_paired_single, TARGET_SB1 && TARGET_PAIRED_SINGLE_FLOAT)
AVAIL_NON_MIPS16 (mips3d, TARGET_MIPS3D)
AVAIL_NON_MIPS16 (dsp, TARGET_DSP)
AVAIL_NON_MIPS16 (dspr2, TARGET_DSPR2)
AVAIL_NON_MIPS16 (dsp_32, !TARGET_64BIT && TARGET_DSP)
AVAIL_NON_MIPS16 (dsp_64, TARGET_64BIT && TARGET_DSP)
AVAIL_NON_MIPS16 (dspr2_32, !TARGET_64BIT && TARGET_DSPR2)
AVAIL_NON_MIPS16 (loongson, TARGET_LOONGSON_VECTORS)
AVAIL_NON_MIPS16 (cache, TARGET_CACHE_BUILTIN)
+
/* Construct a mips_builtin_description from the given arguments.

   INSN is the name of the associated instruction pattern, without the
   leading CODE_FOR_mips_.

   COND is the floating-point condition code associated with the
   function.  It can be 'f' if the field is not applicable.

   NAME is the name of the function itself, without the leading
   "__builtin_mips_".

   BUILTIN_TYPE and FUNCTION_TYPE are mips_builtin_description fields.

   AVAIL is the name of the availability predicate, without the leading
   mips_builtin_avail_.  */
#define MIPS_BUILTIN(INSN, COND, NAME, BUILTIN_TYPE,			\
		     FUNCTION_TYPE, AVAIL)				\
  { CODE_FOR_mips_ ## INSN, MIPS_FP_COND_ ## COND,			\
    "__builtin_mips_" NAME, BUILTIN_TYPE, FUNCTION_TYPE,		\
    mips_builtin_avail_ ## AVAIL }

/* Define __builtin_mips_<INSN>, which is a MIPS_BUILTIN_DIRECT function
   mapped to instruction CODE_FOR_mips_<INSN>,  FUNCTION_TYPE and AVAIL
   are as for MIPS_BUILTIN.  */
#define DIRECT_BUILTIN(INSN, FUNCTION_TYPE, AVAIL)			\
  MIPS_BUILTIN (INSN, f, #INSN, MIPS_BUILTIN_DIRECT, FUNCTION_TYPE, AVAIL)

/* Define __builtin_mips_<INSN>_<COND>_{s,d} functions, both of which
   are subject to mips_builtin_avail_<AVAIL>.  */
#define CMP_SCALAR_BUILTINS(INSN, COND, AVAIL)				\
  MIPS_BUILTIN (INSN ## _cond_s, COND, #INSN "_" #COND "_s",		\
		MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_SF_SF, AVAIL),	\
  MIPS_BUILTIN (INSN ## _cond_d, COND, #INSN "_" #COND "_d",		\
		MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_DF_DF, AVAIL)

/* Define __builtin_mips_{any,all,upper,lower}_<INSN>_<COND>_ps.
   The lower and upper forms are subject to mips_builtin_avail_<AVAIL>
   while the any and all forms are subject to mips_builtin_avail_mips3d.  */
#define CMP_PS_BUILTINS(INSN, COND, AVAIL)				\
  MIPS_BUILTIN (INSN ## _cond_ps, COND, "any_" #INSN "_" #COND "_ps",	\
		MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF,		\
		mips3d),						\
  MIPS_BUILTIN (INSN ## _cond_ps, COND, "all_" #INSN "_" #COND "_ps",	\
		MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF,		\
		mips3d),						\
  MIPS_BUILTIN (INSN ## _cond_ps, COND, "lower_" #INSN "_" #COND "_ps",	\
		MIPS_BUILTIN_CMP_LOWER, MIPS_INT_FTYPE_V2SF_V2SF,	\
		AVAIL),							\
  MIPS_BUILTIN (INSN ## _cond_ps, COND, "upper_" #INSN "_" #COND "_ps",	\
		MIPS_BUILTIN_CMP_UPPER, MIPS_INT_FTYPE_V2SF_V2SF,	\
		AVAIL)

/* Define __builtin_mips_{any,all}_<INSN>_<COND>_4s.  The functions
   are subject to mips_builtin_avail_mips3d.  */
#define CMP_4S_BUILTINS(INSN, COND)					\
  MIPS_BUILTIN (INSN ## _cond_4s, COND, "any_" #INSN "_" #COND "_4s",	\
		MIPS_BUILTIN_CMP_ANY,					\
		MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, mips3d),		\
  MIPS_BUILTIN (INSN ## _cond_4s, COND, "all_" #INSN "_" #COND "_4s",	\
		MIPS_BUILTIN_CMP_ALL,					\
		MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, mips3d)

/* Define __builtin_mips_mov{t,f}_<INSN>_<COND>_ps.  The comparison
   instruction requires mips_builtin_avail_<AVAIL>.  */
#define MOVTF_BUILTINS(INSN, COND, AVAIL)				\
  MIPS_BUILTIN (INSN ## _cond_ps, COND, "movt_" #INSN "_" #COND "_ps",	\
		MIPS_BUILTIN_MOVT, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF,	\
		AVAIL),							\
  MIPS_BUILTIN (INSN ## _cond_ps, COND, "movf_" #INSN "_" #COND "_ps",	\
		MIPS_BUILTIN_MOVF, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF,	\
		AVAIL)

/* Define all the built-in functions related to C.cond.fmt condition COND;
   invoked once per condition via MIPS_FP_CONDITIONS.  */
#define CMP_BUILTINS(COND)						\
  MOVTF_BUILTINS (c, COND, paired_single),				\
  MOVTF_BUILTINS (cabs, COND, mips3d),					\
  CMP_SCALAR_BUILTINS (cabs, COND, mips3d),				\
  CMP_PS_BUILTINS (c, COND, paired_single),				\
  CMP_PS_BUILTINS (cabs, COND, mips3d),					\
  CMP_4S_BUILTINS (c, COND),						\
  CMP_4S_BUILTINS (cabs, COND)

/* Define __builtin_mips_<INSN>, which is a MIPS_BUILTIN_DIRECT_NO_TARGET
   function mapped to instruction CODE_FOR_mips_<INSN>,  FUNCTION_TYPE
   and AVAIL are as for MIPS_BUILTIN.  */
#define DIRECT_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE, AVAIL)		\
  MIPS_BUILTIN (INSN, f, #INSN, MIPS_BUILTIN_DIRECT_NO_TARGET,		\
		FUNCTION_TYPE, AVAIL)

/* Define __builtin_mips_bposge<VALUE>.  <VALUE> is 32 for the MIPS32 DSP
   branch instruction.  AVAIL is as for MIPS_BUILTIN.  */
#define BPOSGE_BUILTIN(VALUE, AVAIL)					\
  MIPS_BUILTIN (bposge, f, "bposge" #VALUE,				\
		MIPS_BUILTIN_BPOSGE ## VALUE, MIPS_SI_FTYPE_VOID, AVAIL)

/* Define a Loongson MIPS_BUILTIN_DIRECT function __builtin_loongson_<FN_NAME>
   for instruction CODE_FOR_loongson_<INSN>.  FUNCTION_TYPE is a
   builtin_description field.  */
#define LOONGSON_BUILTIN_ALIAS(INSN, FN_NAME, FUNCTION_TYPE)		\
  { CODE_FOR_loongson_ ## INSN, MIPS_FP_COND_f,				\
    "__builtin_loongson_" #FN_NAME, MIPS_BUILTIN_DIRECT,		\
    FUNCTION_TYPE, mips_builtin_avail_loongson }

/* Define a Loongson MIPS_BUILTIN_DIRECT function __builtin_loongson_<INSN>
   for instruction CODE_FOR_loongson_<INSN>.  FUNCTION_TYPE is a
   builtin_description field.  */
#define LOONGSON_BUILTIN(INSN, FUNCTION_TYPE)				\
  LOONGSON_BUILTIN_ALIAS (INSN, INSN, FUNCTION_TYPE)

/* Like LOONGSON_BUILTIN, but add _<SUFFIX> to the end of the function name.
   We use functions of this form when the same insn can be usefully applied
   to more than one datatype.  */
#define LOONGSON_BUILTIN_SUFFIX(INSN, SUFFIX, FUNCTION_TYPE)		\
  LOONGSON_BUILTIN_ALIAS (INSN, INSN ## _ ## SUFFIX, FUNCTION_TYPE)
+
/* Map built-in instruction names used by the tables below onto the
   generic .md patterns that actually implement them.  */
#define CODE_FOR_mips_sqrt_ps CODE_FOR_sqrtv2sf2
#define CODE_FOR_mips_addq_ph CODE_FOR_addv2hi3
#define CODE_FOR_mips_addu_qb CODE_FOR_addv4qi3
#define CODE_FOR_mips_subq_ph CODE_FOR_subv2hi3
#define CODE_FOR_mips_subu_qb CODE_FOR_subv4qi3
#define CODE_FOR_mips_mul_ph CODE_FOR_mulv2hi3
#define CODE_FOR_mips_mult CODE_FOR_mulsidi3_32bit
#define CODE_FOR_mips_multu CODE_FOR_umulsidi3_32bit

#define CODE_FOR_loongson_packsswh CODE_FOR_vec_pack_ssat_v2si
#define CODE_FOR_loongson_packsshb CODE_FOR_vec_pack_ssat_v4hi
#define CODE_FOR_loongson_packushb CODE_FOR_vec_pack_usat_v4hi
#define CODE_FOR_loongson_paddw CODE_FOR_addv2si3
#define CODE_FOR_loongson_paddh CODE_FOR_addv4hi3
#define CODE_FOR_loongson_paddb CODE_FOR_addv8qi3
#define CODE_FOR_loongson_paddsh CODE_FOR_ssaddv4hi3
#define CODE_FOR_loongson_paddsb CODE_FOR_ssaddv8qi3
#define CODE_FOR_loongson_paddush CODE_FOR_usaddv4hi3
#define CODE_FOR_loongson_paddusb CODE_FOR_usaddv8qi3
#define CODE_FOR_loongson_pmaxsh CODE_FOR_smaxv4hi3
#define CODE_FOR_loongson_pmaxub CODE_FOR_umaxv8qi3
#define CODE_FOR_loongson_pminsh CODE_FOR_sminv4hi3
#define CODE_FOR_loongson_pminub CODE_FOR_uminv8qi3
#define CODE_FOR_loongson_pmulhuh CODE_FOR_umulv4hi3_highpart
#define CODE_FOR_loongson_pmulhh CODE_FOR_smulv4hi3_highpart
#define CODE_FOR_loongson_pmullh CODE_FOR_mulv4hi3
#define CODE_FOR_loongson_psllh CODE_FOR_ashlv4hi3
#define CODE_FOR_loongson_psllw CODE_FOR_ashlv2si3
#define CODE_FOR_loongson_psrlh CODE_FOR_lshrv4hi3
#define CODE_FOR_loongson_psrlw CODE_FOR_lshrv2si3
#define CODE_FOR_loongson_psrah CODE_FOR_ashrv4hi3
#define CODE_FOR_loongson_psraw CODE_FOR_ashrv2si3
#define CODE_FOR_loongson_psubw CODE_FOR_subv2si3
#define CODE_FOR_loongson_psubh CODE_FOR_subv4hi3
#define CODE_FOR_loongson_psubb CODE_FOR_subv8qi3
#define CODE_FOR_loongson_psubsh CODE_FOR_sssubv4hi3
#define CODE_FOR_loongson_psubsb CODE_FOR_sssubv8qi3
#define CODE_FOR_loongson_psubush CODE_FOR_ussubv4hi3
#define CODE_FOR_loongson_psubusb CODE_FOR_ussubv8qi3
+
/* The table of all MIPS built-in functions.  Index I is both the
   built-in function code and the index into mips_builtin_decls.  */
static const struct mips_builtin_description mips_builtins[] = {
/* The MIPS_GET_FCSR/MIPS_SET_FCSR macros record the table positions of
   the FCSR built-ins; each must match the entry immediately below it.  */
#define MIPS_GET_FCSR 0
  DIRECT_BUILTIN (get_fcsr, MIPS_USI_FTYPE_VOID, hard_float),
#define MIPS_SET_FCSR 1
  DIRECT_NO_TARGET_BUILTIN (set_fcsr, MIPS_VOID_FTYPE_USI, hard_float),

  DIRECT_BUILTIN (pll_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, paired_single),
  DIRECT_BUILTIN (pul_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, paired_single),
  DIRECT_BUILTIN (plu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, paired_single),
  DIRECT_BUILTIN (puu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, paired_single),
  DIRECT_BUILTIN (cvt_ps_s, MIPS_V2SF_FTYPE_SF_SF, paired_single),
  DIRECT_BUILTIN (cvt_s_pl, MIPS_SF_FTYPE_V2SF, paired_single),
  DIRECT_BUILTIN (cvt_s_pu, MIPS_SF_FTYPE_V2SF, paired_single),
  DIRECT_BUILTIN (abs_ps, MIPS_V2SF_FTYPE_V2SF, paired_single),

  DIRECT_BUILTIN (alnv_ps, MIPS_V2SF_FTYPE_V2SF_V2SF_INT, paired_single),
  DIRECT_BUILTIN (addr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, mips3d),
  DIRECT_BUILTIN (mulr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, mips3d),
  DIRECT_BUILTIN (cvt_pw_ps, MIPS_V2SF_FTYPE_V2SF, mips3d),
  DIRECT_BUILTIN (cvt_ps_pw, MIPS_V2SF_FTYPE_V2SF, mips3d),

  DIRECT_BUILTIN (recip1_s, MIPS_SF_FTYPE_SF, mips3d),
  DIRECT_BUILTIN (recip1_d, MIPS_DF_FTYPE_DF, mips3d),
  DIRECT_BUILTIN (recip1_ps, MIPS_V2SF_FTYPE_V2SF, mips3d),
  DIRECT_BUILTIN (recip2_s, MIPS_SF_FTYPE_SF_SF, mips3d),
  DIRECT_BUILTIN (recip2_d, MIPS_DF_FTYPE_DF_DF, mips3d),
  DIRECT_BUILTIN (recip2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, mips3d),

  DIRECT_BUILTIN (rsqrt1_s, MIPS_SF_FTYPE_SF, mips3d),
  DIRECT_BUILTIN (rsqrt1_d, MIPS_DF_FTYPE_DF, mips3d),
  DIRECT_BUILTIN (rsqrt1_ps, MIPS_V2SF_FTYPE_V2SF, mips3d),
  DIRECT_BUILTIN (rsqrt2_s, MIPS_SF_FTYPE_SF_SF, mips3d),
  DIRECT_BUILTIN (rsqrt2_d, MIPS_DF_FTYPE_DF_DF, mips3d),
  DIRECT_BUILTIN (rsqrt2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, mips3d),

  MIPS_FP_CONDITIONS (CMP_BUILTINS),

  /* Built-in functions for the SB-1 processor.  */
  DIRECT_BUILTIN (sqrt_ps, MIPS_V2SF_FTYPE_V2SF, sb1_paired_single),

  /* Built-in functions for the DSP ASE (32-bit and 64-bit).  */
  DIRECT_BUILTIN (addq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
  DIRECT_BUILTIN (addq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
  DIRECT_BUILTIN (addq_s_w, MIPS_SI_FTYPE_SI_SI, dsp),
  DIRECT_BUILTIN (addu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dsp),
  DIRECT_BUILTIN (addu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dsp),
  DIRECT_BUILTIN (subq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
  DIRECT_BUILTIN (subq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
  DIRECT_BUILTIN (subq_s_w, MIPS_SI_FTYPE_SI_SI, dsp),
  DIRECT_BUILTIN (subu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dsp),
  DIRECT_BUILTIN (subu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dsp),
  DIRECT_BUILTIN (addsc, MIPS_SI_FTYPE_SI_SI, dsp),
  DIRECT_BUILTIN (addwc, MIPS_SI_FTYPE_SI_SI, dsp),
  DIRECT_BUILTIN (modsub, MIPS_SI_FTYPE_SI_SI, dsp),
  DIRECT_BUILTIN (raddu_w_qb, MIPS_SI_FTYPE_V4QI, dsp),
  DIRECT_BUILTIN (absq_s_ph, MIPS_V2HI_FTYPE_V2HI, dsp),
  DIRECT_BUILTIN (absq_s_w, MIPS_SI_FTYPE_SI, dsp),
  DIRECT_BUILTIN (precrq_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, dsp),
  DIRECT_BUILTIN (precrq_ph_w, MIPS_V2HI_FTYPE_SI_SI, dsp),
  DIRECT_BUILTIN (precrq_rs_ph_w, MIPS_V2HI_FTYPE_SI_SI, dsp),
  DIRECT_BUILTIN (precrqu_s_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, dsp),
  DIRECT_BUILTIN (preceq_w_phl, MIPS_SI_FTYPE_V2HI, dsp),
  DIRECT_BUILTIN (preceq_w_phr, MIPS_SI_FTYPE_V2HI, dsp),
  DIRECT_BUILTIN (precequ_ph_qbl, MIPS_V2HI_FTYPE_V4QI, dsp),
  DIRECT_BUILTIN (precequ_ph_qbr, MIPS_V2HI_FTYPE_V4QI, dsp),
  DIRECT_BUILTIN (precequ_ph_qbla, MIPS_V2HI_FTYPE_V4QI, dsp),
  DIRECT_BUILTIN (precequ_ph_qbra, MIPS_V2HI_FTYPE_V4QI, dsp),
  DIRECT_BUILTIN (preceu_ph_qbl, MIPS_V2HI_FTYPE_V4QI, dsp),
  DIRECT_BUILTIN (preceu_ph_qbr, MIPS_V2HI_FTYPE_V4QI, dsp),
  DIRECT_BUILTIN (preceu_ph_qbla, MIPS_V2HI_FTYPE_V4QI, dsp),
  DIRECT_BUILTIN (preceu_ph_qbra, MIPS_V2HI_FTYPE_V4QI, dsp),
  DIRECT_BUILTIN (shll_qb, MIPS_V4QI_FTYPE_V4QI_SI, dsp),
  DIRECT_BUILTIN (shll_ph, MIPS_V2HI_FTYPE_V2HI_SI, dsp),
  DIRECT_BUILTIN (shll_s_ph, MIPS_V2HI_FTYPE_V2HI_SI, dsp),
  DIRECT_BUILTIN (shll_s_w, MIPS_SI_FTYPE_SI_SI, dsp),
  DIRECT_BUILTIN (shrl_qb, MIPS_V4QI_FTYPE_V4QI_SI, dsp),
  DIRECT_BUILTIN (shra_ph, MIPS_V2HI_FTYPE_V2HI_SI, dsp),
  DIRECT_BUILTIN (shra_r_ph, MIPS_V2HI_FTYPE_V2HI_SI, dsp),
  DIRECT_BUILTIN (shra_r_w, MIPS_SI_FTYPE_SI_SI, dsp),
  DIRECT_BUILTIN (muleu_s_ph_qbl, MIPS_V2HI_FTYPE_V4QI_V2HI, dsp),
  DIRECT_BUILTIN (muleu_s_ph_qbr, MIPS_V2HI_FTYPE_V4QI_V2HI, dsp),
  DIRECT_BUILTIN (mulq_rs_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
  DIRECT_BUILTIN (muleq_s_w_phl, MIPS_SI_FTYPE_V2HI_V2HI, dsp),
  DIRECT_BUILTIN (muleq_s_w_phr, MIPS_SI_FTYPE_V2HI_V2HI, dsp),
  DIRECT_BUILTIN (bitrev, MIPS_SI_FTYPE_SI, dsp),
  DIRECT_BUILTIN (insv, MIPS_SI_FTYPE_SI_SI, dsp),
  DIRECT_BUILTIN (repl_qb, MIPS_V4QI_FTYPE_SI, dsp),
  DIRECT_BUILTIN (repl_ph, MIPS_V2HI_FTYPE_SI, dsp),
  DIRECT_NO_TARGET_BUILTIN (cmpu_eq_qb, MIPS_VOID_FTYPE_V4QI_V4QI, dsp),
  DIRECT_NO_TARGET_BUILTIN (cmpu_lt_qb, MIPS_VOID_FTYPE_V4QI_V4QI, dsp),
  DIRECT_NO_TARGET_BUILTIN (cmpu_le_qb, MIPS_VOID_FTYPE_V4QI_V4QI, dsp),
  DIRECT_BUILTIN (cmpgu_eq_qb, MIPS_SI_FTYPE_V4QI_V4QI, dsp),
  DIRECT_BUILTIN (cmpgu_lt_qb, MIPS_SI_FTYPE_V4QI_V4QI, dsp),
  DIRECT_BUILTIN (cmpgu_le_qb, MIPS_SI_FTYPE_V4QI_V4QI, dsp),
  DIRECT_NO_TARGET_BUILTIN (cmp_eq_ph, MIPS_VOID_FTYPE_V2HI_V2HI, dsp),
  DIRECT_NO_TARGET_BUILTIN (cmp_lt_ph, MIPS_VOID_FTYPE_V2HI_V2HI, dsp),
  DIRECT_NO_TARGET_BUILTIN (cmp_le_ph, MIPS_VOID_FTYPE_V2HI_V2HI, dsp),
  DIRECT_BUILTIN (pick_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dsp),
  DIRECT_BUILTIN (pick_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
  DIRECT_BUILTIN (packrl_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
  DIRECT_NO_TARGET_BUILTIN (wrdsp, MIPS_VOID_FTYPE_SI_SI, dsp),
  DIRECT_BUILTIN (rddsp, MIPS_SI_FTYPE_SI, dsp),
  DIRECT_BUILTIN (lbux, MIPS_SI_FTYPE_POINTER_SI, dsp),
  DIRECT_BUILTIN (lhx, MIPS_SI_FTYPE_POINTER_SI, dsp),
  DIRECT_BUILTIN (lwx, MIPS_SI_FTYPE_POINTER_SI, dsp),
  BPOSGE_BUILTIN (32, dsp),

  /* The following are for the MIPS DSP ASE REV 2 (32-bit and 64-bit).  */
  DIRECT_BUILTIN (absq_s_qb, MIPS_V4QI_FTYPE_V4QI, dspr2),
  DIRECT_BUILTIN (addu_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
  DIRECT_BUILTIN (addu_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
  DIRECT_BUILTIN (adduh_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dspr2),
  DIRECT_BUILTIN (adduh_r_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dspr2),
  DIRECT_BUILTIN (append, MIPS_SI_FTYPE_SI_SI_SI, dspr2),
  DIRECT_BUILTIN (balign, MIPS_SI_FTYPE_SI_SI_SI, dspr2),
  DIRECT_BUILTIN (cmpgdu_eq_qb, MIPS_SI_FTYPE_V4QI_V4QI, dspr2),
  DIRECT_BUILTIN (cmpgdu_lt_qb, MIPS_SI_FTYPE_V4QI_V4QI, dspr2),
  DIRECT_BUILTIN (cmpgdu_le_qb, MIPS_SI_FTYPE_V4QI_V4QI, dspr2),
  DIRECT_BUILTIN (mul_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
  DIRECT_BUILTIN (mul_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
  DIRECT_BUILTIN (mulq_rs_w, MIPS_SI_FTYPE_SI_SI, dspr2),
  DIRECT_BUILTIN (mulq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
  DIRECT_BUILTIN (mulq_s_w, MIPS_SI_FTYPE_SI_SI, dspr2),
  DIRECT_BUILTIN (precr_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, dspr2),
  DIRECT_BUILTIN (precr_sra_ph_w, MIPS_V2HI_FTYPE_SI_SI_SI, dspr2),
  DIRECT_BUILTIN (precr_sra_r_ph_w, MIPS_V2HI_FTYPE_SI_SI_SI, dspr2),
  DIRECT_BUILTIN (prepend, MIPS_SI_FTYPE_SI_SI_SI, dspr2),
  DIRECT_BUILTIN (shra_qb, MIPS_V4QI_FTYPE_V4QI_SI, dspr2),
  DIRECT_BUILTIN (shra_r_qb, MIPS_V4QI_FTYPE_V4QI_SI, dspr2),
  DIRECT_BUILTIN (shrl_ph, MIPS_V2HI_FTYPE_V2HI_SI, dspr2),
  DIRECT_BUILTIN (subu_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
  DIRECT_BUILTIN (subu_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
  DIRECT_BUILTIN (subuh_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dspr2),
  DIRECT_BUILTIN (subuh_r_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dspr2),
  DIRECT_BUILTIN (addqh_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
  DIRECT_BUILTIN (addqh_r_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
  DIRECT_BUILTIN (addqh_w, MIPS_SI_FTYPE_SI_SI, dspr2),
  DIRECT_BUILTIN (addqh_r_w, MIPS_SI_FTYPE_SI_SI, dspr2),
  DIRECT_BUILTIN (subqh_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
  DIRECT_BUILTIN (subqh_r_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
  DIRECT_BUILTIN (subqh_w, MIPS_SI_FTYPE_SI_SI, dspr2),
  DIRECT_BUILTIN (subqh_r_w, MIPS_SI_FTYPE_SI_SI, dspr2),

  /* Built-in functions for the DSP ASE (32-bit only).  */
  DIRECT_BUILTIN (dpau_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, dsp_32),
  DIRECT_BUILTIN (dpau_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, dsp_32),
  DIRECT_BUILTIN (dpsu_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, dsp_32),
  DIRECT_BUILTIN (dpsu_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, dsp_32),
  DIRECT_BUILTIN (dpaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
  DIRECT_BUILTIN (dpsq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
  DIRECT_BUILTIN (mulsaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
  DIRECT_BUILTIN (dpaq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, dsp_32),
  DIRECT_BUILTIN (dpsq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, dsp_32),
  DIRECT_BUILTIN (maq_s_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
  DIRECT_BUILTIN (maq_s_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
  DIRECT_BUILTIN (maq_sa_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
  DIRECT_BUILTIN (maq_sa_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
  DIRECT_BUILTIN (extr_w, MIPS_SI_FTYPE_DI_SI, dsp_32),
  DIRECT_BUILTIN (extr_r_w, MIPS_SI_FTYPE_DI_SI, dsp_32),
  DIRECT_BUILTIN (extr_rs_w, MIPS_SI_FTYPE_DI_SI, dsp_32),
  DIRECT_BUILTIN (extr_s_h, MIPS_SI_FTYPE_DI_SI, dsp_32),
  DIRECT_BUILTIN (extp, MIPS_SI_FTYPE_DI_SI, dsp_32),
  DIRECT_BUILTIN (extpdp, MIPS_SI_FTYPE_DI_SI, dsp_32),
  DIRECT_BUILTIN (shilo, MIPS_DI_FTYPE_DI_SI, dsp_32),
  DIRECT_BUILTIN (mthlip, MIPS_DI_FTYPE_DI_SI, dsp_32),
  DIRECT_BUILTIN (madd, MIPS_DI_FTYPE_DI_SI_SI, dsp_32),
  DIRECT_BUILTIN (maddu, MIPS_DI_FTYPE_DI_USI_USI, dsp_32),
  DIRECT_BUILTIN (msub, MIPS_DI_FTYPE_DI_SI_SI, dsp_32),
  DIRECT_BUILTIN (msubu, MIPS_DI_FTYPE_DI_USI_USI, dsp_32),
  DIRECT_BUILTIN (mult, MIPS_DI_FTYPE_SI_SI, dsp_32),
  DIRECT_BUILTIN (multu, MIPS_DI_FTYPE_USI_USI, dsp_32),

  /* Built-in functions for the DSP ASE (64-bit only).  */
  DIRECT_BUILTIN (ldx, MIPS_DI_FTYPE_POINTER_SI, dsp_64),

  /* The following are for the MIPS DSP ASE REV 2 (32-bit only).  */
  DIRECT_BUILTIN (dpa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
  DIRECT_BUILTIN (dps_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
  DIRECT_BUILTIN (mulsa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
  DIRECT_BUILTIN (dpax_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
  DIRECT_BUILTIN (dpsx_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
  DIRECT_BUILTIN (dpaqx_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
  DIRECT_BUILTIN (dpaqx_sa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
  DIRECT_BUILTIN (dpsqx_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
  DIRECT_BUILTIN (dpsqx_sa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),

  /* Built-in functions for ST Microelectronics Loongson-2E/2F cores.  */
  LOONGSON_BUILTIN (packsswh, MIPS_V4HI_FTYPE_V2SI_V2SI),
  LOONGSON_BUILTIN (packsshb, MIPS_V8QI_FTYPE_V4HI_V4HI),
  LOONGSON_BUILTIN (packushb, MIPS_UV8QI_FTYPE_UV4HI_UV4HI),
  LOONGSON_BUILTIN_SUFFIX (paddw, u, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
  LOONGSON_BUILTIN_SUFFIX (paddh, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
  LOONGSON_BUILTIN_SUFFIX (paddb, u, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
  LOONGSON_BUILTIN_SUFFIX (paddw, s, MIPS_V2SI_FTYPE_V2SI_V2SI),
  LOONGSON_BUILTIN_SUFFIX (paddh, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
  LOONGSON_BUILTIN_SUFFIX (paddb, s, MIPS_V8QI_FTYPE_V8QI_V8QI),
  LOONGSON_BUILTIN_SUFFIX (paddd, u, MIPS_UDI_FTYPE_UDI_UDI),
  LOONGSON_BUILTIN_SUFFIX (paddd, s, MIPS_DI_FTYPE_DI_DI),
  LOONGSON_BUILTIN (paddsh, MIPS_V4HI_FTYPE_V4HI_V4HI),
  LOONGSON_BUILTIN (paddsb, MIPS_V8QI_FTYPE_V8QI_V8QI),
  LOONGSON_BUILTIN (paddush, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
  LOONGSON_BUILTIN (paddusb, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
  LOONGSON_BUILTIN_ALIAS (pandn_d, pandn_ud, MIPS_UDI_FTYPE_UDI_UDI),
  LOONGSON_BUILTIN_ALIAS (pandn_w, pandn_uw, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
  LOONGSON_BUILTIN_ALIAS (pandn_h, pandn_uh, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
  LOONGSON_BUILTIN_ALIAS (pandn_b, pandn_ub, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
  LOONGSON_BUILTIN_ALIAS (pandn_d, pandn_sd, MIPS_DI_FTYPE_DI_DI),
  LOONGSON_BUILTIN_ALIAS (pandn_w, pandn_sw, MIPS_V2SI_FTYPE_V2SI_V2SI),
  LOONGSON_BUILTIN_ALIAS (pandn_h, pandn_sh, MIPS_V4HI_FTYPE_V4HI_V4HI),
  LOONGSON_BUILTIN_ALIAS (pandn_b, pandn_sb, MIPS_V8QI_FTYPE_V8QI_V8QI),
  LOONGSON_BUILTIN (pavgh, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
  LOONGSON_BUILTIN (pavgb, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
  LOONGSON_BUILTIN_SUFFIX (pcmpeqw, u, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
  LOONGSON_BUILTIN_SUFFIX (pcmpeqh, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
  LOONGSON_BUILTIN_SUFFIX (pcmpeqb, u, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
  LOONGSON_BUILTIN_SUFFIX (pcmpeqw, s, MIPS_V2SI_FTYPE_V2SI_V2SI),
  LOONGSON_BUILTIN_SUFFIX (pcmpeqh, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
  LOONGSON_BUILTIN_SUFFIX (pcmpeqb, s, MIPS_V8QI_FTYPE_V8QI_V8QI),
  LOONGSON_BUILTIN_SUFFIX (pcmpgtw, u, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
  LOONGSON_BUILTIN_SUFFIX (pcmpgth, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
  LOONGSON_BUILTIN_SUFFIX (pcmpgtb, u, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
  LOONGSON_BUILTIN_SUFFIX (pcmpgtw, s, MIPS_V2SI_FTYPE_V2SI_V2SI),
  LOONGSON_BUILTIN_SUFFIX (pcmpgth, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
  LOONGSON_BUILTIN_SUFFIX (pcmpgtb, s, MIPS_V8QI_FTYPE_V8QI_V8QI),
  LOONGSON_BUILTIN_SUFFIX (pextrh, u, MIPS_UV4HI_FTYPE_UV4HI_USI),
  LOONGSON_BUILTIN_SUFFIX (pextrh, s, MIPS_V4HI_FTYPE_V4HI_USI),
  LOONGSON_BUILTIN_SUFFIX (pinsrh_0, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
  LOONGSON_BUILTIN_SUFFIX (pinsrh_1, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
  LOONGSON_BUILTIN_SUFFIX (pinsrh_2, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
  LOONGSON_BUILTIN_SUFFIX (pinsrh_3, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
  LOONGSON_BUILTIN_SUFFIX (pinsrh_0, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
  LOONGSON_BUILTIN_SUFFIX (pinsrh_1, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
  LOONGSON_BUILTIN_SUFFIX (pinsrh_2, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
  LOONGSON_BUILTIN_SUFFIX (pinsrh_3, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
  LOONGSON_BUILTIN (pmaddhw, MIPS_V2SI_FTYPE_V4HI_V4HI),
  LOONGSON_BUILTIN (pmaxsh, MIPS_V4HI_FTYPE_V4HI_V4HI),
  LOONGSON_BUILTIN (pmaxub, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
  LOONGSON_BUILTIN (pminsh, MIPS_V4HI_FTYPE_V4HI_V4HI),
  LOONGSON_BUILTIN (pminub, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
  LOONGSON_BUILTIN_SUFFIX (pmovmskb, u, MIPS_UV8QI_FTYPE_UV8QI),
  LOONGSON_BUILTIN_SUFFIX (pmovmskb, s, MIPS_V8QI_FTYPE_V8QI),
  LOONGSON_BUILTIN (pmulhuh, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
  LOONGSON_BUILTIN (pmulhh, MIPS_V4HI_FTYPE_V4HI_V4HI),
  LOONGSON_BUILTIN (pmullh, MIPS_V4HI_FTYPE_V4HI_V4HI),
  LOONGSON_BUILTIN (pmuluw, MIPS_UDI_FTYPE_UV2SI_UV2SI),
  LOONGSON_BUILTIN (pasubub, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
  LOONGSON_BUILTIN (biadd, MIPS_UV4HI_FTYPE_UV8QI),
  LOONGSON_BUILTIN (psadbh, MIPS_UV4HI_FTYPE_UV8QI_UV8QI),
  LOONGSON_BUILTIN_SUFFIX (pshufh, u, MIPS_UV4HI_FTYPE_UV4HI_UQI),
  LOONGSON_BUILTIN_SUFFIX (pshufh, s, MIPS_V4HI_FTYPE_V4HI_UQI),
  LOONGSON_BUILTIN_SUFFIX (psllh, u, MIPS_UV4HI_FTYPE_UV4HI_UQI),
  LOONGSON_BUILTIN_SUFFIX (psllh, s, MIPS_V4HI_FTYPE_V4HI_UQI),
  LOONGSON_BUILTIN_SUFFIX (psllw, u, MIPS_UV2SI_FTYPE_UV2SI_UQI),
  LOONGSON_BUILTIN_SUFFIX (psllw, s, MIPS_V2SI_FTYPE_V2SI_UQI),
  LOONGSON_BUILTIN_SUFFIX (psrah, u, MIPS_UV4HI_FTYPE_UV4HI_UQI),
  LOONGSON_BUILTIN_SUFFIX (psrah, s, MIPS_V4HI_FTYPE_V4HI_UQI),
  LOONGSON_BUILTIN_SUFFIX (psraw, u, MIPS_UV2SI_FTYPE_UV2SI_UQI),
  LOONGSON_BUILTIN_SUFFIX (psraw, s, MIPS_V2SI_FTYPE_V2SI_UQI),
  LOONGSON_BUILTIN_SUFFIX (psrlh, u, MIPS_UV4HI_FTYPE_UV4HI_UQI),
  LOONGSON_BUILTIN_SUFFIX (psrlh, s, MIPS_V4HI_FTYPE_V4HI_UQI),
  LOONGSON_BUILTIN_SUFFIX (psrlw, u, MIPS_UV2SI_FTYPE_UV2SI_UQI),
  LOONGSON_BUILTIN_SUFFIX (psrlw, s, MIPS_V2SI_FTYPE_V2SI_UQI),
  LOONGSON_BUILTIN_SUFFIX (psubw, u, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
  LOONGSON_BUILTIN_SUFFIX (psubh, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
  LOONGSON_BUILTIN_SUFFIX (psubb, u, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
  LOONGSON_BUILTIN_SUFFIX (psubw, s, MIPS_V2SI_FTYPE_V2SI_V2SI),
  LOONGSON_BUILTIN_SUFFIX (psubh, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
  LOONGSON_BUILTIN_SUFFIX (psubb, s, MIPS_V8QI_FTYPE_V8QI_V8QI),
  LOONGSON_BUILTIN_SUFFIX (psubd, u, MIPS_UDI_FTYPE_UDI_UDI),
  LOONGSON_BUILTIN_SUFFIX (psubd, s, MIPS_DI_FTYPE_DI_DI),
  LOONGSON_BUILTIN (psubsh, MIPS_V4HI_FTYPE_V4HI_V4HI),
  LOONGSON_BUILTIN (psubsb, MIPS_V8QI_FTYPE_V8QI_V8QI),
  LOONGSON_BUILTIN (psubush, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
  LOONGSON_BUILTIN (psubusb, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
  LOONGSON_BUILTIN_SUFFIX (punpckhbh, u, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
  LOONGSON_BUILTIN_SUFFIX (punpckhhw, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
  LOONGSON_BUILTIN_SUFFIX (punpckhwd, u, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
  LOONGSON_BUILTIN_SUFFIX (punpckhbh, s, MIPS_V8QI_FTYPE_V8QI_V8QI),
  LOONGSON_BUILTIN_SUFFIX (punpckhhw, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
  LOONGSON_BUILTIN_SUFFIX (punpckhwd, s, MIPS_V2SI_FTYPE_V2SI_V2SI),
  LOONGSON_BUILTIN_SUFFIX (punpcklbh, u, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
  LOONGSON_BUILTIN_SUFFIX (punpcklhw, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
  LOONGSON_BUILTIN_SUFFIX (punpcklwd, u, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
  LOONGSON_BUILTIN_SUFFIX (punpcklbh, s, MIPS_V8QI_FTYPE_V8QI_V8QI),
  LOONGSON_BUILTIN_SUFFIX (punpcklhw, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
  LOONGSON_BUILTIN_SUFFIX (punpcklwd, s, MIPS_V2SI_FTYPE_V2SI_V2SI),

  /* Sundry other built-in functions.  */
  DIRECT_NO_TARGET_BUILTIN (cache, MIPS_VOID_FTYPE_SI_CVPOINTER, cache)
};
+
+/* Index I is the function declaration for mips_builtins[I], or null if the
+ function isn't defined on this target. */
+static GTY(()) tree mips_builtin_decls[ARRAY_SIZE (mips_builtins)];
+
+/* MODE is a vector mode whose elements have type TYPE. Return the type
+ of the vector itself. */
+
+static tree
+mips_builtin_vector_type (tree type, enum machine_mode mode)
+{
+ static tree types[2 * (int) MAX_MACHINE_MODE];
+ int mode_index;
+
+ mode_index = (int) mode;
+
+ if (TREE_CODE (type) == INTEGER_TYPE && TYPE_UNSIGNED (type))
+ mode_index += MAX_MACHINE_MODE;
+
+ if (types[mode_index] == NULL_TREE)
+ types[mode_index] = build_vector_type_for_mode (type, mode);
+ return types[mode_index];
+}
+
+/* Return a type for 'const volatile void *'. */
+
+static tree
+mips_build_cvpointer_type (void)
+{
+ static tree cache;
+
+ if (cache == NULL_TREE)
+ cache = build_pointer_type (build_qualified_type
+ (void_type_node,
+ TYPE_QUAL_CONST | TYPE_QUAL_VOLATILE));
+ return cache;
+}
+
/* The MIPS_ATYPE_* macros map each MIPS_FTYPES type code onto the tree
   node for the corresponding source-level type.  */

/* Source-level argument types.  */
#define MIPS_ATYPE_VOID void_type_node
#define MIPS_ATYPE_INT integer_type_node
#define MIPS_ATYPE_POINTER ptr_type_node
#define MIPS_ATYPE_CVPOINTER mips_build_cvpointer_type ()

/* Standard mode-based argument types.  */
#define MIPS_ATYPE_UQI unsigned_intQI_type_node
#define MIPS_ATYPE_SI intSI_type_node
#define MIPS_ATYPE_USI unsigned_intSI_type_node
#define MIPS_ATYPE_DI intDI_type_node
#define MIPS_ATYPE_UDI unsigned_intDI_type_node
#define MIPS_ATYPE_SF float_type_node
#define MIPS_ATYPE_DF double_type_node

/* Vector argument types.  */
#define MIPS_ATYPE_V2SF mips_builtin_vector_type (float_type_node, V2SFmode)
#define MIPS_ATYPE_V2HI mips_builtin_vector_type (intHI_type_node, V2HImode)
#define MIPS_ATYPE_V2SI mips_builtin_vector_type (intSI_type_node, V2SImode)
#define MIPS_ATYPE_V4QI mips_builtin_vector_type (intQI_type_node, V4QImode)
#define MIPS_ATYPE_V4HI mips_builtin_vector_type (intHI_type_node, V4HImode)
#define MIPS_ATYPE_V8QI mips_builtin_vector_type (intQI_type_node, V8QImode)
#define MIPS_ATYPE_UV2SI					\
  mips_builtin_vector_type (unsigned_intSI_type_node, V2SImode)
#define MIPS_ATYPE_UV4HI					\
  mips_builtin_vector_type (unsigned_intHI_type_node, V4HImode)
#define MIPS_ATYPE_UV8QI					\
  mips_builtin_vector_type (unsigned_intQI_type_node, V8QImode)

/* MIPS_FTYPE_ATYPESN takes N MIPS_FTYPES-like type codes and lists
   their associated MIPS_ATYPEs.  The first code is always the return
   type; the rest are argument types.  */
#define MIPS_FTYPE_ATYPES1(A, B) \
  MIPS_ATYPE_##A, MIPS_ATYPE_##B

#define MIPS_FTYPE_ATYPES2(A, B, C) \
  MIPS_ATYPE_##A, MIPS_ATYPE_##B, MIPS_ATYPE_##C

#define MIPS_FTYPE_ATYPES3(A, B, C, D) \
  MIPS_ATYPE_##A, MIPS_ATYPE_##B, MIPS_ATYPE_##C, MIPS_ATYPE_##D

#define MIPS_FTYPE_ATYPES4(A, B, C, D, E) \
  MIPS_ATYPE_##A, MIPS_ATYPE_##B, MIPS_ATYPE_##C, MIPS_ATYPE_##D, \
  MIPS_ATYPE_##E
+
+/* Return the function type associated with function prototype TYPE.
+   Types are built on demand and cached, so each distinct prototype is
+   constructed at most once.  */
+
+static tree
+mips_build_function_type (enum mips_function_type type)
+{
+  /* Cache of previously-built function types, indexed by TYPE.  */
+  static tree types[(int) MIPS_MAX_FTYPE_MAX];
+
+  if (types[(int) type] == NULL_TREE)
+    switch (type)
+      {
+      /* Each DEF_MIPS_FTYPE (NUM, ARGS) entry in mips-ftypes.def expands
+	 to a case label for the corresponding enumerator and builds the
+	 return-plus-argument list via MIPS_FTYPE_ATYPESNUM.  */
+#define DEF_MIPS_FTYPE(NUM, ARGS) \
+  case MIPS_FTYPE_NAME##NUM ARGS: \
+    types[(int) type] \
+      = build_function_type_list (MIPS_FTYPE_ATYPES##NUM ARGS, \
+				  NULL_TREE); \
+    break;
+#include "config/mips/mips-ftypes.def"
+#undef DEF_MIPS_FTYPE
+      default:
+	gcc_unreachable ();
+      }
+
+  return types[(int) type];
+}
+
+/* Implement TARGET_INIT_BUILTINS.  */
+
+static void
+mips_init_builtins (void)
+{
+  unsigned int code;
+
+  /* Register every entry of mips_builtins that is available on the
+     current target, recording the resulting decl in
+     mips_builtin_decls.  */
+  for (code = 0; code < ARRAY_SIZE (mips_builtins); code++)
+    {
+      const struct mips_builtin_description *d = &mips_builtins[code];
+
+      if (!d->avail ())
+	continue;
+      mips_builtin_decls[code]
+	= add_builtin_function (d->name,
+				mips_build_function_type (d->function_type),
+				code, BUILT_IN_MD, NULL, NULL);
+    }
+}
+
+/* Implement TARGET_BUILTIN_DECL.  Return the decl registered for
+   builtin CODE, or error_mark_node for out-of-range codes.  */
+
+static tree
+mips_builtin_decl (unsigned int code, bool initialize_p ATTRIBUTE_UNUSED)
+{
+  return (code < ARRAY_SIZE (mips_builtins)
+	  ? mips_builtin_decls[code]
+	  : error_mark_node);
+}
+
+/* Take argument ARGNO from EXP's argument list and convert it into
+   an expand operand.  Store the operand in *OP.  */
+
+static void
+mips_prepare_builtin_arg (struct expand_operand *op, tree exp,
+			  unsigned int argno)
+{
+  tree arg = CALL_EXPR_ARG (exp, argno);
+
+  /* Expand the argument to an rtx and wrap it, together with its
+     natural mode, as an input operand.  */
+  create_input_operand (op, expand_normal (arg),
+			TYPE_MODE (TREE_TYPE (arg)));
+}
+
+/* Expand instruction ICODE as part of a built-in function sequence.
+   Use the first NOPS elements of OPS as the instruction's operands.
+   HAS_TARGET_P is true if operand 0 is a target; it is false if the
+   instruction has no target.
+
+   Return the target rtx if HAS_TARGET_P, otherwise return const0_rtx.  */
+
+static rtx
+mips_expand_builtin_insn (enum insn_code icode, unsigned int nops,
+			  struct expand_operand *ops, bool has_target_p)
+{
+  if (maybe_expand_insn (icode, nops, ops))
+    return has_target_p ? ops[0].value : const0_rtx;
+
+  /* The operands were not accepted by the .md pattern's predicates;
+     report the error and return a dummy value so expansion can
+     continue.  */
+  error ("invalid argument to built-in function");
+  return has_target_p ? gen_reg_rtx (ops[0].mode) : const0_rtx;
+}
+
+/* Expand a floating-point comparison for built-in function call EXP.
+   The first NARGS arguments are the values to be compared.  ICODE is
+   the .md pattern that does the comparison and COND is the condition
+   that is being tested.  Return an rtx for the result.  */
+
+static rtx
+mips_expand_builtin_compare_1 (enum insn_code icode,
+			       enum mips_fp_condition cond,
+			       tree exp, int nargs)
+{
+  struct expand_operand ops[MAX_RECOG_OPERANDS];
+  rtx output;
+  int opno, argno;
+
+  /* The instruction should have a target operand, an operand for each
+     argument, and an operand for COND.  */
+  gcc_assert (nargs + 2 == insn_data[(int) icode].n_generator_args);
+
+  /* Allocate a condition-code register in the mode the pattern expects
+     for its output operand.  */
+  output = mips_allocate_fcc (insn_data[(int) icode].operand[0].mode);
+  opno = 0;
+  create_fixed_operand (&ops[opno++], output);
+  for (argno = 0; argno < nargs; argno++)
+    mips_prepare_builtin_arg (&ops[opno++], exp, argno);
+  /* COND is passed to the pattern as an integer immediate.  */
+  create_integer_operand (&ops[opno++], (int) cond);
+  return mips_expand_builtin_insn (icode, opno, ops, true);
+}
+
+/* Expand a MIPS_BUILTIN_DIRECT or MIPS_BUILTIN_DIRECT_NO_TARGET function;
+   HAS_TARGET_P says which.  EXP is the CALL_EXPR that calls the function
+   and ICODE is the code of the associated .md pattern.  TARGET, if nonnull,
+   suggests a good place to put the result.  */
+
+static rtx
+mips_expand_builtin_direct (enum insn_code icode, rtx target, tree exp,
+			    bool has_target_p)
+{
+  struct expand_operand ops[MAX_RECOG_OPERANDS];
+  int opno, argno;
+
+  /* Map any target to operand 0.  */
+  opno = 0;
+  if (has_target_p)
+    create_output_operand (&ops[opno++], target, TYPE_MODE (TREE_TYPE (exp)));
+
+  /* Map the arguments to the other operands.  The check against
+     n_generator_args ensures that the call supplies exactly the number
+     of operands that the .md pattern expects.  */
+  gcc_assert (opno + call_expr_nargs (exp)
+	      == insn_data[icode].n_generator_args);
+  for (argno = 0; argno < call_expr_nargs (exp); argno++)
+    mips_prepare_builtin_arg (&ops[opno++], exp, argno);
+
+  return mips_expand_builtin_insn (icode, opno, ops, has_target_p);
+}
+
+/* Expand a __builtin_mips_movt_*_ps or __builtin_mips_movf_*_ps
+   function; TYPE says which.  EXP is the CALL_EXPR that calls the
+   function, ICODE is the instruction that should be used to compare
+   the first two arguments, and COND is the condition it should test.
+   TARGET, if nonnull, suggests a good place to put the result.  */
+
+static rtx
+mips_expand_builtin_movtf (enum mips_builtin_type type,
+			   enum insn_code icode, enum mips_fp_condition cond,
+			   rtx target, tree exp)
+{
+  struct expand_operand ops[4];
+  rtx cmp_result;
+
+  /* First emit the comparison of the builtin's first two arguments.  */
+  cmp_result = mips_expand_builtin_compare_1 (icode, cond, exp, 2);
+  create_output_operand (&ops[0], target, TYPE_MODE (TREE_TYPE (exp)));
+  /* Both MOVT and MOVF are implemented by the single pattern
+     mips_cond_move_tf_ps; they differ only in which of the last two
+     call arguments maps to which pattern operand, so the mapping is
+     swapped for MOVT relative to MOVF.  */
+  if (type == MIPS_BUILTIN_MOVT)
+    {
+      mips_prepare_builtin_arg (&ops[2], exp, 2);
+      mips_prepare_builtin_arg (&ops[1], exp, 3);
+    }
+  else
+    {
+      mips_prepare_builtin_arg (&ops[1], exp, 2);
+      mips_prepare_builtin_arg (&ops[2], exp, 3);
+    }
+  create_fixed_operand (&ops[3], cmp_result);
+  return mips_expand_builtin_insn (CODE_FOR_mips_cond_move_tf_ps,
+				   4, ops, true);
+}
+
+/* Set TARGET to VALUE_IF_TRUE if CONDITION holds and to VALUE_IF_FALSE
+   otherwise, using a branch sequence.  Return TARGET.  */
+
+static rtx
+mips_builtin_branch_and_move (rtx condition, rtx target,
+			      rtx value_if_true, rtx value_if_false)
+{
+  rtx set_label = gen_label_rtx ();
+  rtx join_label = gen_label_rtx ();
+
+  /* Load the false value first, then conditionally overwrite it.  */
+  mips_emit_move (target, value_if_false);
+
+  /* Jump to SET_LABEL when CONDITION holds, otherwise skip over the
+     conditional move to JOIN_LABEL.  */
+  emit_jump_insn (gen_condjump (condition, set_label));
+  emit_jump_insn (gen_jump (join_label));
+  emit_barrier ();
+
+  emit_label (set_label);
+  mips_emit_move (target, value_if_true);
+
+  emit_label (join_label);
+  return target;
+}
+
+/* Expand a comparison built-in function of type BUILTIN_TYPE.  EXP is
+   the CALL_EXPR that calls the function, ICODE is the code of the
+   comparison instruction, and COND is the condition it should test.
+   TARGET, if nonnull, suggests a good place to put the boolean result.  */
+
+static rtx
+mips_expand_builtin_compare (enum mips_builtin_type builtin_type,
+			     enum insn_code icode, enum mips_fp_condition cond,
+			     rtx target, tree exp)
+{
+  rtx offset, condition, cmp_result;
+
+  if (target == 0 || GET_MODE (target) != SImode)
+    target = gen_reg_rtx (SImode);
+  cmp_result = mips_expand_builtin_compare_1 (icode, cond, exp,
+					      call_expr_nargs (exp));
+
+  /* If the comparison sets more than one register, we define the result
+     to be 0 if all registers are false and -1 if all registers are true.
+     The value of the complete result is indeterminate otherwise.  */
+  switch (builtin_type)
+    {
+    case MIPS_BUILTIN_CMP_ALL:
+      /* Return 1 only when every register is true, i.e. when the
+	 combined result is exactly -1.  */
+      condition = gen_rtx_NE (VOIDmode, cmp_result, constm1_rtx);
+      return mips_builtin_branch_and_move (condition, target,
+					   const0_rtx, const1_rtx);
+
+    case MIPS_BUILTIN_CMP_UPPER:
+    case MIPS_BUILTIN_CMP_LOWER:
+      /* Test a single field of CMP_RESULT: field 1 for CMP_UPPER,
+	 field 0 for CMP_LOWER.  */
+      offset = GEN_INT (builtin_type == MIPS_BUILTIN_CMP_UPPER);
+      condition = gen_single_cc (cmp_result, offset);
+      return mips_builtin_branch_and_move (condition, target,
+					   const1_rtx, const0_rtx);
+
+    default:
+      /* CMP_ANY and CMP_SINGLE: return 1 if any register is true.  */
+      condition = gen_rtx_NE (VOIDmode, cmp_result, const0_rtx);
+      return mips_builtin_branch_and_move (condition, target,
+					   const1_rtx, const0_rtx);
+    }
+}
+
+/* Expand a bposge built-in function of type BUILTIN_TYPE.  TARGET,
+   if nonnull, suggests a good place to put the boolean result.  */
+
+static rtx
+mips_expand_builtin_bposge (enum mips_builtin_type builtin_type, rtx target)
+{
+  rtx condition, cmp_result;
+  int cmp_value;
+
+  if (target == 0 || GET_MODE (target) != SImode)
+    target = gen_reg_rtx (SImode);
+
+  /* The comparison reads the DSP control register exposed as
+     CCDSP_PO_REGNUM.  */
+  cmp_result = gen_rtx_REG (CCDSPmode, CCDSP_PO_REGNUM);
+
+  if (builtin_type == MIPS_BUILTIN_BPOSGE32)
+    cmp_value = 32;
+  else
+    /* Use gcc_unreachable rather than gcc_assert (0): with checking
+       disabled the assert compiles away, which would let CMP_VALUE be
+       used uninitialized below.  This also matches the idiom used for
+       impossible cases elsewhere in this file.  */
+    gcc_unreachable ();
+
+  condition = gen_rtx_GE (VOIDmode, cmp_result, GEN_INT (cmp_value));
+  return mips_builtin_branch_and_move (condition, target,
+				       const1_rtx, const0_rtx);
+}
+
+/* Implement TARGET_EXPAND_BUILTIN.  */
+
+static rtx
+mips_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
+		     enum machine_mode mode, int ignore)
+{
+  tree fndecl;
+  unsigned int fcode, avail;
+  const struct mips_builtin_description *d;
+
+  /* Recover the builtin's descriptor from the called function decl.  */
+  fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
+  fcode = DECL_FUNCTION_CODE (fndecl);
+  gcc_assert (fcode < ARRAY_SIZE (mips_builtins));
+  d = &mips_builtins[fcode];
+  avail = d->avail ();
+  gcc_assert (avail != 0);
+  /* Builtins are rejected in MIPS16 code unless their availability
+     predicate explicitly sets BUILTIN_AVAIL_MIPS16.  */
+  if (TARGET_MIPS16 && !(avail & BUILTIN_AVAIL_MIPS16))
+    {
+      error ("built-in function %qE not supported for MIPS16",
+	     DECL_NAME (fndecl));
+      return ignore ? const0_rtx : CONST0_RTX (mode);
+    }
+  /* Dispatch on the builtin kind; each helper knows how to map the
+     call's arguments onto the operands of D->icode.  */
+  switch (d->builtin_type)
+    {
+    case MIPS_BUILTIN_DIRECT:
+      return mips_expand_builtin_direct (d->icode, target, exp, true);
+
+    case MIPS_BUILTIN_DIRECT_NO_TARGET:
+      return mips_expand_builtin_direct (d->icode, target, exp, false);
+
+    case MIPS_BUILTIN_MOVT:
+    case MIPS_BUILTIN_MOVF:
+      return mips_expand_builtin_movtf (d->builtin_type, d->icode,
+					d->cond, target, exp);
+
+    case MIPS_BUILTIN_CMP_ANY:
+    case MIPS_BUILTIN_CMP_ALL:
+    case MIPS_BUILTIN_CMP_UPPER:
+    case MIPS_BUILTIN_CMP_LOWER:
+    case MIPS_BUILTIN_CMP_SINGLE:
+      return mips_expand_builtin_compare (d->builtin_type, d->icode,
+					  d->cond, target, exp);
+
+    case MIPS_BUILTIN_BPOSGE32:
+      return mips_expand_builtin_bposge (d->builtin_type, target);
+    }
+  gcc_unreachable ();
+}
+
+/* An entry in the MIPS16 constant pool.  VALUE is the pool constant,
+   MODE is its mode, and LABEL is the CODE_LABEL associated with it.  */
+struct mips16_constant {
+  /* The next entry in the pool; mips16_add_constant keeps entries
+     sorted in increasing order of mode size.  */
+  struct mips16_constant *next;
+  rtx value;
+  rtx label;
+  enum machine_mode mode;
+};
+
+/* Information about an incomplete MIPS16 constant pool.  FIRST is the
+   first constant, HIGHEST_ADDRESS is the highest address that the first
+   byte of the pool can have, and INSN_ADDRESS is the current instruction
+   address.  */
+struct mips16_constant_pool {
+  struct mips16_constant *first;
+  /* Addresses are byte offsets accumulated by mips16_insn_length.  */
+  int highest_address;
+  int insn_address;
+};
+
+/* Add constant VALUE to POOL and return its label.  MODE is the
+   value's mode (used for CONST_INTs, etc.).  */
+
+static rtx
+mips16_add_constant (struct mips16_constant_pool *pool,
+		     rtx value, enum machine_mode mode)
+{
+  struct mips16_constant **p, *c;
+  bool first_of_size_p;
+
+  /* See whether the constant is already in the pool.  If so, return the
+     existing label, otherwise leave P pointing to the place where the
+     constant should be added.
+
+     Keep the pool sorted in increasing order of mode size so that we can
+     reduce the number of alignments needed.  */
+  first_of_size_p = true;
+  for (p = &pool->first; *p != 0; p = &(*p)->next)
+    {
+      if (mode == (*p)->mode && rtx_equal_p (value, (*p)->value))
+	return (*p)->label;
+      if (GET_MODE_SIZE (mode) < GET_MODE_SIZE ((*p)->mode))
+	break;
+      if (GET_MODE_SIZE (mode) == GET_MODE_SIZE ((*p)->mode))
+	first_of_size_p = false;
+    }
+
+  /* In the worst case, the constant needed by the earliest instruction
+     will end up at the end of the pool.  The entire pool must then be
+     accessible from that instruction.
+
+     When adding the first constant, set the pool's highest address to
+     the address of the first out-of-range byte.  Adjust this address
+     downwards each time a new constant is added.  */
+  if (pool->first == 0)
+    /* For LWPC, ADDIUPC and DADDIUPC, the base PC value is the address
+       of the instruction with the lowest two bits clear.  The base PC
+       value for LDPC has the lowest three bits clear.  Assume the worst
+       case here; namely that the PC-relative instruction occupies the
+       last 2 bytes in an aligned word.  */
+    pool->highest_address = pool->insn_address - (UNITS_PER_WORD - 2) + 0x8000;
+  pool->highest_address -= GET_MODE_SIZE (mode);
+  if (first_of_size_p)
+    /* Take into account the worst possible padding due to alignment.
+       Only the first constant of each size can require new padding,
+       since later ones of the same size follow it directly.  */
+    pool->highest_address -= GET_MODE_SIZE (mode) - 1;
+
+  /* Create a new entry and link it in at *P, preserving the size
+     ordering established above.  */
+  c = XNEW (struct mips16_constant);
+  c->value = value;
+  c->mode = mode;
+  c->label = gen_label_rtx ();
+  c->next = *p;
+  *p = c;
+
+  return c->label;
+}
+
+/* Output constant VALUE after instruction INSN and return the last
+   instruction emitted.  MODE is the mode of the constant.  */
+
+static rtx
+mips16_emit_constants_1 (enum machine_mode mode, rtx value, rtx insn)
+{
+  /* Integer and fixed-point constants are emitted with an explicit
+     size operand.  */
+  if (SCALAR_INT_MODE_P (mode) || ALL_SCALAR_FIXED_POINT_MODE_P (mode))
+    return emit_insn_after (gen_consttable_int (value,
+						GEN_INT (GET_MODE_SIZE (mode))),
+			    insn);
+
+  if (SCALAR_FLOAT_MODE_P (mode))
+    return emit_insn_after (gen_consttable_float (value), insn);
+
+  if (VECTOR_MODE_P (mode))
+    {
+      int elt;
+
+      /* Emit each vector element in turn, threading INSN through so the
+	 elements appear consecutively.  */
+      for (elt = 0; elt < CONST_VECTOR_NUNITS (value); elt++)
+	insn = mips16_emit_constants_1 (GET_MODE_INNER (mode),
+					CONST_VECTOR_ELT (value, elt), insn);
+      return insn;
+    }
+
+  gcc_unreachable ();
+}
+
+/* Dump out the constants in CONSTANTS after INSN, freeing the list as
+   we go, and end the pool with a barrier.  */
+
+static void
+mips16_emit_constants (struct mips16_constant *constants, rtx insn)
+{
+  struct mips16_constant *c, *next;
+  int align;
+
+  /* ALIGN tracks the size (and hence alignment) of the constant most
+     recently emitted; the list is sorted by increasing mode size, so
+     alignment only ever needs to increase.  */
+  align = 0;
+  for (c = constants; c != NULL; c = next)
+    {
+      /* If necessary, increase the alignment of PC.  */
+      if (align < GET_MODE_SIZE (c->mode))
+	{
+	  int align_log = floor_log2 (GET_MODE_SIZE (c->mode));
+	  insn = emit_insn_after (gen_align (GEN_INT (align_log)), insn);
+	}
+      align = GET_MODE_SIZE (c->mode);
+
+      insn = emit_label_after (c->label, insn);
+      insn = mips16_emit_constants_1 (c->mode, c->value, insn);
+
+      /* The entry was allocated with XNEW in mips16_add_constant.  */
+      next = c->next;
+      free (c);
+    }
+
+  emit_barrier_after (insn);
+}
+
+/* Return the length of instruction INSN in bytes.  */
+
+static int
+mips16_insn_length (rtx insn)
+{
+  if (JUMP_TABLE_DATA_P (insn))
+    {
+      rtx body = PATTERN (insn);
+      int entry_size = GET_MODE_SIZE (GET_MODE (body));
+
+      /* Jump tables keep their entries in operand 0 of an ADDR_VEC and
+	 in operand 1 of an ADDR_DIFF_VEC.  */
+      switch (GET_CODE (body))
+	{
+	case ADDR_VEC:
+	  return entry_size * XVECLEN (body, 0);
+	case ADDR_DIFF_VEC:
+	  return entry_size * XVECLEN (body, 1);
+	default:
+	  gcc_unreachable ();
+	}
+    }
+  return get_attr_length (insn);
+}
+
+/* If *X is a symbolic constant that refers to the constant pool, add
+   the constant to POOL and rewrite *X to use the constant's label.  */
+
+static void
+mips16_rewrite_pool_constant (struct mips16_constant_pool *pool, rtx *x)
+{
+  rtx base, offset, label;
+
+  /* Split *X into a bare symbol and a constant offset, so that the
+     offset can be reapplied to the new label-based address.  */
+  split_const (*x, &base, &offset);
+  if (GET_CODE (base) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (base))
+    {
+      label = mips16_add_constant (pool, copy_rtx (get_pool_constant (base)),
+				   get_pool_mode (base));
+      base = gen_rtx_LABEL_REF (Pmode, label);
+      /* The rewritten reference is PC-relative.  */
+      *x = mips_unspec_address_offset (base, offset, SYMBOL_PC_RELATIVE);
+    }
+}
+
+/* This structure is used to communicate with mips16_rewrite_pool_refs.
+   INSN is the instruction we're rewriting and POOL points to the current
+   constant pool.  */
+struct mips16_rewrite_pool_refs_info {
+  rtx insn;
+  struct mips16_constant_pool *pool;
+};
+
+/* Rewrite *X so that constant pool references refer to the constant's
+   label instead.  DATA points to a mips16_rewrite_pool_refs_info
+   structure.  This is a for_each_rtx callback: returning -1 skips
+   *X's sub-rtxes, returning 0 continues the walk.  */
+
+static int
+mips16_rewrite_pool_refs (rtx *x, void *data)
+{
+  struct mips16_rewrite_pool_refs_info *info =
+    (struct mips16_rewrite_pool_refs_info *) data;
+
+  /* Constants that must live in memory are first forced into the
+     constant pool; the MEM case below then rewrites the reference.  */
+  if (force_to_mem_operand (*x, Pmode))
+    {
+      rtx mem = force_const_mem (GET_MODE (*x), *x);
+      validate_change (info->insn, x, mem, false);
+    }
+
+  if (MEM_P (*x))
+    {
+      mips16_rewrite_pool_constant (info->pool, &XEXP (*x, 0));
+      return -1;
+    }
+
+  /* Don't rewrite the __mips16_rdwr symbol.  */
+  if (GET_CODE (*x) == UNSPEC && XINT (*x, 1) == UNSPEC_TLS_GET_TP)
+    return -1;
+
+  if (TARGET_MIPS16_TEXT_LOADS)
+    mips16_rewrite_pool_constant (info->pool, x);
+
+  /* A CONST was fully handled above, so there is no need to visit
+     its operands.  */
+  return GET_CODE (*x) == CONST ? -1 : 0;
+}
+
+/* Return whether CFG is used in mips_reorg.  */
+
+static bool
+mips_cfg_in_reorg (void)
+{
+  /* Both the R10K cache-barrier pass and PIC call relaxation need the
+     CFG to be available during machine reorg.  */
+  if (mips_r10k_cache_barrier != R10K_CACHE_BARRIER_NONE)
+    return true;
+  return TARGET_RELAX_PIC_CALLS;
+}
+
+/* Build MIPS16 constant pools.  Split the instructions if SPLIT_P,
+   otherwise assume that they are already split.  */
+
+static void
+mips16_lay_out_constants (bool split_p)
+{
+  struct mips16_constant_pool pool;
+  struct mips16_rewrite_pool_refs_info info;
+  rtx insn, barrier;
+
+  if (!TARGET_MIPS16_PCREL_LOADS)
+    return;
+
+  if (split_p)
+    {
+      if (mips_cfg_in_reorg ())
+	split_all_insns ();
+      else
+	split_all_insns_noflow ();
+    }
+  /* Walk every instruction in order, accumulating addresses and
+     flushing the pool whenever it would move out of range of its
+     first user.  BARRIER is the most recent place at which the pool
+     could be dumped without needing a new jump.  */
+  barrier = 0;
+  memset (&pool, 0, sizeof (pool));
+  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+    {
+      /* Rewrite constant pool references in INSN.  */
+      if (USEFUL_INSN_P (insn))
+	{
+	  info.insn = insn;
+	  info.pool = &pool;
+	  for_each_rtx (&PATTERN (insn), mips16_rewrite_pool_refs, &info);
+	}
+
+      pool.insn_address += mips16_insn_length (insn);
+
+      if (pool.first != NULL)
+	{
+	  /* If there are no natural barriers between the first user of
+	     the pool and the highest acceptable address, we'll need to
+	     create a new instruction to jump around the constant pool.
+	     In the worst case, this instruction will be 4 bytes long.
+
+	     If it's too late to do this transformation after INSN,
+	     do it immediately before INSN.  */
+	  if (barrier == 0 && pool.insn_address + 4 > pool.highest_address)
+	    {
+	      rtx label, jump;
+
+	      label = gen_label_rtx ();
+
+	      jump = emit_jump_insn_before (gen_jump (label), insn);
+	      JUMP_LABEL (jump) = label;
+	      LABEL_NUSES (label) = 1;
+	      barrier = emit_barrier_after (jump);
+
+	      emit_label_after (label, barrier);
+	      /* Account for the jump instruction itself.  */
+	      pool.insn_address += 4;
+	    }
+
+	  /* See whether the constant pool is now out of range of the first
+	     user.  If so, output the constants after the previous barrier.
+	     Note that any instructions between BARRIER and INSN (inclusive)
+	     will use negative offsets to refer to the pool.  */
+	  if (pool.insn_address > pool.highest_address)
+	    {
+	      mips16_emit_constants (pool.first, barrier);
+	      pool.first = NULL;
+	      barrier = 0;
+	    }
+	  else if (BARRIER_P (insn))
+	    barrier = insn;
+	}
+    }
+  /* Flush whatever remains after the last instruction.  */
+  mips16_emit_constants (pool.first, get_last_insn ());
+}
+
+/* Return true if it is worth r10k_simplify_address's while replacing
+   an address with X.  We are looking for constants, and for addresses
+   at a known offset from the incoming stack pointer.  */
+
+static bool
+r10k_simplified_address_p (rtx x)
+{
+  rtx base = x;
+
+  /* Look through a constant displacement.  */
+  if (GET_CODE (base) == PLUS && CONST_INT_P (XEXP (base, 1)))
+    base = XEXP (base, 0);
+  return base == virtual_incoming_args_rtx || CONSTANT_P (base);
+}
+
+/* X is an expression that appears in INSN.  Try to use the UD chains
+   to simplify it, returning the simplified form on success and the
+   original form otherwise.  Replace the incoming value of $sp with
+   virtual_incoming_args_rtx (which should never occur in X otherwise).  */
+
+static rtx
+r10k_simplify_address (rtx x, rtx insn)
+{
+  rtx newx, op0, op1, set, def_insn, note;
+  df_ref use, def;
+  struct df_link *defs;
+
+  /* NEWX is the candidate replacement; it stays null until a
+     simplification is found.  */
+  newx = NULL_RTX;
+  if (UNARY_P (x))
+    {
+      /* Recursively simplify the operand and rebuild the expression
+	 if it changed.  */
+      op0 = r10k_simplify_address (XEXP (x, 0), insn);
+      if (op0 != XEXP (x, 0))
+	newx = simplify_gen_unary (GET_CODE (x), GET_MODE (x),
+				   op0, GET_MODE (XEXP (x, 0)));
+    }
+  else if (BINARY_P (x))
+    {
+      op0 = r10k_simplify_address (XEXP (x, 0), insn);
+      op1 = r10k_simplify_address (XEXP (x, 1), insn);
+      if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
+	newx = simplify_gen_binary (GET_CODE (x), GET_MODE (x), op0, op1);
+    }
+  else if (GET_CODE (x) == LO_SUM)
+    {
+      /* LO_SUMs can be offset from HIGHs, if we know they won't
+	 overflow.  See mips_classify_address for the rationale behind
+	 the lax check.  */
+      op0 = r10k_simplify_address (XEXP (x, 0), insn);
+      if (GET_CODE (op0) == HIGH)
+	newx = XEXP (x, 1);
+    }
+  else if (REG_P (x))
+    {
+      /* Uses are recorded by regno_reg_rtx, not X itself.  */
+      use = df_find_use (insn, regno_reg_rtx[REGNO (x)]);
+      gcc_assert (use);
+      defs = DF_REF_CHAIN (use);
+
+      /* Require a single definition.  */
+      if (defs && defs->next == NULL)
+	{
+	  def = defs->ref;
+	  if (DF_REF_IS_ARTIFICIAL (def))
+	    {
+	      /* Replace the incoming value of $sp with
+		 virtual_incoming_args_rtx.  */
+	      if (x == stack_pointer_rtx
+		  && DF_REF_BB (def) == ENTRY_BLOCK_PTR_FOR_FN (cfun))
+		newx = virtual_incoming_args_rtx;
+	    }
+	  else if (dominated_by_p (CDI_DOMINATORS, DF_REF_BB (use),
+				   DF_REF_BB (def)))
+	    {
+	      /* Make sure that DEF_INSN is a single set of REG.  */
+	      def_insn = DF_REF_INSN (def);
+	      if (NONJUMP_INSN_P (def_insn))
+		{
+		  set = single_set (def_insn);
+		  if (set && rtx_equal_p (SET_DEST (set), x))
+		    {
+		      /* Prefer to use notes, since the def-use chains
+			 are often shorter.  */
+		      note = find_reg_equal_equiv_note (def_insn);
+		      if (note)
+			newx = XEXP (note, 0);
+		      else
+			newx = SET_SRC (set);
+		      /* Simplify the definition's source in the context
+			 of the defining instruction.  */
+		      newx = r10k_simplify_address (newx, def_insn);
+		    }
+		}
+	    }
+	}
+    }
+  /* Only adopt the replacement if it is one of the forms that
+     r10k_safe_address_p can reason about.  */
+  if (newx && r10k_simplified_address_p (newx))
+    return newx;
+  return x;
+}
+
+/* Return true if ADDRESS is known to be an uncached address
+   on R10K systems.  */
+
+static bool
+r10k_uncached_address_p (unsigned HOST_WIDE_INT address)
+{
+  unsigned HOST_WIDE_INT upper;
+
+  /* KSEG1 addresses are always uncached.  */
+  if (address + 0x60000000 < 0x20000000)
+    return true;
+
+  /* XKPHYS addresses only exist with 64-bit pointers.  */
+  if (Pmode != DImode)
+    return false;
+
+  /* Check for the uncached XKPHYS windows.  */
+  upper = (address >> 40) & 0xf9ffff;
+  return upper == 0x900000 || upper == 0xb80000;
+}
+
+/* Return true if we can prove that an access to address X in instruction
+   INSN would be safe from R10K speculation.  This X is a general
+   expression; it might not be a legitimate address.  */
+
+static bool
+r10k_safe_address_p (rtx x, rtx insn)
+{
+  rtx base, offset;
+  HOST_WIDE_INT offset_val;
+
+  /* Resolve X through UD chains into a constant or frame-relative
+     form where possible.  */
+  x = r10k_simplify_address (x, insn);
+
+  /* Check for references to the stack frame.  It doesn't really matter
+     how much of the frame has been allocated at INSN; -mr10k-cache-barrier
+     allows us to assume that accesses to any part of the eventual frame
+     is safe from speculation at any point in the function.  */
+  mips_split_plus (x, &base, &offset_val);
+  if (base == virtual_incoming_args_rtx
+      && offset_val >= -cfun->machine->frame.total_size
+      && offset_val < cfun->machine->frame.args_size)
+    return true;
+
+  /* Check for uncached addresses.  */
+  if (CONST_INT_P (x))
+    return r10k_uncached_address_p (INTVAL (x));
+
+  /* Check for accesses to a static object.  */
+  split_const (x, &base, &offset);
+  return offset_within_block_p (base, INTVAL (offset));
+}
+
+/* Return true if a MEM with MEM_EXPR EXPR and MEM_OFFSET OFFSET is
+   an in-range access to an automatic variable, or to an object with
+   a link-time-constant address.  */
+
+static bool
+r10k_safe_mem_expr_p (tree expr, unsigned HOST_WIDE_INT offset)
+{
+  HOST_WIDE_INT bitoffset, bitsize;
+  tree inner, var_offset;
+  enum machine_mode mode;
+  int unsigned_p, volatile_p;
+
+  /* Find the underlying decl and the constant bit offset of the
+     reference within it; reject variable offsets.  */
+  inner = get_inner_reference (expr, &bitsize, &bitoffset, &var_offset, &mode,
+			       &unsigned_p, &volatile_p, false);
+  if (!DECL_P (inner) || !DECL_SIZE_UNIT (inner) || var_offset)
+    return false;
+
+  /* The access is safe only if it stays within the object's size.  */
+  offset += bitoffset / BITS_PER_UNIT;
+  return offset < tree_to_uhwi (DECL_SIZE_UNIT (inner));
+}
+
+/* A for_each_rtx callback for which DATA points to the instruction
+   containing *X.  Stop the search if we find a MEM that is not safe
+   from R10K speculation.  Return values follow the for_each_rtx
+   protocol: 0 continues the walk, -1 skips *X's sub-rtxes, and any
+   other nonzero value stops the walk (and is returned by
+   for_each_rtx).  */
+
+static int
+r10k_needs_protection_p_1 (rtx *loc, void *data)
+{
+  rtx mem;
+
+  mem = *loc;
+  if (!MEM_P (mem))
+    return 0;
+
+  /* A MEM whose target object and offset are known to be in range
+     is safe.  */
+  if (MEM_EXPR (mem)
+      && MEM_OFFSET_KNOWN_P (mem)
+      && r10k_safe_mem_expr_p (MEM_EXPR (mem), MEM_OFFSET (mem)))
+    return -1;
+
+  if (r10k_safe_address_p (XEXP (mem, 0), (rtx) data))
+    return -1;
+
+  /* Unsafe MEM found: stop and report it.  */
+  return 1;
+}
+
+/* A note_stores callback for which DATA points to an instruction pointer.
+   If *DATA is nonnull, make it null if it X contains a MEM that is not
+   safe from R10K speculation.  */
+
+static void
+r10k_needs_protection_p_store (rtx x, const_rtx pat ATTRIBUTE_UNUSED,
+			       void *data)
+{
+  rtx *insn_ptr;
+
+  insn_ptr = (rtx *) data;
+  /* Once an unsafe store is found, *INSN_PTR stays null so later
+     stores in the same pattern are not rescanned.  */
+  if (*insn_ptr && for_each_rtx (&x, r10k_needs_protection_p_1, *insn_ptr))
+    *insn_ptr = NULL_RTX;
+}
+
+/* A for_each_rtx callback that iterates over the pattern of a CALL_INSN.
+   Return nonzero if the call is not to a declared function.  */
+
+static int
+r10k_needs_protection_p_call (rtx *loc, void *data ATTRIBUTE_UNUSED)
+{
+  rtx mem = *loc;
+  rtx addr;
+
+  if (!MEM_P (mem))
+    return 0;
+
+  /* A direct call to a declared function is safe; skip its sub-rtxes.
+     Anything else needs protection.  */
+  addr = XEXP (mem, 0);
+  if (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_DECL (addr))
+    return -1;
+
+  return 1;
+}
+
+/* Return true if instruction INSN needs to be protected by an R10K
+   cache barrier.  */
+
+static bool
+r10k_needs_protection_p (rtx insn)
+{
+  /* Calls are checked for being direct calls to declared functions.  */
+  if (CALL_P (insn))
+    return for_each_rtx (&PATTERN (insn), r10k_needs_protection_p_call, NULL);
+
+  /* In "store" mode only stores are checked; note_stores nulls INSN
+     if any store is unsafe.  */
+  if (mips_r10k_cache_barrier == R10K_CACHE_BARRIER_STORE)
+    {
+      note_stores (PATTERN (insn), r10k_needs_protection_p_store, &insn);
+      return insn == NULL_RTX;
+    }
+
+  /* Otherwise check every MEM in the pattern.  */
+  return for_each_rtx (&PATTERN (insn), r10k_needs_protection_p_1, insn);
+}
+
+/* Return true if BB is only reached by blocks in PROTECTED_BBS and if every
+   edge is unconditional.  */
+
+static bool
+r10k_protected_bb_p (basic_block bb, sbitmap protected_bbs)
+{
+  edge e;
+  edge_iterator ei;
+
+  FOR_EACH_EDGE (e, ei, bb->preds)
+    {
+      /* Reject conditional edges (predecessor has several successors),
+	 predecessors whose ends are unprotected, and abnormal edges.  */
+      if (!single_succ_p (e->src))
+	return false;
+      if (!bitmap_bit_p (protected_bbs, e->src->index))
+	return false;
+      if ((e->flags & EDGE_COMPLEX) != 0)
+	return false;
+    }
+  return true;
+}
+
+/* Implement -mr10k-cache-barrier= for the current function.  */
+
+static void
+r10k_insert_cache_barriers (void)
+{
+  int *rev_post_order;
+  unsigned int i, n;
+  basic_block bb;
+  sbitmap protected_bbs;
+  rtx insn, end, unprotected_region;
+
+  if (TARGET_MIPS16)
+    {
+      sorry ("%qs does not support MIPS16 code", "-mr10k-cache-barrier");
+      return;
+    }
+
+  /* Calculate dominators.  */
+  calculate_dominance_info (CDI_DOMINATORS);
+
+  /* Bit X of PROTECTED_BBS is set if the last operation in basic block
+     X is protected by a cache barrier.  */
+  protected_bbs = sbitmap_alloc (last_basic_block_for_fn (cfun));
+  bitmap_clear (protected_bbs);
+
+  /* Iterate over the basic blocks in reverse post-order, so that each
+     block's predecessors are normally visited (and their protection
+     recorded) before the block itself.  */
+  rev_post_order = XNEWVEC (int, last_basic_block_for_fn (cfun));
+  n = pre_and_rev_post_order_compute (NULL, rev_post_order, false);
+  for (i = 0; i < n; i++)
+    {
+      bb = BASIC_BLOCK_FOR_FN (cfun, rev_post_order[i]);
+
+      /* If this block is only reached by unconditional edges, and if the
+	 source of every edge is protected, the beginning of the block is
+	 also protected.  */
+      if (r10k_protected_bb_p (bb, protected_bbs))
+	unprotected_region = NULL_RTX;
+      else
+	unprotected_region = pc_rtx;
+      end = NEXT_INSN (BB_END (bb));
+
+      /* UNPROTECTED_REGION is:
+
+	 - null if we are processing a protected region,
+	 - pc_rtx if we are processing an unprotected region but have
+	   not yet found the first instruction in it
+	 - the first instruction in an unprotected region otherwise.  */
+      for (insn = BB_HEAD (bb); insn != end; insn = NEXT_INSN (insn))
+	{
+	  if (unprotected_region && USEFUL_INSN_P (insn))
+	    {
+	      if (recog_memoized (insn) == CODE_FOR_mips_cache)
+		/* This CACHE instruction protects the following code.  */
+		unprotected_region = NULL_RTX;
+	      else
+		{
+		  /* See if INSN is the first instruction in this
+		     unprotected region.  */
+		  if (unprotected_region == pc_rtx)
+		    unprotected_region = insn;
+
+		  /* See if INSN needs to be protected.  If so,
+		     we must insert a cache barrier somewhere between
+		     PREV_INSN (UNPROTECTED_REGION) and INSN.  It isn't
+		     clear which position is better performance-wise,
+		     but as a tie-breaker, we assume that it is better
+		     to allow delay slots to be back-filled where
+		     possible, and that it is better not to insert
+		     barriers in the middle of already-scheduled code.
+		     We therefore insert the barrier at the beginning
+		     of the region.  */
+		  if (r10k_needs_protection_p (insn))
+		    {
+		      emit_insn_before (gen_r10k_cache_barrier (),
+					unprotected_region);
+		      unprotected_region = NULL_RTX;
+		    }
+		}
+	    }
+
+	  if (CALL_P (insn))
+	    /* The called function is not required to protect the exit path.
+	       The code that follows a call is therefore unprotected.  */
+	    unprotected_region = pc_rtx;
+	}
+
+      /* Record whether the end of this block is protected.  */
+      if (unprotected_region == NULL_RTX)
+	bitmap_set_bit (protected_bbs, bb->index);
+    }
+  XDELETEVEC (rev_post_order);
+
+  sbitmap_free (protected_bbs);
+
+  free_dominance_info (CDI_DOMINATORS);
+}
+
+/* If INSN is a call, return the underlying CALL expr.  Return NULL_RTX
+   otherwise.  If INSN has two call rtx, then store the second one in
+   SECOND_CALL.  */
+
+static rtx
+mips_call_expr_from_insn (rtx insn, rtx *second_call)
+{
+  rtx x;
+  rtx x2;
+
+  if (!CALL_P (insn))
+    return NULL_RTX;
+
+  x = PATTERN (insn);
+  if (GET_CODE (x) == PARALLEL)
+    {
+      /* Calls returning complex values have two CALL rtx.  Look for the second
+	 one here, and return it via the SECOND_CALL arg.  */
+      x2 = XVECEXP (x, 0, 1);
+      if (GET_CODE (x2) == SET)
+	x2 = XEXP (x2, 1);
+      if (GET_CODE (x2) == CALL)
+	*second_call = x2;
+
+      /* The first element of the PARALLEL carries the primary call.  */
+      x = XVECEXP (x, 0, 0);
+    }
+  /* Calls with a return value are wrapped in a SET; strip it to get
+     at the CALL itself.  */
+  if (GET_CODE (x) == SET)
+    x = XEXP (x, 1);
+  gcc_assert (GET_CODE (x) == CALL);
+
+  return x;
+}
+
+/* REG is set in DEF.  See if the definition is one of the ways we load a
+   register with a symbol address for a mips_use_pic_fn_addr_reg_p call.
+   If it is, return the symbol reference of the function, otherwise return
+   NULL_RTX.
+
+   If RECURSE_P is true, use mips_find_pic_call_symbol to interpret
+   the values of source registers, otherwise treat such registers as
+   having an unknown value.  */
+
+static rtx
+mips_pic_call_symbol_from_set (df_ref def, rtx reg, bool recurse_p)
+{
+  rtx def_insn, set;
+
+  /* Artificial definitions (e.g. incoming register values) carry no
+     usable symbol information.  */
+  if (DF_REF_IS_ARTIFICIAL (def))
+    return NULL_RTX;
+
+  def_insn = DF_REF_INSN (def);
+  set = single_set (def_insn);
+  if (set && rtx_equal_p (SET_DEST (set), reg))
+    {
+      rtx note, src, symbol;
+
+      /* First see whether the source is a plain symbol.  This is used
+	 when calling symbols that are not lazily bound.  */
+      src = SET_SRC (set);
+      if (GET_CODE (src) == SYMBOL_REF)
+	return src;
+
+      /* Handle %call16 references.  */
+      symbol = mips_strip_unspec_call (src);
+      if (symbol)
+	{
+	  gcc_assert (GET_CODE (symbol) == SYMBOL_REF);
+	  return symbol;
+	}
+
+      /* If we have something more complicated, look for a
+	 REG_EQUAL or REG_EQUIV note.  */
+      note = find_reg_equal_equiv_note (def_insn);
+      if (note && GET_CODE (XEXP (note, 0)) == SYMBOL_REF)
+	return XEXP (note, 0);
+
+      /* Follow at most one simple register copy.  Such copies are
+	 interesting in cases like:
+
+	 for (...)
+	   {
+	     locally_binding_fn (...);
+	   }
+
+	 and:
+
+	 locally_binding_fn (...);
+	 ...
+	 locally_binding_fn (...);
+
+	 where the load of locally_binding_fn can legitimately be
+	 hoisted or shared.  However, we do not expect to see complex
+	 chains of copies, so a full worklist solution to the problem
+	 would probably be overkill.  */
+      if (recurse_p && REG_P (src))
+	return mips_find_pic_call_symbol (def_insn, src, false);
+    }
+
+  return NULL_RTX;
+}
+
+/* Find the definition of the use of REG in INSN.  See if the definition
+   is one of the ways we load a register with a symbol address for a
+   mips_use_pic_fn_addr_reg_p call.  If it is return the symbol reference
+   of the function, otherwise return NULL_RTX.  RECURSE_P is as for
+   mips_pic_call_symbol_from_set.  */
+
+static rtx
+mips_find_pic_call_symbol (rtx insn, rtx reg, bool recurse_p)
+{
+  df_ref use;
+  struct df_link *defs;
+  rtx symbol;
+
+  /* Uses are recorded against regno_reg_rtx, not REG itself.  */
+  use = df_find_use (insn, regno_reg_rtx[REGNO (reg)]);
+  if (!use)
+    return NULL_RTX;
+  defs = DF_REF_CHAIN (use);
+  if (!defs)
+    return NULL_RTX;
+  symbol = mips_pic_call_symbol_from_set (defs->ref, reg, recurse_p);
+  if (!symbol)
+    return NULL_RTX;
+
+  /* If we have more than one definition, they need to be identical.  */
+  for (defs = defs->next; defs; defs = defs->next)
+    {
+      rtx other;
+
+      other = mips_pic_call_symbol_from_set (defs->ref, reg, recurse_p);
+      if (!rtx_equal_p (symbol, other))
+	return NULL_RTX;
+    }
+
+  return symbol;
+}
+
+/* Replace the args_size operand of the call expression CALL with the
+   call-attribute UNSPEC and fill in SYMBOL as the function symbol.  */
+
+static void
+mips_annotate_pic_call_expr (rtx call, rtx symbol)
+{
+  rtx args_size;
+
+  /* Wrap the original args_size operand together with SYMBOL in an
+     UNSPEC_CALL_ATTR so mips_get_pic_call_symbol can recover the symbol
+     later without losing the args_size value.  */
+  args_size = XEXP (call, 1);
+  XEXP (call, 1) = gen_rtx_UNSPEC (GET_MODE (args_size),
+				   gen_rtvec (2, args_size, symbol),
+				   UNSPEC_CALL_ATTR);
+}
+
+/* OPERANDS[ARGS_SIZE_OPNO] is the arg_size operand of a CALL expression.  See
+   if instead of the arg_size argument it contains the call attributes.  If
+   yes return true along with setting OPERANDS[ARGS_SIZE_OPNO] to the function
+   symbol from the call attributes.  Also return false if ARGS_SIZE_OPNO is
+   -1.  */
+
+bool
+mips_get_pic_call_symbol (rtx *operands, int args_size_opno)
+{
+  rtx args_size, symbol;
+
+  /* ARGS_SIZE_OPNO == -1 means the caller has no args_size operand to
+     inspect; nothing to do unless -mrelax-pic-calls is in effect.  */
+  if (!TARGET_RELAX_PIC_CALLS || args_size_opno == -1)
+    return false;
+
+  args_size = operands[args_size_opno];
+  if (GET_CODE (args_size) != UNSPEC)
+    return false;
+  /* The only UNSPEC expected here is the one installed by
+     mips_annotate_pic_call_expr.  */
+  gcc_assert (XINT (args_size, 1) == UNSPEC_CALL_ATTR);
+
+  /* Element 1 of the UNSPEC vector is the function symbol (element 0 is
+     the original args_size).  */
+  symbol = XVECEXP (args_size, 0, 1);
+  gcc_assert (GET_CODE (symbol) == SYMBOL_REF);
+
+  operands[args_size_opno] = symbol;
+  return true;
+}
+
+/* Use DF to annotate PIC indirect calls with the function symbol they
+   dispatch to.  */
+
+static void
+mips_annotate_pic_calls (void)
+{
+  basic_block bb;
+  rtx insn;
+
+  FOR_EACH_BB_FN (bb, cfun)
+    FOR_BB_INSNS (bb, insn)
+      {
+	rtx call, reg, symbol, second_call;
+
+	/* SECOND_CALL is filled in by mips_call_expr_from_insn when the
+	   insn contains a second CALL expression -- presumably the
+	   call/value twin; confirm against that helper.  */
+	second_call = 0;
+	call = mips_call_expr_from_insn (insn, &second_call);
+	if (!call)
+	  continue;
+	gcc_assert (MEM_P (XEXP (call, 0)));
+	reg = XEXP (XEXP (call, 0), 0);
+	/* Only indirect calls through a register can be annotated.  */
+	if (!REG_P (reg))
+	  continue;
+
+	/* Trace REG back to a unique symbol definition, following at most
+	   one register copy.  */
+	symbol = mips_find_pic_call_symbol (insn, reg, true);
+	if (symbol)
+	  {
+	    mips_annotate_pic_call_expr (call, symbol);
+	    if (second_call)
+	      mips_annotate_pic_call_expr (second_call, symbol);
+	  }
+      }
+}
+
+/* A temporary variable used by for_each_rtx callbacks, etc.  Holds the
+   insn currently being simulated, since the callbacks take no insn
+   argument of their own.  */
+static rtx mips_sim_insn;
+
+/* A structure representing the state of the processor pipeline.
+   Used by the mips_sim_* family of functions.  */
+struct mips_sim {
+  /* The maximum number of instructions that can be issued in a cycle.
+     (Caches mips_issue_rate.)  */
+  unsigned int issue_rate;
+
+  /* The current simulation time, in cycles.  */
+  unsigned int time;
+
+  /* How many more instructions can be issued in the current cycle.  */
+  unsigned int insns_left;
+
+  /* LAST_SET[X].INSN is the last instruction to set register X.
+     LAST_SET[X].TIME is the time at which that instruction was issued.
+     INSN is null if no instruction has yet set register X.  */
+  struct {
+    rtx insn;
+    unsigned int time;
+  } last_set[FIRST_PSEUDO_REGISTER];
+
+  /* The pipeline's current DFA state.  */
+  state_t dfa_state;
+};
+
+/* Reset STATE to the initial simulation state.  */
+
+static void
+mips_sim_reset (struct mips_sim *state)
+{
+  /* The scheduler hooks operate on the global CURR_STATE, so point it
+     at our DFA state first.  */
+  curr_state = state->dfa_state;
+
+  state->time = 0;
+  state->insns_left = state->issue_rate;
+  memset (&state->last_set, 0, sizeof (state->last_set));
+  state_reset (curr_state);
+
+  targetm.sched.init (0, false, 0);
+  advance_state (curr_state);
+}
+
+/* Initialize STATE before its first use.  DFA_STATE points to an
+   allocated but uninitialized DFA state.  */
+
+static void
+mips_sim_init (struct mips_sim *state, state_t dfa_state)
+{
+  /* These scheduler hooks are optional; call them only if the target
+     provides them.  */
+  if (targetm.sched.init_dfa_pre_cycle_insn)
+    targetm.sched.init_dfa_pre_cycle_insn ();
+
+  if (targetm.sched.init_dfa_post_cycle_insn)
+    targetm.sched.init_dfa_post_cycle_insn ();
+
+  state->issue_rate = mips_issue_rate ();
+  state->dfa_state = dfa_state;
+  mips_sim_reset (state);
+}
+
+/* Advance STATE by one clock cycle.  */
+
+static void
+mips_sim_next_cycle (struct mips_sim *state)
+{
+  curr_state = state->dfa_state;
+
+  state->time++;
+  state->insns_left = state->issue_rate;
+  advance_state (curr_state);
+}
+
+/* Advance simulation state STATE until instruction INSN can read
+   register REG.  */
+
+static void
+mips_sim_wait_reg (struct mips_sim *state, rtx insn, rtx reg)
+{
+  unsigned int regno, end_regno;
+
+  /* Check every hard register covered by REG (multi-word values span
+     several registers).  */
+  end_regno = END_REGNO (reg);
+  for (regno = REGNO (reg); regno < end_regno; regno++)
+    if (state->last_set[regno].insn != 0)
+      {
+	unsigned int t;
+
+	/* T is the earliest cycle at which the value written by the
+	   last setter is available to INSN.  */
+	t = (state->last_set[regno].time
+	     + insn_latency (state->last_set[regno].insn, insn));
+	while (state->time < t)
+	  mips_sim_next_cycle (state);
+    }
+}
+
+/* A for_each_rtx callback.  If *X is a register, advance simulation state
+   DATA until mips_sim_insn can read the register's value.  */
+
+static int
+mips_sim_wait_regs_2 (rtx *x, void *data)
+{
+  if (REG_P (*x))
+    mips_sim_wait_reg ((struct mips_sim *) data, mips_sim_insn, *x);
+  return 0;
+}
+
+/* Call mips_sim_wait_regs_2 (R, DATA) for each register R mentioned in *X.  */
+
+static void
+mips_sim_wait_regs_1 (rtx *x, void *data)
+{
+  for_each_rtx (x, mips_sim_wait_regs_2, data);
+}
+
+/* Advance simulation state STATE until all of INSN's register
+   dependencies are satisfied.  */
+
+static void
+mips_sim_wait_regs (struct mips_sim *state, rtx insn)
+{
+  /* Stash INSN in the global so the note_uses callback can see it.  */
+  mips_sim_insn = insn;
+  note_uses (&PATTERN (insn), mips_sim_wait_regs_1, state);
+}
+
+/* Advance simulation state STATE until the units required by
+   instruction INSN are available.  */
+
+static void
+mips_sim_wait_units (struct mips_sim *state, rtx insn)
+{
+  state_t tmp_state;
+
+  /* Probe the transition on a scratch copy of the DFA state so that a
+     failed attempt does not corrupt STATE itself.  */
+  tmp_state = alloca (state_size ());
+  while (state->insns_left == 0
+	 || (memcpy (tmp_state, state->dfa_state, state_size ()),
+	     state_transition (tmp_state, insn) >= 0))
+    mips_sim_next_cycle (state);
+}
+
+/* Advance simulation state STATE until INSN is ready to issue.  */
+
+static void
+mips_sim_wait_insn (struct mips_sim *state, rtx insn)
+{
+  /* First satisfy register dependencies, then functional-unit hazards.  */
+  mips_sim_wait_regs (state, insn);
+  mips_sim_wait_units (state, insn);
+}
+
+/* mips_sim_insn has just set X.  Update the LAST_SET array
+   in simulation state DATA.  */
+
+static void
+mips_sim_record_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
+{
+  struct mips_sim *state;
+
+  state = (struct mips_sim *) data;
+  if (REG_P (x))
+    {
+      unsigned int regno, end_regno;
+
+      /* Record the setter and issue time for each hard register that
+	 X occupies.  */
+      end_regno = END_REGNO (x);
+      for (regno = REGNO (x); regno < end_regno; regno++)
+	{
+	  state->last_set[regno].insn = mips_sim_insn;
+	  state->last_set[regno].time = state->time;
+	}
+    }
+}
+
+/* Issue instruction INSN in scheduler state STATE.  Assume that INSN
+   can issue immediately (i.e., that mips_sim_wait_insn has already
+   been called).  */
+
+static void
+mips_sim_issue_insn (struct mips_sim *state, rtx insn)
+{
+  curr_state = state->dfa_state;
+
+  state_transition (curr_state, insn);
+  state->insns_left = targetm.sched.variable_issue (0, false, insn,
+						    state->insns_left);
+
+  /* Record the registers INSN sets, for later dependence checks.  */
+  mips_sim_insn = insn;
+  note_stores (PATTERN (insn), mips_sim_record_set, state);
+}
+
+/* Simulate issuing a NOP in state STATE.  */
+
+static void
+mips_sim_issue_nop (struct mips_sim *state)
+{
+  /* A NOP consumes an issue slot; start a fresh cycle if none is left.  */
+  if (state->insns_left == 0)
+    mips_sim_next_cycle (state);
+  state->insns_left--;
+}
+
+/* Update simulation state STATE so that it's ready to accept the instruction
+   after INSN.  INSN should be part of the main rtl chain, not a member of a
+   SEQUENCE.  */
+
+static void
+mips_sim_finish_insn (struct mips_sim *state, rtx insn)
+{
+  /* If INSN is a jump with an implicit delay slot, simulate a nop.  */
+  if (JUMP_P (insn))
+    mips_sim_issue_nop (state);
+
+  switch (GET_CODE (SEQ_BEGIN (insn)))
+    {
+    case CODE_LABEL:
+    case CALL_INSN:
+      /* We can't predict the processor state after a call or label.  */
+      mips_sim_reset (state);
+      break;
+
+    case JUMP_INSN:
+      /* The delay slots of branch likely instructions are only executed
+	 when the branch is taken.  Therefore, if the caller has simulated
+	 the delay slot instruction, STATE does not really reflect the state
+	 of the pipeline for the instruction after the delay slot.  Also,
+	 branch likely instructions tend to incur a penalty when not taken,
+	 so there will probably be an extra delay between the branch and
+	 the instruction after the delay slot.  */
+      if (INSN_ANNULLED_BRANCH_P (SEQ_BEGIN (insn)))
+	mips_sim_reset (state);
+      break;
+
+    default:
+      break;
+    }
+}
+
+/* Use simulator state STATE to calculate the execution time of
+   instruction sequence SEQ, in cycles.  */
+
+static unsigned int
+mips_seq_time (struct mips_sim *state, rtx seq)
+{
+  mips_sim_reset (state);
+  /* Issue each instruction as soon as its dependencies allow and
+     return the final simulated time.  */
+  for (rtx insn = seq; insn; insn = NEXT_INSN (insn))
+    {
+      mips_sim_wait_insn (state, insn);
+      mips_sim_issue_insn (state, insn);
+    }
+  return state->time;
+}
+
+/* Return the execution-time cost of mips_tuning_info.fast_mult_zero_zero_p
+   setting SETTING, using STATE to simulate instruction sequences.  */
+
+static unsigned int
+mips_mult_zero_zero_cost (struct mips_sim *state, bool setting)
+{
+  /* Temporarily install SETTING so that mips_emit_move_or_split expands
+     the HI/LO clear using the strategy under test.  */
+  mips_tuning_info.fast_mult_zero_zero_p = setting;
+  start_sequence ();
+
+  enum machine_mode dword_mode = TARGET_64BIT ? TImode : DImode;
+  rtx hilo = gen_rtx_REG (dword_mode, MD_REG_FIRST);
+  mips_emit_move_or_split (hilo, const0_rtx, SPLIT_FOR_SPEED);
+
+  /* If the target provides mulsidi3_32bit then that's the most likely
+     consumer of the result.  Test for bypasses.  */
+  if (dword_mode == DImode && HAVE_maddsidi4)
+    {
+      rtx gpr = gen_rtx_REG (SImode, GP_REG_FIRST + 4);
+      emit_insn (gen_maddsidi4 (hilo, gpr, gpr, hilo));
+    }
+
+  unsigned int time = mips_seq_time (state, get_insns ());
+  end_sequence ();
+  return time;
+}
+
+/* Check the relative speeds of "MULT $0,$0" and "MTLO $0; MTHI $0"
+   and set up mips_tuning_info.fast_mult_zero_zero_p accordingly.
+   Prefer MULT -- which is shorter -- in the event of a tie.  */
+
+static void
+mips_set_fast_mult_zero_zero_p (struct mips_sim *state)
+{
+  if (TARGET_MIPS16)
+    /* No MTLO or MTHI available.  */
+    mips_tuning_info.fast_mult_zero_zero_p = true;
+  else
+    {
+      /* Time both strategies on the simulated pipeline; <= implements
+	 the tie-break in favor of MULT.  */
+      unsigned int true_time = mips_mult_zero_zero_cost (state, true);
+      unsigned int false_time = mips_mult_zero_zero_cost (state, false);
+      mips_tuning_info.fast_mult_zero_zero_p = (true_time <= false_time);
+    }
+}
+
+/* Set up costs based on the current architecture and tuning settings.  */
+
+static void
+mips_set_tuning_info (void)
+{
+  /* Skip the (relatively expensive) simulation if the cached results
+     already match the current arch/tune/MIPS16 combination.  */
+  if (mips_tuning_info.initialized_p
+      && mips_tuning_info.arch == mips_arch
+      && mips_tuning_info.tune == mips_tune
+      && mips_tuning_info.mips16_p == TARGET_MIPS16)
+    return;
+
+  mips_tuning_info.arch = mips_arch;
+  mips_tuning_info.tune = mips_tune;
+  mips_tuning_info.mips16_p = TARGET_MIPS16;
+  mips_tuning_info.initialized_p = true;
+
+  /* The pipeline simulator needs the DFA infrastructure, which must not
+     already be in use (see mips_expand_to_rtl_hook).  */
+  dfa_start ();
+
+  struct mips_sim state;
+  mips_sim_init (&state, alloca (state_size ()));
+
+  mips_set_fast_mult_zero_zero_p (&state);
+
+  dfa_finish ();
+}
+
+/* Implement TARGET_EXPAND_TO_RTL_HOOK.  */
+
+static void
+mips_expand_to_rtl_hook (void)
+{
+  /* We need to call this at a point where we can safely create sequences
+     of instructions, so TARGET_OVERRIDE_OPTIONS is too early.  We also
+     need to call it at a point where the DFA infrastructure is not
+     already in use, so we can't just call it lazily on demand.
+
+     At present, mips_tuning_info is only needed during post-expand
+     RTL passes such as split_insns, so this hook should be early enough.
+     We may need to move the call elsewhere if mips_tuning_info starts
+     to be used for other things (such as rtx_costs, or expanders that
+     could be called during gimple optimization).  */
+  mips_set_tuning_info ();
+}
+
+/* The VR4130 pipeline issues aligned pairs of instructions together,
+   but it stalls the second instruction if it depends on the first.
+   In order to cut down the amount of logic required, this dependence
+   check is not based on a full instruction decode.  Instead, any non-SPECIAL
+   instruction is assumed to modify the register specified by bits 20-16
+   (which is usually the "rt" field).
+
+   In BEQ, BEQL, BNE and BNEL instructions, the rt field is actually an
+   input, so we can end up with a false dependence between the branch
+   and its delay slot.  If this situation occurs in instruction INSN,
+   try to avoid it by swapping rs and rt.  */
+
+static void
+vr4130_avoid_branch_rt_conflict (rtx insn)
+{
+  rtx first, second;
+
+  /* FIRST/SECOND are the branch and its delay-slot insn when INSN is
+     a SEQUENCE; otherwise both are INSN itself.  */
+  first = SEQ_BEGIN (insn);
+  second = SEQ_END (insn);
+  if (JUMP_P (first)
+      && NONJUMP_INSN_P (second)
+      && GET_CODE (PATTERN (first)) == SET
+      && GET_CODE (SET_DEST (PATTERN (first))) == PC
+      && GET_CODE (SET_SRC (PATTERN (first))) == IF_THEN_ELSE)
+    {
+      /* Check for the right kind of condition.  EQ and NE are symmetric,
+	 so swapping the operands preserves the branch's meaning.  */
+      rtx cond = XEXP (SET_SRC (PATTERN (first)), 0);
+      if ((GET_CODE (cond) == EQ || GET_CODE (cond) == NE)
+	  && REG_P (XEXP (cond, 0))
+	  && REG_P (XEXP (cond, 1))
+	  && reg_referenced_p (XEXP (cond, 1), PATTERN (second))
+	  && !reg_referenced_p (XEXP (cond, 0), PATTERN (second)))
+	{
+	  /* SECOND mentions the rt register but not the rs register.  */
+	  rtx tmp = XEXP (cond, 0);
+	  XEXP (cond, 0) = XEXP (cond, 1);
+	  XEXP (cond, 1) = tmp;
+	}
+    }
+}
+
+/* Implement -mvr4130-align.  Go through each basic block and simulate the
+   processor pipeline.  If we find that a pair of instructions could execute
+   in parallel, and the first of those instructions is not 8-byte aligned,
+   insert a nop to make it aligned.  */
+
+static void
+vr4130_align_insns (void)
+{
+  struct mips_sim state;
+  rtx insn, subinsn, last, last2, next;
+  bool aligned_p;
+
+  dfa_start ();
+
+  /* LAST is the last instruction before INSN to have a nonzero length.
+     LAST2 is the last such instruction before LAST.  */
+  last = 0;
+  last2 = 0;
+
+  /* ALIGNED_P is true if INSN is known to be at an aligned address.  */
+  aligned_p = true;
+
+  mips_sim_init (&state, alloca (state_size ()));
+  for (insn = get_insns (); insn != 0; insn = next)
+    {
+      unsigned int length;
+
+      next = NEXT_INSN (insn);
+
+      /* See the comment above vr4130_avoid_branch_rt_conflict for details.
+	 This isn't really related to the alignment pass, but we do it on
+	 the fly to avoid a separate instruction walk.  */
+      vr4130_avoid_branch_rt_conflict (insn);
+
+      length = get_attr_length (insn);
+      if (length > 0 && USEFUL_INSN_P (insn))
+	FOR_EACH_SUBINSN (subinsn, insn)
+	  {
+	    mips_sim_wait_insn (&state, subinsn);
+
+	    /* If we want this instruction to issue in parallel with the
+	       previous one, make sure that the previous instruction is
+	       aligned.  There are several reasons why this isn't worthwhile
+	       when the second instruction is a call:
+
+	       - Calls are less likely to be performance critical,
+	       - There's a good chance that the delay slot can execute
+		 in parallel with the call.
+	       - The return address would then be unaligned.
+
+	       In general, if we're going to insert a nop between instructions
+	       X and Y, it's better to insert it immediately after X.  That
+	       way, if the nop makes Y aligned, it will also align any labels
+	       between X and Y.  */
+	    if (state.insns_left != state.issue_rate
+		&& !CALL_P (subinsn))
+	      {
+		if (subinsn == SEQ_BEGIN (insn) && aligned_p)
+		  {
+		    /* SUBINSN is the first instruction in INSN and INSN is
+		       aligned.  We want to align the previous instruction
+		       instead, so insert a nop between LAST2 and LAST.
+
+		       Note that LAST could be either a single instruction
+		       or a branch with a delay slot.  In the latter case,
+		       LAST, like INSN, is already aligned, but the delay
+		       slot must have some extra delay that stops it from
+		       issuing at the same time as the branch.  We therefore
+		       insert a nop before the branch in order to align its
+		       delay slot.  */
+		    gcc_assert (last2);
+		    emit_insn_after (gen_nop (), last2);
+		    aligned_p = false;
+		  }
+		else if (subinsn != SEQ_BEGIN (insn) && !aligned_p)
+		  {
+		    /* SUBINSN is the delay slot of INSN, but INSN is
+		       currently unaligned.  Insert a nop between
+		       LAST and INSN to align it.  */
+		    gcc_assert (last);
+		    emit_insn_after (gen_nop (), last);
+		    aligned_p = true;
+		  }
+	      }
+	    mips_sim_issue_insn (&state, subinsn);
+	  }
+      mips_sim_finish_insn (&state, insn);
+
+      /* Update LAST, LAST2 and ALIGNED_P for the next instruction.  */
+      length = get_attr_length (insn);
+      if (length > 0)
+	{
+	  /* If the instruction is an asm statement or multi-instruction
+	     mips.md pattern, the length is only an estimate.  Insert an
+	     8 byte alignment after it so that the following instructions
+	     can be handled correctly.  */
+	  if (NONJUMP_INSN_P (SEQ_BEGIN (insn))
+	      && (recog_memoized (insn) < 0 || length >= 8))
+	    {
+	      next = emit_insn_after (gen_align (GEN_INT (3)), insn);
+	      next = NEXT_INSN (next);
+	      mips_sim_next_cycle (&state);
+	      aligned_p = true;
+	    }
+	  else if (length & 4)
+	    /* An odd number of 4-byte words flips the alignment.  */
+	    aligned_p = !aligned_p;
+	  last2 = last;
+	  last = insn;
+	}
+
+      /* See whether INSN is an aligned label.  */
+      if (LABEL_P (insn) && label_to_alignment (insn) >= 3)
+	aligned_p = true;
+    }
+  dfa_finish ();
+}
+
+/* This structure records that the current function has a LO_SUM
+   involving SYMBOL_REF or LABEL_REF BASE and that MAX_OFFSET is
+   the largest offset applied to BASE by all such LO_SUMs.  */
+struct mips_lo_sum_offset {
+  /* The SYMBOL_REF or LABEL_REF the LO_SUMs refer to.  */
+  rtx base;
+  /* The largest offset seen so far for BASE.  */
+  HOST_WIDE_INT offset;
+};
+
+/* Return a hash value for SYMBOL_REF or LABEL_REF BASE.  */
+
+static hashval_t
+mips_hash_base (rtx base)
+{
+  int do_not_record_p;
+
+  /* DO_NOT_RECORD_P is required by hash_rtx but ignored here.  */
+  return hash_rtx (base, GET_MODE (base), &do_not_record_p, NULL, false);
+}
+
+/* Hashtable helpers.  */
+
+struct mips_lo_sum_offset_hasher : typed_free_remove <mips_lo_sum_offset>
+{
+  typedef mips_lo_sum_offset value_type;
+  typedef rtx_def compare_type;
+  static inline hashval_t hash (const value_type *);
+  static inline bool equal (const value_type *, const compare_type *);
+};
+
+/* Hash-table callbacks for mips_lo_sum_offsets.  */
+
+inline hashval_t
+mips_lo_sum_offset_hasher::hash (const value_type *entry)
+{
+  return mips_hash_base (entry->base);
+}
+
+inline bool
+mips_lo_sum_offset_hasher::equal (const value_type *entry,
+				  const compare_type *value)
+{
+  return rtx_equal_p (entry->base, value);
+}
+
+/* A hash table mapping symbolic bases to their largest LO_SUM offset.  */
+typedef hash_table <mips_lo_sum_offset_hasher> mips_offset_table;
+
+/* Look up symbolic constant X in HTAB, which is a hash table of
+   mips_lo_sum_offsets.  If OPTION is NO_INSERT, return true if X can be
+   paired with a recorded LO_SUM, otherwise record X in the table.  */
+
+static bool
+mips_lo_sum_offset_lookup (mips_offset_table htab, rtx x,
+			   enum insert_option option)
+{
+  rtx base, offset;
+  mips_lo_sum_offset **slot;
+  struct mips_lo_sum_offset *entry;
+
+  /* Split X into a base and offset.  */
+  split_const (x, &base, &offset);
+  if (UNSPEC_ADDRESS_P (base))
+    base = UNSPEC_ADDRESS (base);
+
+  /* Look up the base in the hash table.  With NO_INSERT, a null slot
+     means no LO_SUM was recorded for BASE.  */
+  slot = htab.find_slot_with_hash (base, mips_hash_base (base), option);
+  if (slot == NULL)
+    return false;
+
+  entry = (struct mips_lo_sum_offset *) *slot;
+  if (option == INSERT)
+    {
+      if (entry == NULL)
+	{
+	  /* First LO_SUM for BASE: allocate a new entry (freed by the
+	     table's typed_free_remove policy on dispose).  */
+	  entry = XNEW (struct mips_lo_sum_offset);
+	  entry->base = base;
+	  entry->offset = INTVAL (offset);
+	  *slot = entry;
+	}
+      else
+	{
+	  /* Track the maximum offset applied to BASE.  */
+	  if (INTVAL (offset) > entry->offset)
+	    entry->offset = INTVAL (offset);
+	}
+    }
+  return INTVAL (offset) <= entry->offset;
+}
+
+/* A for_each_rtx callback for which DATA is a mips_lo_sum_offset hash table.
+   Record every LO_SUM in *LOC.  */
+
+static int
+mips_record_lo_sum (rtx *loc, void *data)
+{
+  /* XEXP (*LOC, 1) is the symbolic low-part operand of the LO_SUM.  */
+  if (GET_CODE (*loc) == LO_SUM)
+    mips_lo_sum_offset_lookup (*(mips_offset_table*) data,
+			       XEXP (*loc, 1), INSERT);
+  return 0;
+}
+
+/* Return true if INSN is a SET of an orphaned high-part relocation.
+   HTAB is a hash table of mips_lo_sum_offsets that describes all the
+   LO_SUMs in the current function.  */
+
+static bool
+mips_orphaned_high_part_p (mips_offset_table htab, rtx insn)
+{
+  enum mips_symbol_type type;
+  rtx x, set;
+
+  set = single_set (insn);
+  if (set)
+    {
+      /* Check for %his.  A HIGH is orphaned when no recorded LO_SUM
+	 can pair with it.  */
+      x = SET_SRC (set);
+      if (GET_CODE (x) == HIGH
+	  && absolute_symbolic_operand (XEXP (x, 0), VOIDmode))
+	return !mips_lo_sum_offset_lookup (htab, XEXP (x, 0), NO_INSERT);
+
+      /* Check for local %gots (and %got_pages, which is redundant but OK).  */
+      if (GET_CODE (x) == UNSPEC
+	  && XINT (x, 1) == UNSPEC_LOAD_GOT
+	  && mips_symbolic_constant_p (XVECEXP (x, 0, 1),
+				       SYMBOL_CONTEXT_LEA, &type)
+	  && type == SYMBOL_GOTOFF_PAGE)
+	return !mips_lo_sum_offset_lookup (htab, XVECEXP (x, 0, 1), NO_INSERT);
+    }
+  return false;
+}
+
+/* Subroutine of mips_reorg_process_insns.  If there is a hazard between
+   INSN and a previous instruction, avoid it by inserting nops after
+   instruction AFTER.
+
+   *DELAYED_REG and *HILO_DELAY describe the hazards that apply at
+   this point.  If *DELAYED_REG is non-null, INSN must wait a cycle
+   before using the value of that register.  *HILO_DELAY counts the
+   number of instructions since the last hilo hazard (that is,
+   the number of instructions since the last MFLO or MFHI).
+
+   After inserting nops for INSN, update *DELAYED_REG and *HILO_DELAY
+   for the next instruction.
+
+   LO_REG is an rtx for the LO register, used in dependence checking.  */
+
+static void
+mips_avoid_hazard (rtx after, rtx insn, int *hilo_delay,
+		   rtx *delayed_reg, rtx lo_reg)
+{
+  rtx pattern, set;
+  int nops, ninsns;
+
+  pattern = PATTERN (insn);
+
+  /* Do not put the whole function in .set noreorder if it contains
+     an asm statement.  We don't know whether there will be hazards
+     between the asm statement and the gcc-generated code.  */
+  if (GET_CODE (pattern) == ASM_INPUT || asm_noperands (pattern) >= 0)
+    cfun->machine->all_noreorder_p = false;
+
+  /* Ignore zero-length instructions (barriers and the like).  */
+  ninsns = get_attr_length (insn) / 4;
+  if (ninsns == 0)
+    return;
+
+  /* Work out how many nops are needed.  Note that we only care about
+     registers that are explicitly mentioned in the instruction's pattern.
+     It doesn't matter that calls use the argument registers or that they
+     clobber hi and lo.  */
+  if (*hilo_delay < 2 && reg_set_p (lo_reg, pattern))
+    nops = 2 - *hilo_delay;
+  else if (*delayed_reg != 0 && reg_referenced_p (*delayed_reg, pattern))
+    nops = 1;
+  else
+    nops = 0;
+
+  /* Insert the nops between this instruction and the previous one.
+     Each new nop takes us further from the last hilo hazard.  */
+  *hilo_delay += nops;
+  while (nops-- > 0)
+    emit_insn_after (gen_hazard_nop (), after);
+
+  /* Set up the state for the next instruction.  */
+  *hilo_delay += ninsns;
+  *delayed_reg = 0;
+  if (INSN_CODE (insn) >= 0)
+    switch (get_attr_hazard (insn))
+      {
+      case HAZARD_NONE:
+	break;
+
+      case HAZARD_HILO:
+	/* INSN reads HI/LO, so the hazard window restarts here.  */
+	*hilo_delay = 0;
+	break;
+
+      case HAZARD_DELAY:
+	/* The next instruction must not use INSN's destination.  */
+	set = single_set (insn);
+	gcc_assert (set);
+	*delayed_reg = SET_DEST (set);
+	break;
+      }
+}
+
+/* Go through the instruction stream and insert nops where necessary.
+   Also delete any high-part relocations whose partnering low parts
+   are now all dead.  See if the whole function can then be put into
+   .set noreorder and .set nomacro.  */
+
+static void
+mips_reorg_process_insns (void)
+{
+  rtx insn, last_insn, subinsn, next_insn, lo_reg, delayed_reg;
+  int hilo_delay;
+  mips_offset_table htab;
+
+  /* Force all instructions to be split into their final form.  */
+  split_all_insns_noflow ();
+
+  /* Recalculate instruction lengths without taking nops into account.  */
+  cfun->machine->ignore_hazard_length_p = true;
+  shorten_branches (get_insns ());
+
+  cfun->machine->all_noreorder_p = true;
+
+  /* We don't track MIPS16 PC-relative offsets closely enough to make
+     a good job of "set .noreorder" code in MIPS16 mode.  */
+  if (TARGET_MIPS16)
+    cfun->machine->all_noreorder_p = false;
+
+  /* Code that doesn't use explicit relocs can't be ".set nomacro".  */
+  if (!TARGET_EXPLICIT_RELOCS)
+    cfun->machine->all_noreorder_p = false;
+
+  /* Profiled functions can't be all noreorder because the profiler
+     support uses assembler macros.  */
+  if (crtl->profile)
+    cfun->machine->all_noreorder_p = false;
+
+  /* Code compiled with -mfix-vr4120, -mfix-rm7000 or -mfix-24k can't be
+     all noreorder because we rely on the assembler to work around some
+     errata.  The R5900 too has several bugs.  */
+  if (TARGET_FIX_VR4120
+      || TARGET_FIX_RM7000
+      || TARGET_FIX_24K
+      || TARGET_MIPS5900)
+    cfun->machine->all_noreorder_p = false;
+
+  /* The same is true for -mfix-vr4130 if we might generate MFLO or
+     MFHI instructions.  Note that we avoid using MFLO and MFHI if
+     the VR4130 MACC and DMACC instructions are available instead;
+     see the *mfhilo_{si,di}_macc patterns.  */
+  if (TARGET_FIX_VR4130 && !ISA_HAS_MACCHI)
+    cfun->machine->all_noreorder_p = false;
+
+  htab.create (37);
+
+  /* Make a first pass over the instructions, recording all the LO_SUMs.  */
+  for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn))
+    FOR_EACH_SUBINSN (subinsn, insn)
+      if (USEFUL_INSN_P (subinsn))
+	{
+	  /* NOTE(review): this reads PATTERN (insn), not PATTERN (subinsn),
+	     while iterating subinsns; for a SEQUENCE the asm check below is
+	     applied to the whole sequence.  Looks intentional upstream --
+	     confirm before changing.  */
+	  rtx body = PATTERN (insn);
+	  int noperands = asm_noperands (body);
+	  if (noperands >= 0)
+	    {
+	      /* For asm statements, only record LO_SUMs in operands that
+		 the asm template actually references.  */
+	      rtx *ops = XALLOCAVEC (rtx, noperands);
+	      bool *used = XALLOCAVEC (bool, noperands);
+	      const char *string = decode_asm_operands (body, ops, NULL, NULL,
+							NULL, NULL);
+	      get_referenced_operands (string, used, noperands);
+	      for (int i = 0; i < noperands; ++i)
+		if (used[i])
+		  for_each_rtx (&ops[i], mips_record_lo_sum, &htab);
+	    }
+	  else
+	    for_each_rtx (&PATTERN (subinsn), mips_record_lo_sum, &htab);
+	}
+
+  last_insn = 0;
+  hilo_delay = 2;
+  delayed_reg = 0;
+  lo_reg = gen_rtx_REG (SImode, LO_REGNUM);
+
+  /* Make a second pass over the instructions.  Delete orphaned
+     high-part relocations or turn them into NOPs.  Avoid hazards
+     by inserting NOPs.  */
+  for (insn = get_insns (); insn != 0; insn = next_insn)
+    {
+      next_insn = NEXT_INSN (insn);
+      if (USEFUL_INSN_P (insn))
+	{
+	  if (GET_CODE (PATTERN (insn)) == SEQUENCE)
+	    {
+	      /* If we find an orphaned high-part relocation in a delay
+		 slot, it's easier to turn that instruction into a NOP than
+		 to delete it.  The delay slot will be a NOP either way.  */
+	      FOR_EACH_SUBINSN (subinsn, insn)
+		if (INSN_P (subinsn))
+		  {
+		    if (mips_orphaned_high_part_p (htab, subinsn))
+		      {
+			PATTERN (subinsn) = gen_nop ();
+			INSN_CODE (subinsn) = CODE_FOR_nop;
+		      }
+		    mips_avoid_hazard (last_insn, subinsn, &hilo_delay,
+				       &delayed_reg, lo_reg);
+		  }
+	      last_insn = insn;
+	    }
+	  else
+	    {
+	      /* INSN is a single instruction.  Delete it if it's an
+		 orphaned high-part relocation.  */
+	      if (mips_orphaned_high_part_p (htab, insn))
+		delete_insn (insn);
+	      /* Also delete cache barriers if the last instruction
+		 was an annulled branch.  INSN will not be speculatively
+		 executed.  */
+	      else if (recog_memoized (insn) == CODE_FOR_r10k_cache_barrier
+		       && last_insn
+		       && JUMP_P (SEQ_BEGIN (last_insn))
+		       && INSN_ANNULLED_BRANCH_P (SEQ_BEGIN (last_insn)))
+		delete_insn (insn);
+	      else
+		{
+		  mips_avoid_hazard (last_insn, insn, &hilo_delay,
+				     &delayed_reg, lo_reg);
+		  last_insn = insn;
+		}
+	    }
+	}
+    }
+
+  htab.dispose ();
+}
+
+/* Return true if the function has a long branch instruction.  */
+
+static bool
+mips_has_long_branch_p (void)
+{
+  rtx insn, subinsn;
+  int normal_length;
+
+  /* We need up-to-date instruction lengths.  */
+  shorten_branches (get_insns ());
+
+  /* Look for a branch that is longer than normal.  The normal length for
+     non-MIPS16 branches is 8, because the length includes the delay slot.
+     It is 4 for MIPS16, because MIPS16 branches are extended instructions,
+     but they have no delay slot.  */
+  normal_length = (TARGET_MIPS16 ? 4 : 8);
+  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+    FOR_EACH_SUBINSN (subinsn, insn)
+      if (JUMP_P (subinsn)
+	  && get_attr_length (subinsn) > normal_length
+	  && (any_condjump_p (subinsn) || any_uncondjump_p (subinsn)))
+	return true;
+
+  return false;
+}
+
+/* If we are using a GOT, but have not decided to use a global pointer yet,
+   see whether we need one to implement long branches.  Convert the ghost
+   global-pointer instructions into real ones if so.  Return true if
+   anything changed.  */
+
+static bool
+mips_expand_ghost_gp_insns (void)
+{
+  /* Quick exit if we already know that we will or won't need a
+     global pointer.  */
+  if (!TARGET_USE_GOT
+      || cfun->machine->global_pointer == INVALID_REGNUM
+      || mips_must_initialize_gp_p ())
+    return false;
+
+  /* Run a full check for long branches.  */
+  if (!mips_has_long_branch_p ())
+    return false;
+
+  /* We've now established that we need $gp.  Splitting materializes the
+     ghost $gp-setup instructions.  */
+  cfun->machine->must_initialize_gp_p = true;
+  split_all_insns_noflow ();
+
+  return true;
+}
+
+/* Subroutine of mips_reorg to manage passes that require DF.  */
+
+static void
+mips_df_reorg (void)
+{
+  /* Create def-use chains.  DF_EQ_NOTES makes DF take REG_EQUAL/EQUIV
+     notes into account, which mips_pic_call_symbol_from_set relies on.  */
+  df_set_flags (DF_EQ_NOTES);
+  df_chain_add_problem (DF_UD_CHAIN);
+  df_analyze ();
+
+  if (TARGET_RELAX_PIC_CALLS)
+    mips_annotate_pic_calls ();
+
+  if (mips_r10k_cache_barrier != R10K_CACHE_BARRIER_NONE)
+    r10k_insert_cache_barriers ();
+
+  df_finish_pass (false);
+}
+
+/* Emit code to load LABEL_REF SRC into MIPS16 register DEST.  This is
+   called very late in mips_reorg, but the caller is required to run
+   mips16_lay_out_constants on the result.  */
+
+static void
+mips16_load_branch_target (rtx dest, rtx src)
+{
+  if (TARGET_ABICALLS && !TARGET_ABSOLUTE_ABICALLS)
+    {
+      rtx page, low;
+
+      /* PIC: load $gp (from the cprestore slot if one exists), then form
+	 the address as GOT page + low offset.  */
+      if (mips_cfun_has_cprestore_slot_p ())
+	mips_emit_move (dest, mips_cprestore_slot (dest, true));
+      else
+	mips_emit_move (dest, pic_offset_table_rtx);
+      page = mips_unspec_address (src, SYMBOL_GOTOFF_PAGE);
+      low = mips_unspec_address (src, SYMBOL_GOT_PAGE_OFST);
+      emit_insn (gen_rtx_SET (VOIDmode, dest,
+			      PMODE_INSN (gen_unspec_got, (dest, page))));
+      emit_insn (gen_rtx_SET (VOIDmode, dest,
+			      gen_rtx_LO_SUM (Pmode, dest, low)));
+    }
+  else
+    {
+      /* Non-PIC: load the absolute address directly.  */
+      src = mips_unspec_address (src, SYMBOL_ABSOLUTE);
+      mips_emit_move (dest, src);
+    }
+}
+
+/* If we're compiling a MIPS16 function, look for and split any long branches.
+   This must be called after all other instruction modifications in
+   mips_reorg.  */
+
+static void
+mips16_split_long_branches (void)
+{
+  bool something_changed;
+
+  if (!TARGET_MIPS16)
+    return;
+
+  /* Loop until the alignments for all targets are sufficient.
+     Splitting one branch can lengthen the function and push other
+     branches out of range, so iterate to a fixed point.  */
+  do
+    {
+      rtx insn;
+
+      shorten_branches (get_insns ());
+      something_changed = false;
+      for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+	if (JUMP_P (insn)
+	    && get_attr_length (insn) > 4
+	    && (any_condjump_p (insn) || any_uncondjump_p (insn)))
+	  {
+	    rtx old_label, new_label, temp, saved_temp;
+	    rtx target, jump, jump_sequence;
+
+	    start_sequence ();
+
+	    /* Free up a MIPS16 register by saving it in $1.  */
+	    saved_temp = gen_rtx_REG (Pmode, AT_REGNUM);
+	    temp = gen_rtx_REG (Pmode, GP_REG_FIRST + 2);
+	    emit_move_insn (saved_temp, temp);
+
+	    /* Load the branch target into TEMP.  */
+	    old_label = JUMP_LABEL (insn);
+	    target = gen_rtx_LABEL_REF (Pmode, old_label);
+	    mips16_load_branch_target (temp, target);
+
+	    /* Jump to the target and restore the register's
+	       original value.  */
+	    jump = emit_jump_insn (PMODE_INSN (gen_indirect_jump_and_restore,
+					       (temp, temp, saved_temp)));
+	    JUMP_LABEL (jump) = old_label;
+	    LABEL_NUSES (old_label)++;
+
+	    /* Rewrite any symbolic references that are supposed to use
+	       a PC-relative constant pool.  */
+	    mips16_lay_out_constants (false);
+
+	    if (simplejump_p (insn))
+	      /* We're going to replace INSN with a longer form.  */
+	      new_label = NULL_RTX;
+	    else
+	      {
+		/* Create a branch-around label for the original
+		   instruction.  */
+		new_label = gen_label_rtx ();
+		emit_label (new_label);
+	      }
+
+	    jump_sequence = get_insns ();
+	    end_sequence ();
+
+	    /* Insert the long form after INSN; a conditional INSN is
+	       inverted to branch around it, an unconditional INSN is
+	       simply deleted.  */
+	    emit_insn_after (jump_sequence, insn);
+	    if (new_label)
+	      invert_jump (insn, new_label, false);
+	    else
+	      delete_insn (insn);
+	    something_changed = true;
+	  }
+    }
+  while (something_changed);
+}
+
+/* Implement TARGET_MACHINE_DEPENDENT_REORG.  */
+
+static void
+mips_reorg (void)
+{
+  /* Restore the BLOCK_FOR_INSN pointers, which are needed by DF.  Also during
+     insn splitting in mips16_lay_out_constants, DF insn info is only kept up
+     to date if the CFG is available.  */
+  if (mips_cfg_in_reorg ())
+    compute_bb_for_insn ();
+  mips16_lay_out_constants (true);
+  if (mips_cfg_in_reorg ())
+    {
+      mips_df_reorg ();
+      free_bb_for_insn ();
+    }
+}
+
+/* We use a machine specific pass to do a second machine dependent reorg
+   pass after delay branch scheduling.  Returns 0 (no TODO flags).  */
+
+static unsigned int
+mips_machine_reorg2 (void)
+{
+  mips_reorg_process_insns ();
+  if (!TARGET_MIPS16
+      && TARGET_EXPLICIT_RELOCS
+      && TUNE_MIPS4130
+      && TARGET_VR4130_ALIGN)
+    vr4130_align_insns ();
+  if (mips_expand_ghost_gp_insns ())
+    /* The expansion could invalidate some of the VR4130 alignment
+       optimizations, but this should be an extremely rare case anyhow.  */
+    mips_reorg_process_insns ();
+  mips16_split_long_branches ();
+  return 0;
+}
+
+namespace {
+
+/* Pass metadata for the second machine-dependent reorg pass.  The pass
+   is registered (in mips_option_override) to run after the first
+   instance of the delay-branch scheduling pass, "dbr".  */
+const pass_data pass_data_mips_machine_reorg2 =
+{
+  RTL_PASS, /* type */
+  "mach2", /* name */
+  OPTGROUP_NONE, /* optinfo_flags */
+  false, /* has_gate */
+  true, /* has_execute */
+  TV_MACH_DEP, /* tv_id */
+  0, /* properties_required */
+  0, /* properties_provided */
+  0, /* properties_destroyed */
+  0, /* todo_flags_start */
+  TODO_verify_rtl_sharing, /* todo_flags_finish */
+};
+
+/* Thin opt_pass wrapper around mips_machine_reorg2; it has no gate,
+   so it always executes.  */
+class pass_mips_machine_reorg2 : public rtl_opt_pass
+{
+public:
+  pass_mips_machine_reorg2(gcc::context *ctxt)
+    : rtl_opt_pass(pass_data_mips_machine_reorg2, ctxt)
+  {}
+
+  /* opt_pass methods: */
+  unsigned int execute () { return mips_machine_reorg2 (); }
+
+}; // class pass_mips_machine_reorg2
+
+} // anon namespace
+
+/* Factory function used when registering the pass; the pass manager
+   takes ownership of the returned object.  */
+rtl_opt_pass *
+make_pass_mips_machine_reorg2 (gcc::context *ctxt)
+{
+  return new pass_mips_machine_reorg2 (ctxt);
+}
+
+
+/* Implement TARGET_ASM_OUTPUT_MI_THUNK.  Generate rtl rather than asm text
+   in order to avoid duplicating too much logic from elsewhere.
+
+   The emitted thunk adds DELTA to the incoming "this" pointer, then, if
+   VCALL_OFFSET is nonzero, also adds *(*this + VCALL_OFFSET), and
+   finally transfers control to FUNCTION.  The assembly is written to
+   FILE.  THUNK_FNDECL is unused.  */
+
+static void
+mips_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
+		      HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
+		      tree function)
+{
+  rtx this_rtx, temp1, temp2, insn, fnaddr;
+  bool use_sibcall_p;
+
+  /* Pretend to be a post-reload pass while generating rtl.  */
+  reload_completed = 1;
+
+  /* Mark the end of the (empty) prologue.  */
+  emit_note (NOTE_INSN_PROLOGUE_END);
+
+  /* Determine if we can use a sibcall to call FUNCTION directly.  */
+  fnaddr = XEXP (DECL_RTL (function), 0);
+  use_sibcall_p = (mips_function_ok_for_sibcall (function, NULL)
+		   && const_call_insn_operand (fnaddr, Pmode));
+
+  /* Determine if we need to load FNADDR from the GOT.  */
+  if (!use_sibcall_p
+      && (mips_got_symbol_type_p
+	  (mips_classify_symbol (fnaddr, SYMBOL_CONTEXT_LEA))))
+    {
+      /* Pick a global pointer.  Use a call-clobbered register if
+	 TARGET_CALL_SAVED_GP.  */
+      cfun->machine->global_pointer
+	= TARGET_CALL_SAVED_GP ? 15 : GLOBAL_POINTER_REGNUM;
+      cfun->machine->must_initialize_gp_p = true;
+      SET_REGNO (pic_offset_table_rtx, cfun->machine->global_pointer);
+
+      /* Set up the global pointer for n32 or n64 abicalls.  */
+      mips_emit_loadgp ();
+    }
+
+  /* We need two temporary registers in some cases.  */
+  temp1 = gen_rtx_REG (Pmode, 2);
+  temp2 = gen_rtx_REG (Pmode, 3);
+
+  /* Find out which register contains the "this" pointer.  If the
+     function returns in memory, the hidden return-slot pointer
+     occupies the first argument register.  */
+  if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
+    this_rtx = gen_rtx_REG (Pmode, GP_ARG_FIRST + 1);
+  else
+    this_rtx = gen_rtx_REG (Pmode, GP_ARG_FIRST);
+
+  /* Add DELTA to THIS_RTX.  Load the constant into a temporary first
+     if it does not fit in an immediate operand.  */
+  if (delta != 0)
+    {
+      rtx offset = GEN_INT (delta);
+      if (!SMALL_OPERAND (delta))
+	{
+	  mips_emit_move (temp1, offset);
+	  offset = temp1;
+	}
+      emit_insn (gen_add3_insn (this_rtx, this_rtx, offset));
+    }
+
+  /* If needed, add *(*THIS_RTX + VCALL_OFFSET) to THIS_RTX.  */
+  if (vcall_offset != 0)
+    {
+      rtx addr;
+
+      /* Set TEMP1 to *THIS_RTX.  */
+      mips_emit_move (temp1, gen_rtx_MEM (Pmode, this_rtx));
+
+      /* Set ADDR to a legitimate address for *THIS_RTX + VCALL_OFFSET.  */
+      addr = mips_add_offset (temp2, temp1, vcall_offset);
+
+      /* Load the offset and add it to THIS_RTX.  */
+      mips_emit_move (temp1, gen_rtx_MEM (Pmode, addr));
+      emit_insn (gen_add3_insn (this_rtx, this_rtx, temp1));
+    }
+
+  /* Jump to the target function.  Use a sibcall if direct jumps are
+     allowed, otherwise load the address into a register first.  */
+  if (use_sibcall_p)
+    {
+      insn = emit_call_insn (gen_sibcall_internal (fnaddr, const0_rtx));
+      SIBLING_CALL_P (insn) = 1;
+    }
+  else
+    {
+      /* This is messy.  GAS treats "la $25,foo" as part of a call
+	 sequence and may allow a global "foo" to be lazily bound.
+	 The general move patterns therefore reject this combination.
+
+	 In this context, lazy binding would actually be OK
+	 for TARGET_CALL_CLOBBERED_GP, but it's still wrong for
+	 TARGET_CALL_SAVED_GP; see mips_load_call_address.
+	 We must therefore load the address via a temporary
+	 register if mips_dangerous_for_la25_p.
+
+	 If we jump to the temporary register rather than $25,
+	 the assembler can use the move insn to fill the jump's
+	 delay slot.
+
+	 We can use the same technique for MIPS16 code, where $25
+	 is not a valid JR register.  */
+      if (TARGET_USE_PIC_FN_ADDR_REG
+	  && !TARGET_MIPS16
+	  && !mips_dangerous_for_la25_p (fnaddr))
+	temp1 = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
+      mips_load_call_address (MIPS_CALL_SIBCALL, temp1, fnaddr);
+
+      if (TARGET_USE_PIC_FN_ADDR_REG
+	  && REGNO (temp1) != PIC_FUNCTION_ADDR_REGNUM)
+	mips_emit_move (gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM), temp1);
+      emit_jump_insn (gen_indirect_jump (temp1));
+    }
+
+  /* Run just enough of rest_of_compilation.  This sequence was
+     "borrowed" from alpha.c.  */
+  insn = get_insns ();
+  split_all_insns_noflow ();
+  mips16_lay_out_constants (true);
+  shorten_branches (insn);
+  final_start_function (insn, file, 1);
+  final (insn, file, 1);
+  final_end_function ();
+
+  /* Clean up the vars set above.  Note that final_end_function resets
+     the global pointer for us.  */
+  reload_completed = 0;
+}
+
+
+/* The last argument passed to mips_set_compression_mode,
+   or negative if the function hasn't been called yet.  The variable is
+   unsigned, so "negative" really means all-ones: the initial -1 simply
+   compares unequal to every valid mode mask (0, MASK_MIPS16 or
+   MASK_MICROMIPS), forcing the first call to do a full switch.  */
+static unsigned int old_compression_mode = -1;
+
+/* Set up the target-dependent global state for ISA mode COMPRESSION_MODE,
+   which is either MASK_MIPS16 or MASK_MICROMIPS (or zero for the
+   standard encoding).  A no-op if the mode is unchanged since the last
+   call; see old_compression_mode above.  */
+
+static void
+mips_set_compression_mode (unsigned int compression_mode)
+{
+
+  if (compression_mode == old_compression_mode)
+    return;
+
+  /* Restore base settings of various flags.  */
+  target_flags = mips_base_target_flags;
+  flag_schedule_insns = mips_base_schedule_insns;
+  flag_reorder_blocks_and_partition = mips_base_reorder_blocks_and_partition;
+  flag_move_loop_invariants = mips_base_move_loop_invariants;
+  align_loops = mips_base_align_loops;
+  align_jumps = mips_base_align_jumps;
+  align_functions = mips_base_align_functions;
+  target_flags &= ~(MASK_MIPS16 | MASK_MICROMIPS);
+  target_flags |= compression_mode;
+
+  if (compression_mode & MASK_MIPS16)
+    {
+      /* Switch to MIPS16 mode.  */
+      target_flags |= MASK_MIPS16;
+
+      /* Turn off SYNCI if it was on, MIPS16 doesn't support it.  */
+      target_flags &= ~MASK_SYNCI;
+
+      /* Don't run the scheduler before reload, since it tends to
+         increase register pressure.  */
+      flag_schedule_insns = 0;
+
+      /* Don't do hot/cold partitioning.  mips16_lay_out_constants expects
+	 the whole function to be in a single section.  */
+      flag_reorder_blocks_and_partition = 0;
+
+      /* Don't move loop invariants, because it tends to increase
+	 register pressure.  It also introduces an extra move in cases
+	 where the constant is the first operand in a two-operand binary
+	 instruction, or when it forms a register argument to a function
+	 call.  */
+      flag_move_loop_invariants = 0;
+
+      target_flags |= MASK_EXPLICIT_RELOCS;
+
+      /* Experiments suggest we get the best overall section-anchor
+	 results from using the range of an unextended LW or SW.  Code
+	 that makes heavy use of byte or short accesses can do better
+	 with ranges of 0...31 and 0...63 respectively, but most code is
+	 sensitive to the range of LW and SW instead.  */
+      targetm.min_anchor_offset = 0;
+      targetm.max_anchor_offset = 127;
+
+      targetm.const_anchor = 0;
+
+      /* MIPS16 has no BAL instruction.  */
+      target_flags &= ~MASK_RELAX_PIC_CALLS;
+
+      /* The R4000 errata don't apply to any known MIPS16 cores.
+	 It's simpler to make the R4000 fixes and MIPS16 mode
+	 mutually exclusive.  */
+      target_flags &= ~MASK_FIX_R4000;
+
+      if (flag_pic && !TARGET_OLDABI)
+	sorry ("MIPS16 PIC for ABIs other than o32 and o64");
+
+      if (TARGET_XGOT)
+	sorry ("MIPS16 -mxgot code");
+
+      if (TARGET_HARD_FLOAT_ABI && !TARGET_OLDABI)
+	sorry ("hard-float MIPS16 code for ABIs other than o32 and o64");
+    }
+  else
+    {
+      /* Switch to microMIPS or the standard encoding.  */
+
+      if (TARGET_MICROMIPS)
+	/* Avoid branch likely.  */
+	target_flags &= ~MASK_BRANCHLIKELY;
+
+      /* Provide default values for align_* for 64-bit targets.  */
+      if (TARGET_64BIT)
+	{
+	  if (align_loops == 0)
+	    align_loops = 8;
+	  if (align_jumps == 0)
+	    align_jumps = 8;
+	  if (align_functions == 0)
+	    align_functions = 8;
+	}
+
+      targetm.min_anchor_offset = -32768;
+      targetm.max_anchor_offset = 32767;
+
+      targetm.const_anchor = 0x8000;
+    }
+
+  /* (Re)initialize MIPS target internals for new ISA.  */
+  mips_init_relocs ();
+
+  if (compression_mode & MASK_MIPS16)
+    {
+      /* Cache the MIPS16 variant of the global target state the first
+	 time round; later switches just restore the saved copy.  */
+      if (!mips16_globals)
+	mips16_globals = save_target_globals_default_opts ();
+      else
+	restore_target_globals (mips16_globals);
+    }
+  else
+    restore_target_globals (&default_target_globals);
+
+  old_compression_mode = compression_mode;
+}
+
+/* Implement TARGET_SET_CURRENT_FUNCTION.  Decide whether the current
+   function should use the MIPS16 or microMIPS ISA and switch modes
+   accordingly.  */
+
+static void
+mips_set_current_function (tree fndecl)
+{
+  /* mips_get_compress_mode yields the compression flags requested for
+     FNDECL; the switch is a no-op when the mode is unchanged (see
+     mips_set_compression_mode).  */
+  mips_set_compression_mode (mips_get_compress_mode (fndecl));
+}
+
+/* Allocate a chunk of memory for per-function machine-dependent data.
+   Installed as init_machine_status in mips_option_override.  */
+
+static struct machine_function *
+mips_init_machine_status (void)
+{
+  /* The structure is zero-initialized and garbage-collected.  */
+  return ggc_alloc_cleared_machine_function ();
+}
+
+/* Return the processor associated with the given ISA level, or null
+   if the ISA isn't valid.  */
+
+static const struct mips_cpu_info *
+mips_cpu_info_from_isa (int isa)
+{
+  const struct mips_cpu_info *info;
+  const struct mips_cpu_info *end
+    = mips_cpu_info_table + ARRAY_SIZE (mips_cpu_info_table);
+
+  /* Return the first table entry that matches this ISA level.  */
+  for (info = mips_cpu_info_table; info < end; info++)
+    if (info->isa == isa)
+      return info;
+
+  return NULL;
+}
+
+/* Return a mips_cpu_info entry determined by an option valued
+   OPT.  */
+
+static const struct mips_cpu_info *
+mips_cpu_info_from_opt (int opt)
+{
+  /* -march=native should have been rewritten by the driver before
+     we get here.  */
+  if (opt == MIPS_ARCH_OPTION_NATIVE)
+    gcc_unreachable ();
+
+  /* 'from-abi' selects the most compatible architecture for the
+     given ABI: MIPS I for 32-bit ABIs and MIPS III for 64-bit
+     ABIs.  For the EABIs, we have to decide whether we're using
+     the 32-bit or 64-bit version.  */
+  if (opt == MIPS_ARCH_OPTION_FROM_ABI)
+    {
+      int isa = (ABI_NEEDS_32BIT_REGS ? 1
+		 : ABI_NEEDS_64BIT_REGS ? 3
+		 : (TARGET_64BIT ? 3 : 1));
+      return mips_cpu_info_from_isa (isa);
+    }
+
+  /* Any other value is an index into the CPU table.  */
+  return &mips_cpu_info_table[opt];
+}
+
+/* Return a default mips_cpu_info entry, given that no -march= option
+   was explicitly specified.  */
+
+static const struct mips_cpu_info *
+mips_default_arch (void)
+{
+#if defined (MIPS_CPU_STRING_DEFAULT)
+  /* A default CPU name was baked in (presumably at configure time);
+     look it up in the CPU table.  */
+  unsigned int i;
+  for (i = 0; i < ARRAY_SIZE (mips_cpu_info_table); i++)
+    if (strcmp (mips_cpu_info_table[i].name, MIPS_CPU_STRING_DEFAULT) == 0)
+      return mips_cpu_info_table + i;
+  /* The built-in default must name a known CPU.  */
+  gcc_unreachable ();
+#elif defined (MIPS_ISA_DEFAULT)
+  /* A default ISA level was baked in instead of a CPU name.  */
+  return mips_cpu_info_from_isa (MIPS_ISA_DEFAULT);
+#else
+  /* 'from-abi' makes a good default: you get whatever the ABI
+     requires.  */
+  return mips_cpu_info_from_opt (MIPS_ARCH_OPTION_FROM_ABI);
+#endif
+}
+
+/* Set up globals to generate code for the ISA or processor
+   described by INFO.  Null INFO is silently ignored.  */
+
+static void
+mips_set_architecture (const struct mips_cpu_info *info)
+{
+  if (info == 0)
+    return;
+
+  mips_arch_info = info;
+  mips_arch = info->cpu;
+  mips_isa = info->isa;
+}
+
+/* Likewise for tuning: record INFO as the processor to tune for.
+   Null INFO is silently ignored.  */
+
+static void
+mips_set_tune (const struct mips_cpu_info *info)
+{
+  if (info == 0)
+    return;
+
+  mips_tune_info = info;
+  mips_tune = info->cpu;
+}
+
+/* Implement TARGET_OPTION_OVERRIDE.  Validate the command-line options,
+   derive the architecture/tuning/register-size settings, save the base
+   option state used by mips_set_compression_mode, and register the
+   second machine-reorg pass.  */
+
+static void
+mips_option_override (void)
+{
+  int i, start, regno, mode;
+
+  /* -mipsN options are recorded as an index into the CPU table.  */
+  if (global_options_set.x_mips_isa_option)
+    mips_isa_option_info = &mips_cpu_info_table[mips_isa_option];
+
+#ifdef SUBTARGET_OVERRIDE_OPTIONS
+  SUBTARGET_OVERRIDE_OPTIONS;
+#endif
+
+  /* MIPS16 and microMIPS cannot coexist.  */
+  if (TARGET_MICROMIPS && TARGET_MIPS16)
+    error ("unsupported combination: %s", "-mips16 -mmicromips");
+
+  /* Save the base compression state and process flags as though we
+     were generating uncompressed code.  */
+  mips_base_compression_flags = TARGET_COMPRESSION;
+  target_flags &= ~TARGET_COMPRESSION;
+
+  /* -mno-float overrides -mhard-float and -msoft-float.  */
+  if (TARGET_NO_FLOAT)
+    {
+      target_flags |= MASK_SOFT_FLOAT_ABI;
+      target_flags_explicit |= MASK_SOFT_FLOAT_ABI;
+    }
+
+  if (TARGET_FLIP_MIPS16)
+    TARGET_INTERLINK_COMPRESSED = 1;
+
+  /* Set the small data limit.  */
+  mips_small_data_threshold = (global_options_set.x_g_switch_value
+			       ? g_switch_value
+			       : MIPS_DEFAULT_GVALUE);
+
+  /* The following code determines the architecture and register size.
+     Similar code was added to GAS 2.14 (see tc-mips.c:md_after_parse_args()).
+     The GAS and GCC code should be kept in sync as much as possible.  */
+
+  if (global_options_set.x_mips_arch_option)
+    mips_set_architecture (mips_cpu_info_from_opt (mips_arch_option));
+
+  if (mips_isa_option_info != 0)
+    {
+      if (mips_arch_info == 0)
+	mips_set_architecture (mips_isa_option_info);
+      else if (mips_arch_info->isa != mips_isa_option_info->isa)
+	error ("%<-%s%> conflicts with the other architecture options, "
+	       "which specify a %s processor",
+	       mips_isa_option_info->name,
+	       mips_cpu_info_from_isa (mips_arch_info->isa)->name);
+    }
+
+  if (mips_arch_info == 0)
+    mips_set_architecture (mips_default_arch ());
+
+  if (ABI_NEEDS_64BIT_REGS && !ISA_HAS_64BIT_REGS)
+    error ("%<-march=%s%> is not compatible with the selected ABI",
+	   mips_arch_info->name);
+
+  /* Optimize for mips_arch, unless -mtune selects a different processor.  */
+  if (global_options_set.x_mips_tune_option)
+    mips_set_tune (mips_cpu_info_from_opt (mips_tune_option));
+
+  if (mips_tune_info == 0)
+    mips_set_tune (mips_arch_info);
+
+  if ((target_flags_explicit & MASK_64BIT) != 0)
+    {
+      /* The user specified the size of the integer registers.  Make sure
+	 it agrees with the ABI and ISA.  */
+      if (TARGET_64BIT && !ISA_HAS_64BIT_REGS)
+	error ("%<-mgp64%> used with a 32-bit processor");
+      else if (!TARGET_64BIT && ABI_NEEDS_64BIT_REGS)
+	error ("%<-mgp32%> used with a 64-bit ABI");
+      else if (TARGET_64BIT && ABI_NEEDS_32BIT_REGS)
+	error ("%<-mgp64%> used with a 32-bit ABI");
+    }
+  else
+    {
+      /* Infer the integer register size from the ABI and processor.
+	 Restrict ourselves to 32-bit registers if that's all the
+	 processor has, or if the ABI cannot handle 64-bit registers.  */
+      if (ABI_NEEDS_32BIT_REGS || !ISA_HAS_64BIT_REGS)
+	target_flags &= ~MASK_64BIT;
+      else
+	target_flags |= MASK_64BIT;
+    }
+
+  if ((target_flags_explicit & MASK_FLOAT64) != 0)
+    {
+      if (TARGET_SINGLE_FLOAT && TARGET_FLOAT64)
+	error ("unsupported combination: %s", "-mfp64 -msingle-float");
+      else if (TARGET_64BIT && TARGET_DOUBLE_FLOAT && !TARGET_FLOAT64)
+	error ("unsupported combination: %s", "-mgp64 -mfp32 -mdouble-float");
+      else if (!TARGET_64BIT && TARGET_FLOAT64)
+	{
+	  if (!ISA_HAS_MXHC1)
+	    error ("%<-mgp32%> and %<-mfp64%> can only be combined if"
+		   " the target supports the mfhc1 and mthc1 instructions");
+	  else if (mips_abi != ABI_32)
+	    error ("%<-mgp32%> and %<-mfp64%> can only be combined when using"
+		   " the o32 ABI");
+	}
+    }
+  else
+    {
+      /* -msingle-float selects 32-bit float registers.  Otherwise the
+	 float registers should be the same size as the integer ones.  */
+      if (TARGET_64BIT && TARGET_DOUBLE_FLOAT)
+	target_flags |= MASK_FLOAT64;
+      else
+	target_flags &= ~MASK_FLOAT64;
+    }
+
+  /* End of code shared with GAS.  */
+
+  /* The R5900 FPU only supports single precision.  */
+  if (TARGET_MIPS5900 && TARGET_HARD_FLOAT_ABI && TARGET_DOUBLE_FLOAT)
+    error ("unsupported combination: %s",
+	   "-march=r5900 -mhard-float -mdouble-float");
+
+  /* If a -mlong* option was given, check that it matches the ABI,
+     otherwise infer the -mlong* setting from the other options.  */
+  if ((target_flags_explicit & MASK_LONG64) != 0)
+    {
+      if (TARGET_LONG64)
+	{
+	  if (mips_abi == ABI_N32)
+	    error ("%qs is incompatible with %qs", "-mabi=n32", "-mlong64");
+	  else if (mips_abi == ABI_32)
+	    error ("%qs is incompatible with %qs", "-mabi=32", "-mlong64");
+	  else if (mips_abi == ABI_O64 && TARGET_ABICALLS)
+	    /* We have traditionally allowed non-abicalls code to use
+	       an LP64 form of o64.  However, it would take a bit more
+	       effort to support the combination of 32-bit GOT entries
+	       and 64-bit pointers, so we treat the abicalls case as
+	       an error.  */
+	    error ("the combination of %qs and %qs is incompatible with %qs",
+		   "-mabi=o64", "-mabicalls", "-mlong64");
+	}
+      else
+	{
+	  if (mips_abi == ABI_64)
+	    error ("%qs is incompatible with %qs", "-mabi=64", "-mlong32");
+	}
+    }
+  else
+    {
+      if ((mips_abi == ABI_EABI && TARGET_64BIT) || mips_abi == ABI_64)
+	target_flags |= MASK_LONG64;
+      else
+	target_flags &= ~MASK_LONG64;
+    }
+
+  if (!TARGET_OLDABI)
+    flag_pcc_struct_return = 0;
+
+  /* Decide which rtx_costs structure to use.  */
+  if (optimize_size)
+    mips_cost = &mips_rtx_cost_optimize_size;
+  else
+    mips_cost = &mips_rtx_cost_data[mips_tune];
+
+  /* If the user hasn't specified a branch cost, use the processor's
+     default.  */
+  if (mips_branch_cost == 0)
+    mips_branch_cost = mips_cost->branch_cost;
+
+  /* If neither -mbranch-likely nor -mno-branch-likely was given
+     on the command line, set MASK_BRANCHLIKELY based on the target
+     architecture and tuning flags.  Annulled delay slots are a
+     size win, so we only consider the processor-specific tuning
+     for !optimize_size.  */
+  if ((target_flags_explicit & MASK_BRANCHLIKELY) == 0)
+    {
+      if (ISA_HAS_BRANCHLIKELY
+	  && (optimize_size
+	      || (mips_tune_info->tune_flags & PTF_AVOID_BRANCHLIKELY) == 0))
+	target_flags |= MASK_BRANCHLIKELY;
+      else
+	target_flags &= ~MASK_BRANCHLIKELY;
+    }
+  else if (TARGET_BRANCHLIKELY && !ISA_HAS_BRANCHLIKELY)
+    warning (0, "the %qs architecture does not support branch-likely"
+	     " instructions", mips_arch_info->name);
+
+  /* If the user hasn't specified -mimadd or -mno-imadd set
+     MASK_IMADD based on the target architecture and tuning
+     flags.  */
+  if ((target_flags_explicit & MASK_IMADD) == 0)
+    {
+      if (ISA_HAS_MADD_MSUB &&
+	  (mips_tune_info->tune_flags & PTF_AVOID_IMADD) == 0)
+	target_flags |= MASK_IMADD;
+      else
+	target_flags &= ~MASK_IMADD;
+    }
+  else if (TARGET_IMADD && !ISA_HAS_MADD_MSUB)
+    warning (0, "the %qs architecture does not support madd or msub"
+	     " instructions", mips_arch_info->name);
+
+  /* The effect of -mabicalls isn't defined for the EABI.  */
+  if (mips_abi == ABI_EABI && TARGET_ABICALLS)
+    {
+      error ("unsupported combination: %s", "-mabicalls -mabi=eabi");
+      target_flags &= ~MASK_ABICALLS;
+    }
+
+  /* PIC requires -mabicalls.  */
+  if (flag_pic)
+    {
+      if (mips_abi == ABI_EABI)
+	error ("cannot generate position-independent code for %qs",
+	       "-mabi=eabi");
+      else if (!TARGET_ABICALLS)
+	error ("position-independent code requires %qs", "-mabicalls");
+    }
+
+  if (TARGET_ABICALLS_PIC2)
+    /* We need to set flag_pic for executables as well as DSOs
+       because we may reference symbols that are not defined in
+       the final executable.  (MIPS does not use things like
+       copy relocs, for example.)
+
+       There is a body of code that uses __PIC__ to distinguish
+       between -mabicalls and -mno-abicalls code.  The non-__PIC__
+       variant is usually appropriate for TARGET_ABICALLS_PIC0, as
+       long as any indirect jumps use $25.  */
+    flag_pic = 1;
+
+  /* -mvr4130-align is a "speed over size" optimization: it usually produces
+     faster code, but at the expense of more nops.  Enable it at -O3 and
+     above.  */
+  if (optimize > 2 && (target_flags_explicit & MASK_VR4130_ALIGN) == 0)
+    target_flags |= MASK_VR4130_ALIGN;
+
+  /* Prefer a call to memcpy over inline code when optimizing for size,
+     though see MOVE_RATIO in mips.h.  */
+  if (optimize_size && (target_flags_explicit & MASK_MEMCPY) == 0)
+    target_flags |= MASK_MEMCPY;
+
+  /* If we have a nonzero small-data limit, check that the -mgpopt
+     setting is consistent with the other target flags.  */
+  if (mips_small_data_threshold > 0)
+    {
+      if (!TARGET_GPOPT)
+	{
+	  if (!TARGET_EXPLICIT_RELOCS)
+	    error ("%<-mno-gpopt%> needs %<-mexplicit-relocs%>");
+
+	  TARGET_LOCAL_SDATA = false;
+	  TARGET_EXTERN_SDATA = false;
+	}
+      else
+	{
+	  if (TARGET_VXWORKS_RTP)
+	    warning (0, "cannot use small-data accesses for %qs", "-mrtp");
+
+	  if (TARGET_ABICALLS)
+	    warning (0, "cannot use small-data accesses for %qs",
+		     "-mabicalls");
+	}
+    }
+
+  /* Pre-IEEE 754-2008 MIPS hardware has a quirky almost-IEEE format
+     for all its floating point.  */
+  if (mips_nan != MIPS_IEEE_754_2008)
+    {
+      REAL_MODE_FORMAT (SFmode) = &mips_single_format;
+      REAL_MODE_FORMAT (DFmode) = &mips_double_format;
+      REAL_MODE_FORMAT (TFmode) = &mips_quad_format;
+    }
+
+  /* Make sure that the user didn't turn off paired single support when
+     MIPS-3D support is requested.  */
+  if (TARGET_MIPS3D
+      && (target_flags_explicit & MASK_PAIRED_SINGLE_FLOAT)
+      && !TARGET_PAIRED_SINGLE_FLOAT)
+    error ("%<-mips3d%> requires %<-mpaired-single%>");
+
+  /* If TARGET_MIPS3D, enable MASK_PAIRED_SINGLE_FLOAT.  */
+  if (TARGET_MIPS3D)
+    target_flags |= MASK_PAIRED_SINGLE_FLOAT;
+
+  /* Make sure that when TARGET_PAIRED_SINGLE_FLOAT is true, TARGET_FLOAT64
+     and TARGET_HARD_FLOAT_ABI are both true.  */
+  if (TARGET_PAIRED_SINGLE_FLOAT && !(TARGET_FLOAT64 && TARGET_HARD_FLOAT_ABI))
+    {
+      error ("%qs must be used with %qs",
+	     TARGET_MIPS3D ? "-mips3d" : "-mpaired-single",
+	     TARGET_HARD_FLOAT_ABI ? "-mfp64" : "-mhard-float");
+      target_flags &= ~MASK_PAIRED_SINGLE_FLOAT;
+      TARGET_MIPS3D = 0;
+    }
+
+  /* Make sure that -mpaired-single is only used on ISAs that support it.
+     We must disable it otherwise since it relies on other ISA properties
+     like ISA_HAS_8CC having their normal values.  */
+  if (TARGET_PAIRED_SINGLE_FLOAT && !ISA_HAS_PAIRED_SINGLE)
+    {
+      error ("the %qs architecture does not support paired-single"
+	     " instructions", mips_arch_info->name);
+      target_flags &= ~MASK_PAIRED_SINGLE_FLOAT;
+      TARGET_MIPS3D = 0;
+    }
+
+  if (mips_r10k_cache_barrier != R10K_CACHE_BARRIER_NONE
+      && !TARGET_CACHE_BUILTIN)
+    {
+      error ("%qs requires a target that provides the %qs instruction",
+	     "-mr10k-cache-barrier", "cache");
+      mips_r10k_cache_barrier = R10K_CACHE_BARRIER_NONE;
+    }
+
+  /* If TARGET_DSPR2, enable TARGET_DSP.  */
+  if (TARGET_DSPR2)
+    TARGET_DSP = true;
+
+  /* .eh_frame addresses should be the same width as a C pointer.
+     Most MIPS ABIs support only one pointer size, so the assembler
+     will usually know exactly how big an .eh_frame address is.
+
+     Unfortunately, this is not true of the 64-bit EABI.  The ABI was
+     originally defined to use 64-bit pointers (i.e. it is LP64), and
+     this is still the default mode.  However, we also support an n32-like
+     ILP32 mode, which is selected by -mlong32.  The problem is that the
+     assembler has traditionally not had an -mlong option, so it has
+     traditionally not known whether we're using the ILP32 or LP64 form.
+
+     As it happens, gas versions up to and including 2.19 use _32-bit_
+     addresses for EABI64 .cfi_* directives.  This is wrong for the
+     default LP64 mode, so we can't use the directives by default.
+     Moreover, since gas's current behavior is at odds with gcc's
+     default behavior, it seems unwise to rely on future versions
+     of gas behaving the same way.  We therefore avoid using .cfi
+     directives for -mlong32 as well.  */
+  if (mips_abi == ABI_EABI && TARGET_64BIT)
+    flag_dwarf2_cfi_asm = 0;
+
+  /* .cfi_* directives generate a read-only section, so fall back on
+     manual .eh_frame creation if we need the section to be writable.  */
+  if (TARGET_WRITABLE_EH_FRAME)
+    flag_dwarf2_cfi_asm = 0;
+
+  mips_init_print_operand_punct ();
+
+  /* Set up array to map GCC register number to debug register number.
+     Ignore the special purpose register numbers.  */
+
+  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+    {
+      mips_dbx_regno[i] = IGNORED_DWARF_REGNUM;
+      if (GP_REG_P (i) || FP_REG_P (i) || ALL_COP_REG_P (i))
+	mips_dwarf_regno[i] = i;
+      else
+	mips_dwarf_regno[i] = INVALID_REGNUM;
+    }
+
+  start = GP_DBX_FIRST - GP_REG_FIRST;
+  for (i = GP_REG_FIRST; i <= GP_REG_LAST; i++)
+    mips_dbx_regno[i] = i + start;
+
+  start = FP_DBX_FIRST - FP_REG_FIRST;
+  for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
+    mips_dbx_regno[i] = i + start;
+
+  /* Accumulator debug registers use big-endian ordering.  */
+  mips_dbx_regno[HI_REGNUM] = MD_DBX_FIRST + 0;
+  mips_dbx_regno[LO_REGNUM] = MD_DBX_FIRST + 1;
+  mips_dwarf_regno[HI_REGNUM] = MD_REG_FIRST + 0;
+  mips_dwarf_regno[LO_REGNUM] = MD_REG_FIRST + 1;
+  for (i = DSP_ACC_REG_FIRST; i <= DSP_ACC_REG_LAST; i += 2)
+    {
+      mips_dwarf_regno[i + TARGET_LITTLE_ENDIAN] = i;
+      mips_dwarf_regno[i + TARGET_BIG_ENDIAN] = i + 1;
+    }
+
+  /* Set up mips_hard_regno_mode_ok.  */
+  for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
+    for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+      mips_hard_regno_mode_ok[mode][regno]
+	= mips_hard_regno_mode_ok_p (regno, (enum machine_mode) mode);
+
+  /* Function to allocate machine-dependent function status.  */
+  init_machine_status = &mips_init_machine_status;
+
+  /* Default to working around R4000 errata only if the processor
+     was selected explicitly.  */
+  if ((target_flags_explicit & MASK_FIX_R4000) == 0
+      && strcmp (mips_arch_info->name, "r4000") == 0)
+    target_flags |= MASK_FIX_R4000;
+
+  /* Default to working around R4400 errata only if the processor
+     was selected explicitly.  */
+  if ((target_flags_explicit & MASK_FIX_R4400) == 0
+      && strcmp (mips_arch_info->name, "r4400") == 0)
+    target_flags |= MASK_FIX_R4400;
+
+  /* Default to working around R10000 errata only if the processor
+     was selected explicitly.  */
+  if ((target_flags_explicit & MASK_FIX_R10000) == 0
+      && strcmp (mips_arch_info->name, "r10000") == 0)
+    target_flags |= MASK_FIX_R10000;
+
+  /* Make sure that branch-likely instructions are available when using
+     -mfix-r10000.  The instructions are not available if either:
+
+	1. -mno-branch-likely was passed.
+	2. The selected ISA does not support branch-likely and
+	   the command line does not include -mbranch-likely.  */
+  if (TARGET_FIX_R10000
+      && ((target_flags_explicit & MASK_BRANCHLIKELY) == 0
+	  ? !ISA_HAS_BRANCHLIKELY
+	  : !TARGET_BRANCHLIKELY))
+    sorry ("%qs requires branch-likely instructions", "-mfix-r10000");
+
+  if (TARGET_SYNCI && !ISA_HAS_SYNCI)
+    {
+      warning (0, "the %qs architecture does not support the synci "
+	       "instruction", mips_arch_info->name);
+      target_flags &= ~MASK_SYNCI;
+    }
+
+  /* Only optimize PIC indirect calls if they are actually required.  */
+  if (!TARGET_USE_GOT || !TARGET_EXPLICIT_RELOCS)
+    target_flags &= ~MASK_RELAX_PIC_CALLS;
+
+  /* Save base state of options.  These are the per-function defaults
+     that mips_set_compression_mode restores before switching modes.  */
+  mips_base_target_flags = target_flags;
+  mips_base_schedule_insns = flag_schedule_insns;
+  mips_base_reorder_blocks_and_partition = flag_reorder_blocks_and_partition;
+  mips_base_move_loop_invariants = flag_move_loop_invariants;
+  mips_base_align_loops = align_loops;
+  mips_base_align_jumps = align_jumps;
+  mips_base_align_functions = align_functions;
+
+  /* Now select the ISA mode.
+
+     Do all CPP-sensitive stuff in uncompressed mode; we'll switch modes
+     later if required.  */
+  mips_set_compression_mode (0);
+
+  /* We register a second machine specific reorg pass after delay slot
+     filling.  Registering the pass must be done at start up.  It's
+     convenient to do it here.  */
+  opt_pass *new_pass = make_pass_mips_machine_reorg2 (g);
+  struct register_pass_info insert_pass_mips_machine_reorg2 =
+    {
+      new_pass,		/* pass */
+      "dbr",			/* reference_pass_name */
+      1,			/* ref_pass_instance_number */
+      PASS_POS_INSERT_AFTER	/* po_op */
+    };
+  register_pass (&insert_pass_mips_machine_reorg2);
+
+  /* The R5900's hard-float single-precision format is the same as the
+     SPU port's, so reuse that descriptor.  */
+  if (TARGET_HARD_FLOAT_ABI && TARGET_MIPS5900)
+    REAL_MODE_FORMAT (SFmode) = &spu_single_format;
+}
+
+/* Swap the register information for registers I and I + 1, which
+   currently have the wrong endianness.  Note that the registers'
+   fixedness and call-clobberedness might have been set on the
+   command line.  */
+
+static void
+mips_swap_registers (unsigned int i)
+{
+  unsigned int j = i + 1;
+  int tmpi;
+  const char *tmps;
+
+  /* Exchange each per-register table entry using explicit temporaries
+     rather than helper macros.  */
+  tmpi = fixed_regs[i];
+  fixed_regs[i] = fixed_regs[j];
+  fixed_regs[j] = tmpi;
+
+  tmpi = call_used_regs[i];
+  call_used_regs[i] = call_used_regs[j];
+  call_used_regs[j] = tmpi;
+
+  tmpi = call_really_used_regs[i];
+  call_really_used_regs[i] = call_really_used_regs[j];
+  call_really_used_regs[j] = tmpi;
+
+  tmps = reg_names[i];
+  reg_names[i] = reg_names[j];
+  reg_names[j] = tmps;
+}
+
+/* Implement TARGET_CONDITIONAL_REGISTER_USAGE.  Adjust the global
+   register tables (accessible_reg_set, fixed_regs, call_used_regs,
+   reg_names, ...) to match the selected ISA, float support, ABI and
+   endianness.  */
+
+static void
+mips_conditional_register_usage (void)
+{
+
+  if (ISA_HAS_DSP)
+    {
+      /* These DSP control register fields are global.  */
+      global_regs[CCDSP_PO_REGNUM] = 1;
+      global_regs[CCDSP_SC_REGNUM] = 1;
+    }
+  else
+    /* No DSP: make the DSP accumulators inaccessible altogether.  */
+    AND_COMPL_HARD_REG_SET (accessible_reg_set,
+			    reg_class_contents[(int) DSP_ACC_REGS]);
+
+  if (!TARGET_HARD_FLOAT)
+    {
+      /* Soft float: neither the FP registers nor the FP condition
+	 registers are usable.  */
+      AND_COMPL_HARD_REG_SET (accessible_reg_set,
+			      reg_class_contents[(int) FP_REGS]);
+      AND_COMPL_HARD_REG_SET (accessible_reg_set,
+			      reg_class_contents[(int) ST_REGS]);
+    }
+  else if (!ISA_HAS_8CC)
+    {
+      /* We only have a single condition-code register.  We implement
+	 this by fixing all the condition-code registers and generating
+	 RTL that refers directly to ST_REG_FIRST.  */
+      AND_COMPL_HARD_REG_SET (accessible_reg_set,
+			      reg_class_contents[(int) ST_REGS]);
+      SET_HARD_REG_BIT (accessible_reg_set, FPSW_REGNUM);
+      fixed_regs[FPSW_REGNUM] = call_used_regs[FPSW_REGNUM] = 1;
+    }
+  if (TARGET_MIPS16)
+    {
+      /* In MIPS16 mode, we prohibit the unused $s registers, since they
+	 are call-saved, and saving them via a MIPS16 register would
+	 probably waste more time than just reloading the value.
+
+	 We permit the $t temporary registers when optimizing for speed
+	 but not when optimizing for space because using them results in
+	 code that is larger (but faster) than not using them.  We do
+	 allow $24 (t8) because it is used in CMP and CMPI instructions
+	 and $25 (t9) because it is used as the function call address in
+	 SVR4 PIC code.  */
+
+      fixed_regs[18] = call_used_regs[18] = 1;
+      fixed_regs[19] = call_used_regs[19] = 1;
+      fixed_regs[20] = call_used_regs[20] = 1;
+      fixed_regs[21] = call_used_regs[21] = 1;
+      fixed_regs[22] = call_used_regs[22] = 1;
+      fixed_regs[23] = call_used_regs[23] = 1;
+      fixed_regs[26] = call_used_regs[26] = 1;
+      fixed_regs[27] = call_used_regs[27] = 1;
+      fixed_regs[30] = call_used_regs[30] = 1;
+      if (optimize_size)
+	{
+	  /* $t0-$t7 ($8-$15); see the size/speed trade-off above.  */
+	  fixed_regs[8] = call_used_regs[8] = 1;
+	  fixed_regs[9] = call_used_regs[9] = 1;
+	  fixed_regs[10] = call_used_regs[10] = 1;
+	  fixed_regs[11] = call_used_regs[11] = 1;
+	  fixed_regs[12] = call_used_regs[12] = 1;
+	  fixed_regs[13] = call_used_regs[13] = 1;
+	  fixed_regs[14] = call_used_regs[14] = 1;
+	  fixed_regs[15] = call_used_regs[15] = 1;
+	}
+
+      /* Do not allow HI and LO to be treated as register operands.
+	 There are no MTHI or MTLO instructions (or any real need
+	 for them) and one-way registers cannot easily be reloaded.  */
+      AND_COMPL_HARD_REG_SET (operand_reg_set,
+			      reg_class_contents[(int) MD_REGS]);
+    }
+  /* $f20-$f23 are call-clobbered for n64.  */
+  if (mips_abi == ABI_64)
+    {
+      int regno;
+      for (regno = FP_REG_FIRST + 20; regno < FP_REG_FIRST + 24; regno++)
+	call_really_used_regs[regno] = call_used_regs[regno] = 1;
+    }
+  /* Odd registers in the range $f21-$f31 (inclusive) are call-clobbered
+     for n32.  */
+  if (mips_abi == ABI_N32)
+    {
+      int regno;
+      for (regno = FP_REG_FIRST + 21; regno <= FP_REG_FIRST + 31; regno+=2)
+	call_really_used_regs[regno] = call_used_regs[regno] = 1;
+    }
+  /* Make sure that double-register accumulator values are correctly
+     ordered for the current endianness.  */
+  if (TARGET_LITTLE_ENDIAN)
+    {
+      unsigned int regno;
+
+      mips_swap_registers (MD_REG_FIRST);
+      for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno += 2)
+	mips_swap_registers (regno);
+    }
+}
+
+/* When generating MIPS16 code, we want to allocate $24 (T_REG) before
+   other registers for instructions for which it is possible.  This
+   encourages the compiler to use CMP in cases where an XOR would
+   require some register shuffling.  */
+
+void
+mips_order_regs_for_local_alloc (void)
+{
+  int regno;
+
+  /* Start from the identity ordering.  */
+  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+    reg_alloc_order[regno] = regno;
+
+  if (!TARGET_MIPS16)
+    return;
+
+  /* Swap $0 and $24 in the allocation order.  It really doesn't
+     matter where we put register 0, since it is a fixed register
+     anyhow.  */
+  reg_alloc_order[0] = 24;
+  reg_alloc_order[24] = 0;
+}
+
+/* Implement EH_USES.  */
+
+bool
+mips_eh_uses (unsigned int regno)
+{
+  if (!reload_completed || TARGET_ABSOLUTE_JUMPS)
+    return false;
+
+  /* We need to force certain registers to be live in order to handle
+     PIC long branches correctly.  See mips_must_initialize_gp_p for
+     details.  */
+  if (mips_cfun_has_cprestore_slot_p ())
+    return regno == CPRESTORE_SLOT_REGNUM;
+
+  return cfun->machine->global_pointer == regno;
+}
+
+/* Implement EPILOGUE_USES.  */
+
+bool
+mips_epilogue_uses (unsigned int regno)
+{
+  /* The epilogue uses the return address register.  Note that in the
+     case of sibcalls, the values "used by the epilogue" are considered
+     live at the start of the called function.
+
+     If using a GOT, the epilogue also uses GOT_VERSION_REGNUM; see the
+     comment above load_call<mode> for details.
+
+     Finally, an interrupt handler must preserve some registers that
+     are ordinarily call-clobbered.  */
+  return (regno == RETURN_ADDR_REGNUM
+	  || (TARGET_USE_GOT && regno == GOT_VERSION_REGNUM)
+	  || (cfun->machine->interrupt_handler_p
+	      && mips_interrupt_extra_call_saved_reg_p (regno)));
+}
+
+/* A for_each_rtx callback.  Stop the search if *X is an AT register.  */
+
+static int
+mips_at_reg_p (rtx *x, void *data ATTRIBUTE_UNUSED)
+{
+  rtx op = *x;
+
+  return REG_P (op) && REGNO (op) == AT_REGNUM;
+}
+
+/* Return true if INSN needs to be wrapped in ".set noat".
+   INSN has NOPERANDS operands, stored in OPVEC.  */
+
+static bool
+mips_need_noat_wrapper_p (rtx insn, rtx *opvec, int noperands)
+{
+  int i;
+
+  /* Only scan the operands of recognized instructions.  */
+  if (recog_memoized (insn) < 0)
+    return false;
+
+  for (i = 0; i < noperands; i++)
+    if (for_each_rtx (&opvec[i], mips_at_reg_p, NULL))
+      return true;
+
+  return false;
+}
+
+/* Implement FINAL_PRESCAN_INSN.  */
+
+void
+mips_final_prescan_insn (rtx insn, rtx *opvec, int noperands)
+{
+  bool need_noat = mips_need_noat_wrapper_p (insn, opvec, noperands);
+
+  /* Open a ".set noat" region before an insn that references AT.  */
+  if (need_noat)
+    mips_push_asm_switch (&mips_noat);
+}
+
+/* Implement TARGET_ASM_FINAL_POSTSCAN_INSN.  */
+
+static void
+mips_final_postscan_insn (FILE *file ATTRIBUTE_UNUSED, rtx insn,
+			  rtx *opvec, int noperands)
+{
+  bool need_noat = mips_need_noat_wrapper_p (insn, opvec, noperands);
+
+  /* Close the ".set noat" region opened by mips_final_prescan_insn.  */
+  if (need_noat)
+    mips_pop_asm_switch (&mips_noat);
+}
+
+/* Return the function that is used to expand the <u>mulsidi3 pattern.
+   EXT_CODE is the code of the extension used.  Return NULL if widening
+   multiplication shouldn't be used.  */
+
+mulsidi3_gen_fn
+mips_mulsidi3_gen_fn (enum rtx_code ext_code)
+{
+  bool signed_p;
+
+  signed_p = ext_code == SIGN_EXTEND;
+  if (TARGET_64BIT)
+    {
+      /* Don't use widening multiplication with MULT when we have DMUL.  Even
+	 with the extension of its input operands DMUL is faster.  Note that
+	 the extension is not needed for signed multiplication.  In order to
+	 ensure that we always remove the redundant sign-extension in this
+	 case we still expand mulsidi3 for DMUL.  */
+      if (ISA_HAS_DMUL3)
+	return signed_p ? gen_mulsidi3_64bit_dmul : NULL;
+      if (TARGET_MIPS16)
+	return (signed_p
+		? gen_mulsidi3_64bit_mips16
+		: gen_umulsidi3_64bit_mips16);
+      /* NOTE(review): with the R4000 workaround enabled, widening
+	 multiplication is not used at all in 64-bit mode.  */
+      if (TARGET_FIX_R4000)
+	return NULL;
+      return signed_p ? gen_mulsidi3_64bit : gen_umulsidi3_64bit;
+    }
+  else
+    {
+      if (TARGET_MIPS16)
+	return (signed_p
+		? gen_mulsidi3_32bit_mips16
+		: gen_umulsidi3_32bit_mips16);
+      /* In 32-bit mode the R4000 workaround has dedicated patterns,
+	 unless the DSP ASE is available.  */
+      if (TARGET_FIX_R4000 && !ISA_HAS_DSP)
+	return signed_p ? gen_mulsidi3_32bit_r4000 : gen_umulsidi3_32bit_r4000;
+      return signed_p ? gen_mulsidi3_32bit : gen_umulsidi3_32bit;
+    }
+}
+
+/* Return true if PATTERN matches the kind of instruction generated by
+   umips_build_save_restore.  SAVE_P is true for store.  */
+
+bool
+umips_save_restore_pattern_p (bool save_p, rtx pattern)
+{
+  int n;
+  unsigned int i;
+  HOST_WIDE_INT first_offset = 0;
+  rtx first_base = 0;
+  unsigned int regmask = 0;
+
+  /* Walk every element of the PARALLEL and collect the set of saved
+     (or restored) registers in REGMASK.  */
+  for (n = 0; n < XVECLEN (pattern, 0); n++)
+    {
+      rtx set, reg, mem, this_base;
+      HOST_WIDE_INT this_offset;
+
+      /* Check that we have a SET.  */
+      set = XVECEXP (pattern, 0, n);
+      if (GET_CODE (set) != SET)
+	return false;
+
+      /* Check that the SET is a load (if restoring) or a store
+	 (if saving).  */
+      mem = save_p ? SET_DEST (set) : SET_SRC (set);
+      if (!MEM_P (mem) || MEM_VOLATILE_P (mem))
+	return false;
+
+      /* Check that the address is the sum of base and a possibly-zero
+	 constant offset.  Determine if the offset is in range.  */
+      mips_split_plus (XEXP (mem, 0), &this_base, &this_offset);
+      if (!REG_P (this_base))
+	return false;
+
+      if (n == 0)
+	{
+	  /* Only the first slot's offset needs a range check; later
+	     slots are required to follow consecutively.  */
+	  if (!UMIPS_12BIT_OFFSET_P (this_offset))
+	    return false;
+	  first_base = this_base;
+	  first_offset = this_offset;
+	}
+      else
+	{
+	  /* Check that the save slots are consecutive.  */
+	  if (REGNO (this_base) != REGNO (first_base)
+	      || this_offset != first_offset + UNITS_PER_WORD * n)
+	    return false;
+	}
+
+      /* Check that SET's other operand is a register.  */
+      reg = save_p ? SET_SRC (set) : SET_DEST (set);
+      if (!REG_P (reg))
+	return false;
+
+      regmask |= 1 << REGNO (reg);
+    }
+
+  /* Finally, the collected registers must form one of the register
+     sets that LWM/SWM can encode (umips_swm_mask).  */
+  for (i = 0; i < ARRAY_SIZE (umips_swm_mask); i++)
+    if (regmask == umips_swm_mask[i])
+      return true;
+
+  return false;
+}
+
+/* Return the assembly instruction for microMIPS LWM or SWM.
+   SAVE_P and PATTERN are as for umips_save_restore_pattern_p.  */
+
+const char *
+umips_output_save_restore (bool save_p, rtx pattern)
+{
+  static char buffer[300];
+  char *s;
+  int n;
+  HOST_WIDE_INT offset;
+  rtx base, mem, set, last_set, last_reg;
+
+  /* Parse the pattern.  */
+  gcc_assert (umips_save_restore_pattern_p (save_p, pattern));
+
+  s = strcpy (buffer, save_p ? "swm\t" : "lwm\t");
+  s += strlen (s);
+  n = XVECLEN (pattern, 0);
+
+  /* Extract the base register and offset of the first slot.  */
+  set = XVECEXP (pattern, 0, 0);
+  mem = save_p ? SET_DEST (set) : SET_SRC (set);
+  mips_split_plus (XEXP (mem, 0), &base, &offset);
+
+  last_set = XVECEXP (pattern, 0, n - 1);
+  last_reg = save_p ? SET_SRC (last_set) : SET_DEST (last_set);
+
+  /* $31 is printed separately at the end of the register list, so do
+     not count it towards the run of registers printed below.  */
+  if (REGNO (last_reg) == 31)
+    n--;
+
+  gcc_assert (n <= 9);
+  if (n == 0)
+    ;
+  else if (n == 1)
+    s += sprintf (s, "%s,", reg_names[16]);
+  else if (n < 9)
+    s += sprintf (s, "%s-%s,", reg_names[16], reg_names[15 + n]);
+  else if (n == 9)
+    s += sprintf (s, "%s-%s,%s,", reg_names[16], reg_names[23],
+		  reg_names[30]);
+
+  if (REGNO (last_reg) == 31)
+    s += sprintf (s, "%s,", reg_names[31]);
+
+  /* Finish with the "offset(base)" address operand.  */
+  s += sprintf (s, "%d(%s)", (int)offset, reg_names[REGNO (base)]);
+  return buffer;
+}
+
+/* Return true if MEM1 and MEM2 use the same base register, and the
+   offset of MEM2 equals the offset of MEM1 plus 4.  FIRST_REG is the
+   register into (from) which the contents of MEM1 will be loaded
+   (stored), depending on the value of LOAD_P.
+   SWAP_P is true when the 1st and 2nd instructions are swapped.  */
+
+static bool
+umips_load_store_pair_p_1 (bool load_p, bool swap_p,
+			   rtx first_reg, rtx mem1, rtx mem2)
+{
+  rtx base1, base2;
+  HOST_WIDE_INT offset1, offset2;
+
+  if (!MEM_P (mem1) || !MEM_P (mem2))
+    return false;
+
+  /* Decompose both addresses into base register + constant offset.  */
+  mips_split_plus (XEXP (mem1, 0), &base1, &offset1);
+  mips_split_plus (XEXP (mem2, 0), &base2, &offset2);
+
+  if (!REG_P (base1) || !rtx_equal_p (base1, base2))
+    return false;
+
+  /* Avoid invalid load pair instructions.  */
+  if (load_p && REGNO (first_reg) == REGNO (base1))
+    return false;
+
+  /* We must avoid this case for anti-dependence.
+     Ex:  lw $3, 4($3)
+	  lw $2, 0($3)
+     first_reg is $2, but the base is $3.  */
+  if (load_p
+      && swap_p
+      && REGNO (first_reg) + 1 == REGNO (base1))
+    return false;
+
+  /* The two slots must be adjacent words...  */
+  if (offset2 != offset1 + 4)
+    return false;
+
+  /* ...and the first slot must be reachable with a 12-bit offset.  */
+  if (!UMIPS_12BIT_OFFSET_P (offset1))
+    return false;
+
+  return true;
+}
+
+/* OPERANDS describes the operands to a pair of SETs, in the order
+   dest1, src1, dest2, src2.  Return true if the operands can be used
+   in an LWP or SWP instruction; LOAD_P says which.  */
+
+bool
+umips_load_store_pair_p (bool load_p, rtx *operands)
+{
+  rtx reg1, reg2, mem1, mem2;
+  int r;
+
+  /* For a load, the registers are the destinations (even indices);
+     for a store, they are the sources (odd indices).  */
+  r = load_p ? 0 : 1;
+  reg1 = operands[r];
+  mem1 = operands[1 - r];
+  reg2 = operands[2 + r];
+  mem2 = operands[3 - r];
+
+  /* LWP/SWP access consecutive registers; try both orderings.  */
+  if (REGNO (reg2) == REGNO (reg1) + 1)
+    return umips_load_store_pair_p_1 (load_p, false, reg1, mem1, mem2);
+
+  if (REGNO (reg1) == REGNO (reg2) + 1)
+    return umips_load_store_pair_p_1 (load_p, true, reg2, mem2, mem1);
+
+  return false;
+}
+
+/* Return the assembly instruction for a microMIPS LWP or SWP in which
+   the first register is REG and the first memory slot is MEM.
+   LOAD_P is true for LWP.  */
+
+static void
+umips_output_load_store_pair_1 (bool load_p, rtx reg, rtx mem)
+{
+  rtx ops[2];
+
+  ops[0] = reg;
+  ops[1] = mem;
+  output_asm_insn (load_p ? "lwp\t%0,%1" : "swp\t%0,%1", ops);
+}
+
+/* Output the assembly instruction for a microMIPS LWP or SWP instruction.
+   LOAD_P and OPERANDS are as for umips_load_store_pair_p.  */
+
+void
+umips_output_load_store_pair (bool load_p, rtx *operands)
+{
+  rtx reg1, reg2, mem1, mem2;
+  int r;
+
+  /* Unpack OPERANDS; see umips_load_store_pair_p for the layout.  */
+  r = load_p ? 0 : 1;
+  reg1 = operands[r];
+  mem1 = operands[1 - r];
+  reg2 = operands[2 + r];
+  mem2 = operands[3 - r];
+
+  if (REGNO (reg2) == REGNO (reg1) + 1)
+    {
+      umips_output_load_store_pair_1 (load_p, reg1, mem1);
+      return;
+    }
+
+  /* Otherwise the pair must be in the swapped order.  */
+  gcc_assert (REGNO (reg1) == REGNO (reg2) + 1);
+  umips_output_load_store_pair_1 (load_p, reg2, mem2);
+}
+
+/* Return true if REG1 and REG2 match the criteria for a movep insn.  */
+
+bool
+umips_movep_target_p (rtx reg1, rtx reg2)
+{
+  int regno1, regno2, pair;
+  unsigned int i;
+  /* Bitmasks of the register pairs that movep accepts.  */
+  static const int match[8] = {
+    0x00000060, /* 5, 6 */
+    0x000000a0, /* 5, 7 */
+    0x000000c0, /* 6, 7 */
+    0x00200010, /* 4, 21 */
+    0x00400010, /* 4, 22 */
+    0x00000030, /* 4, 5 */
+    0x00000050, /* 4, 6 */
+    0x00000090  /* 4, 7 */
+  };
+
+  if (!REG_P (reg1) || !REG_P (reg2))
+    return false;
+
+  regno1 = REGNO (reg1);
+  regno2 = REGNO (reg2);
+  if (!GP_REG_P (regno1) || !GP_REG_P (regno2))
+    return false;
+
+  /* Encode the pair as a two-bit mask and look it up in the table.  */
+  pair = (1 << regno1) | (1 << regno2);
+  for (i = 0; i < ARRAY_SIZE (match); i++)
+    if (pair == match[i])
+      return true;
+
+  return false;
+}
+
+/* Return the size in bytes of the trampoline code, padded to
+   TRAMPOLINE_ALIGNMENT bits.  The static chain pointer and target
+   function address immediately follow.  */
+
+int
+mips_trampoline_code_size (void)
+{
+  /* See mips_trampoline_init for the instruction sequences that these
+     sizes correspond to.  */
+  if (TARGET_USE_PIC_FN_ADDR_REG)
+    return 4 * 4;
+
+  if (ptr_mode == DImode)
+    return 8 * 4;
+
+  if (ISA_HAS_LOAD_DELAY)
+    return 6 * 4;
+
+  return 4 * 4;
+}
+
+/* Implement TARGET_TRAMPOLINE_INIT.  M_TRAMP is the memory block that
+   holds the trampoline, FNDECL is the nested function and CHAIN_VALUE
+   is the static chain to pass to it.  */
+
+static void
+mips_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
+{
+  rtx addr, end_addr, high, low, opcode, mem;
+  rtx trampoline[8];  /* At most 8 instruction words are built below.  */
+  unsigned int i, j;
+  HOST_WIDE_INT end_addr_offset, static_chain_offset, target_function_offset;
+
+  /* Work out the offsets of the pointers from the start of the
+     trampoline code.  */
+  end_addr_offset = mips_trampoline_code_size ();
+  static_chain_offset = end_addr_offset;
+  target_function_offset = static_chain_offset + GET_MODE_SIZE (ptr_mode);
+
+  /* Get pointers to the beginning and end of the code block.  */
+  addr = force_reg (Pmode, XEXP (m_tramp, 0));
+  end_addr = mips_force_binary (Pmode, PLUS, addr, GEN_INT (end_addr_offset));
+
+/* Wrap a raw instruction encoding in an SImode constant.  */
+#define OP(X) gen_int_mode (X, SImode)
+
+  /* Build up the code in TRAMPOLINE.  */
+  i = 0;
+  if (TARGET_USE_PIC_FN_ADDR_REG)
+    {
+      /* $25 contains the address of the trampoline.  Emit code of the form:
+
+	   l[wd]   $1, target_function_offset($25)
+	   l[wd]   $static_chain, static_chain_offset($25)
+	   jr      $1
+	   move    $25,$1.  */
+      trampoline[i++] = OP (MIPS_LOAD_PTR (AT_REGNUM,
+					   target_function_offset,
+					   PIC_FUNCTION_ADDR_REGNUM));
+      trampoline[i++] = OP (MIPS_LOAD_PTR (STATIC_CHAIN_REGNUM,
+					   static_chain_offset,
+					   PIC_FUNCTION_ADDR_REGNUM));
+      trampoline[i++] = OP (MIPS_JR (AT_REGNUM));
+      trampoline[i++] = OP (MIPS_MOVE (PIC_FUNCTION_ADDR_REGNUM, AT_REGNUM));
+    }
+  else if (ptr_mode == DImode)
+    {
+      /* It's too cumbersome to create the full 64-bit address, so let's
+	 instead use:
+
+	   move    $1, $31
+	   bal     1f
+	   nop
+	 1:l[wd]   $25, target_function_offset - 12($31)
+	   l[wd]   $static_chain, static_chain_offset - 12($31)
+	   jr      $25
+	   move    $31, $1
+
+	 where 12 is the offset of "1:" from the start of the code block.  */
+      trampoline[i++] = OP (MIPS_MOVE (AT_REGNUM, RETURN_ADDR_REGNUM));
+      trampoline[i++] = OP (MIPS_BAL (1));
+      trampoline[i++] = OP (MIPS_NOP);
+      trampoline[i++] = OP (MIPS_LOAD_PTR (PIC_FUNCTION_ADDR_REGNUM,
+					   target_function_offset - 12,
+					   RETURN_ADDR_REGNUM));
+      trampoline[i++] = OP (MIPS_LOAD_PTR (STATIC_CHAIN_REGNUM,
+					   static_chain_offset - 12,
+					   RETURN_ADDR_REGNUM));
+      trampoline[i++] = OP (MIPS_JR (PIC_FUNCTION_ADDR_REGNUM));
+      trampoline[i++] = OP (MIPS_MOVE (RETURN_ADDR_REGNUM, AT_REGNUM));
+    }
+  else
+    {
+      /* If the target has load delays, emit:
+
+	   lui     $1, %hi(end_addr)
+	   lw      $25, %lo(end_addr + ...)($1)
+	   lw      $static_chain, %lo(end_addr + ...)($1)
+	   jr      $25
+	   nop
+
+	 Otherwise emit:
+
+	   lui     $1, %hi(end_addr)
+	   lw      $25, %lo(end_addr + ...)($1)
+	   jr      $25
+	   lw      $static_chain, %lo(end_addr + ...)($1).  */
+
+      /* Split END_ADDR into %hi and %lo values.  Trampolines are aligned
+	 to 64 bits, so the %lo value will have the bottom 3 bits clear.  */
+      high = expand_simple_binop (SImode, PLUS, end_addr, GEN_INT (0x8000),
+				  NULL, false, OPTAB_WIDEN);
+      high = expand_simple_binop (SImode, LSHIFTRT, high, GEN_INT (16),
+				  NULL, false, OPTAB_WIDEN);
+      low = convert_to_mode (SImode, gen_lowpart (HImode, end_addr), true);
+
+      /* Emit the LUI.  The %hi part is OR-ed into the opcode at run
+	 time, since END_ADDR is not known until then.  */
+      opcode = OP (MIPS_LUI (AT_REGNUM, 0));
+      trampoline[i++] = expand_simple_binop (SImode, IOR, opcode, high,
+					     NULL, false, OPTAB_WIDEN);
+
+      /* Emit the load of the target function.  */
+      opcode = OP (MIPS_LOAD_PTR (PIC_FUNCTION_ADDR_REGNUM,
+				  target_function_offset - end_addr_offset,
+				  AT_REGNUM));
+      trampoline[i++] = expand_simple_binop (SImode, IOR, opcode, low,
+					     NULL, false, OPTAB_WIDEN);
+
+      /* Emit the JR here, if we can.  */
+      if (!ISA_HAS_LOAD_DELAY)
+	trampoline[i++] = OP (MIPS_JR (PIC_FUNCTION_ADDR_REGNUM));
+
+      /* Emit the load of the static chain register.  */
+      opcode = OP (MIPS_LOAD_PTR (STATIC_CHAIN_REGNUM,
+				  static_chain_offset - end_addr_offset,
+				  AT_REGNUM));
+      trampoline[i++] = expand_simple_binop (SImode, IOR, opcode, low,
+					     NULL, false, OPTAB_WIDEN);
+
+      /* Emit the JR, if we couldn't above.  */
+      if (ISA_HAS_LOAD_DELAY)
+	{
+	  trampoline[i++] = OP (MIPS_JR (PIC_FUNCTION_ADDR_REGNUM));
+	  trampoline[i++] = OP (MIPS_NOP);
+	}
+    }
+
+#undef OP
+
+  /* Copy the trampoline code.  Leave any padding uninitialized.  */
+  for (j = 0; j < i; j++)
+    {
+      mem = adjust_address (m_tramp, SImode, j * GET_MODE_SIZE (SImode));
+      mips_emit_move (mem, trampoline[j]);
+    }
+
+  /* Set up the static chain pointer field.  */
+  mem = adjust_address (m_tramp, ptr_mode, static_chain_offset);
+  mips_emit_move (mem, chain_value);
+
+  /* Set up the target function field.  */
+  mem = adjust_address (m_tramp, ptr_mode, target_function_offset);
+  mips_emit_move (mem, XEXP (DECL_RTL (fndecl), 0));
+
+  /* Flush the code part of the trampoline.  */
+  emit_insn (gen_add3_insn (end_addr, addr, GEN_INT (TRAMPOLINE_SIZE)));
+  emit_insn (gen_clear_cache (addr, end_addr));
+}
+
+/* Implement FUNCTION_PROFILER.  Output the assembly that calls _mcount
+   on entry to the current function.  */
+
+void mips_function_profiler (FILE *file)
+{
+  if (TARGET_MIPS16)
+    sorry ("mips16 function profiling");
+  if (TARGET_LONG_CALLS)
+    {
+      /* For TARGET_LONG_CALLS use $3 for the address of _mcount.  */
+      if (Pmode == DImode)
+	fprintf (file, "\tdla\t%s,_mcount\n", reg_names[3]);
+      else
+	fprintf (file, "\tla\t%s,_mcount\n", reg_names[3]);
+    }
+  /* AT is used explicitly below, so suppress the assembler's AT
+     warnings around this sequence.  */
+  mips_push_asm_switch (&mips_noat);
+  fprintf (file, "\tmove\t%s,%s\t\t# save current return address\n",
+	   reg_names[AT_REGNUM], reg_names[RETURN_ADDR_REGNUM]);
+  /* _mcount treats $2 as the static chain register.  */
+  if (cfun->static_chain_decl != NULL)
+    fprintf (file, "\tmove\t%s,%s\n", reg_names[2],
+	     reg_names[STATIC_CHAIN_REGNUM]);
+  if (TARGET_MCOUNT_RA_ADDRESS)
+    {
+      /* If TARGET_MCOUNT_RA_ADDRESS load $12 with the address of the
+	 ra save location.  */
+      if (cfun->machine->frame.ra_fp_offset == 0)
+	/* ra not saved, pass zero.  */
+	fprintf (file, "\tmove\t%s,%s\n", reg_names[12], reg_names[0]);
+      else
+	fprintf (file, "\t%s\t%s," HOST_WIDE_INT_PRINT_DEC "(%s)\n",
+		 Pmode == DImode ? "dla" : "la", reg_names[12],
+		 cfun->machine->frame.ra_fp_offset,
+		 reg_names[STACK_POINTER_REGNUM]);
+    }
+  /* The old ABIs expect the caller to make room for _mcount's
+     argument save area.  */
+  if (!TARGET_NEWABI)
+    fprintf (file,
+	     "\t%s\t%s,%s,%d\t\t# _mcount pops 2 words from stack\n",
+	     TARGET_64BIT ? "dsubu" : "subu",
+	     reg_names[STACK_POINTER_REGNUM],
+	     reg_names[STACK_POINTER_REGNUM],
+	     Pmode == DImode ? 16 : 8);
+
+  if (TARGET_LONG_CALLS)
+    fprintf (file, "\tjalr\t%s\n", reg_names[3]);
+  else
+    fprintf (file, "\tjal\t_mcount\n");
+  mips_pop_asm_switch (&mips_noat);
+  /* _mcount treats $2 as the static chain register.  */
+  if (cfun->static_chain_decl != NULL)
+    fprintf (file, "\tmove\t%s,%s\n", reg_names[STATIC_CHAIN_REGNUM],
+	     reg_names[2]);
+}
+
+/* Implement TARGET_SHIFT_TRUNCATION_MASK.  We want to keep the default
+   behaviour of TARGET_SHIFT_TRUNCATION_MASK for non-vector modes even
+   when TARGET_LOONGSON_VECTORS is true.  */
+
+static unsigned HOST_WIDE_INT
+mips_shift_truncation_mask (enum machine_mode mode)
+{
+  bool loongson_vector_p = TARGET_LOONGSON_VECTORS && VECTOR_MODE_P (mode);
+
+  return loongson_vector_p ? 0 : GET_MODE_BITSIZE (mode) - 1;
+}
+
+/* Implement TARGET_PREPARE_PCH_SAVE.  */
+
+static void
+mips_prepare_pch_save (void)
+{
+  /* We are called in a context where the current MIPS16 vs. non-MIPS16
+     setting should be irrelevant.  The question then is: which setting
+     makes most sense at load time?
+
+     The PCH is loaded before the first token is read.  We should never
+     have switched into MIPS16 mode by that point, and thus should not
+     have populated mips16_globals.  Nor can we load the entire contents
+     of mips16_globals from the PCH file, because mips16_globals contains
+     a combination of GGC and non-GGC data.
+
+     There is therefore no point in trying save the GGC part of
+     mips16_globals to the PCH file, or to preserve MIPS16ness across
+     the PCH save and load.  The loading compiler would not have access
+     to the non-GGC parts of mips16_globals (either from the PCH file,
+     or from a copy that the loading compiler generated itself) and would
+     have to call target_reinit anyway.
+
+     It therefore seems best to switch back to non-MIPS16 mode at
+     save time, and to ensure that mips16_globals remains null after
+     a PCH load.  */
+  mips_set_compression_mode (0);  /* 0 = no compression flags set.  */
+  mips16_globals = 0;
+}
+
+/* Generate or test for an insn that supports a constant permutation.  */
+
+/* Maximum number of vector elements handled by the permutation code.  */
+#define MAX_VECT_LEN 8
+
+struct expand_vec_perm_d
+{
+  /* Destination and the two source operands of the permutation.  */
+  rtx target, op0, op1;
+  /* The requested permutation indices, in the range 0 .. 2*nelt-1.  */
+  unsigned char perm[MAX_VECT_LEN];
+  /* Mode of the vectors being permuted.  */
+  enum machine_mode vmode;
+  /* Number of elements in each vector.  */
+  unsigned char nelt;
+  /* True if op0 and op1 are the same vector.  */
+  bool one_vector_p;
+  /* True if we only need to test whether the permutation is supported,
+     without emitting any code.  */
+  bool testing_p;
+};
+
+/* Construct (set target (vec_select op0 (parallel perm))) and
+   return true if that's a valid instruction in the active ISA.  */
+
+static bool
+mips_expand_vselect (rtx target, rtx op0,
+		     const unsigned char *perm, unsigned nelt)
+{
+  rtx rperm[MAX_VECT_LEN], x;
+  unsigned i;
+
+  /* Wrap each selector index in a CONST_INT.  */
+  for (i = 0; i < nelt; ++i)
+    rperm[i] = GEN_INT (perm[i]);
+
+  x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (nelt, rperm));
+  x = gen_rtx_VEC_SELECT (GET_MODE (target), op0, x);
+  x = gen_rtx_SET (VOIDmode, target, x);
+
+  /* Emit the insn tentatively; if it matches no pattern in the active
+     ISA, remove it again and report failure.  */
+  x = emit_insn (x);
+  if (recog_memoized (x) < 0)
+    {
+      remove_insn (x);
+      return false;
+    }
+  return true;
+}
+
+/* Similar, but generate a vec_concat from op0 and op1 as well.  */
+
+static bool
+mips_expand_vselect_vconcat (rtx target, rtx op0, rtx op1,
+			     const unsigned char *perm, unsigned nelt)
+{
+  enum machine_mode v2mode = GET_MODE_2XWIDER_MODE (GET_MODE (op0));
+  rtx concat = gen_rtx_VEC_CONCAT (v2mode, op0, op1);
+
+  /* Select from the double-width concatenation of the two inputs.  */
+  return mips_expand_vselect (target, concat, perm, nelt);
+}
+
+/* Recognize patterns for even-odd extraction.  */
+
+static bool
+mips_expand_vpc_loongson_even_odd (struct expand_vec_perm_d *d)
+{
+  unsigned i, odd, nelt = d->nelt;
+  rtx t0, t1, t2, t3;
+
+  if (!(TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS))
+    return false;
+  /* Even-odd for V2SI/V2SFmode is matched by interleave directly.  */
+  if (nelt < 4)
+    return false;
+
+  /* The permutation must select all even elements (ODD == 0) or all
+     odd elements (ODD == 1) of the concatenated inputs.  */
+  odd = d->perm[0];
+  if (odd > 1)
+    return false;
+  for (i = 1; i < nelt; ++i)
+    if (d->perm[i] != i * 2 + odd)
+      return false;
+
+  if (d->testing_p)
+    return true;
+
+  /* We need 2*log2(N)-1 operations to achieve odd/even with interleave.  */
+  t0 = gen_reg_rtx (d->vmode);
+  t1 = gen_reg_rtx (d->vmode);
+  switch (d->vmode)
+    {
+    case V4HImode:
+      emit_insn (gen_loongson_punpckhhw (t0, d->op0, d->op1));
+      emit_insn (gen_loongson_punpcklhw (t1, d->op0, d->op1));
+      if (odd)
+	emit_insn (gen_loongson_punpckhhw (d->target, t1, t0));
+      else
+	emit_insn (gen_loongson_punpcklhw (d->target, t1, t0));
+      break;
+
+    case V8QImode:
+      /* Eight elements need a second round of interleaves.  */
+      t2 = gen_reg_rtx (d->vmode);
+      t3 = gen_reg_rtx (d->vmode);
+      emit_insn (gen_loongson_punpckhbh (t0, d->op0, d->op1));
+      emit_insn (gen_loongson_punpcklbh (t1, d->op0, d->op1));
+      emit_insn (gen_loongson_punpckhbh (t2, t1, t0));
+      emit_insn (gen_loongson_punpcklbh (t3, t1, t0));
+      if (odd)
+	emit_insn (gen_loongson_punpckhbh (d->target, t3, t2));
+      else
+	emit_insn (gen_loongson_punpcklbh (d->target, t3, t2));
+      break;
+
+    default:
+      gcc_unreachable ();
+    }
+  return true;
+}
+
+/* Recognize patterns for the Loongson PSHUFH instruction.  */
+
+static bool
+mips_expand_vpc_loongson_pshufh (struct expand_vec_perm_d *d)
+{
+  unsigned i, mask;
+  rtx rmask;
+
+  if (!(TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS))
+    return false;
+  if (d->vmode != V4HImode)
+    return false;
+  if (d->testing_p)
+    return true;
+
+  /* Convert the selector into the packed 8-bit form for pshufh.  */
+  /* Recall that loongson is little-endian only.  No big-endian
+     adjustment required.  */
+  for (i = mask = 0; i < 4; i++)
+    mask |= (d->perm[i] & 3) << (i * 2);
+  rmask = force_reg (SImode, GEN_INT (mask));
+
+  if (d->one_vector_p)
+    emit_insn (gen_loongson_pshufh (d->target, d->op0, rmask));
+  else
+    {
+      rtx t0, t1, x, merge, rmerge[4];
+
+      /* Shuffle both inputs, then merge the results: bit 2 of each
+	 selector index says whether the element comes from op0 (clear)
+	 or op1 (set).  */
+      t0 = gen_reg_rtx (V4HImode);
+      t1 = gen_reg_rtx (V4HImode);
+      emit_insn (gen_loongson_pshufh (t1, d->op1, rmask));
+      emit_insn (gen_loongson_pshufh (t0, d->op0, rmask));
+
+      /* MERGE has all-ones in exactly the lanes that come from T1.  */
+      for (i = 0; i < 4; ++i)
+	rmerge[i] = (d->perm[i] & 4 ? constm1_rtx : const0_rtx);
+      merge = gen_rtx_CONST_VECTOR (V4HImode, gen_rtvec_v (4, rmerge));
+      merge = force_reg (V4HImode, merge);
+
+      /* target = (merge & t1) | (~merge & t0).  */
+      x = gen_rtx_AND (V4HImode, merge, t1);
+      emit_insn (gen_rtx_SET (VOIDmode, t1, x));
+
+      x = gen_rtx_NOT (V4HImode, merge);
+      x = gen_rtx_AND (V4HImode, x, t0);
+      emit_insn (gen_rtx_SET (VOIDmode, t0, x));
+
+      x = gen_rtx_IOR (V4HImode, t0, t1);
+      emit_insn (gen_rtx_SET (VOIDmode, d->target, x));
+    }
+
+  return true;
+}
+
+/* Recognize broadcast patterns for the Loongson.  */
+
+static bool
+mips_expand_vpc_loongson_bcast (struct expand_vec_perm_d *d)
+{
+  unsigned i, elt;
+  rtx t0, t1;
+
+  if (!(TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS))
+    return false;
+  /* Note that we've already matched V2SI via punpck and V4HI via pshufh.  */
+  if (d->vmode != V8QImode)
+    return false;
+  if (!d->one_vector_p)
+    return false;
+
+  /* A broadcast selects the same element in every lane.  */
+  elt = d->perm[0];
+  for (i = 1; i < 8; ++i)
+    if (d->perm[i] != elt)
+      return false;
+
+  if (d->testing_p)
+    return true;
+
+  /* With one interleave we put two of the desired element adjacent.  */
+  t0 = gen_reg_rtx (V8QImode);
+  if (elt < 4)
+    emit_insn (gen_loongson_punpcklbh (t0, d->op0, d->op0));
+  else
+    emit_insn (gen_loongson_punpckhbh (t0, d->op0, d->op0));
+
+  /* Shuffle that one HImode element into all locations.  */
+  elt &= 3;
+  elt *= 0x55;  /* Replicate the 2-bit selector into all four fields.  */
+  t1 = gen_reg_rtx (V4HImode);
+  emit_insn (gen_loongson_pshufh (t1, gen_lowpart (V4HImode, t0),
+				  force_reg (SImode, GEN_INT (elt))));
+
+  emit_move_insn (d->target, gen_lowpart (V8QImode, t1));
+  return true;
+}
+
+/* Try to expand the constant permutation described by D, returning
+   true on success.  Tries generic vec_select+vec_concat patterns
+   first, then the Loongson-specific expanders.  */
+
+static bool
+mips_expand_vec_perm_const_1 (struct expand_vec_perm_d *d)
+{
+  unsigned int i, nelt = d->nelt;
+  unsigned char perm2[MAX_VECT_LEN];
+
+  if (d->one_vector_p)
+    {
+      /* Try interleave with alternating operands.  */
+      memcpy (perm2, d->perm, sizeof(perm2));
+      for (i = 1; i < nelt; i += 2)
+	perm2[i] += nelt;
+      if (mips_expand_vselect_vconcat (d->target, d->op0, d->op1, perm2, nelt))
+	return true;
+    }
+  else
+    {
+      if (mips_expand_vselect_vconcat (d->target, d->op0, d->op1,
+				       d->perm, nelt))
+	return true;
+
+      /* Try again with swapped operands.  */
+      for (i = 0; i < nelt; ++i)
+	perm2[i] = (d->perm[i] + nelt) & (2 * nelt - 1);
+      if (mips_expand_vselect_vconcat (d->target, d->op1, d->op0, perm2, nelt))
+	return true;
+    }
+
+  /* Fall back to the Loongson-specific expanders.  */
+  if (mips_expand_vpc_loongson_even_odd (d))
+    return true;
+  if (mips_expand_vpc_loongson_pshufh (d))
+    return true;
+  if (mips_expand_vpc_loongson_bcast (d))
+    return true;
+  return false;
+}
+
+/* Expand a vec_perm_const pattern.  OPERANDS is {target, op0, op1, sel}.
+   Return true if the permutation could be expanded.  */
+
+bool
+mips_expand_vec_perm_const (rtx operands[4])
+{
+  struct expand_vec_perm_d d;
+  int i, nelt, which;
+  unsigned char orig_perm[MAX_VECT_LEN];
+  rtx sel;
+  bool ok;
+
+  d.target = operands[0];
+  d.op0 = operands[1];
+  d.op1 = operands[2];
+  sel = operands[3];
+
+  d.vmode = GET_MODE (d.target);
+  gcc_assert (VECTOR_MODE_P (d.vmode));
+  d.nelt = nelt = GET_MODE_NUNITS (d.vmode);
+  d.testing_p = false;
+
+  /* Reduce each selector index modulo 2*nelt and record in WHICH which
+     input vectors are referenced: bit 0 for op0, bit 1 for op1.  */
+  for (i = which = 0; i < nelt; ++i)
+    {
+      rtx e = XVECEXP (sel, 0, i);
+      int ei = INTVAL (e) & (2 * nelt - 1);
+      which |= (ei < nelt ? 1 : 2);
+      orig_perm[i] = ei;
+    }
+  memcpy (d.perm, orig_perm, MAX_VECT_LEN);
+
+  switch (which)
+    {
+    default:
+      gcc_unreachable();
+
+    case 3:
+      /* Both inputs referenced.  If they are the same vector, fold to
+	 the one-vector case below.  */
+      d.one_vector_p = false;
+      if (!rtx_equal_p (d.op0, d.op1))
+	break;
+      /* FALLTHRU */
+
+    case 2:
+      /* Only op1 referenced: rewrite the selector in terms of op0.  */
+      for (i = 0; i < nelt; ++i)
+	d.perm[i] &= nelt - 1;
+      d.op0 = d.op1;
+      d.one_vector_p = true;
+      break;
+
+    case 1:
+      /* Only op0 referenced.  */
+      d.op1 = d.op0;
+      d.one_vector_p = true;
+      break;
+    }
+
+  ok = mips_expand_vec_perm_const_1 (&d);
+
+  /* If we were given a two-vector permutation which just happened to
+     have both input vectors equal, we folded this into a one-vector
+     permutation.  There are several loongson patterns that are matched
+     via direct vec_select+vec_concat expansion, but we do not have
+     support in mips_expand_vec_perm_const_1 to guess the adjustment
+     that should be made for a single operand.  Just try again with
+     the original permutation.  */
+  if (!ok && which == 3)
+    {
+      d.op0 = operands[1];
+      d.op1 = operands[2];
+      d.one_vector_p = false;
+      memcpy (d.perm, orig_perm, MAX_VECT_LEN);
+      ok = mips_expand_vec_perm_const_1 (&d);
+    }
+
+  return ok;
+}
+
+/* Implement TARGET_VECTORIZE_VEC_PERM_CONST_OK.  Return true if the
+   NELT-element selector SEL is supported for mode VMODE.  */
+
+static bool
+mips_vectorize_vec_perm_const_ok (enum machine_mode vmode,
+				  const unsigned char *sel)
+{
+  struct expand_vec_perm_d d;
+  unsigned int i, nelt, which;
+  bool ret;
+
+  d.vmode = vmode;
+  d.nelt = nelt = GET_MODE_NUNITS (d.vmode);
+  d.testing_p = true;
+  memcpy (d.perm, sel, nelt);
+
+  /* Categorize the set of elements in the selector.  WHICH gets bit 0
+     for references to the first vector, bit 1 for the second.  */
+  for (i = which = 0; i < nelt; ++i)
+    {
+      unsigned char e = d.perm[i];
+      gcc_assert (e < 2 * nelt);
+      which |= (e < nelt ? 1 : 2);
+    }
+
+  /* For all elements from second vector, fold the elements to first.  */
+  if (which == 2)
+    for (i = 0; i < nelt; ++i)
+      d.perm[i] -= nelt;
+
+  /* Check whether the mask can be applied to the vector type.  */
+  d.one_vector_p = (which != 3);
+
+  /* Use raw registers beyond the last virtual register as stand-in
+     operands; TESTING_P means nothing emitted should survive.  */
+  d.target = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 1);
+  d.op1 = d.op0 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 2);
+  if (!d.one_vector_p)
+    d.op1 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 3);
+
+  /* Run the expander inside a throwaway sequence so that any insns
+     emitted while testing are discarded.  */
+  start_sequence ();
+  ret = mips_expand_vec_perm_const_1 (&d);
+  end_sequence ();
+
+  return ret;
+}
+
+/* Expand an integral vector unpack operation.  OPERANDS[0] is the
+   destination and OPERANDS[1] the source; UNSIGNED_P selects zero vs.
+   sign extension and HIGH_P selects the high or low half.  */
+
+void
+mips_expand_vec_unpack (rtx operands[2], bool unsigned_p, bool high_p)
+{
+  enum machine_mode imode = GET_MODE (operands[1]);
+  rtx (*unpack) (rtx, rtx, rtx);
+  rtx (*cmpgt) (rtx, rtx, rtx);
+  rtx tmp, dest, zero;
+
+  /* Pick the Loongson interleave and compare generators for the mode.  */
+  switch (imode)
+    {
+    case V8QImode:
+      if (high_p)
+	unpack = gen_loongson_punpckhbh;
+      else
+	unpack = gen_loongson_punpcklbh;
+      cmpgt = gen_loongson_pcmpgtb;
+      break;
+    case V4HImode:
+      if (high_p)
+	unpack = gen_loongson_punpckhhw;
+      else
+	unpack = gen_loongson_punpcklhw;
+      cmpgt = gen_loongson_pcmpgth;
+      break;
+    default:
+      gcc_unreachable ();
+    }
+
+  /* For unsigned extension interleave with zero; for signed extension
+     interleave with 0 > x (presumably an all-ones mask for negative
+     elements, supplying the sign bits -- TODO confirm against the
+     pcmpgt pattern).  */
+  zero = force_reg (imode, CONST0_RTX (imode));
+  if (unsigned_p)
+    tmp = zero;
+  else
+    {
+      tmp = gen_reg_rtx (imode);
+      emit_insn (cmpgt (tmp, zero, operands[1]));
+    }
+
+  dest = gen_reg_rtx (imode);
+  emit_insn (unpack (dest, operands[1], tmp));
+
+  emit_move_insn (operands[0], gen_lowpart (GET_MODE (operands[0]), dest));
+}
+
+/* A subroutine of mips_expand_vec_init, match constant vector elements.  */
+
+static inline bool
+mips_constant_elt_p (rtx x)
+{
+  if (CONST_INT_P (x))
+    return true;
+  return GET_CODE (x) == CONST_DOUBLE;
+}
+
+/* A subroutine of mips_expand_vec_init, expand via broadcast.  Copy
+   element ELT into every lane of the VMODE vector TARGET.  */
+
+static void
+mips_expand_vi_broadcast (enum machine_mode vmode, rtx target, rtx elt)
+{
+  struct expand_vec_perm_d d;
+  rtx t1;
+  bool ok;
+
+  if (elt != const0_rtx)
+    elt = force_reg (GET_MODE_INNER (vmode), elt);
+  if (REG_P (elt))
+    elt = gen_lowpart (DImode, elt);
+
+  /* First place ELT in element 0 of T1.  */
+  t1 = gen_reg_rtx (vmode);
+  switch (vmode)
+    {
+    case V8QImode:
+      emit_insn (gen_loongson_vec_init1_v8qi (t1, elt));
+      break;
+    case V4HImode:
+      emit_insn (gen_loongson_vec_init1_v4hi (t1, elt));
+      break;
+    default:
+      gcc_unreachable ();
+    }
+
+  /* Then replicate it: the memset leaves D.PERM all zeros, i.e. a
+     permutation that selects element 0 in every lane.  */
+  memset (&d, 0, sizeof (d));
+  d.target = target;
+  d.op0 = t1;
+  d.op1 = t1;
+  d.vmode = vmode;
+  d.nelt = GET_MODE_NUNITS (vmode);
+  d.one_vector_p = true;
+
+  ok = mips_expand_vec_perm_const_1 (&d);
+  gcc_assert (ok);
+}
+
+/* A subroutine of mips_expand_vec_init, replacing all of the non-constant
+   elements of VALS with zeros, copy the constant vector to TARGET.  */
+
+static void
+mips_expand_vi_constant (enum machine_mode vmode, unsigned nelt,
+			 rtx target, rtx vals)
+{
+  rtvec copy = shallow_copy_rtvec (XVEC (vals, 0));
+  unsigned idx;
+
+  /* Zero every element that cannot go into a constant vector.  */
+  for (idx = 0; idx < nelt; ++idx)
+    if (!mips_constant_elt_p (RTVEC_ELT (copy, idx)))
+      RTVEC_ELT (copy, idx) = const0_rtx;
+
+  emit_move_insn (target, gen_rtx_CONST_VECTOR (vmode, copy));
+}
+
+
+/* A subroutine of mips_expand_vec_init, expand via pinsrh.  */
+
+static void
+mips_expand_vi_loongson_one_pinsrh (rtx target, rtx vals, unsigned one_var)
+{
+  rtx elt = XVECEXP (vals, 0, one_var);
+
+  /* First materialize the constant elements, zeroing the one variable
+     slot...  */
+  mips_expand_vi_constant (V4HImode, 4, target, vals);
+
+  /* ...then insert the single variable element into it.  */
+  emit_insn (gen_vec_setv4hi (target, target, elt, GEN_INT (one_var)));
+}
+
+/* A subroutine of mips_expand_vec_init, expand anything via memory.  */
+
+static void
+mips_expand_vi_general (enum machine_mode vmode, enum machine_mode imode,
+			unsigned nelt, unsigned nvar, rtx target, rtx vals)
+{
+  rtx mem = assign_stack_temp (vmode, GET_MODE_SIZE (vmode));
+  unsigned int isize = GET_MODE_SIZE (imode);
+  unsigned int i;
+
+  /* Store the constant portion of the vector in one go...  */
+  if (nvar < nelt)
+    mips_expand_vi_constant (vmode, nelt, mem, vals);
+
+  /* ...then overwrite each variable element individually.  */
+  for (i = 0; i < nelt; ++i)
+    {
+      rtx elt = XVECEXP (vals, 0, i);
+
+      if (!mips_constant_elt_p (elt))
+	emit_move_insn (adjust_address (mem, imode, i * isize), elt);
+    }
+
+  emit_move_insn (target, mem);
+}
+
+/* Expand a vector initialization: set TARGET to the vector described
+   by the PARALLEL VALS.  */
+
+void
+mips_expand_vector_init (rtx target, rtx vals)
+{
+  enum machine_mode vmode = GET_MODE (target);
+  enum machine_mode imode = GET_MODE_INNER (vmode);
+  unsigned i, nelt = GET_MODE_NUNITS (vmode);
+  unsigned nvar = 0, one_var = -1u;
+  bool all_same = true;
+  rtx x;
+
+  /* NVAR counts the non-constant elements; ONE_VAR remembers the index
+     of the last such element.  */
+  for (i = 0; i < nelt; ++i)
+    {
+      x = XVECEXP (vals, 0, i);
+      if (!mips_constant_elt_p (x))
+	nvar++, one_var = i;
+      if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
+	all_same = false;
+    }
+
+  /* Load constants from the pool, or whatever's handy.  */
+  if (nvar == 0)
+    {
+      emit_move_insn (target, gen_rtx_CONST_VECTOR (vmode, XVEC (vals, 0)));
+      return;
+    }
+
+  /* For two-part initialization, always use CONCAT.  */
+  if (nelt == 2)
+    {
+      rtx op0 = force_reg (imode, XVECEXP (vals, 0, 0));
+      rtx op1 = force_reg (imode, XVECEXP (vals, 0, 1));
+      x = gen_rtx_VEC_CONCAT (vmode, op0, op1);
+      emit_insn (gen_rtx_SET (VOIDmode, target, x));
+      return;
+    }
+
+  /* Loongson is the only cpu with vectors with more elements.  */
+  gcc_assert (TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS);
+
+  /* If all values are identical, broadcast the value.  */
+  if (all_same)
+    {
+      mips_expand_vi_broadcast (vmode, target, XVECEXP (vals, 0, 0));
+      return;
+    }
+
+  /* If we've only got one non-variable V4HImode, use PINSRH.  */
+  if (nvar == 1 && vmode == V4HImode)
+    {
+      mips_expand_vi_loongson_one_pinsrh (target, vals, one_var);
+      return;
+    }
+
+  /* Otherwise assemble the vector through a stack temporary.  */
+  mips_expand_vi_general (vmode, imode, nelt, nvar, target, vals);
+}
+
+/* Expand a vector reduction.  IN is the input vector and GEN generates
+   one element-wise instance of the binary reduction operation.  The
+   vector is repeatedly folded in half -- pairing each live element with
+   another via an interleave or shift -- and GEN is applied after each
+   fold; the final GEN call writes the reduced result into TARGET.  */
+
+void
+mips_expand_vec_reduc (rtx target, rtx in, rtx (*gen)(rtx, rtx, rtx))
+{
+ enum machine_mode vmode = GET_MODE (in);
+ unsigned char perm2[2];
+ rtx last, next, fold, x;
+ bool ok;
+
+ last = in;
+ fold = gen_reg_rtx (vmode);
+ switch (vmode)
+ {
+ case V2SFmode:
+ /* Use PUL/PLU to produce { L, H } op { H, L }.
+ By reversing the pair order, rather than a pure interleave high,
+ we avoid erroneous exceptional conditions that we might otherwise
+ produce from the computation of H op H.  */
+ perm2[0] = 1;
+ perm2[1] = 2;
+ ok = mips_expand_vselect_vconcat (fold, last, last, perm2, 2);
+ gcc_assert (ok);
+ break;
+
+ case V2SImode:
+ /* Use interleave to produce { H, L } op { H, H }.  */
+ emit_insn (gen_loongson_punpckhwd (fold, last, last));
+ break;
+
+ case V4HImode:
+ /* Perform the first reduction with interleave,
+ and subsequent reductions with shifts.  */
+ emit_insn (gen_loongson_punpckhwd_hi (fold, last, last));
+
+ /* Fold 4 halfwords down to 2.  */
+ next = gen_reg_rtx (vmode);
+ emit_insn (gen (next, last, fold));
+ last = next;
+
+ /* Shift by 16 bits (one halfword) to pair the remaining two.  */
+ fold = gen_reg_rtx (vmode);
+ x = force_reg (SImode, GEN_INT (16));
+ emit_insn (gen_vec_shr_v4hi (fold, last, x));
+ break;
+
+ case V8QImode:
+ /* 8 bytes: one interleave fold, then 16-bit and 8-bit shifts.  */
+ emit_insn (gen_loongson_punpckhwd_qi (fold, last, last));
+
+ /* Fold 8 bytes down to 4.  */
+ next = gen_reg_rtx (vmode);
+ emit_insn (gen (next, last, fold));
+ last = next;
+
+ /* Shift by 16 bits (two bytes) to fold 4 down to 2.  */
+ fold = gen_reg_rtx (vmode);
+ x = force_reg (SImode, GEN_INT (16));
+ emit_insn (gen_vec_shr_v8qi (fold, last, x));
+
+ next = gen_reg_rtx (vmode);
+ emit_insn (gen (next, last, fold));
+ last = next;
+
+ /* Shift by 8 bits (one byte) for the final pairing.  */
+ fold = gen_reg_rtx (vmode);
+ x = force_reg (SImode, GEN_INT (8));
+ emit_insn (gen_vec_shr_v8qi (fold, last, x));
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ /* Apply the reduction operation one last time to produce TARGET.  */
+ emit_insn (gen (target, last, fold));
+}
+
+/* Expand a vector minimum/maximum:
+
+     TARGET = min_p ? min (OP0, OP1) : max (OP0, OP1)
+
+   element-wise, without branching.  CMP generates an element-wise
+   greater-than comparison whose result is used as a bit mask (the
+   AND/NOT/IOR select below requires each element of TC to be all-ones
+   where OP0 > OP1 and all-zeros elsewhere).  */
+
+void
+mips_expand_vec_minmax (rtx target, rtx op0, rtx op1,
+ rtx (*cmp) (rtx, rtx, rtx), bool min_p)
+{
+ enum machine_mode vmode = GET_MODE (target);
+ rtx tc, t0, t1, x;
+
+ tc = gen_reg_rtx (vmode);
+ t0 = gen_reg_rtx (vmode);
+ t1 = gen_reg_rtx (vmode);
+
+ /* tc = (op0 > op1) as an element-wise bit mask.  */
+ emit_insn (cmp (tc, op0, op1));
+
+ /* t0 = elements taken where the comparison is true: for a minimum
+    that is OP1 (the smaller), for a maximum OP0 (the larger).  */
+ x = gen_rtx_AND (vmode, tc, (min_p ? op1 : op0));
+ emit_insn (gen_rtx_SET (VOIDmode, t0, x));
+
+ /* t1 = elements taken where the comparison is false.  */
+ x = gen_rtx_NOT (vmode, tc);
+ x = gen_rtx_AND (vmode, x, (min_p ? op0 : op1));
+ emit_insn (gen_rtx_SET (VOIDmode, t1, x));
+
+ /* Merge the two disjoint selections.  */
+ x = gen_rtx_IOR (vmode, t0, t1);
+ emit_insn (gen_rtx_SET (VOIDmode, target, x));
+}
+
+/* Implement TARGET_CASE_VALUES_THRESHOLD: return the smallest number of
+   switch cases for which a jump table should be preferred over a chain
+   of conditional branches.  */
+
+unsigned int
+mips_case_values_threshold (void)
+{
+ /* In MIPS16 mode using a larger case threshold generates smaller code.  */
+ if (TARGET_MIPS16 && optimize_size)
+ return 10;
+ else
+ return default_case_values_threshold ();
+}
+
+/* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV.  Build the three code
+   sequences the middle end wraps around a C11 atomic floating-point
+   compound assignment:
+
+     *HOLD    -- save the FCSR and install a copy with the FP exception
+                 enable and flag fields masked off;
+     *CLEAR   -- re-install that masked FCSR (clears any new flags);
+     *UPDATE  -- capture the flags raised by the operation, restore the
+                 original FCSR, and re-raise the captured exceptions
+                 atomically via __atomic_feraiseexcept.
+
+   Does nothing for soft-float ABIs, which have no FCSR.  */
+
+static void
+mips_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
+{
+ if (!TARGET_HARD_FLOAT_ABI)
+ return;
+ tree exceptions_var = create_tmp_var (MIPS_ATYPE_USI, NULL);
+ tree fcsr_orig_var = create_tmp_var (MIPS_ATYPE_USI, NULL);
+ tree fcsr_mod_var = create_tmp_var (MIPS_ATYPE_USI, NULL);
+ tree get_fcsr = mips_builtin_decls[MIPS_GET_FCSR];
+ tree set_fcsr = mips_builtin_decls[MIPS_SET_FCSR];
+ /* HOLD: fcsr_orig = __builtin_mips_get_fcsr ();
+    fcsr_mod = fcsr_orig & 0xfffff003;  -- mask clears the exception
+    enable (bits 11:7) and flag (bits 6:2) fields of the FCSR --
+    __builtin_mips_set_fcsr (fcsr_mod);  */
+ tree get_fcsr_hold_call = build_call_expr (get_fcsr, 0);
+ tree hold_assign_orig = build2 (MODIFY_EXPR, MIPS_ATYPE_USI,
+ fcsr_orig_var, get_fcsr_hold_call);
+ tree hold_mod_val = build2 (BIT_AND_EXPR, MIPS_ATYPE_USI, fcsr_orig_var,
+ build_int_cst (MIPS_ATYPE_USI, 0xfffff003));
+ tree hold_assign_mod = build2 (MODIFY_EXPR, MIPS_ATYPE_USI,
+ fcsr_mod_var, hold_mod_val);
+ tree set_fcsr_hold_call = build_call_expr (set_fcsr, 1, fcsr_mod_var);
+ tree hold_all = build2 (COMPOUND_EXPR, MIPS_ATYPE_USI,
+ hold_assign_orig, hold_assign_mod);
+ *hold = build2 (COMPOUND_EXPR, void_type_node, hold_all,
+ set_fcsr_hold_call);
+
+ /* CLEAR: re-install the masked FCSR, wiping any flags set since.  */
+ *clear = build_call_expr (set_fcsr, 1, fcsr_mod_var);
+
+ /* UPDATE: exceptions = __builtin_mips_get_fcsr ();
+    __builtin_mips_set_fcsr (fcsr_orig);
+    __atomic_feraiseexcept ((int) exceptions);  */
+ tree get_fcsr_update_call = build_call_expr (get_fcsr, 0);
+ *update = build2 (MODIFY_EXPR, MIPS_ATYPE_USI,
+ exceptions_var, get_fcsr_update_call);
+ tree set_fcsr_update_call = build_call_expr (set_fcsr, 1, fcsr_orig_var);
+ *update = build2 (COMPOUND_EXPR, void_type_node, *update,
+ set_fcsr_update_call);
+ tree atomic_feraiseexcept
+ = builtin_decl_implicit (BUILT_IN_ATOMIC_FERAISEEXCEPT);
+ tree int_exceptions_var = fold_convert (integer_type_node,
+ exceptions_var);
+ tree atomic_feraiseexcept_call = build_call_expr (atomic_feraiseexcept,
+ 1, int_exceptions_var);
+ *update = build2 (COMPOUND_EXPR, void_type_node, *update,
+ atomic_feraiseexcept_call);
+}
+
+/* Initialize the GCC target structure. */
+#undef TARGET_ASM_ALIGNED_HI_OP
+#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
+#undef TARGET_ASM_ALIGNED_SI_OP
+#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
+#undef TARGET_ASM_ALIGNED_DI_OP
+#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
+
+#undef TARGET_OPTION_OVERRIDE
+#define TARGET_OPTION_OVERRIDE mips_option_override
+
+#undef TARGET_LEGITIMIZE_ADDRESS
+#define TARGET_LEGITIMIZE_ADDRESS mips_legitimize_address
+
+#undef TARGET_ASM_FUNCTION_PROLOGUE
+#define TARGET_ASM_FUNCTION_PROLOGUE mips_output_function_prologue
+#undef TARGET_ASM_FUNCTION_EPILOGUE
+#define TARGET_ASM_FUNCTION_EPILOGUE mips_output_function_epilogue
+#undef TARGET_ASM_SELECT_RTX_SECTION
+#define TARGET_ASM_SELECT_RTX_SECTION mips_select_rtx_section
+#undef TARGET_ASM_FUNCTION_RODATA_SECTION
+#define TARGET_ASM_FUNCTION_RODATA_SECTION mips_function_rodata_section
+
+#undef TARGET_SCHED_INIT
+#define TARGET_SCHED_INIT mips_sched_init
+#undef TARGET_SCHED_REORDER
+#define TARGET_SCHED_REORDER mips_sched_reorder
+#undef TARGET_SCHED_REORDER2
+#define TARGET_SCHED_REORDER2 mips_sched_reorder2
+#undef TARGET_SCHED_VARIABLE_ISSUE
+#define TARGET_SCHED_VARIABLE_ISSUE mips_variable_issue
+#undef TARGET_SCHED_ADJUST_COST
+#define TARGET_SCHED_ADJUST_COST mips_adjust_cost
+#undef TARGET_SCHED_ISSUE_RATE
+#define TARGET_SCHED_ISSUE_RATE mips_issue_rate
+#undef TARGET_SCHED_INIT_DFA_POST_CYCLE_INSN
+#define TARGET_SCHED_INIT_DFA_POST_CYCLE_INSN mips_init_dfa_post_cycle_insn
+#undef TARGET_SCHED_DFA_POST_ADVANCE_CYCLE
+#define TARGET_SCHED_DFA_POST_ADVANCE_CYCLE mips_dfa_post_advance_cycle
+#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
+#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
+ mips_multipass_dfa_lookahead
+#undef TARGET_SMALL_REGISTER_CLASSES_FOR_MODE_P
+#define TARGET_SMALL_REGISTER_CLASSES_FOR_MODE_P \
+ mips_small_register_classes_for_mode_p
+
+#undef TARGET_FUNCTION_OK_FOR_SIBCALL
+#define TARGET_FUNCTION_OK_FOR_SIBCALL mips_function_ok_for_sibcall
+
+#undef TARGET_INSERT_ATTRIBUTES
+#define TARGET_INSERT_ATTRIBUTES mips_insert_attributes
+#undef TARGET_MERGE_DECL_ATTRIBUTES
+#define TARGET_MERGE_DECL_ATTRIBUTES mips_merge_decl_attributes
+#undef TARGET_CAN_INLINE_P
+#define TARGET_CAN_INLINE_P mips_can_inline_p
+#undef TARGET_SET_CURRENT_FUNCTION
+#define TARGET_SET_CURRENT_FUNCTION mips_set_current_function
+
+#undef TARGET_VALID_POINTER_MODE
+#define TARGET_VALID_POINTER_MODE mips_valid_pointer_mode
+#undef TARGET_REGISTER_MOVE_COST
+#define TARGET_REGISTER_MOVE_COST mips_register_move_cost
+#undef TARGET_MEMORY_MOVE_COST
+#define TARGET_MEMORY_MOVE_COST mips_memory_move_cost
+#undef TARGET_RTX_COSTS
+#define TARGET_RTX_COSTS mips_rtx_costs
+#undef TARGET_ADDRESS_COST
+#define TARGET_ADDRESS_COST mips_address_cost
+
+#undef TARGET_IN_SMALL_DATA_P
+#define TARGET_IN_SMALL_DATA_P mips_in_small_data_p
+
+#undef TARGET_MACHINE_DEPENDENT_REORG
+#define TARGET_MACHINE_DEPENDENT_REORG mips_reorg
+
+#undef TARGET_PREFERRED_RELOAD_CLASS
+#define TARGET_PREFERRED_RELOAD_CLASS mips_preferred_reload_class
+
+#undef TARGET_EXPAND_TO_RTL_HOOK
+#define TARGET_EXPAND_TO_RTL_HOOK mips_expand_to_rtl_hook
+#undef TARGET_ASM_FILE_START
+#define TARGET_ASM_FILE_START mips_file_start
+#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
+#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
+#undef TARGET_ASM_CODE_END
+#define TARGET_ASM_CODE_END mips_code_end
+
+#undef TARGET_INIT_LIBFUNCS
+#define TARGET_INIT_LIBFUNCS mips_init_libfuncs
+
+#undef TARGET_BUILD_BUILTIN_VA_LIST
+#define TARGET_BUILD_BUILTIN_VA_LIST mips_build_builtin_va_list
+#undef TARGET_EXPAND_BUILTIN_VA_START
+#define TARGET_EXPAND_BUILTIN_VA_START mips_va_start
+#undef TARGET_GIMPLIFY_VA_ARG_EXPR
+#define TARGET_GIMPLIFY_VA_ARG_EXPR mips_gimplify_va_arg_expr
+
+#undef TARGET_PROMOTE_FUNCTION_MODE
+#define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
+#undef TARGET_PROMOTE_PROTOTYPES
+#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
+
+#undef TARGET_FUNCTION_VALUE
+#define TARGET_FUNCTION_VALUE mips_function_value
+#undef TARGET_LIBCALL_VALUE
+#define TARGET_LIBCALL_VALUE mips_libcall_value
+#undef TARGET_FUNCTION_VALUE_REGNO_P
+#define TARGET_FUNCTION_VALUE_REGNO_P mips_function_value_regno_p
+#undef TARGET_RETURN_IN_MEMORY
+#define TARGET_RETURN_IN_MEMORY mips_return_in_memory
+#undef TARGET_RETURN_IN_MSB
+#define TARGET_RETURN_IN_MSB mips_return_in_msb
+
+#undef TARGET_ASM_OUTPUT_MI_THUNK
+#define TARGET_ASM_OUTPUT_MI_THUNK mips_output_mi_thunk
+#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
+#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
+
+#undef TARGET_PRINT_OPERAND
+#define TARGET_PRINT_OPERAND mips_print_operand
+#undef TARGET_PRINT_OPERAND_ADDRESS
+#define TARGET_PRINT_OPERAND_ADDRESS mips_print_operand_address
+#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
+#define TARGET_PRINT_OPERAND_PUNCT_VALID_P mips_print_operand_punct_valid_p
+
+#undef TARGET_SETUP_INCOMING_VARARGS
+#define TARGET_SETUP_INCOMING_VARARGS mips_setup_incoming_varargs
+#undef TARGET_STRICT_ARGUMENT_NAMING
+#define TARGET_STRICT_ARGUMENT_NAMING mips_strict_argument_naming
+#undef TARGET_MUST_PASS_IN_STACK
+#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
+#undef TARGET_PASS_BY_REFERENCE
+#define TARGET_PASS_BY_REFERENCE mips_pass_by_reference
+#undef TARGET_CALLEE_COPIES
+#define TARGET_CALLEE_COPIES mips_callee_copies
+#undef TARGET_ARG_PARTIAL_BYTES
+#define TARGET_ARG_PARTIAL_BYTES mips_arg_partial_bytes
+#undef TARGET_FUNCTION_ARG
+#define TARGET_FUNCTION_ARG mips_function_arg
+#undef TARGET_FUNCTION_ARG_ADVANCE
+#define TARGET_FUNCTION_ARG_ADVANCE mips_function_arg_advance
+#undef TARGET_FUNCTION_ARG_BOUNDARY
+#define TARGET_FUNCTION_ARG_BOUNDARY mips_function_arg_boundary
+
+#undef TARGET_MODE_REP_EXTENDED
+#define TARGET_MODE_REP_EXTENDED mips_mode_rep_extended
+
+#undef TARGET_VECTOR_MODE_SUPPORTED_P
+#define TARGET_VECTOR_MODE_SUPPORTED_P mips_vector_mode_supported_p
+
+#undef TARGET_SCALAR_MODE_SUPPORTED_P
+#define TARGET_SCALAR_MODE_SUPPORTED_P mips_scalar_mode_supported_p
+
+#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
+#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE mips_preferred_simd_mode
+
+#undef TARGET_INIT_BUILTINS
+#define TARGET_INIT_BUILTINS mips_init_builtins
+#undef TARGET_BUILTIN_DECL
+#define TARGET_BUILTIN_DECL mips_builtin_decl
+#undef TARGET_EXPAND_BUILTIN
+#define TARGET_EXPAND_BUILTIN mips_expand_builtin
+
+#undef TARGET_HAVE_TLS
+#define TARGET_HAVE_TLS HAVE_AS_TLS
+
+#undef TARGET_CANNOT_FORCE_CONST_MEM
+#define TARGET_CANNOT_FORCE_CONST_MEM mips_cannot_force_const_mem
+
+#undef TARGET_LEGITIMATE_CONSTANT_P
+#define TARGET_LEGITIMATE_CONSTANT_P mips_legitimate_constant_p
+
+#undef TARGET_ENCODE_SECTION_INFO
+#define TARGET_ENCODE_SECTION_INFO mips_encode_section_info
+
+#undef TARGET_ATTRIBUTE_TABLE
+#define TARGET_ATTRIBUTE_TABLE mips_attribute_table
+/* All our function attributes are related to how out-of-line copies should
+ be compiled or called. They don't in themselves prevent inlining. */
+#undef TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P
+#define TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P hook_bool_const_tree_true
+
+#undef TARGET_EXTRA_LIVE_ON_ENTRY
+#define TARGET_EXTRA_LIVE_ON_ENTRY mips_extra_live_on_entry
+
+#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
+#define TARGET_USE_BLOCKS_FOR_CONSTANT_P mips_use_blocks_for_constant_p
+#undef TARGET_USE_ANCHORS_FOR_SYMBOL_P
+#define TARGET_USE_ANCHORS_FOR_SYMBOL_P mips_use_anchors_for_symbol_p
+
+#undef TARGET_COMP_TYPE_ATTRIBUTES
+#define TARGET_COMP_TYPE_ATTRIBUTES mips_comp_type_attributes
+
+#ifdef HAVE_AS_DTPRELWORD
+#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
+#define TARGET_ASM_OUTPUT_DWARF_DTPREL mips_output_dwarf_dtprel
+#endif
+#undef TARGET_DWARF_REGISTER_SPAN
+#define TARGET_DWARF_REGISTER_SPAN mips_dwarf_register_span
+
+#undef TARGET_ASM_FINAL_POSTSCAN_INSN
+#define TARGET_ASM_FINAL_POSTSCAN_INSN mips_final_postscan_insn
+
+#undef TARGET_LEGITIMATE_ADDRESS_P
+#define TARGET_LEGITIMATE_ADDRESS_P mips_legitimate_address_p
+
+#undef TARGET_FRAME_POINTER_REQUIRED
+#define TARGET_FRAME_POINTER_REQUIRED mips_frame_pointer_required
+
+#undef TARGET_CAN_ELIMINATE
+#define TARGET_CAN_ELIMINATE mips_can_eliminate
+
+#undef TARGET_CONDITIONAL_REGISTER_USAGE
+#define TARGET_CONDITIONAL_REGISTER_USAGE mips_conditional_register_usage
+
+#undef TARGET_TRAMPOLINE_INIT
+#define TARGET_TRAMPOLINE_INIT mips_trampoline_init
+
+#undef TARGET_ASM_OUTPUT_SOURCE_FILENAME
+#define TARGET_ASM_OUTPUT_SOURCE_FILENAME mips_output_filename
+
+#undef TARGET_SHIFT_TRUNCATION_MASK
+#define TARGET_SHIFT_TRUNCATION_MASK mips_shift_truncation_mask
+
+#undef TARGET_PREPARE_PCH_SAVE
+#define TARGET_PREPARE_PCH_SAVE mips_prepare_pch_save
+
+#undef TARGET_VECTORIZE_VEC_PERM_CONST_OK
+#define TARGET_VECTORIZE_VEC_PERM_CONST_OK mips_vectorize_vec_perm_const_ok
+
+#undef TARGET_CASE_VALUES_THRESHOLD
+#define TARGET_CASE_VALUES_THRESHOLD mips_case_values_threshold
+
+#undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
+#define TARGET_ATOMIC_ASSIGN_EXPAND_FENV mips_atomic_assign_expand_fenv
+
+struct gcc_target targetm = TARGET_INITIALIZER;
+
+#include "gt-mips.h"
diff --git a/gcc-4.9/gcc/config/mips/mips.h b/gcc-4.9/gcc/config/mips/mips.h
new file mode 100644
index 000000000..a786d4ce3
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/mips.h
@@ -0,0 +1,3005 @@
+/* Definitions of target machine for GNU compiler. MIPS version.
+ Copyright (C) 1989-2014 Free Software Foundation, Inc.
+ Contributed by A. Lichnewsky (lich@inria.inria.fr).
+ Changed by Michael Meissner (meissner@osf.org).
+ 64-bit r4000 support by Ian Lance Taylor (ian@cygnus.com) and
+ Brendan Eich (brendan@microunity.com).
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+
+#include "config/vxworks-dummy.h"
+
+#ifdef GENERATOR_FILE
+/* This is used in some insn conditions, so needs to be declared, but
+ does not need to be defined. */
+extern int target_flags_explicit;
+#endif
+
+/* MIPS external variables defined in mips.c. */
+
+/* Which ABI to use. ABI_32 (original 32, or o32), ABI_N32 (n32),
+ ABI_64 (n64) are all defined by SGI. ABI_O64 is o32 extended
+ to work on a 64-bit machine. */
+
+#define ABI_32 0
+#define ABI_N32 1
+#define ABI_64 2
+#define ABI_EABI 3
+#define ABI_O64 4
+
+/* Masks that affect tuning.
+
+ PTF_AVOID_BRANCHLIKELY
+ Set if it is usually not profitable to use branch-likely instructions
+ for this target, typically because the branches are always predicted
+ taken and so incur a large overhead when not taken.
+
+ PTF_AVOID_IMADD
+ Set if it is usually not profitable to use the integer MADD or MSUB
+ instructions because of the overhead of getting the result out of
+ the HI/LO registers. */
+
+#define PTF_AVOID_BRANCHLIKELY 0x1
+#define PTF_AVOID_IMADD 0x2
+
+/* Information about one recognized processor. Defined here for the
+ benefit of TARGET_CPU_CPP_BUILTINS. */
+struct mips_cpu_info {
+ /* The 'canonical' name of the processor as far as GCC is concerned.
+ It's typically a manufacturer's prefix followed by a numerical
+ designation. It should be lowercase. */
+ const char *name;
+
+ /* The internal processor number that most closely matches this
+ entry. Several processors can have the same value, if there's no
+ difference between them from GCC's point of view. */
+ enum processor cpu;
+
+ /* The ISA level that the processor implements. */
+ int isa;
+
+ /* A mask of PTF_* values. */
+ unsigned int tune_flags;
+};
+
+#include "config/mips/mips-opts.h"
+
+/* Macros to silence warnings about numbers being signed in traditional
+ C and unsigned in ISO C when compiled on 32-bit hosts. */
+
+#define BITMASK_HIGH (((unsigned long)1) << 31) /* 0x80000000 */
+#define BITMASK_UPPER16 ((unsigned long)0xffff << 16) /* 0xffff0000 */
+#define BITMASK_LOWER16 ((unsigned long)0xffff) /* 0x0000ffff */
+
+
+/* Run-time compilation parameters selecting different hardware subsets. */
+
+/* True if we are generating position-independent VxWorks RTP code. */
+#define TARGET_RTP_PIC (TARGET_VXWORKS_RTP && flag_pic)
+
+/* True if the output file is marked as ".abicalls; .option pic0"
+ (-call_nonpic). */
+#define TARGET_ABICALLS_PIC0 \
+ (TARGET_ABSOLUTE_ABICALLS && TARGET_PLT)
+
+/* True if the output file is marked as ".abicalls; .option pic2" (-KPIC). */
+#define TARGET_ABICALLS_PIC2 \
+ (TARGET_ABICALLS && !TARGET_ABICALLS_PIC0)
+
+/* True if the call patterns should be split into a jalr followed by
+ an instruction to restore $gp. It is only safe to split the load
+ from the call when every use of $gp is explicit.
+
+ See mips_must_initialize_gp_p for details about how we manage the
+ global pointer. */
+
+#define TARGET_SPLIT_CALLS \
+ (TARGET_EXPLICIT_RELOCS && TARGET_CALL_CLOBBERED_GP && epilogue_completed)
+
+/* True if we're generating a form of -mabicalls in which we can use
+ operators like %hi and %lo to refer to locally-binding symbols.
+ We can only do this for -mno-shared, and only then if we can use
+ relocation operations instead of assembly macros. It isn't really
+ worth using absolute sequences for 64-bit symbols because GOT
+ accesses are so much shorter. */
+
+#define TARGET_ABSOLUTE_ABICALLS \
+ (TARGET_ABICALLS \
+ && !TARGET_SHARED \
+ && TARGET_EXPLICIT_RELOCS \
+ && !ABI_HAS_64BIT_SYMBOLS)
+
+/* True if we can optimize sibling calls. For simplicity, we only
+ handle cases in which call_insn_operand will reject invalid
+ sibcall addresses. There are two cases in which this isn't true:
+
+ - TARGET_MIPS16. call_insn_operand accepts constant addresses
+ but there is no direct jump instruction. It isn't worth
+ using sibling calls in this case anyway; they would usually
+ be longer than normal calls.
+
+ - TARGET_USE_GOT && !TARGET_EXPLICIT_RELOCS. call_insn_operand
+ accepts global constants, but all sibcalls must be indirect. */
+#define TARGET_SIBCALLS \
+ (!TARGET_MIPS16 && (!TARGET_USE_GOT || TARGET_EXPLICIT_RELOCS))
+
+/* True if we need to use a global offset table to access some symbols. */
+#define TARGET_USE_GOT (TARGET_ABICALLS || TARGET_RTP_PIC)
+
+/* True if TARGET_USE_GOT and if $gp is a call-clobbered register. */
+#define TARGET_CALL_CLOBBERED_GP (TARGET_ABICALLS && TARGET_OLDABI)
+
+/* True if TARGET_USE_GOT and if $gp is a call-saved register. */
+#define TARGET_CALL_SAVED_GP (TARGET_USE_GOT && !TARGET_CALL_CLOBBERED_GP)
+
+/* True if we should use .cprestore to store to the cprestore slot.
+
+ We continue to use .cprestore for explicit-reloc code so that JALs
+ inside inline asms will work correctly. */
+#define TARGET_CPRESTORE_DIRECTIVE \
+ (TARGET_ABICALLS_PIC2 && !TARGET_MIPS16)
+
+/* True if we can use the J and JAL instructions. */
+#define TARGET_ABSOLUTE_JUMPS \
+ (!flag_pic || TARGET_ABSOLUTE_ABICALLS)
+
+/* True if indirect calls must use register class PIC_FN_ADDR_REG.
+ This is true for both the PIC and non-PIC VxWorks RTP modes. */
+#define TARGET_USE_PIC_FN_ADDR_REG (TARGET_ABICALLS || TARGET_VXWORKS_RTP)
+
+/* True if .gpword or .gpdword should be used for switch tables. */
+#define TARGET_GPWORD \
+ (TARGET_ABICALLS && !TARGET_ABSOLUTE_ABICALLS)
+
+/* True if the output must have a writable .eh_frame.
+ See ASM_PREFERRED_EH_DATA_FORMAT for details. */
+#ifdef HAVE_LD_PERSONALITY_RELAXATION
+#define TARGET_WRITABLE_EH_FRAME 0
+#else
+#define TARGET_WRITABLE_EH_FRAME (flag_pic && TARGET_SHARED)
+#endif
+
+/* Test the assembler to set ISA_HAS_DSP_MULT to DSP Rev 1 or 2. */
+#ifdef HAVE_AS_DSPR1_MULT
+#define ISA_HAS_DSP_MULT ISA_HAS_DSP
+#else
+#define ISA_HAS_DSP_MULT ISA_HAS_DSPR2
+#endif
+
+/* The ISA compression flags that are currently in effect. */
+#define TARGET_COMPRESSION (target_flags & (MASK_MIPS16 | MASK_MICROMIPS))
+
+/* Generate mips16 code */
+#define TARGET_MIPS16 ((target_flags & MASK_MIPS16) != 0)
+/* Generate mips16e code. Default 16bit ASE for mips32* and mips64* */
+#define GENERATE_MIPS16E (TARGET_MIPS16 && mips_isa >= 32)
+/* Generate mips16e register save/restore sequences. */
+#define GENERATE_MIPS16E_SAVE_RESTORE (GENERATE_MIPS16E && mips_abi == ABI_32)
+
+/* True if we're generating a form of MIPS16 code in which general
+ text loads are allowed. */
+#define TARGET_MIPS16_TEXT_LOADS \
+ (TARGET_MIPS16 && mips_code_readable == CODE_READABLE_YES)
+
+/* True if we're generating a form of MIPS16 code in which PC-relative
+ loads are allowed. */
+#define TARGET_MIPS16_PCREL_LOADS \
+ (TARGET_MIPS16 && mips_code_readable >= CODE_READABLE_PCREL)
+
+/* Generic ISA defines. */
+#define ISA_MIPS1 (mips_isa == 1)
+#define ISA_MIPS2 (mips_isa == 2)
+#define ISA_MIPS3 (mips_isa == 3)
+#define ISA_MIPS4 (mips_isa == 4)
+#define ISA_MIPS32 (mips_isa == 32)
+#define ISA_MIPS32R2 (mips_isa == 33)
+#define ISA_MIPS64 (mips_isa == 64)
+#define ISA_MIPS64R2 (mips_isa == 65)
+
+/* Architecture target defines. */
+#define TARGET_LOONGSON_2E (mips_arch == PROCESSOR_LOONGSON_2E)
+#define TARGET_LOONGSON_2F (mips_arch == PROCESSOR_LOONGSON_2F)
+#define TARGET_LOONGSON_2EF (TARGET_LOONGSON_2E || TARGET_LOONGSON_2F)
+#define TARGET_LOONGSON_3A (mips_arch == PROCESSOR_LOONGSON_3A)
+#define TARGET_MIPS3900 (mips_arch == PROCESSOR_R3900)
+#define TARGET_MIPS4000 (mips_arch == PROCESSOR_R4000)
+#define TARGET_MIPS4120 (mips_arch == PROCESSOR_R4120)
+#define TARGET_MIPS4130 (mips_arch == PROCESSOR_R4130)
+#define TARGET_MIPS5400 (mips_arch == PROCESSOR_R5400)
+#define TARGET_MIPS5500 (mips_arch == PROCESSOR_R5500)
+#define TARGET_MIPS5900 (mips_arch == PROCESSOR_R5900)
+#define TARGET_MIPS7000 (mips_arch == PROCESSOR_R7000)
+#define TARGET_MIPS9000 (mips_arch == PROCESSOR_R9000)
+#define TARGET_OCTEON (mips_arch == PROCESSOR_OCTEON \
+ || mips_arch == PROCESSOR_OCTEON2)
+#define TARGET_OCTEON2 (mips_arch == PROCESSOR_OCTEON2)
+#define TARGET_SB1 (mips_arch == PROCESSOR_SB1 \
+ || mips_arch == PROCESSOR_SB1A)
+#define TARGET_SR71K (mips_arch == PROCESSOR_SR71000)
+#define TARGET_XLP (mips_arch == PROCESSOR_XLP)
+
+/* Scheduling target defines. */
+#define TUNE_20KC (mips_tune == PROCESSOR_20KC)
+#define TUNE_24K (mips_tune == PROCESSOR_24KC \
+ || mips_tune == PROCESSOR_24KF2_1 \
+ || mips_tune == PROCESSOR_24KF1_1)
+#define TUNE_74K (mips_tune == PROCESSOR_74KC \
+ || mips_tune == PROCESSOR_74KF2_1 \
+ || mips_tune == PROCESSOR_74KF1_1 \
+ || mips_tune == PROCESSOR_74KF3_2)
+#define TUNE_LOONGSON_2EF (mips_tune == PROCESSOR_LOONGSON_2E \
+ || mips_tune == PROCESSOR_LOONGSON_2F)
+#define TUNE_LOONGSON_3A (mips_tune == PROCESSOR_LOONGSON_3A)
+#define TUNE_MIPS3000 (mips_tune == PROCESSOR_R3000)
+#define TUNE_MIPS3900 (mips_tune == PROCESSOR_R3900)
+#define TUNE_MIPS4000 (mips_tune == PROCESSOR_R4000)
+#define TUNE_MIPS4120 (mips_tune == PROCESSOR_R4120)
+#define TUNE_MIPS4130 (mips_tune == PROCESSOR_R4130)
+#define TUNE_MIPS5000 (mips_tune == PROCESSOR_R5000)
+#define TUNE_MIPS5400 (mips_tune == PROCESSOR_R5400)
+#define TUNE_MIPS5500 (mips_tune == PROCESSOR_R5500)
+#define TUNE_MIPS6000 (mips_tune == PROCESSOR_R6000)
+#define TUNE_MIPS7000 (mips_tune == PROCESSOR_R7000)
+#define TUNE_MIPS9000 (mips_tune == PROCESSOR_R9000)
+#define TUNE_OCTEON (mips_tune == PROCESSOR_OCTEON \
+ || mips_tune == PROCESSOR_OCTEON2)
+#define TUNE_SB1 (mips_tune == PROCESSOR_SB1 \
+ || mips_tune == PROCESSOR_SB1A)
+
+/* Whether vector modes and intrinsics for ST Microelectronics
+ Loongson-2E/2F processors should be enabled. In o32 pairs of
+ floating-point registers provide 64-bit values. */
+#define TARGET_LOONGSON_VECTORS (TARGET_HARD_FLOAT_ABI \
+ && (TARGET_LOONGSON_2EF \
+ || TARGET_LOONGSON_3A))
+
+/* True if the pre-reload scheduler should try to create chains of
+ multiply-add or multiply-subtract instructions. For example,
+ suppose we have:
+
+ t1 = a * b
+ t2 = t1 + c * d
+ t3 = e * f
+ t4 = t3 - g * h
+
+ t1 will have a higher priority than t2 and t3 will have a higher
+ priority than t4. However, before reload, there is no dependence
+ between t1 and t3, and they can often have similar priorities.
+ The scheduler will then tend to prefer:
+
+ t1 = a * b
+ t3 = e * f
+ t2 = t1 + c * d
+ t4 = t3 - g * h
+
+ which stops us from making full use of macc/madd-style instructions.
+ This sort of situation occurs frequently in Fourier transforms and
+ in unrolled loops.
+
+ To counter this, the TUNE_MACC_CHAINS code will reorder the ready
+ queue so that chained multiply-add and multiply-subtract instructions
+ appear ahead of any other instruction that is likely to clobber lo.
+ In the example above, if t2 and t3 become ready at the same time,
+ the code ensures that t2 is scheduled first.
+
+ Multiply-accumulate instructions are a bigger win for some targets
+ than others, so this macro is defined on an opt-in basis. */
+#define TUNE_MACC_CHAINS (TUNE_MIPS5500 \
+ || TUNE_MIPS4120 \
+ || TUNE_MIPS4130 \
+ || TUNE_24K)
+
+#define TARGET_OLDABI (mips_abi == ABI_32 || mips_abi == ABI_O64)
+#define TARGET_NEWABI (mips_abi == ABI_N32 || mips_abi == ABI_64)
+
+/* TARGET_HARD_FLOAT and TARGET_SOFT_FLOAT reflect whether the FPU is
+ directly accessible, while the command-line options select
+ TARGET_HARD_FLOAT_ABI and TARGET_SOFT_FLOAT_ABI to reflect the ABI
+ in use. */
+#define TARGET_HARD_FLOAT (TARGET_HARD_FLOAT_ABI && !TARGET_MIPS16)
+#define TARGET_SOFT_FLOAT (TARGET_SOFT_FLOAT_ABI || TARGET_MIPS16)
+
+/* False if SC acts as a memory barrier with respect to itself,
+ otherwise a SYNC will be emitted after SC for atomic operations
+ that require ordering between the SC and following loads and
+ stores. It does not tell anything about ordering of loads and
+ stores prior to and following the SC, only about the SC itself and
+ those loads and stores that follow it. */
+#define TARGET_SYNC_AFTER_SC (!TARGET_OCTEON && !TARGET_XLP)
+
+/* Define preprocessor macros for the -march and -mtune options.
+ PREFIX is either _MIPS_ARCH or _MIPS_TUNE, INFO is the selected
+ processor. If INFO's canonical name is "foo", define PREFIX to
+ be "foo", and define an additional macro PREFIX_FOO. */
+#define MIPS_CPP_SET_PROCESSOR(PREFIX, INFO) \
+ do \
+ { \
+ char *macro, *p; \
+ \
+ macro = concat ((PREFIX), "_", (INFO)->name, NULL); \
+ for (p = macro; *p != 0; p++) \
+ if (*p == '+') \
+ *p = 'P'; \
+ else \
+ *p = TOUPPER (*p); \
+ \
+ builtin_define (macro); \
+ builtin_define_with_value ((PREFIX), (INFO)->name, 1); \
+ free (macro); \
+ } \
+ while (0)
+
+/* Target CPU builtins. */
+#define TARGET_CPU_CPP_BUILTINS() \
+ do \
+ { \
+ builtin_assert ("machine=mips"); \
+ builtin_assert ("cpu=mips"); \
+ builtin_define ("__mips__"); \
+ builtin_define ("_mips"); \
+ \
+ /* We do this here because __mips is defined below and so we \
+ can't use builtin_define_std. We don't ever want to define \
+ "mips" for VxWorks because some of the VxWorks headers \
+ construct include filenames from a root directory macro, \
+ an architecture macro and a filename, where the architecture \
+ macro expands to 'mips'. If we define 'mips' to 1, the \
+ architecture macro expands to 1 as well. */ \
+ if (!flag_iso && !TARGET_VXWORKS) \
+ builtin_define ("mips"); \
+ \
+ if (TARGET_64BIT) \
+ builtin_define ("__mips64"); \
+ \
+ /* Treat _R3000 and _R4000 like register-size \
+ defines, which is how they've historically \
+ been used. */ \
+ if (TARGET_64BIT) \
+ { \
+ builtin_define_std ("R4000"); \
+ builtin_define ("_R4000"); \
+ } \
+ else \
+ { \
+ builtin_define_std ("R3000"); \
+ builtin_define ("_R3000"); \
+ } \
+ \
+ if (TARGET_FLOAT64) \
+ builtin_define ("__mips_fpr=64"); \
+ else \
+ builtin_define ("__mips_fpr=32"); \
+ \
+ if (mips_base_compression_flags & MASK_MIPS16) \
+ builtin_define ("__mips16"); \
+ \
+ if (TARGET_MIPS3D) \
+ builtin_define ("__mips3d"); \
+ \
+ if (TARGET_SMARTMIPS) \
+ builtin_define ("__mips_smartmips"); \
+ \
+ if (mips_base_compression_flags & MASK_MICROMIPS) \
+ builtin_define ("__mips_micromips"); \
+ \
+ if (TARGET_MCU) \
+ builtin_define ("__mips_mcu"); \
+ \
+ if (TARGET_EVA) \
+ builtin_define ("__mips_eva"); \
+ \
+ if (TARGET_DSP) \
+ { \
+ builtin_define ("__mips_dsp"); \
+ if (TARGET_DSPR2) \
+ { \
+ builtin_define ("__mips_dspr2"); \
+ builtin_define ("__mips_dsp_rev=2"); \
+ } \
+ else \
+ builtin_define ("__mips_dsp_rev=1"); \
+ } \
+ \
+ MIPS_CPP_SET_PROCESSOR ("_MIPS_ARCH", mips_arch_info); \
+ MIPS_CPP_SET_PROCESSOR ("_MIPS_TUNE", mips_tune_info); \
+ \
+ if (ISA_MIPS1) \
+ { \
+ builtin_define ("__mips=1"); \
+ builtin_define ("_MIPS_ISA=_MIPS_ISA_MIPS1"); \
+ } \
+ else if (ISA_MIPS2) \
+ { \
+ builtin_define ("__mips=2"); \
+ builtin_define ("_MIPS_ISA=_MIPS_ISA_MIPS2"); \
+ } \
+ else if (ISA_MIPS3) \
+ { \
+ builtin_define ("__mips=3"); \
+ builtin_define ("_MIPS_ISA=_MIPS_ISA_MIPS3"); \
+ } \
+ else if (ISA_MIPS4) \
+ { \
+ builtin_define ("__mips=4"); \
+ builtin_define ("_MIPS_ISA=_MIPS_ISA_MIPS4"); \
+ } \
+ else if (ISA_MIPS32) \
+ { \
+ builtin_define ("__mips=32"); \
+ builtin_define ("__mips_isa_rev=1"); \
+ builtin_define ("_MIPS_ISA=_MIPS_ISA_MIPS32"); \
+ } \
+ else if (ISA_MIPS32R2) \
+ { \
+ builtin_define ("__mips=32"); \
+ builtin_define ("__mips_isa_rev=2"); \
+ builtin_define ("_MIPS_ISA=_MIPS_ISA_MIPS32"); \
+ } \
+ else if (ISA_MIPS64) \
+ { \
+ builtin_define ("__mips=64"); \
+ builtin_define ("__mips_isa_rev=1"); \
+ builtin_define ("_MIPS_ISA=_MIPS_ISA_MIPS64"); \
+ } \
+ else if (ISA_MIPS64R2) \
+ { \
+ builtin_define ("__mips=64"); \
+ builtin_define ("__mips_isa_rev=2"); \
+ builtin_define ("_MIPS_ISA=_MIPS_ISA_MIPS64"); \
+ } \
+ \
+ switch (mips_abi) \
+ { \
+ case ABI_32: \
+ builtin_define ("_ABIO32=1"); \
+ builtin_define ("_MIPS_SIM=_ABIO32"); \
+ break; \
+ \
+ case ABI_N32: \
+ builtin_define ("_ABIN32=2"); \
+ builtin_define ("_MIPS_SIM=_ABIN32"); \
+ break; \
+ \
+ case ABI_64: \
+ builtin_define ("_ABI64=3"); \
+ builtin_define ("_MIPS_SIM=_ABI64"); \
+ break; \
+ \
+ case ABI_O64: \
+ builtin_define ("_ABIO64=4"); \
+ builtin_define ("_MIPS_SIM=_ABIO64"); \
+ break; \
+ } \
+ \
+ builtin_define_with_int_value ("_MIPS_SZINT", INT_TYPE_SIZE); \
+ builtin_define_with_int_value ("_MIPS_SZLONG", LONG_TYPE_SIZE); \
+ builtin_define_with_int_value ("_MIPS_SZPTR", POINTER_SIZE); \
+ builtin_define_with_int_value ("_MIPS_FPSET", \
+ 32 / MAX_FPRS_PER_FMT); \
+ \
+ /* These defines reflect the ABI in use, not whether the \
+ FPU is directly accessible. */ \
+ if (TARGET_NO_FLOAT) \
+ builtin_define ("__mips_no_float"); \
+ else if (TARGET_HARD_FLOAT_ABI) \
+ builtin_define ("__mips_hard_float"); \
+ else \
+ builtin_define ("__mips_soft_float"); \
+ \
+ if (TARGET_SINGLE_FLOAT) \
+ builtin_define ("__mips_single_float"); \
+ \
+ if (TARGET_PAIRED_SINGLE_FLOAT) \
+ builtin_define ("__mips_paired_single_float"); \
+ \
+ if (mips_abs == MIPS_IEEE_754_2008) \
+ builtin_define ("__mips_abs2008"); \
+ \
+ if (mips_nan == MIPS_IEEE_754_2008) \
+ builtin_define ("__mips_nan2008"); \
+ \
+ if (TARGET_BIG_ENDIAN) \
+ { \
+ builtin_define_std ("MIPSEB"); \
+ builtin_define ("_MIPSEB"); \
+ } \
+ else \
+ { \
+ builtin_define_std ("MIPSEL"); \
+ builtin_define ("_MIPSEL"); \
+ } \
+ \
+ /* Whether calls should go through $25. The separate __PIC__ \
+ macro indicates whether abicalls code might use a GOT. */ \
+ if (TARGET_ABICALLS) \
+ builtin_define ("__mips_abicalls"); \
+ \
+ /* Whether Loongson vector modes are enabled. */ \
+ if (TARGET_LOONGSON_VECTORS) \
+ builtin_define ("__mips_loongson_vector_rev"); \
+ \
+ /* Historical Octeon macro. */ \
+ if (TARGET_OCTEON) \
+ builtin_define ("__OCTEON__"); \
+ \
+ if (TARGET_SYNCI) \
+ builtin_define ("__mips_synci"); \
+ \
+ /* Macros dependent on the C dialect. */ \
+ if (preprocessing_asm_p ()) \
+ { \
+ builtin_define_std ("LANGUAGE_ASSEMBLY"); \
+ builtin_define ("_LANGUAGE_ASSEMBLY"); \
+ } \
+ else if (c_dialect_cxx ()) \
+ { \
+ builtin_define ("_LANGUAGE_C_PLUS_PLUS"); \
+ builtin_define ("__LANGUAGE_C_PLUS_PLUS"); \
+ builtin_define ("__LANGUAGE_C_PLUS_PLUS__"); \
+ } \
+ else \
+ { \
+ builtin_define_std ("LANGUAGE_C"); \
+ builtin_define ("_LANGUAGE_C"); \
+ } \
+ if (c_dialect_objc ()) \
+ { \
+ builtin_define ("_LANGUAGE_OBJECTIVE_C"); \
+ builtin_define ("__LANGUAGE_OBJECTIVE_C"); \
+ /* Bizarre, but retained for backwards compatibility. */ \
+ builtin_define_std ("LANGUAGE_C"); \
+ builtin_define ("_LANGUAGE_C"); \
+ } \
+ \
+ if (mips_abi == ABI_EABI) \
+ builtin_define ("__mips_eabi"); \
+ \
+ if (TARGET_CACHE_BUILTIN) \
+ builtin_define ("__GCC_HAVE_BUILTIN_MIPS_CACHE"); \
+ } \
+ while (0)
+
+/* Default target_flags if no switches are specified */
+
+#ifndef TARGET_DEFAULT
+#define TARGET_DEFAULT 0
+#endif
+
+#ifndef TARGET_CPU_DEFAULT
+#define TARGET_CPU_DEFAULT 0
+#endif
+
+/* Default to big-endian; subtargets may override this before
+   including this file.  */
+#ifndef TARGET_ENDIAN_DEFAULT
+#define TARGET_ENDIAN_DEFAULT MASK_BIG_ENDIAN
+#endif
+
+#ifdef IN_LIBGCC2
+#undef TARGET_64BIT
+/* Make this compile time constant for libgcc2 */
+#ifdef __mips64
+#define TARGET_64BIT 1
+#else
+#define TARGET_64BIT 0
+#endif
+#endif /* IN_LIBGCC2 */
+
+/* Force the call stack unwinders in unwind.inc not to be MIPS16 code
+   when compiled with hardware floating point.  This is because MIPS16
+   code cannot save and restore the floating-point registers, which is
+   important if in a mixed MIPS16/non-MIPS16 environment. */
+
+#ifdef IN_LIBGCC2
+#if __mips_hard_float
+#define LIBGCC2_UNWIND_ATTRIBUTE __attribute__((__nomips16__))
+#endif
+#endif /* IN_LIBGCC2 */
+
+/* Section into which libgcc places its small-data objects.  */
+#define TARGET_LIBGCC_SDATA_SECTION ".sdata"
+
+/* Endianness component of the default multilib name.  */
+#ifndef MULTILIB_ENDIAN_DEFAULT
+#if TARGET_ENDIAN_DEFAULT == 0
+#define MULTILIB_ENDIAN_DEFAULT "EL"
+#else
+#define MULTILIB_ENDIAN_DEFAULT "EB"
+#endif
+#endif
+
+/* ISA component of the default multilib name.  Note that the internal
+   MIPS_ISA_DEFAULT encodings 33 and 65 correspond to mips32r2 and
+   mips64r2 respectively.  */
+#ifndef MULTILIB_ISA_DEFAULT
+#if MIPS_ISA_DEFAULT == 1
+#define MULTILIB_ISA_DEFAULT "mips1"
+#elif MIPS_ISA_DEFAULT == 2
+#define MULTILIB_ISA_DEFAULT "mips2"
+#elif MIPS_ISA_DEFAULT == 3
+#define MULTILIB_ISA_DEFAULT "mips3"
+#elif MIPS_ISA_DEFAULT == 4
+#define MULTILIB_ISA_DEFAULT "mips4"
+#elif MIPS_ISA_DEFAULT == 32
+#define MULTILIB_ISA_DEFAULT "mips32"
+#elif MIPS_ISA_DEFAULT == 33
+#define MULTILIB_ISA_DEFAULT "mips32r2"
+#elif MIPS_ISA_DEFAULT == 64
+#define MULTILIB_ISA_DEFAULT "mips64"
+#elif MIPS_ISA_DEFAULT == 65
+#define MULTILIB_ISA_DEFAULT "mips64r2"
+#else
+#define MULTILIB_ISA_DEFAULT "mips1"
+#endif
+#endif
+
+#ifndef MIPS_ABI_DEFAULT
+#define MIPS_ABI_DEFAULT ABI_32
+#endif
+
+/* Use the most portable ABI flag for the ASM specs. */
+
+#if MIPS_ABI_DEFAULT == ABI_32
+#define MULTILIB_ABI_DEFAULT "mabi=32"
+#elif MIPS_ABI_DEFAULT == ABI_O64
+#define MULTILIB_ABI_DEFAULT "mabi=o64"
+#elif MIPS_ABI_DEFAULT == ABI_N32
+#define MULTILIB_ABI_DEFAULT "mabi=n32"
+#elif MIPS_ABI_DEFAULT == ABI_64
+#define MULTILIB_ABI_DEFAULT "mabi=64"
+#elif MIPS_ABI_DEFAULT == ABI_EABI
+#define MULTILIB_ABI_DEFAULT "mabi=eabi"
+#endif
+
+#ifndef MULTILIB_DEFAULTS
+#define MULTILIB_DEFAULTS \
+ { MULTILIB_ENDIAN_DEFAULT, MULTILIB_ISA_DEFAULT, MULTILIB_ABI_DEFAULT }
+#endif
+
+/* We must pass -EL to the linker by default for little endian embedded
+   targets using linker scripts with a OUTPUT_FORMAT line. Otherwise, the
+   linker will default to using big-endian output files. The OUTPUT_FORMAT
+   line must be in the linker script, otherwise -EB/-EL will not work. */
+
+#ifndef ENDIAN_SPEC
+#if TARGET_ENDIAN_DEFAULT == 0
+#define ENDIAN_SPEC "%{!EB:%{!meb:-EL}} %{EB|meb:-EB}"
+#else
+#define ENDIAN_SPEC "%{!EL:%{!mel:-EB}} %{EL|mel:-EL}"
+#endif
+#endif
+
+/* A spec condition that matches all non-mips16 -mips arguments. */
+
+#define MIPS_ISA_LEVEL_OPTION_SPEC \
+ "mips1|mips2|mips3|mips4|mips32*|mips64*"
+
+/* A spec condition that matches all non-mips16 architecture arguments. */
+
+#define MIPS_ARCH_OPTION_SPEC \
+ MIPS_ISA_LEVEL_OPTION_SPEC "|march=*"
+
+/* A spec that infers a -mips argument from an -march argument,
+   or injects the default if no architecture is specified. */
+
+#define MIPS_ISA_LEVEL_SPEC \
+ "%{" MIPS_ISA_LEVEL_OPTION_SPEC ":;: \
+ %{march=mips1|march=r2000|march=r3000|march=r3900:-mips1} \
+ %{march=mips2|march=r6000:-mips2} \
+ %{march=mips3|march=r4*|march=vr4*|march=orion|march=loongson2*:-mips3} \
+ %{march=mips4|march=r8000|march=vr5*|march=rm7000|march=rm9000 \
+ |march=r10000|march=r12000|march=r14000|march=r16000:-mips4} \
+ %{march=mips32|march=4kc|march=4km|march=4kp|march=4ksc:-mips32} \
+ %{march=mips32r2|march=m4k|march=4ke*|march=4ksd|march=24k* \
+ |march=34k*|march=74k*|march=m14k*|march=1004k*: -mips32r2} \
+ %{march=mips64|march=5k*|march=20k*|march=sb1*|march=sr71000 \
+ |march=xlr: -mips64} \
+ %{march=mips64r2|march=loongson3a|march=octeon|march=xlp: -mips64r2} \
+ %{!march=*: -" MULTILIB_ISA_DEFAULT "}}"
+
+/* A spec that infers a -mhard-float or -msoft-float setting from an
+   -march argument. Note that soft-float and hard-float code are not
+   link-compatible. */
+
+#define MIPS_ARCH_FLOAT_SPEC \
+ "%{mhard-float|msoft-float|mno-float|march=mips*:; \
+ march=vr41*|march=m4k|march=4k*|march=24kc|march=24kec \
+ |march=34kc|march=34kn|march=74kc|march=1004kc|march=5kc \
+ |march=m14k*|march=octeon|march=xlr: -msoft-float; \
+ march=*: -mhard-float}"
+
+/* A spec condition that matches 32-bit options. It only works if
+   MIPS_ISA_LEVEL_SPEC has been applied. */
+
+#define MIPS_32BIT_OPTION_SPEC \
+ "mips1|mips2|mips32*|mgp32"
+
+/* Infer a -msynci setting from a -mips argument, on the assumption that
+   -msynci is desired where possible. */
+#define MIPS_ISA_SYNCI_SPEC \
+ "%{msynci|mno-synci:;:%{mips32r2|mips64r2:-msynci;:-mno-synci}}"
+
+/* Spec conditions that match options selecting a 64-bit (OPT_ARCH64)
+   or a 32-bit (OPT_ARCH32) configuration, phrased relative to the
+   configured default ABI so that the default itself also matches.  */
+#if (MIPS_ABI_DEFAULT == ABI_O64 \
+ || MIPS_ABI_DEFAULT == ABI_N32 \
+ || MIPS_ABI_DEFAULT == ABI_64)
+#define OPT_ARCH64 "mabi=32|mgp32:;"
+#define OPT_ARCH32 "mabi=32|mgp32"
+#else
+#define OPT_ARCH64 "mabi=o64|mabi=n32|mabi=64|mgp64"
+#define OPT_ARCH32 "mabi=o64|mabi=n32|mabi=64|mgp64:;"
+#endif
+
+/* Support for a compile-time default CPU, et cetera. The rules are:
+   --with-arch is ignored if -march is specified or a -mips is specified
+     (other than -mips16); likewise --with-arch-32 and --with-arch-64.
+   --with-tune is ignored if -mtune is specified; likewise
+     --with-tune-32 and --with-tune-64.
+   --with-abi is ignored if -mabi is specified.
+   --with-float is ignored if -mhard-float or -msoft-float are
+     specified.
+   --with-nan is ignored if -mnan is specified.
+   --with-divide is ignored if -mdivide-traps or -mdivide-breaks are
+     specified. */
+#define OPTION_DEFAULT_SPECS \
+ {"arch", "%{" MIPS_ARCH_OPTION_SPEC ":;: -march=%(VALUE)}" }, \
+ {"arch_32", "%{" OPT_ARCH32 ":%{" MIPS_ARCH_OPTION_SPEC ":;: -march=%(VALUE)}}" }, \
+ {"arch_64", "%{" OPT_ARCH64 ":%{" MIPS_ARCH_OPTION_SPEC ":;: -march=%(VALUE)}}" }, \
+ {"tune", "%{!mtune=*:-mtune=%(VALUE)}" }, \
+ {"tune_32", "%{" OPT_ARCH32 ":%{!mtune=*:-mtune=%(VALUE)}}" }, \
+ {"tune_64", "%{" OPT_ARCH64 ":%{!mtune=*:-mtune=%(VALUE)}}" }, \
+ {"abi", "%{!mabi=*:-mabi=%(VALUE)}" }, \
+ {"float", "%{!msoft-float:%{!mhard-float:-m%(VALUE)-float}}" }, \
+ {"fpu", "%{!msingle-float:%{!mdouble-float:-m%(VALUE)-float}}" }, \
+ {"nan", "%{!mnan=*:-mnan=%(VALUE)}" }, \
+ {"divide", "%{!mdivide-traps:%{!mdivide-breaks:-mdivide-%(VALUE)}}" }, \
+ {"llsc", "%{!mllsc:%{!mno-llsc:-m%(VALUE)}}" }, \
+ {"mips-plt", "%{!mplt:%{!mno-plt:-m%(VALUE)}}" }, \
+ {"synci", "%{!msynci:%{!mno-synci:-m%(VALUE)}}" }
+
+/* A spec that infers the -mdsp setting from an -march argument. */
+#define BASE_DRIVER_SELF_SPECS \
+ "%{!mno-dsp: \
+ %{march=24ke*|march=34kc*|march=34kf*|march=34kx*|march=1004k*: -mdsp} \
+ %{march=74k*|march=m14ke*: %{!mno-dspr2: -mdspr2 -mdsp}}}"
+
+#define DRIVER_SELF_SPECS BASE_DRIVER_SELF_SPECS
+
+/* Divide-by-zero checks should be emitted as conditional traps; this
+   requires the -mdivide-traps policy and an ISA with trap support.  */
+#define GENERATE_DIVIDE_TRAPS (TARGET_DIVIDE_TRAPS \
+ && ISA_HAS_COND_TRAP)
+
+#define GENERATE_BRANCHLIKELY (TARGET_BRANCHLIKELY && !TARGET_MIPS16)
+
+/* True if the ABI can only work with 64-bit integer registers. We
+   generally allow ad-hoc variations for TARGET_SINGLE_FLOAT, but
+   otherwise floating-point registers must also be 64-bit. */
+#define ABI_NEEDS_64BIT_REGS (TARGET_NEWABI || mips_abi == ABI_O64)
+
+/* Likewise for 32-bit regs. */
+#define ABI_NEEDS_32BIT_REGS (mips_abi == ABI_32)
+
+/* True if the file format uses 64-bit symbols. At present, this is
+   only true for n64, which uses 64-bit ELF. */
+#define FILE_HAS_64BIT_SYMBOLS (mips_abi == ABI_64)
+
+/* True if symbols are 64 bits wide. This is usually determined by
+   the ABI's file format, but it can be overridden by -msym32. Note that
+   overriding the size with -msym32 changes the ABI of relocatable objects,
+   although it doesn't change the ABI of a fully-linked object. */
+#define ABI_HAS_64BIT_SYMBOLS (FILE_HAS_64BIT_SYMBOLS \
+ && Pmode == DImode \
+ && !TARGET_SYM32)
+
+/* ISA has instructions for managing 64-bit fp and gp regs (e.g. mips3). */
+#define ISA_HAS_64BIT_REGS (ISA_MIPS3 \
+ || ISA_MIPS4 \
+ || ISA_MIPS64 \
+ || ISA_MIPS64R2)
+
+/* ISA has branch likely instructions (e.g. mips2). */
+/* Disable branchlikely for tx39 until compare rewrite. They haven't
+   been generated up to this point. */
+#define ISA_HAS_BRANCHLIKELY (!ISA_MIPS1)
+
+/* ISA has a three-operand multiplication instruction (usually spelt "mul"). */
+#define ISA_HAS_MUL3 ((TARGET_MIPS3900 \
+ || TARGET_MIPS5400 \
+ || TARGET_MIPS5500 \
+ || TARGET_MIPS5900 \
+ || TARGET_MIPS7000 \
+ || TARGET_MIPS9000 \
+ || TARGET_MAD \
+ || ISA_MIPS32 \
+ || ISA_MIPS32R2 \
+ || ISA_MIPS64 \
+ || ISA_MIPS64R2) \
+ && !TARGET_MIPS16)
+
+/* ISA has a three-operand 64-bit multiplication instruction (Octeon). */
+#define ISA_HAS_DMUL3 (TARGET_64BIT \
+ && TARGET_OCTEON \
+ && !TARGET_MIPS16)
+
+/* ISA supports instructions DMULT and DMULTU. */
+#define ISA_HAS_DMULT (TARGET_64BIT && !TARGET_MIPS5900)
+
+/* ISA supports instructions MULT and MULTU.
+   This is always true, but the macro is needed for ISA_HAS_<D>MULT
+   in mips.md. */
+#define ISA_HAS_MULT (1)
+
+/* ISA supports instructions DDIV and DDIVU. */
+#define ISA_HAS_DDIV (TARGET_64BIT && !TARGET_MIPS5900)
+
+/* ISA supports instructions DIV and DIVU.
+   This is always true, but the macro is needed for ISA_HAS_<D>DIV
+   in mips.md. */
+#define ISA_HAS_DIV (1)
+
+/* ISA has a three-operand division instruction (Loongson 2E/2F and 3A). */
+#define ISA_HAS_DIV3 ((TARGET_LOONGSON_2EF \
+ || TARGET_LOONGSON_3A) \
+ && !TARGET_MIPS16)
+
+/* ISA has the floating-point conditional move instructions introduced
+   in mips4.  Note that the VR5500 is explicitly excluded below. */
+#define ISA_HAS_FP_CONDMOVE ((ISA_MIPS4 \
+ || ISA_MIPS32 \
+ || ISA_MIPS32R2 \
+ || ISA_MIPS64 \
+ || ISA_MIPS64R2) \
+ && !TARGET_MIPS5500 \
+ && !TARGET_MIPS16)
+
+/* ISA has the integer conditional move instructions introduced in mips4 and
+   ST Loongson 2E/2F. */
+#define ISA_HAS_CONDMOVE (ISA_HAS_FP_CONDMOVE \
+ || TARGET_MIPS5900 \
+ || TARGET_LOONGSON_2EF)
+
+/* ISA has LDC1 and SDC1. */
+#define ISA_HAS_LDC1_SDC1 (!ISA_MIPS1 \
+ && !TARGET_MIPS5900 \
+ && !TARGET_MIPS16)
+
+/* ISA has the mips4 FP condition code instructions: FP-compare to CC,
+   branch on CC, and move (both FP and non-FP) on CC. */
+#define ISA_HAS_8CC (ISA_MIPS4 \
+ || ISA_MIPS32 \
+ || ISA_MIPS32R2 \
+ || ISA_MIPS64 \
+ || ISA_MIPS64R2)
+
+/* This is a catch all for other mips4 instructions: indexed load, the
+   FP madd and msub instructions, and the FP recip and recip sqrt
+   instructions. Note that this macro should only be used by other
+   ISA_HAS_* macros. */
+#define ISA_HAS_FP4 ((ISA_MIPS4 \
+ || ISA_MIPS32R2 \
+ || ISA_MIPS64 \
+ || ISA_MIPS64R2) \
+ && !TARGET_MIPS16)
+
+/* ISA has floating-point indexed load and store instructions
+   (LWXC1, LDXC1, SWXC1 and SDXC1). */
+#define ISA_HAS_LXC1_SXC1 ISA_HAS_FP4
+
+/* ISA has paired-single instructions. */
+#define ISA_HAS_PAIRED_SINGLE (ISA_MIPS32R2 || ISA_MIPS64 || ISA_MIPS64R2)
+
+/* ISA has conditional trap instructions. */
+#define ISA_HAS_COND_TRAP (!ISA_MIPS1 \
+ && !TARGET_MIPS16)
+
+/* ISA has integer multiply-accumulate instructions, madd and msub. */
+#define ISA_HAS_MADD_MSUB (ISA_MIPS32 \
+ || ISA_MIPS32R2 \
+ || ISA_MIPS64 \
+ || ISA_MIPS64R2)
+
+/* Integer multiply-accumulate instructions should be generated. */
+#define GENERATE_MADD_MSUB (TARGET_IMADD && !TARGET_MIPS16)
+
+/* ISA has floating-point madd and msub instructions 'd = a * b [+-] c'. */
+#define ISA_HAS_FP_MADD4_MSUB4 ISA_HAS_FP4
+
+/* ISA has floating-point madd and msub instructions 'c = a * b [+-] c'. */
+#define ISA_HAS_FP_MADD3_MSUB3 TARGET_LOONGSON_2EF
+
+/* ISA has floating-point nmadd and nmsub instructions
+   'd = -((a * b) [+-] c)'. */
+#define ISA_HAS_NMADD4_NMSUB4 ISA_HAS_FP4
+
+/* ISA has floating-point nmadd and nmsub instructions
+   'c = -((a * b) [+-] c)'. */
+#define ISA_HAS_NMADD3_NMSUB3 TARGET_LOONGSON_2EF
+
+/* ISA has floating-point RECIP.fmt and RSQRT.fmt instructions. The
+   MIPS64 rev. 1 ISA says that RECIP.D and RSQRT.D are unpredictable when
+   doubles are stored in pairs of FPRs, so for safety's sake, we apply
+   this restriction to the MIPS IV ISA too. */
+#define ISA_HAS_FP_RECIP_RSQRT(MODE) \
+ (((ISA_HAS_FP4 \
+ && ((MODE) == SFmode \
+ || ((TARGET_FLOAT64 \
+ || ISA_MIPS32R2 \
+ || ISA_MIPS64R2) \
+ && (MODE) == DFmode))) \
+ || (TARGET_SB1 \
+ && (MODE) == V2SFmode)) \
+ && !TARGET_MIPS16)
+
+/* ISA has count leading zeroes/ones instruction (not implemented). */
+#define ISA_HAS_CLZ_CLO ((ISA_MIPS32 \
+ || ISA_MIPS32R2 \
+ || ISA_MIPS64 \
+ || ISA_MIPS64R2) \
+ && !TARGET_MIPS16)
+
+/* ISA has three operand multiply instructions that put
+   the high part in an accumulator: mulhi or mulhiu. */
+#define ISA_HAS_MULHI ((TARGET_MIPS5400 \
+ || TARGET_MIPS5500 \
+ || TARGET_SR71K) \
+ && !TARGET_MIPS16)
+
+/* ISA has three operand multiply instructions that negate the
+   result and put the result in an accumulator. */
+#define ISA_HAS_MULS ((TARGET_MIPS5400 \
+ || TARGET_MIPS5500 \
+ || TARGET_SR71K) \
+ && !TARGET_MIPS16)
+
+/* ISA has three operand multiply instructions that subtract the
+   result from a 4th operand and put the result in an accumulator. */
+#define ISA_HAS_MSAC ((TARGET_MIPS5400 \
+ || TARGET_MIPS5500 \
+ || TARGET_SR71K) \
+ && !TARGET_MIPS16)
+
+/* ISA has three operand multiply instructions that add the result
+   to a 4th operand and put the result in an accumulator. */
+#define ISA_HAS_MACC ((TARGET_MIPS4120 \
+ || TARGET_MIPS4130 \
+ || TARGET_MIPS5400 \
+ || TARGET_MIPS5500 \
+ || TARGET_SR71K) \
+ && !TARGET_MIPS16)
+
+/* ISA has NEC VR-style MACC, MACCHI, DMACC and DMACCHI instructions. */
+#define ISA_HAS_MACCHI ((TARGET_MIPS4120 \
+ || TARGET_MIPS4130) \
+ && !TARGET_MIPS16)
+
+/* ISA has the "ror" (rotate right) instructions. */
+#define ISA_HAS_ROR ((ISA_MIPS32R2 \
+ || ISA_MIPS64R2 \
+ || TARGET_MIPS5400 \
+ || TARGET_MIPS5500 \
+ || TARGET_SR71K \
+ || TARGET_SMARTMIPS) \
+ && !TARGET_MIPS16)
+
+/* ISA has the WSBH (word swap bytes within halfwords) instruction.
+   64-bit targets also provide DSBH and DSHD. */
+#define ISA_HAS_WSBH ((ISA_MIPS32R2 || ISA_MIPS64R2) \
+ && !TARGET_MIPS16)
+
+/* ISA has data prefetch instructions. This controls use of 'pref'. */
+#define ISA_HAS_PREFETCH ((ISA_MIPS4 \
+ || TARGET_LOONGSON_2EF \
+ || TARGET_MIPS5900 \
+ || ISA_MIPS32 \
+ || ISA_MIPS32R2 \
+ || ISA_MIPS64 \
+ || ISA_MIPS64R2) \
+ && !TARGET_MIPS16)
+
+/* ISA has data indexed prefetch instructions. This controls use of
+   'prefx', along with TARGET_HARD_FLOAT and TARGET_DOUBLE_FLOAT.
+   (prefx is a cop1x instruction, so can only be used if FP is
+   enabled.) */
+#define ISA_HAS_PREFETCHX ISA_HAS_FP4
+
+/* True if trunc.w.s and trunc.w.d are real (not synthetic)
+   instructions. Both require TARGET_HARD_FLOAT, and trunc.w.d
+   also requires TARGET_DOUBLE_FLOAT. */
+#define ISA_HAS_TRUNC_W (!ISA_MIPS1)
+
+/* ISA includes the MIPS32r2 seb and seh instructions. */
+#define ISA_HAS_SEB_SEH ((ISA_MIPS32R2 \
+ || ISA_MIPS64R2) \
+ && !TARGET_MIPS16)
+
+/* ISA includes the MIPS32/64 rev 2 ext and ins instructions. */
+#define ISA_HAS_EXT_INS ((ISA_MIPS32R2 \
+ || ISA_MIPS64R2) \
+ && !TARGET_MIPS16)
+
+/* ISA has instructions for accessing top part of 64-bit fp regs. */
+#define ISA_HAS_MXHC1 (TARGET_FLOAT64 \
+ && (ISA_MIPS32R2 \
+ || ISA_MIPS64R2))
+
+/* ISA has lwxs instruction (load w/scaled index address). */
+#define ISA_HAS_LWXS ((TARGET_SMARTMIPS || TARGET_MICROMIPS) \
+ && !TARGET_MIPS16)
+
+/* ISA has lbx, lbux, lhx, lhux, lwx, lwux, or ldx instruction. */
+#define ISA_HAS_LBX (TARGET_OCTEON2)
+#define ISA_HAS_LBUX (ISA_HAS_DSP || TARGET_OCTEON2)
+#define ISA_HAS_LHX (ISA_HAS_DSP || TARGET_OCTEON2)
+#define ISA_HAS_LHUX (TARGET_OCTEON2)
+#define ISA_HAS_LWX (ISA_HAS_DSP || TARGET_OCTEON2)
+#define ISA_HAS_LWUX (TARGET_OCTEON2 && TARGET_64BIT)
+#define ISA_HAS_LDX ((ISA_HAS_DSP || TARGET_OCTEON2) \
+ && TARGET_64BIT)
+
+/* The DSP ASE is available. */
+#define ISA_HAS_DSP (TARGET_DSP && !TARGET_MIPS16)
+
+/* Revision 2 of the DSP ASE is available. */
+#define ISA_HAS_DSPR2 (TARGET_DSPR2 && !TARGET_MIPS16)
+
+/* True if the result of a load is not available to the next instruction.
+   A nop will then be needed between instructions like "lw $4,..."
+   and "addiu $4,$4,1". */
+#define ISA_HAS_LOAD_DELAY (ISA_MIPS1 \
+ && !TARGET_MIPS3900 \
+ && !TARGET_MIPS5900 \
+ && !TARGET_MIPS16 \
+ && !TARGET_MICROMIPS)
+
+/* Likewise mtc1 and mfc1. */
+#define ISA_HAS_XFER_DELAY (mips_isa <= 3 \
+ && !TARGET_MIPS5900 \
+ && !TARGET_LOONGSON_2EF)
+
+/* Likewise floating-point comparisons. */
+#define ISA_HAS_FCMP_DELAY (mips_isa <= 3 \
+ && !TARGET_MIPS5900 \
+ && !TARGET_LOONGSON_2EF)
+
+/* True if mflo and mfhi can be immediately followed by instructions
+   which write to the HI and LO registers.
+
+   According to MIPS specifications, MIPS ISAs I, II, and III need
+   (at least) two instructions between the reads of HI/LO and
+   instructions which write them, and later ISAs do not. Contradicting
+   the MIPS specifications, some MIPS IV processor user manuals (e.g.
+   the UM for the NEC Vr5000) document needing the instructions between
+   HI/LO reads and writes, as well. Therefore, we declare only MIPS32,
+   MIPS64 and later ISAs to have the interlocks, plus any specific
+   earlier-ISA CPUs for which CPU documentation declares that the
+   instructions are really interlocked. */
+#define ISA_HAS_HILO_INTERLOCKS (ISA_MIPS32 \
+ || ISA_MIPS32R2 \
+ || ISA_MIPS64 \
+ || ISA_MIPS64R2 \
+ || TARGET_MIPS5500 \
+ || TARGET_MIPS5900 \
+ || TARGET_LOONGSON_2EF)
+
+/* ISA includes synci, jr.hb and jalr.hb. */
+#define ISA_HAS_SYNCI ((ISA_MIPS32R2 \
+ || ISA_MIPS64R2) \
+ && !TARGET_MIPS16)
+
+/* ISA includes sync. */
+#define ISA_HAS_SYNC ((mips_isa >= 2 || TARGET_MIPS3900) && !TARGET_MIPS16)
+#define GENERATE_SYNC \
+ (target_flags_explicit & MASK_LLSC \
+ ? TARGET_LLSC && !TARGET_MIPS16 \
+ : ISA_HAS_SYNC)
+
+/* ISA includes ll and sc. Note that this implies ISA_HAS_SYNC
+   because the expanders use both ISA_HAS_SYNC and ISA_HAS_LL_SC
+   instructions. */
+#define ISA_HAS_LL_SC (mips_isa >= 2 && !TARGET_MIPS5900 && !TARGET_MIPS16)
+#define GENERATE_LL_SC \
+ (target_flags_explicit & MASK_LLSC \
+ ? TARGET_LLSC && !TARGET_MIPS16 \
+ : ISA_HAS_LL_SC)
+
+/* ISA has the XLP atomic swap and load-add instructions.  NOTE(review):
+   inferred from the macro names; confirm mnemonics against XLP docs. */
+#define ISA_HAS_SWAP (TARGET_XLP)
+#define ISA_HAS_LDADD (TARGET_XLP)
+
+/* ISA includes the baddu instruction. */
+#define ISA_HAS_BADDU (TARGET_OCTEON && !TARGET_MIPS16)
+
+/* ISA includes the bbit* instructions. */
+#define ISA_HAS_BBIT (TARGET_OCTEON && !TARGET_MIPS16)
+
+/* ISA includes the cins instruction. */
+#define ISA_HAS_CINS (TARGET_OCTEON && !TARGET_MIPS16)
+
+/* ISA includes the exts instruction. */
+#define ISA_HAS_EXTS (TARGET_OCTEON && !TARGET_MIPS16)
+
+/* ISA includes the seq and sne instructions. */
+#define ISA_HAS_SEQ_SNE (TARGET_OCTEON && !TARGET_MIPS16)
+
+/* ISA includes the pop instruction. */
+#define ISA_HAS_POP (TARGET_OCTEON && !TARGET_MIPS16)
+
+/* The CACHE instruction is available in non-MIPS16 code. */
+#define TARGET_CACHE_BUILTIN (mips_isa >= 3)
+
+/* The CACHE instruction is available. */
+#define ISA_HAS_CACHE (TARGET_CACHE_BUILTIN && !TARGET_MIPS16)
+
+/* Tell collect what flags to pass to nm. */
+#ifndef NM_FLAGS
+#define NM_FLAGS "-Bn"
+#endif
+
+
+/* SUBTARGET_ASM_DEBUGGING_SPEC handles passing debugging options to
+   the assembler. It may be overridden by subtargets.
+
+   Beginning with gas 2.13, -mdebug must be passed to correctly handle
+   COFF debugging info. */
+
+#ifndef SUBTARGET_ASM_DEBUGGING_SPEC
+#define SUBTARGET_ASM_DEBUGGING_SPEC "\
+%{g} %{g0} %{g1} %{g2} %{g3} \
+%{ggdb:-g} %{ggdb0:-g0} %{ggdb1:-g1} %{ggdb2:-g2} %{ggdb3:-g3} \
+%{gstabs:-g} %{gstabs0:-g0} %{gstabs1:-g1} %{gstabs2:-g2} %{gstabs3:-g3} \
+%{gstabs+:-g} %{gstabs+0:-g0} %{gstabs+1:-g1} %{gstabs+2:-g2} %{gstabs+3:-g3} \
+%{gcoff:-g} %{gcoff0:-g0} %{gcoff1:-g1} %{gcoff2:-g2} %{gcoff3:-g3} \
+%{gcoff*:-mdebug} %{!gcoff*:-no-mdebug}"
+#endif
+
+/* SUBTARGET_ASM_SPEC is always passed to the assembler. It may be
+   overridden by subtargets. */
+
+#ifndef SUBTARGET_ASM_SPEC
+#define SUBTARGET_ASM_SPEC ""
+#endif
+
+/* Command-line options the driver forwards to the assembler. */
+#undef ASM_SPEC
+#define ASM_SPEC "\
+%{G*} %(endian_spec) %{mips1} %{mips2} %{mips3} %{mips4} \
+%{mips32*} %{mips64*} \
+%{mips16} %{mno-mips16:-no-mips16} \
+%{mmicromips} %{mno-micromips} \
+%{mips3d} %{mno-mips3d:-no-mips3d} \
+%{mdmx} %{mno-mdmx:-no-mdmx} \
+%{mdsp} %{mno-dsp} \
+%{mdspr2} %{mno-dspr2} \
+%{mmcu} %{mno-mcu} \
+%{meva} %{mno-eva} \
+%{mvirt} %{mno-virt} \
+%{msmartmips} %{mno-smartmips} \
+%{mmt} %{mno-mt} \
+%{mfix-rm7000} %{mno-fix-rm7000} \
+%{mfix-vr4120} %{mfix-vr4130} \
+%{mfix-24k} \
+%{noasmopt:-O0; O0|fno-delayed-branch:-O1; O*:-O2; :-O1} \
+%(subtarget_asm_debugging_spec) \
+%{mabi=*} %{!mabi=*: %(asm_abi_default_spec)} \
+%{mgp32} %{mgp64} %{march=*} %{mxgot:-xgot} \
+%{mfp32} %{mfp64} %{mnan=*} \
+%{mshared} %{mno-shared} \
+%{msym32} %{mno-sym32} \
+%{mtune=*} \
+%(subtarget_asm_spec)"
+
+/* Extra switches sometimes passed to the linker. */
+
+#ifndef LINK_SPEC
+#define LINK_SPEC "\
+%(endian_spec) \
+%{G*} %{mips1} %{mips2} %{mips3} %{mips4} %{mips32*} %{mips64*} \
+%{shared}"
+#endif /* LINK_SPEC defined */
+
+
+/* Specs for the compiler proper */
+
+/* SUBTARGET_CC1_SPEC is passed to the compiler proper. It may be
+   overridden by subtargets. */
+#ifndef SUBTARGET_CC1_SPEC
+#define SUBTARGET_CC1_SPEC ""
+#endif
+
+/* CC1_SPEC is the set of arguments to pass to the compiler proper. */
+
+#undef CC1_SPEC
+#define CC1_SPEC "\
+%{G*} %{EB:-meb} %{EL:-mel} %{EB:%{EL:%emay not use both -EB and -EL}} \
+%(subtarget_cc1_spec)"
+
+/* Preprocessor specs. */
+
+/* SUBTARGET_CPP_SPEC is passed to the preprocessor. It may be
+   overridden by subtargets. */
+#ifndef SUBTARGET_CPP_SPEC
+#define SUBTARGET_CPP_SPEC ""
+#endif
+
+#define CPP_SPEC "%(subtarget_cpp_spec)"
+
+/* This macro defines names of additional specifications to put in the specs
+   that can be used in various specifications like CC1_SPEC. Its definition
+   is an initializer with a subgrouping for each command option.
+
+   Each subgrouping contains a string constant, that defines the
+   specification name, and a string constant that is used by the GCC driver
+   program.
+
+   Do not define this macro if it does not need to do anything. */
+
+#define EXTRA_SPECS \
+ { "subtarget_cc1_spec", SUBTARGET_CC1_SPEC }, \
+ { "subtarget_cpp_spec", SUBTARGET_CPP_SPEC }, \
+ { "subtarget_asm_debugging_spec", SUBTARGET_ASM_DEBUGGING_SPEC }, \
+ { "subtarget_asm_spec", SUBTARGET_ASM_SPEC }, \
+ { "asm_abi_default_spec", "-" MULTILIB_ABI_DEFAULT }, \
+ { "endian_spec", ENDIAN_SPEC }, \
+ SUBTARGET_EXTRA_SPECS
+
+#ifndef SUBTARGET_EXTRA_SPECS
+#define SUBTARGET_EXTRA_SPECS
+#endif
+
+#define DBX_DEBUGGING_INFO 1 /* generate stabs (OSF/rose) */
+#define DWARF2_DEBUGGING_INFO 1 /* dwarf2 debugging info */
+
+#ifndef PREFERRED_DEBUGGING_TYPE
+#define PREFERRED_DEBUGGING_TYPE DWARF2_DEBUG
+#endif
+
+/* The size of DWARF addresses should be the same as the size of symbols
+   in the target file format. They shouldn't depend on things like -msym32,
+   because many DWARF consumers do not allow the mixture of address sizes
+   that one would then get from linking -msym32 code with -msym64 code.
+
+   Note that the default POINTER_SIZE test is not appropriate for MIPS.
+   EABI64 has 64-bit pointers but uses 32-bit ELF. */
+#define DWARF2_ADDR_SIZE (FILE_HAS_64BIT_SYMBOLS ? 8 : 4)
+
+/* By default, turn on GDB extensions. */
+#define DEFAULT_GDB_EXTENSIONS 1
+
+/* Local compiler-generated symbols must have a prefix that the assembler
+   understands. By default, this is $, although some targets (e.g.,
+   NetBSD-ELF) need to override this. */
+
+#ifndef LOCAL_LABEL_PREFIX
+#define LOCAL_LABEL_PREFIX "$"
+#endif
+
+/* By default on the mips, external symbols do not have an underscore
+   prepended, but some targets (e.g., NetBSD) require this. */
+
+#ifndef USER_LABEL_PREFIX
+#define USER_LABEL_PREFIX ""
+#endif
+
+/* On Sun 4, this limit is 2048. We use 1500 to be safe,
+   since the length can run past this up to a continuation point. */
+#undef DBX_CONTIN_LENGTH
+#define DBX_CONTIN_LENGTH 1500
+
+/* How to renumber registers for dbx and gdb. */
+#define DBX_REGISTER_NUMBER(REGNO) mips_dbx_regno[REGNO]
+
+/* The mapping from gcc register number to DWARF 2 CFA column number. */
+#define DWARF_FRAME_REGNUM(REGNO) mips_dwarf_regno[REGNO]
+
+/* The DWARF 2 CFA column which tracks the return address. */
+#define DWARF_FRAME_RETURN_COLUMN RETURN_ADDR_REGNUM
+
+/* Before the prologue, RA lives in r31. */
+#define INCOMING_RETURN_ADDR_RTX gen_rtx_REG (VOIDmode, RETURN_ADDR_REGNUM)
+
+/* Describe how we implement __builtin_eh_return. */
+#define EH_RETURN_DATA_REGNO(N) \
+ ((N) < (TARGET_MIPS16 ? 2 : 4) ? (N) + GP_ARG_FIRST : INVALID_REGNUM)
+
+/* The stack adjustment for __builtin_eh_return is passed in GPR 3. */
+#define EH_RETURN_STACKADJ_RTX gen_rtx_REG (Pmode, GP_REG_FIRST + 3)
+
+/* Delegate to mips_eh_uses the decision of which registers the
+   exception-handling machinery uses. */
+#define EH_USES(N) mips_eh_uses (N)
+
+/* Offsets recorded in opcodes are a multiple of this alignment factor.
+   The default for this in 64-bit mode is 8, which causes problems with
+   SFmode register saves. */
+#define DWARF_CIE_DATA_ALIGNMENT -4
+
+/* Correct the offset of automatic variables and arguments. Note that
+   the MIPS debug format wants all automatic variables and arguments
+   to be in terms of the virtual frame pointer (stack pointer before
+   any adjustment in the function), while the MIPS 3.0 linker wants
+   the frame pointer to be the stack pointer after the initial
+   adjustment. */
+
+#define DEBUGGER_AUTO_OFFSET(X) \
+ mips_debugger_offset (X, (HOST_WIDE_INT) 0)
+#define DEBUGGER_ARG_OFFSET(OFFSET, X) \
+ mips_debugger_offset (X, (HOST_WIDE_INT) OFFSET)
+
+/* Target machine storage layout */
+
+#define BITS_BIG_ENDIAN 0
+#define BYTES_BIG_ENDIAN (TARGET_BIG_ENDIAN != 0)
+#define WORDS_BIG_ENDIAN (TARGET_BIG_ENDIAN != 0)
+
+#define MAX_BITS_PER_WORD 64
+
+/* Width of a word, in units (bytes). */
+#define UNITS_PER_WORD (TARGET_64BIT ? 8 : 4)
+#ifndef IN_LIBGCC2
+#define MIN_UNITS_PER_WORD 4
+#endif
+
+/* For MIPS, width of a floating point register. */
+#define UNITS_PER_FPREG (TARGET_FLOAT64 ? 8 : 4)
+
+/* The number of consecutive floating-point registers needed to store the
+   largest format supported by the FPU. */
+#define MAX_FPRS_PER_FMT (TARGET_FLOAT64 || TARGET_SINGLE_FLOAT ? 1 : 2)
+
+/* The number of consecutive floating-point registers needed to store the
+   smallest format supported by the FPU. */
+#define MIN_FPRS_PER_FMT \
+ (ISA_MIPS32 || ISA_MIPS32R2 || ISA_MIPS64 || ISA_MIPS64R2 \
+ ? 1 : MAX_FPRS_PER_FMT)
+
+/* The largest size of value that can be held in floating-point
+   registers and moved with a single instruction. */
+#define UNITS_PER_HWFPVALUE \
+ (TARGET_SOFT_FLOAT_ABI ? 0 : MAX_FPRS_PER_FMT * UNITS_PER_FPREG)
+
+/* The largest size of value that can be held in floating-point
+   registers. */
+#define UNITS_PER_FPVALUE \
+ (TARGET_SOFT_FLOAT_ABI ? 0 \
+ : TARGET_SINGLE_FLOAT ? UNITS_PER_FPREG \
+ : LONG_DOUBLE_TYPE_SIZE / BITS_PER_UNIT)
+
+/* The number of bytes in a double. */
+#define UNITS_PER_DOUBLE (TYPE_PRECISION (double_type_node) / BITS_PER_UNIT)
+
+/* Set the sizes of the core types. */
+#define SHORT_TYPE_SIZE 16
+#define INT_TYPE_SIZE 32
+#define LONG_TYPE_SIZE (TARGET_LONG64 ? 64 : 32)
+#define LONG_LONG_TYPE_SIZE 64
+
+#define FLOAT_TYPE_SIZE 32
+#define DOUBLE_TYPE_SIZE 64
+#define LONG_DOUBLE_TYPE_SIZE (TARGET_NEWABI ? 128 : 64)
+
+/* Define the sizes of fixed-point types. */
+#define SHORT_FRACT_TYPE_SIZE 8
+#define FRACT_TYPE_SIZE 16
+#define LONG_FRACT_TYPE_SIZE 32
+#define LONG_LONG_FRACT_TYPE_SIZE 64
+
+#define SHORT_ACCUM_TYPE_SIZE 16
+#define ACCUM_TYPE_SIZE 32
+#define LONG_ACCUM_TYPE_SIZE 64
+/* FIXME. LONG_LONG_ACCUM_TYPE_SIZE should be 128 bits, but GCC
+   doesn't support 128-bit integers for MIPS32 currently. */
+#define LONG_LONG_ACCUM_TYPE_SIZE (TARGET_64BIT ? 128 : 64)
+
+/* long double is not a fixed mode, but the idea is that, if we
+   support long double, we also want a 128-bit integer type. */
+#define MAX_FIXED_MODE_SIZE LONG_DOUBLE_TYPE_SIZE
+
+#ifdef IN_LIBGCC2
+#if ((defined _ABIN32 && _MIPS_SIM == _ABIN32) \
+ || (defined _ABI64 && _MIPS_SIM == _ABI64))
+# define LIBGCC2_LONG_DOUBLE_TYPE_SIZE 128
+# else
+# define LIBGCC2_LONG_DOUBLE_TYPE_SIZE 64
+# endif
+#endif
+
+/* Width in bits of a pointer. */
+#ifndef POINTER_SIZE
+#define POINTER_SIZE ((TARGET_LONG64 && TARGET_64BIT) ? 64 : 32)
+#endif
+
+/* Allocation boundary (in *bits*) for storing arguments in argument list. */
+#define PARM_BOUNDARY BITS_PER_WORD
+
+/* Allocation boundary (in *bits*) for the code of a function.
+   A full-size MIPS instruction is 4 bytes. */
+#define FUNCTION_BOUNDARY 32
+
+/* Alignment of field after `int : 0' in a structure. */
+#define EMPTY_FIELD_BOUNDARY 32
+
+/* Every structure's size must be a multiple of this. */
+/* 8 is observed right on a DECstation and on riscos 4.02. */
+#define STRUCTURE_SIZE_BOUNDARY 8
+
+/* There is no point aligning anything to a rounder boundary than this. */
+#define BIGGEST_ALIGNMENT LONG_DOUBLE_TYPE_SIZE
+
+/* All accesses must be aligned. */
+#define STRICT_ALIGNMENT 1
+
+/* Define this if you wish to imitate the way many other C compilers
+ handle alignment of bitfields and the structures that contain
+ them.
+
+ The behavior is that the type written for a bit-field (`int',
+ `short', or other integer type) imposes an alignment for the
+ entire structure, as if the structure really did contain an
+ ordinary field of that type. In addition, the bit-field is placed
+ within the structure so that it would fit within such a field,
+ not crossing a boundary for it.
+
+ Thus, on most machines, a bit-field whose type is written as `int'
+ would not cross a four-byte boundary, and would force four-byte
+ alignment for the whole structure. (The alignment used may not
+ be four bytes; it is controlled by the other alignment
+ parameters.)
+
+ If the macro is defined, its definition should be a C expression;
+ a nonzero value for the expression enables this behavior. */
+
+#define PCC_BITFIELD_TYPE_MATTERS 1
+
+/* If defined, a C expression to compute the alignment given to a
+ constant that is being placed in memory. CONSTANT is the constant
+ and ALIGN is the alignment that the object would ordinarily have.
+ The value of this macro is used instead of that alignment to align
+ the object.
+
+ If this macro is not defined, then ALIGN is used.
+
+ The typical use of this macro is to increase alignment for string
+ constants to be word aligned so that `strcpy' calls that copy
+ constants can be done inline. */
+
+#define CONSTANT_ALIGNMENT(EXP, ALIGN) \
+ ((TREE_CODE (EXP) == STRING_CST || TREE_CODE (EXP) == CONSTRUCTOR) \
+ && (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN))
+
+/* If defined, a C expression to compute the alignment for a static
+ variable. TYPE is the data type, and ALIGN is the alignment that
+ the object would ordinarily have. The value of this macro is used
+ instead of that alignment to align the object.
+
+ If this macro is not defined, then ALIGN is used.
+
+ One use of this macro is to increase alignment of medium-size
+ data to make it all fit in fewer cache lines. Another is to
+ cause character arrays to be word-aligned so that `strcpy' calls
+ that copy constants to character arrays can be done inline. */
+
+#undef DATA_ALIGNMENT
+#define DATA_ALIGNMENT(TYPE, ALIGN) \
+ ((((ALIGN) < BITS_PER_WORD) \
+ && (TREE_CODE (TYPE) == ARRAY_TYPE \
+ || TREE_CODE (TYPE) == UNION_TYPE \
+ || TREE_CODE (TYPE) == RECORD_TYPE)) ? BITS_PER_WORD : (ALIGN))
+
+/* We need this for the same reason as DATA_ALIGNMENT, namely to cause
+ character arrays to be word-aligned so that `strcpy' calls that copy
+ constants to character arrays can be done inline, and 'strcmp' can be
+ optimised to use word loads. */
+#define LOCAL_ALIGNMENT(TYPE, ALIGN) \
+ DATA_ALIGNMENT (TYPE, ALIGN)
+
+/* NOTE: this expansion references the lowercase `type' variable that is
+   in scope at the macro's points of use.  */
+#define PAD_VARARGS_DOWN \
+ (FUNCTION_ARG_PADDING (TYPE_MODE (type), type) == downward)
+
+/* Define if operations between registers always perform the operation
+ on the full register even if a narrower mode is specified. */
+#define WORD_REGISTER_OPERATIONS
+
+/* When in 64-bit mode, move insns will sign extend SImode and CCmode
+ moves. All other references are zero extended. */
+#define LOAD_EXTEND_OP(MODE) \
+ (TARGET_64BIT && ((MODE) == SImode || (MODE) == CCmode) \
+ ? SIGN_EXTEND : ZERO_EXTEND)
+
+/* Define this macro if it is advisable to hold scalars in registers
+ in a wider mode than that declared by the program. In such cases,
+ the value is constrained to be within the bounds of the declared
+ type, but kept valid in the wider mode. The signedness of the
+ extension may differ from that of the type.
+
+ SImode is forced to be sign-extended (UNSIGNEDP = 0) to match the
+ sign-extending 64-bit moves described above. */
+
+#define PROMOTE_MODE(MODE, UNSIGNEDP, TYPE) \
+ if (GET_MODE_CLASS (MODE) == MODE_INT \
+ && GET_MODE_SIZE (MODE) < UNITS_PER_WORD) \
+ { \
+ if ((MODE) == SImode) \
+ (UNSIGNEDP) = 0; \
+ (MODE) = Pmode; \
+ }
+
+/* Pmode is always the same as ptr_mode, but not always the same as word_mode.
+ Extensions of pointers to word_mode must be signed. */
+#define POINTERS_EXTEND_UNSIGNED false
+
+/* Define if loading short immediate values into registers sign extends. */
+#define SHORT_IMMEDIATES_SIGN_EXTEND
+
+/* The [d]clz instructions have the natural values at 0.
+   (The 2 means the value is defined for both the RTL expression and
+   at the machine level.)  */
+
+#define CLZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) \
+ ((VALUE) = GET_MODE_BITSIZE (MODE), 2)
+
+/* Standard register usage. */
+
+/* Number of hardware registers. We have:
+
+ - 32 integer registers
+ - 32 floating point registers
+ - 8 condition code registers
+ - 2 accumulator registers (hi and lo)
+ - 32 registers each for coprocessors 0, 2 and 3
+ - 4 fake registers:
+ - ARG_POINTER_REGNUM
+ - FRAME_POINTER_REGNUM
+ - GOT_VERSION_REGNUM (see the comment above load_call<mode> for details)
+ - CPRESTORE_SLOT_REGNUM
+ - 2 dummy entries that were used at various times in the past.
+ - 6 DSP accumulator registers (3 hi-lo pairs) for MIPS DSP ASE
+ - 6 DSP control registers */
+
+#define FIRST_PSEUDO_REGISTER 188
+
+/* By default, fix the kernel registers ($26 and $27), the global
+ pointer ($28) and the stack pointer ($29). This can change
+ depending on the command-line options.
+
+ Regarding coprocessor registers: without evidence to the contrary,
+ it's best to assume that each coprocessor register has a unique
+ use. This can be overridden, in, e.g., mips_option_override or
+ TARGET_CONDITIONAL_REGISTER_USAGE should the assumption be
+ inappropriate for a particular target. */
+
+#define FIXED_REGISTERS \
+{ \
+ /* General registers: $0 (zero), $1 ($at) and $26-$29 are fixed. */ \
+ 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, \
+ /* Floating-point registers: none fixed by default. */ \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ /* Hi/lo, dummy, condition-code and fake registers. */ \
+ 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, \
+ /* COP0 registers */ \
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \
+ /* COP2 registers */ \
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \
+ /* COP3 registers */ \
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \
+ /* 6 DSP accumulator registers & 6 control registers */ \
+ 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1 \
+}
+
+
+/* Set up this array for o32 by default.
+
+ Note that we don't mark $31 as a call-clobbered register. The idea is
+ that it's really the call instructions themselves which clobber $31.
+ We don't care what the called function does with it afterwards.
+
+ This approach makes it easier to implement sibcalls. Unlike normal
+ calls, sibcalls don't clobber $31, so the register reaches the
+ called function intact. EPILOGUE_USES says that $31 is useful
+ to the called function. */
+
+#define CALL_USED_REGISTERS \
+{ \
+ /* General registers: $16-$23, $30 and $31 are call-saved. */ \
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \
+ 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, \
+ /* Floating-point registers: $f20-$f31 are call-saved. */ \
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \
+ 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ /* Hi/lo, dummy, condition-code and fake registers. */ \
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \
+ /* COP0 registers */ \
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \
+ /* COP2 registers */ \
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \
+ /* COP3 registers */ \
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \
+ /* 6 DSP accumulator registers & 6 control registers */ \
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 \
+}
+
+
+/* Define this since $28, though fixed, is call-saved in many ABIs. */
+
+#define CALL_REALLY_USED_REGISTERS \
+{ /* General registers. */ \
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \
+ 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, \
+ /* Floating-point registers. */ \
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \
+ 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ /* Others. */ \
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, \
+ /* COP0 registers */ \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ /* COP2 registers */ \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ /* COP3 registers */ \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ /* 6 DSP accumulator registers (clobbered) & 6 control registers */ \
+ 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0 \
+}
+
+/* Internal macros to classify a register number as to whether it's a
+ general purpose register, a floating point register, a
+ multiply/divide register, or a status register. */
+
+#define GP_REG_FIRST 0
+#define GP_REG_LAST 31
+#define GP_REG_NUM (GP_REG_LAST - GP_REG_FIRST + 1)
+#define GP_DBX_FIRST 0
+/* The two kernel-reserved registers, $k0 and $k1.  */
+#define K0_REG_NUM (GP_REG_FIRST + 26)
+#define K1_REG_NUM (GP_REG_FIRST + 27)
+#define KERNEL_REG_P(REGNO) (IN_RANGE (REGNO, K0_REG_NUM, K1_REG_NUM))
+
+#define FP_REG_FIRST 32
+#define FP_REG_LAST 63
+#define FP_REG_NUM (FP_REG_LAST - FP_REG_FIRST + 1)
+#define FP_DBX_FIRST ((write_symbols == DBX_DEBUG) ? 38 : 32)
+
+/* The hi and lo multiply/divide accumulator registers.  */
+#define MD_REG_FIRST 64
+#define MD_REG_LAST 65
+#define MD_REG_NUM (MD_REG_LAST - MD_REG_FIRST + 1)
+#define MD_DBX_FIRST (FP_DBX_FIRST + FP_REG_NUM)
+
+/* The DWARF 2 CFA column which tracks the return address from a
+ signal handler context. This means that to maintain backwards
+ compatibility, no hard register can be assigned this column if it
+ would need to be handled by the DWARF unwinder. */
+#define DWARF_ALT_FRAME_RETURN_COLUMN 66
+
+/* The eight condition-code (fp status) registers.  */
+#define ST_REG_FIRST 67
+#define ST_REG_LAST 74
+#define ST_REG_NUM (ST_REG_LAST - ST_REG_FIRST + 1)
+
+
+/* FIXME: renumber. */
+#define COP0_REG_FIRST 80
+#define COP0_REG_LAST 111
+#define COP0_REG_NUM (COP0_REG_LAST - COP0_REG_FIRST + 1)
+
+#define COP0_STATUS_REG_NUM (COP0_REG_FIRST + 12)
+#define COP0_CAUSE_REG_NUM (COP0_REG_FIRST + 13)
+#define COP0_EPC_REG_NUM (COP0_REG_FIRST + 14)
+
+#define COP2_REG_FIRST 112
+#define COP2_REG_LAST 143
+#define COP2_REG_NUM (COP2_REG_LAST - COP2_REG_FIRST + 1)
+
+#define COP3_REG_FIRST 144
+#define COP3_REG_LAST 175
+#define COP3_REG_NUM (COP3_REG_LAST - COP3_REG_FIRST + 1)
+
+/* These definitions assume that COP0, 2 and 3 are numbered consecutively. */
+#define ALL_COP_REG_FIRST COP0_REG_FIRST
+#define ALL_COP_REG_LAST COP3_REG_LAST
+#define ALL_COP_REG_NUM (ALL_COP_REG_LAST - ALL_COP_REG_FIRST + 1)
+
+#define DSP_ACC_REG_FIRST 176
+#define DSP_ACC_REG_LAST 181
+#define DSP_ACC_REG_NUM (DSP_ACC_REG_LAST - DSP_ACC_REG_FIRST + 1)
+
+#define AT_REGNUM (GP_REG_FIRST + 1)
+/* Which half of the hi/lo pair is "hi" depends on endianness.  */
+#define HI_REGNUM (TARGET_BIG_ENDIAN ? MD_REG_FIRST : MD_REG_FIRST + 1)
+#define LO_REGNUM (TARGET_BIG_ENDIAN ? MD_REG_FIRST + 1 : MD_REG_FIRST)
+
+/* A few bitfield locations for the coprocessor registers. */
+/* Request Interrupt Priority Level is from bit 10 to bit 15 of
+ the cause register for the EIC interrupt mode. */
+#define CAUSE_IPL 10
+/* Interrupt Priority Level is from bit 10 to bit 15 of the status register. */
+#define SR_IPL 10
+/* Exception Level is at bit 1 of the status register. */
+#define SR_EXL 1
+/* Interrupt Enable is at bit 0 of the status register. */
+#define SR_IE 0
+
+/* FPSW_REGNUM is the single condition code used if !ISA_HAS_8CC.
+ If ISA_HAS_8CC, it should not be used, and an arbitrary ST_REG
+ should be used instead. */
+#define FPSW_REGNUM ST_REG_FIRST
+
+/* Range predicates for register numbers.  Each one uses a single
+   unsigned comparison: (unsigned) (REGNO - FIRST) < NUM is true iff
+   FIRST <= REGNO <= LAST.  */
+#define GP_REG_P(REGNO) \
+ ((unsigned int) ((int) (REGNO) - GP_REG_FIRST) < GP_REG_NUM)
+#define M16_REG_P(REGNO) \
+ (((REGNO) >= 2 && (REGNO) <= 7) || (REGNO) == 16 || (REGNO) == 17)
+#define M16STORE_REG_P(REGNO) \
+ (((REGNO) >= 2 && (REGNO) <= 7) || (REGNO) == 0 || (REGNO) == 17)
+#define FP_REG_P(REGNO) \
+ ((unsigned int) ((int) (REGNO) - FP_REG_FIRST) < FP_REG_NUM)
+#define MD_REG_P(REGNO) \
+ ((unsigned int) ((int) (REGNO) - MD_REG_FIRST) < MD_REG_NUM)
+#define ST_REG_P(REGNO) \
+ ((unsigned int) ((int) (REGNO) - ST_REG_FIRST) < ST_REG_NUM)
+#define COP0_REG_P(REGNO) \
+ ((unsigned int) ((int) (REGNO) - COP0_REG_FIRST) < COP0_REG_NUM)
+#define COP2_REG_P(REGNO) \
+ ((unsigned int) ((int) (REGNO) - COP2_REG_FIRST) < COP2_REG_NUM)
+#define COP3_REG_P(REGNO) \
+ ((unsigned int) ((int) (REGNO) - COP3_REG_FIRST) < COP3_REG_NUM)
+#define ALL_COP_REG_P(REGNO) \
+ ((unsigned int) ((int) (REGNO) - COP0_REG_FIRST) < ALL_COP_REG_NUM)
+/* Test if REGNO is one of the 6 new DSP accumulators. */
+#define DSP_ACC_REG_P(REGNO) \
+ ((unsigned int) ((int) (REGNO) - DSP_ACC_REG_FIRST) < DSP_ACC_REG_NUM)
+/* Test if REGNO is hi, lo, or one of the 6 new DSP accumulators. */
+#define ACC_REG_P(REGNO) \
+ (MD_REG_P (REGNO) || DSP_ACC_REG_P (REGNO))
+
+#define FP_REG_RTX_P(X) (REG_P (X) && FP_REG_P (REGNO (X)))
+
+/* True if X is (const (unspec [(const_int 0)] UNSPEC_GP)). This is used
+ to initialize the mips16 gp pseudo register. */
+#define CONST_GP_P(X) \
+ (GET_CODE (X) == CONST \
+ && GET_CODE (XEXP (X, 0)) == UNSPEC \
+ && XINT (XEXP (X, 0), 1) == UNSPEC_GP)
+
+/* Return coprocessor number from register number.  '?' means REGNO is
+   not a coprocessor register.  */
+
+#define COPNUM_AS_CHAR_FROM_REGNUM(REGNO) \
+ (COP0_REG_P (REGNO) ? '0' : COP2_REG_P (REGNO) ? '2' \
+ : COP3_REG_P (REGNO) ? '3' : '?')
+
+
+#define HARD_REGNO_NREGS(REGNO, MODE) mips_hard_regno_nregs (REGNO, MODE)
+
+/* Table lookup: precomputed in mips.c.  */
+#define HARD_REGNO_MODE_OK(REGNO, MODE) \
+ mips_hard_regno_mode_ok[ (int)(MODE) ][ (REGNO) ]
+
+#define MODES_TIEABLE_P mips_modes_tieable_p
+
+/* Register to use for pushing function arguments. */
+#define STACK_POINTER_REGNUM (GP_REG_FIRST + 29)
+
+/* These two registers don't really exist: they get eliminated to either
+ the stack or hard frame pointer. */
+#define ARG_POINTER_REGNUM 77
+#define FRAME_POINTER_REGNUM 78
+
+/* $30 is not available on the mips16, so we use $17 as the frame
+ pointer. */
+#define HARD_FRAME_POINTER_REGNUM \
+ (TARGET_MIPS16 ? GP_REG_FIRST + 17 : GP_REG_FIRST + 30)
+
+/* The hard frame pointer is distinct from both fake registers above.  */
+#define HARD_FRAME_POINTER_IS_FRAME_POINTER 0
+#define HARD_FRAME_POINTER_IS_ARG_POINTER 0
+
+/* Register in which static-chain is passed to a function. */
+#define STATIC_CHAIN_REGNUM (GP_REG_FIRST + 15)
+
+/* Registers used as temporaries in prologue/epilogue code:
+
+ - If a MIPS16 PIC function needs access to _gp, it first loads
+ the value into MIPS16_PIC_TEMP and then copies it to $gp.
+
+ - The prologue can use MIPS_PROLOGUE_TEMP as a general temporary
+ register. The register must not conflict with MIPS16_PIC_TEMP.
+
+ - If we aren't generating MIPS16 code, the prologue can also use
+ MIPS_PROLOGUE_TEMP2 as a general temporary register.
+
+ - The epilogue can use MIPS_EPILOGUE_TEMP as a general temporary
+ register.
+
+ If we're generating MIPS16 code, these registers must come from the
+ core set of 8. The prologue registers mustn't conflict with any
+ incoming arguments, the static chain pointer, or the frame pointer.
+ The epilogue temporary mustn't conflict with the return registers,
+ the PIC call register ($25), the frame pointer, the EH stack adjustment,
+ or the EH data registers.
+
+ If we're generating interrupt handlers, we use K0 as a temporary register
+ in prologue/epilogue code. */
+
+#define MIPS16_PIC_TEMP_REGNUM (GP_REG_FIRST + 2)
+#define MIPS_PROLOGUE_TEMP_REGNUM \
+ (cfun->machine->interrupt_handler_p ? K0_REG_NUM : GP_REG_FIRST + 3)
+/* Never valid for MIPS16 (see above), hence the gcc_unreachable.  */
+#define MIPS_PROLOGUE_TEMP2_REGNUM \
+ (TARGET_MIPS16 \
+ ? (gcc_unreachable (), INVALID_REGNUM) \
+ : cfun->machine->interrupt_handler_p ? K1_REG_NUM : GP_REG_FIRST + 12)
+#define MIPS_EPILOGUE_TEMP_REGNUM \
+ (cfun->machine->interrupt_handler_p \
+ ? K0_REG_NUM \
+ : GP_REG_FIRST + (TARGET_MIPS16 ? 6 : 8))
+
+#define MIPS16_PIC_TEMP gen_rtx_REG (Pmode, MIPS16_PIC_TEMP_REGNUM)
+#define MIPS_PROLOGUE_TEMP(MODE) gen_rtx_REG (MODE, MIPS_PROLOGUE_TEMP_REGNUM)
+#define MIPS_PROLOGUE_TEMP2(MODE) \
+ gen_rtx_REG (MODE, MIPS_PROLOGUE_TEMP2_REGNUM)
+#define MIPS_EPILOGUE_TEMP(MODE) gen_rtx_REG (MODE, MIPS_EPILOGUE_TEMP_REGNUM)
+
+/* Define this macro if it is as good or better to call a constant
+ function address than to call an address kept in a register. */
+#define NO_FUNCTION_CSE 1
+
+/* The ABI-defined global pointer. Sometimes we use a different
+ register in leaf functions: see PIC_OFFSET_TABLE_REGNUM. */
+#define GLOBAL_POINTER_REGNUM (GP_REG_FIRST + 28)
+
+/* We normally use $28 as the global pointer. However, when generating
+ n32/64 PIC, it is better for leaf functions to use a call-clobbered
+ register instead. They can then avoid saving and restoring $28
+ and perhaps avoid using a frame at all.
+
+ When a leaf function uses something other than $28, mips_expand_prologue
+ will modify pic_offset_table_rtx in place. Take the register number
+ from there after reload. */
+#define PIC_OFFSET_TABLE_REGNUM \
+ (reload_completed ? REGNO (pic_offset_table_rtx) : GLOBAL_POINTER_REGNUM)
+
+/* Define the classes of registers for register constraints in the
+ machine description. Also define ranges of constants.
+
+ One of the classes must always be named ALL_REGS and include all hard regs.
+ If there is more than one class, another class must be named NO_REGS
+ and contain no registers.
+
+ The name GENERAL_REGS must be the name of a class (or an alias for
+ another name such as ALL_REGS). This is the class of registers
+ that is allowed by "g" or "r" in a register constraint.
+ Also, registers outside this class are allocated only when
+ instructions express preferences for them.
+
+ The classes must be numbered in nondecreasing order; that is,
+ a larger-numbered class must never be contained completely
+ in a smaller-numbered class.
+
+ For any two classes, it is very desirable that there be another
+ class that represents their union. */
+
+/* The enumerators below are ordered smallest-to-largest as required
+   above; REG_CLASS_NAMES and REG_CLASS_CONTENTS must match this order.  */
+enum reg_class
+{
+ NO_REGS, /* no registers in set */
+ M16_REGS, /* mips16 directly accessible registers */
+ T_REG, /* mips16 T register ($24) */
+ M16_T_REGS, /* mips16 registers plus T register */
+ PIC_FN_ADDR_REG, /* SVR4 PIC function address register */
+ V1_REG, /* Register $v1 ($3) used for TLS access. */
+ LEA_REGS, /* Every GPR except $25 */
+ GR_REGS, /* integer registers */
+ FP_REGS, /* floating point registers */
+ MD0_REG, /* first multiply/divide register */
+ MD1_REG, /* second multiply/divide register */
+ MD_REGS, /* multiply/divide registers (hi/lo) */
+ COP0_REGS, /* generic coprocessor classes */
+ COP2_REGS,
+ COP3_REGS,
+ ST_REGS, /* status registers (fp status) */
+ DSP_ACC_REGS, /* DSP accumulator registers */
+ ACC_REGS, /* Hi/Lo and DSP accumulator registers */
+ FRAME_REGS, /* $arg and $frame */
+ GR_AND_MD0_REGS, /* union classes */
+ GR_AND_MD1_REGS,
+ GR_AND_MD_REGS,
+ GR_AND_ACC_REGS,
+ ALL_REGS, /* all registers */
+ LIM_REG_CLASSES /* max value + 1 */
+};
+
+#define N_REG_CLASSES (int) LIM_REG_CLASSES
+
+#define GENERAL_REGS GR_REGS
+
+/* An initializer containing the names of the register classes as C
+ string constants. These names are used in writing some of the
+ debugging dumps.  Must match the order of enum reg_class. */
+
+#define REG_CLASS_NAMES \
+{ \
+ "NO_REGS", \
+ "M16_REGS", \
+ "T_REG", \
+ "M16_T_REGS", \
+ "PIC_FN_ADDR_REG", \
+ "V1_REG", \
+ "LEA_REGS", \
+ "GR_REGS", \
+ "FP_REGS", \
+ "MD0_REG", \
+ "MD1_REG", \
+ "MD_REGS", \
+ /* coprocessor registers */ \
+ "COP0_REGS", \
+ "COP2_REGS", \
+ "COP3_REGS", \
+ "ST_REGS", \
+ "DSP_ACC_REGS", \
+ "ACC_REGS", \
+ "FRAME_REGS", \
+ "GR_AND_MD0_REGS", \
+ "GR_AND_MD1_REGS", \
+ "GR_AND_MD_REGS", \
+ "GR_AND_ACC_REGS", \
+ "ALL_REGS" \
+}
+
+/* An initializer containing the contents of the register classes,
+ as integers which are bit masks. The Nth integer specifies the
+ contents of class N. The way the integer MASK is interpreted is
+ that register R is in the class if `MASK & (1 << R)' is 1.
+
+ When the machine has more than 32 registers, an integer does not
+ suffice. Then the integers are replaced by sub-initializers,
+ braced groupings containing several integers. Each
+ sub-initializer must be suitable as an initializer for the type
+ `HARD_REG_SET' which is defined in `hard-reg-set.h'.
+
+ Here each row is six 32-bit words, covering the 188 hard registers:
+ bit (R % 32) of word (R / 32) corresponds to register R.  Rows must
+ match the order of enum reg_class. */
+
+#define REG_CLASS_CONTENTS \
+{ \
+ { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, /* NO_REGS */ \
+ { 0x000300fc, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, /* M16_REGS */ \
+ { 0x01000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, /* T_REG */ \
+ { 0x010300fc, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, /* M16_T_REGS */ \
+ { 0x02000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, /* PIC_FN_ADDR_REG */ \
+ { 0x00000008, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, /* V1_REG */ \
+ { 0xfdffffff, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, /* LEA_REGS */ \
+ { 0xffffffff, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, /* GR_REGS */ \
+ { 0x00000000, 0xffffffff, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, /* FP_REGS */ \
+ { 0x00000000, 0x00000000, 0x00000001, 0x00000000, 0x00000000, 0x00000000 }, /* MD0_REG */ \
+ { 0x00000000, 0x00000000, 0x00000002, 0x00000000, 0x00000000, 0x00000000 }, /* MD1_REG */ \
+ { 0x00000000, 0x00000000, 0x00000003, 0x00000000, 0x00000000, 0x00000000 }, /* MD_REGS */ \
+ { 0x00000000, 0x00000000, 0xffff0000, 0x0000ffff, 0x00000000, 0x00000000 }, /* COP0_REGS */ \
+ { 0x00000000, 0x00000000, 0x00000000, 0xffff0000, 0x0000ffff, 0x00000000 }, /* COP2_REGS */ \
+ { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xffff0000, 0x0000ffff }, /* COP3_REGS */ \
+ { 0x00000000, 0x00000000, 0x000007f8, 0x00000000, 0x00000000, 0x00000000 }, /* ST_REGS */ \
+ { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x003f0000 }, /* DSP_ACC_REGS */ \
+ { 0x00000000, 0x00000000, 0x00000003, 0x00000000, 0x00000000, 0x003f0000 }, /* ACC_REGS */ \
+ { 0x00000000, 0x00000000, 0x00006000, 0x00000000, 0x00000000, 0x00000000 }, /* FRAME_REGS */ \
+ { 0xffffffff, 0x00000000, 0x00000001, 0x00000000, 0x00000000, 0x00000000 }, /* GR_AND_MD0_REGS */ \
+ { 0xffffffff, 0x00000000, 0x00000002, 0x00000000, 0x00000000, 0x00000000 }, /* GR_AND_MD1_REGS */ \
+ { 0xffffffff, 0x00000000, 0x00000003, 0x00000000, 0x00000000, 0x00000000 }, /* GR_AND_MD_REGS */ \
+ { 0xffffffff, 0x00000000, 0x00000003, 0x00000000, 0x00000000, 0x003f0000 }, /* GR_AND_ACC_REGS */ \
+ { 0xffffffff, 0xffffffff, 0xffff67ff, 0xffffffff, 0xffffffff, 0x0fffffff } /* ALL_REGS */ \
+}
+
+
+/* A C expression whose value is a register class containing hard
+ register REGNO. In general there is more than one such class;
+ choose a class which is "minimal", meaning that no smaller class
+ also contains the register. */
+
+#define REGNO_REG_CLASS(REGNO) mips_regno_to_class[ (REGNO) ]
+
+/* A macro whose definition is the name of the class to which a
+ valid base register must belong. A base register is one used in
+ an address which is the register value plus a displacement. */
+
+#define BASE_REG_CLASS (TARGET_MIPS16 ? M16_REGS : GR_REGS)
+
+/* A macro whose definition is the name of the class to which a
+ valid index register must belong. An index register is one used
+ in an address where its value is either multiplied by a scale
+ factor or added to another register (as well as added to a
+ displacement).  MIPS has no indexed addressing, hence NO_REGS. */
+
+#define INDEX_REG_CLASS NO_REGS
+
+/* We generally want to put call-clobbered registers ahead of
+ call-saved ones. (IRA expects this.)  Entries are hard register
+ numbers. */
+
+#define REG_ALLOC_ORDER \
+{ /* Accumulator registers. When GPRs and accumulators have equal \
+ cost, we generally prefer to use accumulators. For example, \
+ a division of multiplication result is better allocated to LO, \
+ so that we put the MFLO at the point of use instead of at the \
+ point of definition. It's also needed if we're to take advantage \
+ of the extra accumulators available with -mdspr2. In some cases, \
+ it can also help to reduce register pressure. */ \
+ 64, 65,176,177,178,179,180,181, \
+ /* Call-clobbered GPRs. */ \
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, \
+ 24, 25, 31, \
+ /* The global pointer. This is call-clobbered for o32 and o64 \
+ abicalls, call-saved for n32 and n64 abicalls, and a program \
+ invariant otherwise. Putting it between the call-clobbered \
+ and call-saved registers should cope with all eventualities. */ \
+ 28, \
+ /* Call-saved GPRs. */ \
+ 16, 17, 18, 19, 20, 21, 22, 23, 30, \
+ /* GPRs that can never be exposed to the register allocator. */ \
+ 0, 26, 27, 29, \
+ /* Call-clobbered FPRs. */ \
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, \
+ 48, 49, 50, 51, \
+ /* FPRs that are usually call-saved. The odd ones are actually \
+ call-clobbered for n32, but listing them ahead of the even \
+ registers might encourage the register allocator to fragment \
+ the available FPR pairs. We need paired FPRs to store long \
+ doubles, so it isn't clear that using a different order \
+ for n32 would be a win. */ \
+ 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, \
+ /* None of the remaining classes have defined call-saved \
+ registers. */ \
+ 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, \
+ 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, \
+ 96, 97, 98, 99, 100,101,102,103,104,105,106,107,108,109,110,111, \
+ 112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127, \
+ 128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143, \
+ 144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159, \
+ 160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175, \
+ 182,183,184,185,186,187 \
+}
+
+/* ADJUST_REG_ALLOC_ORDER is a macro which permits reg_alloc_order
+ to be rearranged based on a particular function. On the mips16, we
+ want to allocate $24 (T_REG) before other registers for
+ instructions for which it is possible. */
+
+#define ADJUST_REG_ALLOC_ORDER mips_order_regs_for_local_alloc ()
+
+/* True if VALUE is an unsigned 6-bit number. */
+
+#define UIMM6_OPERAND(VALUE) \
+ (((VALUE) & ~(unsigned HOST_WIDE_INT) 0x3f) == 0)
+
+/* True if VALUE is a signed 10-bit number. */
+
+#define IMM10_OPERAND(VALUE) \
+ ((unsigned HOST_WIDE_INT) (VALUE) + 0x200 < 0x400)
+
+/* True if VALUE is a signed 16-bit number. */
+
+#define SMALL_OPERAND(VALUE) \
+ ((unsigned HOST_WIDE_INT) (VALUE) + 0x8000 < 0x10000)
+
+/* True if VALUE is an unsigned 16-bit number. */
+
+#define SMALL_OPERAND_UNSIGNED(VALUE) \
+ (((VALUE) & ~(unsigned HOST_WIDE_INT) 0xffff) == 0)
+
+/* True if VALUE can be loaded into a register using LUI, i.e. the
+   low 16 bits are clear and the rest is a sign-extended 16-bit
+   quantity. */
+
+#define LUI_OPERAND(VALUE) \
+ (((VALUE) | 0x7fff0000) == 0x7fff0000 \
+ || ((VALUE) | 0x7fff0000) + 0x10000 == 0)
+
+/* Return a value X with the low 16 bits clear, and such that
+ VALUE - X is a signed 16-bit value. */
+
+#define CONST_HIGH_PART(VALUE) \
+ (((VALUE) + 0x8000) & ~(unsigned HOST_WIDE_INT) 0xffff)
+
+#define CONST_LOW_PART(VALUE) \
+ ((VALUE) - CONST_HIGH_PART (VALUE))
+
+/* CONST_INT (rtx) forms of the predicates above.  */
+#define SMALL_INT(X) SMALL_OPERAND (INTVAL (X))
+#define SMALL_INT_UNSIGNED(X) SMALL_OPERAND_UNSIGNED (INTVAL (X))
+#define LUI_INT(X) LUI_OPERAND (INTVAL (X))
+/* True if OFFSET fits in a signed 12-bit field (microMIPS).  */
+#define UMIPS_12BIT_OFFSET_P(OFFSET) (IN_RANGE (OFFSET, -2048, 2047))
+
+/* The HI and LO registers can only be reloaded via the general
+ registers. Condition code registers can only be loaded to the
+ general registers, and from the floating point registers. */
+
+#define SECONDARY_INPUT_RELOAD_CLASS(CLASS, MODE, X) \
+ mips_secondary_reload_class (CLASS, MODE, X, true)
+#define SECONDARY_OUTPUT_RELOAD_CLASS(CLASS, MODE, X) \
+ mips_secondary_reload_class (CLASS, MODE, X, false)
+
+/* Return the maximum number of consecutive registers
+ needed to represent mode MODE in a register of class CLASS. */
+
+#define CLASS_MAX_NREGS(CLASS, MODE) mips_class_max_nregs (CLASS, MODE)
+
+/* Implemented by mips_cannot_change_mode_class in mips.c.  */
+#define CANNOT_CHANGE_MODE_CLASS(FROM, TO, CLASS) \
+ mips_cannot_change_mode_class (FROM, TO, CLASS)
+
+/* Stack layout; function entry, exit and calling. */
+
+#define STACK_GROWS_DOWNWARD
+
+/* NOTE(review): the frame grows downward only when the stack protector
+   is enabled — presumably so protected locals sit below the guard;
+   confirm against mips_compute_frame_info.  */
+#define FRAME_GROWS_DOWNWARD flag_stack_protect
+
+/* Size of the area allocated in the frame to save the GP. */
+
+#define MIPS_GP_SAVE_AREA_SIZE \
+ (TARGET_CALL_CLOBBERED_GP ? MIPS_STACK_ALIGN (UNITS_PER_WORD) : 0)
+
+/* The offset of the first local variable from the frame pointer. See
+ mips_compute_frame_info for details about the frame layout. */
+
+#define STARTING_FRAME_OFFSET \
+ (FRAME_GROWS_DOWNWARD \
+ ? 0 \
+ : crtl->outgoing_args_size + MIPS_GP_SAVE_AREA_SIZE)
+
+#define RETURN_ADDR_RTX mips_return_addr
+
+/* Mask off the MIPS16 ISA bit in unwind addresses.
+
+ The reason for this is a little subtle. When unwinding a call,
+ we are given the call's return address, which on most targets
+ is the address of the following instruction. However, what we
+ actually want to find is the EH region for the call itself.
+ The target-independent unwind code therefore searches for "RA - 1".
+
+ In the MIPS16 case, RA is always an odd-valued (ISA-encoded) address.
+ RA - 1 is therefore the real (even-valued) start of the return
+ instruction. EH region labels are usually odd-valued MIPS16 symbols
+ too, so a search for an even address within a MIPS16 region would
+ usually work.
+
+ However, there is an exception. If the end of an EH region is also
+ the end of a function, the end label is allowed to be even. This is
+ necessary because a following non-MIPS16 function may also need EH
+ information for its first instruction.
+
+ Thus a MIPS16 region may be terminated by an ISA-encoded or a
+ non-ISA-encoded address. This probably isn't ideal, but it is
+ the traditional (legacy) behavior. It is therefore only safe
+ to search MIPS EH regions for an _odd-valued_ address.
+
+ Masking off the ISA bit means that the target-independent code
+ will search for "(RA & -2) - 1", which is guaranteed to be odd. */
+#define MASK_RETURN_ADDR GEN_INT (-2)
+
+
+/* Similarly, don't use the least-significant bit to tell pointers to
+ code from vtable index. */
+
+#define TARGET_PTRMEMFUNC_VBIT_LOCATION ptrmemfunc_vbit_in_delta
+
+/* The eliminations to $17 are only used for mips16 code. See the
+ definition of HARD_FRAME_POINTER_REGNUM. */
+
+#define ELIMINABLE_REGS \
+{{ ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ { ARG_POINTER_REGNUM, GP_REG_FIRST + 30}, \
+ { ARG_POINTER_REGNUM, GP_REG_FIRST + 17}, \
+ { FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ { FRAME_POINTER_REGNUM, GP_REG_FIRST + 30}, \
+ { FRAME_POINTER_REGNUM, GP_REG_FIRST + 17}}
+
+#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
+ (OFFSET) = mips_initial_elimination_offset ((FROM), (TO))
+
+/* Allocate stack space for arguments at the beginning of each function. */
+#define ACCUMULATE_OUTGOING_ARGS 1
+
+/* The argument pointer always points to the first argument. */
+#define FIRST_PARM_OFFSET(FNDECL) 0
+
+/* o32 and o64 reserve stack space for all argument registers. */
+#define REG_PARM_STACK_SPACE(FNDECL) \
+ (TARGET_OLDABI \
+ ? (MAX_ARGS_IN_REGISTERS * UNITS_PER_WORD) \
+ : 0)
+
+/* Define this if it is the responsibility of the caller to
+ allocate the area reserved for arguments passed in registers.
+ If `ACCUMULATE_OUTGOING_ARGS' is also defined, the only effect
+ of this macro is to determine whether the space is included in
+ `crtl->outgoing_args_size'. */
+#define OUTGOING_REG_PARM_STACK_SPACE(FNTYPE) 1
+
+#define STACK_BOUNDARY (TARGET_NEWABI ? 128 : 64)
+
+/* Symbolic macros for the registers used to return integer and floating
+ point values.  With soft float, "FP" values come back in GPRs. */
+
+#define GP_RETURN (GP_REG_FIRST + 2)
+#define FP_RETURN ((TARGET_SOFT_FLOAT) ? GP_RETURN : (FP_REG_FIRST + 0))
+
+#define MAX_ARGS_IN_REGISTERS (TARGET_OLDABI ? 4 : 8)
+
+/* Symbolic macros for the first/last argument registers. */
+
+#define GP_ARG_FIRST (GP_REG_FIRST + 4)
+#define GP_ARG_LAST (GP_ARG_FIRST + MAX_ARGS_IN_REGISTERS - 1)
+#define FP_ARG_FIRST (FP_REG_FIRST + 12)
+#define FP_ARG_LAST (FP_ARG_FIRST + MAX_ARGS_IN_REGISTERS - 1)
+
+/* 1 if N is a possible register number for function argument passing.
+ We have no FP argument registers when soft-float. When FP registers
+ are 32 bits, we can't directly reference the odd numbered ones.
+ (Both cases are handled by the fixed_regs check below.) */
+
+#define FUNCTION_ARG_REGNO_P(N) \
+ ((IN_RANGE((N), GP_ARG_FIRST, GP_ARG_LAST) \
+ || (IN_RANGE((N), FP_ARG_FIRST, FP_ARG_LAST))) \
+ && !fixed_regs[N])
+
+/* This structure has to cope with two different argument allocation
+ schemes. Most MIPS ABIs view the arguments as a structure, of which
+ the first N words go in registers and the rest go on the stack. If I
+ < N, the Ith word might go in Ith integer argument register or in a
+ floating-point register. For these ABIs, we only need to remember
+ the offset of the current argument into the structure.
+
+ The EABI instead allocates the integer and floating-point arguments
+ separately. The first N words of FP arguments go in FP registers,
+ the rest go on the stack. Likewise, the first N words of the other
+ arguments go in integer registers, and the rest go on the stack. We
+ need to maintain three counts: the number of integer registers used,
+ the number of floating-point registers used, and the number of words
+ passed on the stack.
+
+ We could keep separate information for the two ABIs (a word count for
+ the standard ABIs, and three separate counts for the EABI). But it
+ seems simpler to view the standard ABIs as forms of EABI that do not
+ allocate floating-point registers.
+
+ So for the standard ABIs, the first N words are allocated to integer
+ registers, and mips_function_arg decides on an argument-by-argument
+ basis whether that argument should really go in an integer register,
+ or in a floating-point one. */
+
+typedef struct mips_args {
+ /* Always true for varargs functions. Otherwise true if at least
+ one argument has been passed in an integer register. */
+ int gp_reg_found;
+
+ /* The number of arguments seen so far. */
+ unsigned int arg_number;
+
+ /* The number of integer registers used so far. For all ABIs except
+ EABI, this is the number of words that have been added to the
+ argument structure, limited to MAX_ARGS_IN_REGISTERS. */
+ unsigned int num_gprs;
+
+ /* For EABI, the number of floating-point registers used so far. */
+ unsigned int num_fprs;
+
+ /* The number of words passed on the stack. */
+ unsigned int stack_words;
+
+ /* On the mips16, we need to keep track of which floating point
+ arguments were passed in general registers, but would have been
+ passed in the FP regs if this were a 32-bit function, so that we
+ can move them to the FP regs if we wind up calling a 32-bit
+ function. We record this information in fp_code, encoded in base
+ four. A zero digit means no floating point argument, a one digit
+ means an SFmode argument, a two digit means a DFmode argument,
+ and a digit of three is not used. The low order digit is the first
+ argument. Thus 6 == 1 * 4 + 2 means a DFmode argument followed by
+ an SFmode argument. ??? A more sophisticated approach will be
+ needed if MIPS_ABI != ABI_32. */
+ int fp_code;
+
+ /* True if the function has a prototype. */
+ int prototype;
+} CUMULATIVE_ARGS;
+
+/* Initialize a variable CUM of type CUMULATIVE_ARGS
+ for a call to a function whose data type is FNTYPE.
+ For a library call, FNTYPE is 0. */
+
+#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, INDIRECT, N_NAMED_ARGS) \
+ mips_init_cumulative_args (&CUM, FNTYPE)
+
+/* Argument and register padding decisions are delegated to helpers
+ in mips.c. */
+#define FUNCTION_ARG_PADDING(MODE, TYPE) \
+ (mips_pad_arg_upward (MODE, TYPE) ? upward : downward)
+
+#define BLOCK_REG_PADDING(MODE, TYPE, FIRST) \
+ (mips_pad_reg_upward (MODE, TYPE) ? upward : downward)
+
+/* True if using EABI and varargs can be passed in floating-point
+ registers. Under these conditions, we need a more complex form
+ of va_list, which tracks GPR, FPR and stack arguments separately. */
+#define EABI_FLOAT_VARARGS_P \
+ (mips_abi == ABI_EABI && UNITS_PER_FPVALUE >= UNITS_PER_DOUBLE)
+
+
+#define EPILOGUE_USES(REGNO) mips_epilogue_uses (REGNO)
+
+/* Treat LOC as a byte offset from the stack pointer and round it up
+ to the next fully-aligned offset. */
+#define MIPS_STACK_ALIGN(LOC) \
+ (TARGET_NEWABI ? ((LOC) + 15) & -16 : ((LOC) + 7) & -8)
+
+
+/* Output assembler code to FILE to increment profiler label # LABELNO
+ for profiling a function entry. */
+
+#define FUNCTION_PROFILER(FILE, LABELNO) mips_function_profiler ((FILE))
+
+/* The profiler preserves all interesting registers, including $31. */
+#define MIPS_SAVE_REG_FOR_PROFILING_P(REGNO) false
+
+/* No mips port has ever used the profiler counter word, so don't emit it
+ or the label for it. */
+
+#define NO_PROFILE_COUNTERS 1
+
+/* Define this macro if the code for function profiling should come
+ before the function prologue. Normally, the profiling code comes
+ after. */
+
+/* #define PROFILE_BEFORE_PROLOGUE */
+
+/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function,
+ the stack pointer does not matter. The value is tested only in
+ functions that have frame pointers.
+ No definition is equivalent to always zero. */
+
+#define EXIT_IGNORE_STACK 1
+
+
+/* Trampolines are a block of code followed by two pointers. */
+
+#define TRAMPOLINE_SIZE \
+ (mips_trampoline_code_size () + GET_MODE_SIZE (ptr_mode) * 2)
+
+/* Forcing a 64-bit alignment for 32-bit targets allows us to load two
+ pointers from a single LUI base. */
+
+#define TRAMPOLINE_ALIGNMENT 64
+
+/* mips_trampoline_init calls this library function to flush
+ program and data caches. */
+
+#ifndef CACHE_FLUSH_FUNC
+#define CACHE_FLUSH_FUNC "_flush_cache"
+#endif
+
+/* The third argument (3) selects both instruction and data caches. */
+#define MIPS_ICACHE_SYNC(ADDR, SIZE) \
+ /* Flush both caches. We need to flush the data cache in case \
+ the system has a write-back cache. */ \
+ emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mips_cache_flush_func), \
+ LCT_NORMAL, VOIDmode, 3, ADDR, Pmode, SIZE, Pmode, \
+ GEN_INT (3), TYPE_MODE (integer_type_node))
+
+
+/* Addressing modes, and classification of registers for them. */
+
+/* Index registers are never valid: addresses contain at most one base
+ register (see MAX_REGS_PER_ADDRESS below). */
+#define REGNO_OK_FOR_INDEX_P(REGNO) 0
+#define REGNO_MODE_OK_FOR_BASE_P(REGNO, MODE) \
+ mips_regno_mode_ok_for_base_p (REGNO, MODE, 1)
+
+/* Maximum number of registers that can appear in a valid memory address. */
+
+#define MAX_REGS_PER_ADDRESS 1
+
+/* Check for constness inline but use mips_legitimate_address_p
+ to check whether a constant really is an address. */
+
+#define CONSTANT_ADDRESS_P(X) \
+ (CONSTANT_P (X) && memory_address_p (SImode, X))
+
+/* This handles the magic '..CURRENT_FUNCTION' symbol, which means
+ 'the start of the function that this code is output in'. */
+
+#define ASM_OUTPUT_LABELREF(FILE,NAME) \
+ if (strcmp (NAME, "..CURRENT_FUNCTION") == 0) \
+ asm_fprintf ((FILE), "%U%s", \
+ XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0)); \
+ else \
+ asm_fprintf ((FILE), "%U%s", (NAME))
+
+/* Flag to mark a function decl symbol that requires a long call. */
+#define SYMBOL_FLAG_LONG_CALL (SYMBOL_FLAG_MACH_DEP << 0)
+#define SYMBOL_REF_LONG_CALL_P(X) \
+ ((SYMBOL_REF_FLAGS (X) & SYMBOL_FLAG_LONG_CALL) != 0)
+
+/* This flag marks functions that cannot be lazily bound. */
+#define SYMBOL_FLAG_BIND_NOW (SYMBOL_FLAG_MACH_DEP << 1)
+#define SYMBOL_REF_BIND_NOW_P(RTX) \
+ ((SYMBOL_REF_FLAGS (RTX) & SYMBOL_FLAG_BIND_NOW) != 0)
+
+/* True if we're generating a form of MIPS16 code in which jump tables
+ are stored in the text section and encoded as 16-bit PC-relative
+ offsets. This is only possible when general text loads are allowed,
+ since the table access itself will be an "lh" instruction. If the
+ PC-relative offsets grow too large, 32-bit offsets are used instead. */
+#define TARGET_MIPS16_SHORT_JUMP_TABLES TARGET_MIPS16_TEXT_LOADS
+
+#define JUMP_TABLES_IN_TEXT_SECTION TARGET_MIPS16_SHORT_JUMP_TABLES
+
+#define CASE_VECTOR_MODE (TARGET_MIPS16_SHORT_JUMP_TABLES ? SImode : ptr_mode)
+
+/* Only use short offsets if their range will not overflow. */
+#define CASE_VECTOR_SHORTEN_MODE(MIN, MAX, BODY) \
+ (!TARGET_MIPS16_SHORT_JUMP_TABLES ? ptr_mode \
+ : ((MIN) >= -32768 && (MAX) < 32768) ? HImode \
+ : SImode)
+
+#define CASE_VECTOR_PC_RELATIVE TARGET_MIPS16_SHORT_JUMP_TABLES
+
+/* Define this as 1 if `char' should by default be signed; else as 0. */
+#ifndef DEFAULT_SIGNED_CHAR
+#define DEFAULT_SIGNED_CHAR 1
+#endif
+
+/* Although LDC1 and SDC1 provide 64-bit moves on 32-bit targets,
+ we generally don't want to use them for copying arbitrary data.
+ A single N-word move is usually the same cost as N single-word moves. */
+#define MOVE_MAX UNITS_PER_WORD
+#define MAX_MOVE_MAX 8
+
+/* Define this macro as a C expression which is nonzero if
+ accessing less than a word of memory (i.e. a `char' or a
+ `short') is no faster than accessing a word of memory, i.e., if
+ such access require more than one instruction or if there is no
+ difference in cost between byte and (aligned) word loads.
+
+ On RISC machines, it tends to generate better code to define
+ this as 1, since it avoids making a QI or HI mode register.
+
+ But, generating word accesses for -mips16 is generally bad as shifts
+ (often extended) would be needed for byte accesses. */
+#define SLOW_BYTE_ACCESS (!TARGET_MIPS16)
+
+/* Standard MIPS integer shifts truncate the shift amount to the
+ width of the shifted operand. However, Loongson vector shifts
+ do not truncate the shift amount at all. */
+#define SHIFT_COUNT_TRUNCATED (!TARGET_LOONGSON_VECTORS)
+
+/* Value is 1 if truncating an integer of INPREC bits to OUTPREC bits
+ is done just by pretending it is already truncated. */
+#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) \
+ (TARGET_64BIT ? ((INPREC) <= 32 || (OUTPREC) > 32) : 1)
+
+
+/* Specify the machine mode that pointers have.
+ After generation of rtl, the compiler makes no further distinction
+ between pointers and any other objects of this machine mode. */
+
+/* Pointers are DImode only when both the target and the ABI's `long'
+ are 64 bits; otherwise SImode. */
+#ifndef Pmode
+#define Pmode (TARGET_64BIT && TARGET_LONG64 ? DImode : SImode)
+#endif
+
+/* Give call MEMs SImode since it is the "most permissive" mode
+ for both 32-bit and 64-bit targets. */
+
+#define FUNCTION_MODE SImode
+
+/* We allocate $fcc registers by hand and can't cope with moves of
+ CCmode registers to and from pseudos (or memory). */
+#define AVOID_CCMODE_COPIES
+
+/* A C expression for the cost of a branch instruction. A value of
+ 1 is the default; other values are interpreted relative to that. */
+
+#define BRANCH_COST(speed_p, predictable_p) mips_branch_cost
+#define LOGICAL_OP_NON_SHORT_CIRCUIT 0
+
+/* The MIPS port has several functions that return an instruction count.
+ Multiplying the count by this value gives the number of bytes that
+ the instructions occupy. */
+#define BASE_INSN_LENGTH (TARGET_MIPS16 ? 2 : 4)
+
+/* The length of a NOP in bytes. */
+#define NOP_INSN_LENGTH (TARGET_COMPRESSION ? 2 : 4)
+
+/* If defined, modifies the length assigned to instruction INSN as a
+ function of the context in which it is used. LENGTH is an lvalue
+ that contains the initially computed length of the insn and should
+ be updated with the correct length of the insn. */
+#define ADJUST_INSN_LENGTH(INSN, LENGTH) \
+ ((LENGTH) = mips_adjust_insn_length ((INSN), (LENGTH)))
+
+/* Return the asm template for a non-MIPS16 conditional branch instruction.
+ OPCODE is the opcode's mnemonic and OPERANDS is the asm template for
+ its operands. */
+/* NOTE(review): "%*", "%?", "%/" and "%!" below are operand-punctuation
+ sequences handled by the MIPS print_operand machinery in mips.c
+ (noreorder/nomacro bracketing and delay-slot handling) -- confirm
+ against mips_print_operand_punctuation before modifying. */
+#define MIPS_BRANCH(OPCODE, OPERANDS) \
+ "%*" OPCODE "%?\t" OPERANDS "%/"
+
+/* Return an asm string that forces INSN to be treated as an absolute
+ J or JAL instruction instead of an assembler macro. */
+#define MIPS_ABSOLUTE_JUMP(INSN) \
+ (TARGET_ABICALLS_PIC2 \
+ ? ".option\tpic0\n\t" INSN "\n\t.option\tpic2" \
+ : INSN)
+
+/* Return the asm template for a call. INSN is the instruction's mnemonic
+ ("j" or "jal"), OPERANDS are its operands, TARGET_OPNO is the operand
+ number of the target. SIZE_OPNO is the operand number of the argument size
+ operand that can optionally hold the call attributes. If SIZE_OPNO is not
+ -1 and the call is indirect, use the function symbol from the call
+ attributes to attach a R_MIPS_JALR relocation to the call.
+
+ When generating GOT code without explicit relocation operators,
+ all calls should use assembly macros. Otherwise, all indirect
+ calls should use "jr" or "jalr"; we will arrange to restore $gp
+ afterwards if necessary. Finally, we can only generate direct
+ calls for -mabicalls by temporarily switching to non-PIC mode.
+
+ For microMIPS jal(r), we try to generate jal(r)s when a 16-bit
+ instruction is in the delay slot of jal(r). */
+#define MIPS_CALL(INSN, OPERANDS, TARGET_OPNO, SIZE_OPNO) \
+ (TARGET_USE_GOT && !TARGET_EXPLICIT_RELOCS \
+ ? "%*" INSN "\t%" #TARGET_OPNO "%/" \
+ : REG_P (OPERANDS[TARGET_OPNO]) \
+ ? (mips_get_pic_call_symbol (OPERANDS, SIZE_OPNO) \
+ ? ("%*.reloc\t1f,R_MIPS_JALR,%" #SIZE_OPNO "\n" \
+ "1:\t" INSN "r\t%" #TARGET_OPNO "%/") \
+ : TARGET_MICROMIPS && !TARGET_INTERLINK_COMPRESSED \
+ ? "%*" INSN "r%!\t%" #TARGET_OPNO "%/" \
+ : "%*" INSN "r\t%" #TARGET_OPNO "%/") \
+ : TARGET_MICROMIPS && !TARGET_INTERLINK_COMPRESSED \
+ ? MIPS_ABSOLUTE_JUMP ("%*" INSN "%!\t%" #TARGET_OPNO "%/") \
+ : MIPS_ABSOLUTE_JUMP ("%*" INSN "\t%" #TARGET_OPNO "%/")) \
+
+/* Similar to MIPS_CALL, but this is for MICROMIPS "j" to generate
+ "jrc" when nop is in the delay slot of "jr". */
+
+#define MICROMIPS_J(INSN, OPERANDS, OPNO) \
+ (TARGET_USE_GOT && !TARGET_EXPLICIT_RELOCS \
+ ? "%*j\t%" #OPNO "%/" \
+ : REG_P (OPERANDS[OPNO]) \
+ ? "%*jr%:\t%" #OPNO \
+ : MIPS_ABSOLUTE_JUMP ("%*" INSN "\t%" #OPNO "%/"))
+
+/* Control the assembler format that we output. */
+
+/* Output to assembler file text saying following lines
+ may contain character constants, extra white space, comments, etc. */
+
+#ifndef ASM_APP_ON
+#define ASM_APP_ON " #APP\n"
+#endif
+
+/* Output to assembler file text saying following lines
+ no longer contain unusual constructs. */
+
+#ifndef ASM_APP_OFF
+#define ASM_APP_OFF " #NO_APP\n"
+#endif
+
+/* Note that entries 29 and 30 use the software names "$sp"/"$fp";
+ ADDITIONAL_REGISTER_NAMES below supplies their numeric aliases. */
+#define REGISTER_NAMES \
+{ "$0", "$1", "$2", "$3", "$4", "$5", "$6", "$7", \
+ "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", \
+ "$16", "$17", "$18", "$19", "$20", "$21", "$22", "$23", \
+ "$24", "$25", "$26", "$27", "$28", "$sp", "$fp", "$31", \
+ "$f0", "$f1", "$f2", "$f3", "$f4", "$f5", "$f6", "$f7", \
+ "$f8", "$f9", "$f10", "$f11", "$f12", "$f13", "$f14", "$f15", \
+ "$f16", "$f17", "$f18", "$f19", "$f20", "$f21", "$f22", "$f23", \
+ "$f24", "$f25", "$f26", "$f27", "$f28", "$f29", "$f30", "$f31", \
+ "hi", "lo", "", "$fcc0","$fcc1","$fcc2","$fcc3","$fcc4", \
+ "$fcc5","$fcc6","$fcc7","", "$cprestore", "$arg", "$frame", "$fakec", \
+ "$c0r0", "$c0r1", "$c0r2", "$c0r3", "$c0r4", "$c0r5", "$c0r6", "$c0r7", \
+ "$c0r8", "$c0r9", "$c0r10","$c0r11","$c0r12","$c0r13","$c0r14","$c0r15", \
+ "$c0r16","$c0r17","$c0r18","$c0r19","$c0r20","$c0r21","$c0r22","$c0r23", \
+ "$c0r24","$c0r25","$c0r26","$c0r27","$c0r28","$c0r29","$c0r30","$c0r31", \
+ "$c2r0", "$c2r1", "$c2r2", "$c2r3", "$c2r4", "$c2r5", "$c2r6", "$c2r7", \
+ "$c2r8", "$c2r9", "$c2r10","$c2r11","$c2r12","$c2r13","$c2r14","$c2r15", \
+ "$c2r16","$c2r17","$c2r18","$c2r19","$c2r20","$c2r21","$c2r22","$c2r23", \
+ "$c2r24","$c2r25","$c2r26","$c2r27","$c2r28","$c2r29","$c2r30","$c2r31", \
+ "$c3r0", "$c3r1", "$c3r2", "$c3r3", "$c3r4", "$c3r5", "$c3r6", "$c3r7", \
+ "$c3r8", "$c3r9", "$c3r10","$c3r11","$c3r12","$c3r13","$c3r14","$c3r15", \
+ "$c3r16","$c3r17","$c3r18","$c3r19","$c3r20","$c3r21","$c3r22","$c3r23", \
+ "$c3r24","$c3r25","$c3r26","$c3r27","$c3r28","$c3r29","$c3r30","$c3r31", \
+ "$ac1hi","$ac1lo","$ac2hi","$ac2lo","$ac3hi","$ac3lo","$dsp_po","$dsp_sc", \
+ "$dsp_ca","$dsp_ou","$dsp_cc","$dsp_ef" }
+
+/* List the "software" names for each register. Also list the numerical
+ names for $fp and $sp. */
+
+#define ADDITIONAL_REGISTER_NAMES \
+{ \
+ { "$29", 29 + GP_REG_FIRST }, \
+ { "$30", 30 + GP_REG_FIRST }, \
+ { "at", 1 + GP_REG_FIRST }, \
+ { "v0", 2 + GP_REG_FIRST }, \
+ { "v1", 3 + GP_REG_FIRST }, \
+ { "a0", 4 + GP_REG_FIRST }, \
+ { "a1", 5 + GP_REG_FIRST }, \
+ { "a2", 6 + GP_REG_FIRST }, \
+ { "a3", 7 + GP_REG_FIRST }, \
+ { "t0", 8 + GP_REG_FIRST }, \
+ { "t1", 9 + GP_REG_FIRST }, \
+ { "t2", 10 + GP_REG_FIRST }, \
+ { "t3", 11 + GP_REG_FIRST }, \
+ { "t4", 12 + GP_REG_FIRST }, \
+ { "t5", 13 + GP_REG_FIRST }, \
+ { "t6", 14 + GP_REG_FIRST }, \
+ { "t7", 15 + GP_REG_FIRST }, \
+ { "s0", 16 + GP_REG_FIRST }, \
+ { "s1", 17 + GP_REG_FIRST }, \
+ { "s2", 18 + GP_REG_FIRST }, \
+ { "s3", 19 + GP_REG_FIRST }, \
+ { "s4", 20 + GP_REG_FIRST }, \
+ { "s5", 21 + GP_REG_FIRST }, \
+ { "s6", 22 + GP_REG_FIRST }, \
+ { "s7", 23 + GP_REG_FIRST }, \
+ { "t8", 24 + GP_REG_FIRST }, \
+ { "t9", 25 + GP_REG_FIRST }, \
+ { "k0", 26 + GP_REG_FIRST }, \
+ { "k1", 27 + GP_REG_FIRST }, \
+ { "gp", 28 + GP_REG_FIRST }, \
+ { "sp", 29 + GP_REG_FIRST }, \
+ { "fp", 30 + GP_REG_FIRST }, \
+ { "ra", 31 + GP_REG_FIRST } \
+}
+
+#define DBR_OUTPUT_SEQEND(STREAM) \
+do \
+ { \
+ /* Undo the effect of '%*'. */ \
+ mips_pop_asm_switch (&mips_nomacro); \
+ mips_pop_asm_switch (&mips_noreorder); \
+ /* Emit a blank line after the delay slot for emphasis. */ \
+ fputs ("\n", STREAM); \
+ } \
+while (0)
+
+/* The MIPS implementation uses some labels for its own purpose. The
+ following lists what labels are created, and are all formed by the
+ pattern $L[a-z].*. The machine independent portion of GCC creates
+ labels matching: $L[A-Z][0-9]+ and $L[0-9]+.
+
+ LM[0-9]+ Silicon Graphics/ECOFF stabs label before each stmt.
+ $Lb[0-9]+ Begin blocks for MIPS debug support
+ $Lc[0-9]+ Label for use in s<xx> operation.
+ $Le[0-9]+ End blocks for MIPS debug support */
+
+#undef ASM_DECLARE_OBJECT_NAME
+#define ASM_DECLARE_OBJECT_NAME(STREAM, NAME, DECL) \
+ mips_declare_object (STREAM, NAME, "", ":\n")
+
+/* Globalizing directive for a label. */
+#define GLOBAL_ASM_OP "\t.globl\t"
+
+/* This says how to define a global common symbol. */
+
+#define ASM_OUTPUT_ALIGNED_DECL_COMMON mips_output_aligned_decl_common
+
+/* This says how to define a local common symbol (i.e., not visible to
+ linker). */
+
+#ifndef ASM_OUTPUT_ALIGNED_LOCAL
+#define ASM_OUTPUT_ALIGNED_LOCAL(STREAM, NAME, SIZE, ALIGN) \
+ mips_declare_common_object (STREAM, NAME, "\n\t.lcomm\t", SIZE, ALIGN, false)
+#endif
+
+/* This says how to output an external. It would be possible not to
+ output anything and let undefined symbol become external. However
+ the assembler uses length information on externals to allocate in
+ data/sdata bss/sbss, thereby saving exec time. */
+
+#undef ASM_OUTPUT_EXTERNAL
+#define ASM_OUTPUT_EXTERNAL(STREAM,DECL,NAME) \
+ mips_output_external(STREAM,DECL,NAME)
+
+/* This is how to declare a function name. The actual work of
+ emitting the label is moved to function_prologue, so that we can
+ get the line number correctly emitted before the .ent directive,
+ and after any .file directives. Define as empty so that the function
+ is not declared before the .ent directive elsewhere. */
+
+#undef ASM_DECLARE_FUNCTION_NAME
+#define ASM_DECLARE_FUNCTION_NAME(STREAM,NAME,DECL)
+
+/* This is how to store into the string LABEL
+ the symbol_ref name of an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class.
+ This is suitable for output with `assemble_name'. */
+
+#undef ASM_GENERATE_INTERNAL_LABEL
+#define ASM_GENERATE_INTERNAL_LABEL(LABEL,PREFIX,NUM) \
+ sprintf ((LABEL), "*%s%s%ld", (LOCAL_LABEL_PREFIX), (PREFIX), (long)(NUM))
+
+/* Print debug labels as "foo = ." rather than "foo:" because they should
+ represent a byte pointer rather than an ISA-encoded address. This is
+ particularly important for code like:
+
+ $LFBxxx = .
+ .cfi_startproc
+ ...
+ .section .gcc_except_table,...
+ ...
+ .uleb128 foo-$LFBxxx
+
+ The .uleb128 requires $LFBxxx to match the FDE start address, which is
+ likewise a byte pointer rather than an ISA-encoded address.
+
+ At the time of writing, this hook is not used for the function end
+ label:
+
+ $LFExxx:
+ .end foo
+
+ But this doesn't matter, because GAS doesn't treat a pre-.end label
+ as a MIPS16 one anyway. */
+
+#define ASM_OUTPUT_DEBUG_LABEL(FILE, PREFIX, NUM) \
+ fprintf (FILE, "%s%s%d = .\n", LOCAL_LABEL_PREFIX, PREFIX, NUM)
+/* This is how to output an element of a case-vector that is absolute. */
+
+#define ASM_OUTPUT_ADDR_VEC_ELT(STREAM, VALUE) \
+ fprintf (STREAM, "\t%s\t%sL%d\n", \
+ ptr_mode == DImode ? ".dword" : ".word", \
+ LOCAL_LABEL_PREFIX, \
+ VALUE)
+
+/* This is how to output an element of a case-vector. We can make the
+ entries PC-relative in MIPS16 code and GP-relative when .gp(d)word
+ is supported. */
+
+#define ASM_OUTPUT_ADDR_DIFF_ELT(STREAM, BODY, VALUE, REL) \
+do { \
+ if (TARGET_MIPS16_SHORT_JUMP_TABLES) \
+ { \
+ if (GET_MODE (BODY) == HImode) \
+ fprintf (STREAM, "\t.half\t%sL%d-%sL%d\n", \
+ LOCAL_LABEL_PREFIX, VALUE, LOCAL_LABEL_PREFIX, REL); \
+ else \
+ fprintf (STREAM, "\t.word\t%sL%d-%sL%d\n", \
+ LOCAL_LABEL_PREFIX, VALUE, LOCAL_LABEL_PREFIX, REL); \
+ } \
+ else if (TARGET_GPWORD) \
+ fprintf (STREAM, "\t%s\t%sL%d\n", \
+ ptr_mode == DImode ? ".gpdword" : ".gpword", \
+ LOCAL_LABEL_PREFIX, VALUE); \
+ else if (TARGET_RTP_PIC) \
+ { \
+ /* Make the entry relative to the start of the function. */ \
+ rtx fnsym = XEXP (DECL_RTL (current_function_decl), 0); \
+ fprintf (STREAM, "\t%s\t%sL%d-", \
+ Pmode == DImode ? ".dword" : ".word", \
+ LOCAL_LABEL_PREFIX, VALUE); \
+ assemble_name (STREAM, XSTR (fnsym, 0)); \
+ fprintf (STREAM, "\n"); \
+ } \
+ else \
+ fprintf (STREAM, "\t%s\t%sL%d\n", \
+ ptr_mode == DImode ? ".dword" : ".word", \
+ LOCAL_LABEL_PREFIX, VALUE); \
+} while (0)
+
+/* This is how to output an assembler line
+ that says to advance the location counter
+ to a multiple of 2**LOG bytes. */
+
+#define ASM_OUTPUT_ALIGN(STREAM,LOG) \
+ fprintf (STREAM, "\t.align\t%d\n", (LOG))
+
+/* This is how to output an assembler line to advance the location
+ counter by SIZE bytes. */
+
+/* HOST_WIDE_INT_PRINT_UNSIGNED expands to a string literal that is
+ concatenated into the format string. */
+#undef ASM_OUTPUT_SKIP
+#define ASM_OUTPUT_SKIP(STREAM,SIZE) \
+ fprintf (STREAM, "\t.space\t"HOST_WIDE_INT_PRINT_UNSIGNED"\n", (SIZE))
+
+/* This is how to output a string. */
+#undef ASM_OUTPUT_ASCII
+#define ASM_OUTPUT_ASCII mips_output_ascii
+
+
+/* Default to -G 8 */
+#ifndef MIPS_DEFAULT_GVALUE
+#define MIPS_DEFAULT_GVALUE 8
+#endif
+
+/* Define the strings to put out for each section in the object file. */
+#define TEXT_SECTION_ASM_OP "\t.text" /* instructions */
+#define DATA_SECTION_ASM_OP "\t.data" /* large data */
+
+#undef READONLY_DATA_SECTION_ASM_OP
+#define READONLY_DATA_SECTION_ASM_OP "\t.rdata" /* read-only data */
+
+/* Push REGNO: pre-decrement $sp by 8 (a full doubleword slot on both
+ 32- and 64-bit targets) and store the register at the new top. */
+#define ASM_OUTPUT_REG_PUSH(STREAM,REGNO) \
+do \
+ { \
+ fprintf (STREAM, "\t%s\t%s,%s,-8\n\t%s\t%s,0(%s)\n", \
+ TARGET_64BIT ? "daddiu" : "addiu", \
+ reg_names[STACK_POINTER_REGNUM], \
+ reg_names[STACK_POINTER_REGNUM], \
+ TARGET_64BIT ? "sd" : "sw", \
+ reg_names[REGNO], \
+ reg_names[STACK_POINTER_REGNUM]); \
+ } \
+while (0)
+
+/* Pop REGNO: reload it from the top of stack and post-increment $sp
+ by 8, inside a .set noreorder block. */
+#define ASM_OUTPUT_REG_POP(STREAM,REGNO) \
+do \
+ { \
+ mips_push_asm_switch (&mips_noreorder); \
+ fprintf (STREAM, "\t%s\t%s,0(%s)\n\t%s\t%s,%s,8\n", \
+ TARGET_64BIT ? "ld" : "lw", \
+ reg_names[REGNO], \
+ reg_names[STACK_POINTER_REGNUM], \
+ TARGET_64BIT ? "daddu" : "addu", \
+ reg_names[STACK_POINTER_REGNUM], \
+ reg_names[STACK_POINTER_REGNUM]); \
+ mips_pop_asm_switch (&mips_noreorder); \
+ } \
+while (0)
+
+/* How to start an assembler comment.
+ The leading space is important (the mips native assembler requires it). */
+#ifndef ASM_COMMENT_START
+#define ASM_COMMENT_START " #"
+#endif
+
+#undef SIZE_TYPE
+#define SIZE_TYPE (POINTER_SIZE == 64 ? "long unsigned int" : "unsigned int")
+
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE (POINTER_SIZE == 64 ? "long int" : "int")
+
+/* The maximum number of bytes that can be copied by one iteration of
+ a movmemsi loop; see mips_block_move_loop. */
+#define MIPS_MAX_MOVE_BYTES_PER_LOOP_ITER \
+ (UNITS_PER_WORD * 4)
+
+/* The maximum number of bytes that can be copied by a straight-line
+ implementation of movmemsi; see mips_block_move_straight. We want
+ to make sure that any loop-based implementation will iterate at
+ least twice. */
+#define MIPS_MAX_MOVE_BYTES_STRAIGHT \
+ (MIPS_MAX_MOVE_BYTES_PER_LOOP_ITER * 2)
+
+/* The base cost of a memcpy call, for MOVE_RATIO and friends. These
+ values were determined experimentally by benchmarking with CSiBE.
+ In theory, the call overhead is higher for TARGET_ABICALLS (especially
+ for o32 where we have to restore $gp afterwards as well as make an
+ indirect call), but in practice, bumping this up higher for
+ TARGET_ABICALLS doesn't make much difference to code size. */
+
+#define MIPS_CALL_RATIO 8
+
+/* Any loop-based implementation of movmemsi will have at least
+ MIPS_MAX_MOVE_BYTES_STRAIGHT / UNITS_PER_WORD memory-to-memory
+ moves, so allow individual copies of fewer elements.
+
+ When movmemsi is not available, use a value approximating
+ the length of a memcpy call sequence, so that move_by_pieces
+ will generate inline code if it is shorter than a function call.
+ Since move_by_pieces_ninsns counts memory-to-memory moves, but
+ we'll have to generate a load/store pair for each, halve the
+ value of MIPS_CALL_RATIO to take that into account. */
+
+#define MOVE_RATIO(speed) \
+ (HAVE_movmemsi \
+ ? MIPS_MAX_MOVE_BYTES_STRAIGHT / MOVE_MAX \
+ : MIPS_CALL_RATIO / 2)
+
+#define MOVE_BY_PIECES_P(SIZE, ALIGN) \
+ mips_move_by_pieces_p (SIZE, ALIGN)
+
+/* For CLEAR_RATIO, when optimizing for size, give a better estimate
+ of the length of a memset call, but use the default otherwise. */
+
+#define CLEAR_RATIO(speed)\
+ ((speed) ? 15 : MIPS_CALL_RATIO)
+
+/* This is similar to CLEAR_RATIO, but for a non-zero constant, so when
+ optimizing for size adjust the ratio to account for the overhead of
+ loading the constant and replicating it across the word. */
+
+#define SET_RATIO(speed) \
+ ((speed) ? 15 : MIPS_CALL_RATIO - 2)
+
+#define STORE_BY_PIECES_P(SIZE, ALIGN) \
+ mips_store_by_pieces_p (SIZE, ALIGN)
+
+/* Since the bits of the _init and _fini function is spread across
+ many object files, each potentially with its own GP, we must assume
+ we need to load our GP. We don't preserve $gp or $ra, since each
+ init/fini chunk is supposed to initialize $gp, and crti/crtn
+ already take care of preserving $ra and, when appropriate, $gp. */
+/* o32 variant: "bal 1f" puts the return address (the address of label 1)
+ in $31, which .cpload then uses to compute $gp. */
+#if (defined _ABIO32 && _MIPS_SIM == _ABIO32)
+#define CRT_CALL_STATIC_FUNCTION(SECTION_OP, FUNC) \
+ asm (SECTION_OP "\n\
+ .set push\n\
+ .set nomips16\n\
+ .set noreorder\n\
+ bal 1f\n\
+ nop\n\
+1: .cpload $31\n\
+ .set reorder\n\
+ jal " USER_LABEL_PREFIX #FUNC "\n\
+ .set pop\n\
+ " TEXT_SECTION_ASM_OP);
+/* n32/n64 variant: .cpsetup saves the old $gp into $2 and derives the
+ new one from $31 relative to label 1b. */
+#elif ((defined _ABIN32 && _MIPS_SIM == _ABIN32) \
+ || (defined _ABI64 && _MIPS_SIM == _ABI64))
+#define CRT_CALL_STATIC_FUNCTION(SECTION_OP, FUNC) \
+ asm (SECTION_OP "\n\
+ .set push\n\
+ .set nomips16\n\
+ .set noreorder\n\
+ bal 1f\n\
+ nop\n\
+1: .set reorder\n\
+ .cpsetup $31, $2, 1b\n\
+ jal " USER_LABEL_PREFIX #FUNC "\n\
+ .set pop\n\
+ " TEXT_SECTION_ASM_OP);
+#endif
+
+/* Configure-time fallbacks for assembler feature tests. */
+#ifndef HAVE_AS_TLS
+#define HAVE_AS_TLS 0
+#endif
+
+#ifndef HAVE_AS_NAN
+#define HAVE_AS_NAN 0
+#endif
+
+#ifndef USED_FOR_TARGET
+/* Information about ".set noFOO; ...; .set FOO" blocks. */
+struct mips_asm_switch {
+ /* The FOO in the description above. */
+ const char *name;
+
+ /* The current block nesting level, or 0 if we aren't in a block. */
+ int nesting_level;
+};
+
+/* Globals defined in mips.c; see that file for their semantics. */
+extern const enum reg_class mips_regno_to_class[];
+extern bool mips_hard_regno_mode_ok[][FIRST_PSEUDO_REGISTER];
+extern const char *current_function_file; /* filename current function is in */
+extern int num_source_filenames; /* current .file # */
+extern struct mips_asm_switch mips_noreorder;
+extern struct mips_asm_switch mips_nomacro;
+extern struct mips_asm_switch mips_noat;
+extern int mips_dbx_regno[];
+extern int mips_dwarf_regno[];
+extern bool mips_split_p[];
+extern bool mips_split_hi_p[];
+extern bool mips_use_pcrel_pool_p[];
+extern const char *mips_lo_relocs[];
+extern const char *mips_hi_relocs[];
+extern enum processor mips_arch; /* which cpu to codegen for */
+extern enum processor mips_tune; /* which cpu to schedule for */
+extern int mips_isa; /* architectural level */
+extern const struct mips_cpu_info *mips_arch_info;
+extern const struct mips_cpu_info *mips_tune_info;
+extern unsigned int mips_base_compression_flags;
+extern GTY(()) struct target_globals *mips16_globals;
+#endif
+
+/* Enable querying of DFA units. */
+#define CPU_UNITS_QUERY 1
+
+#define FINAL_PRESCAN_INSN(INSN, OPVEC, NOPERANDS) \
+ mips_final_prescan_insn (INSN, OPVEC, NOPERANDS)
+
+/* As on most targets, we want the .eh_frame section to be read-only where
+ possible. And as on most targets, this means two things:
+
+ (a) Non-locally-binding pointers must have an indirect encoding,
+ so that the addresses in the .eh_frame section itself become
+ locally-binding.
+
+ (b) A shared library's .eh_frame section must encode locally-binding
+ pointers in a relative (relocation-free) form.
+
+ However, MIPS has traditionally not allowed directives like:
+
+ .long x-.
+
+ in cases where "x" is in a different section, or is not defined in the
+ same assembly file. We are therefore unable to emit the PC-relative
+ form required by (b) at assembly time.
+
+ Fortunately, the linker is able to convert absolute addresses into
+ PC-relative addresses on our behalf. Unfortunately, only certain
+ versions of the linker know how to do this for indirect pointers,
+ and for personality data. We must fall back on using writable
+ .eh_frame sections for shared libraries if the linker does not
+ support this feature. */
+#define ASM_PREFERRED_EH_DATA_FORMAT(CODE,GLOBAL) \
+ (((GLOBAL) ? DW_EH_PE_indirect : 0) | DW_EH_PE_absptr)
+
+/* For switching between MIPS16 and non-MIPS16 modes. */
+#define SWITCHABLE_TARGET 1
+
+/* Several named MIPS patterns depend on Pmode. These patterns have the
+ form <NAME>_si for Pmode == SImode and <NAME>_di for Pmode == DImode.
+ Add the appropriate suffix to generator function NAME and invoke it
+ with arguments ARGS. */
+#define PMODE_INSN(NAME, ARGS) \
+ (Pmode == SImode ? NAME ## _si ARGS : NAME ## _di ARGS)
diff --git a/gcc-4.9/gcc/config/mips/mips.md b/gcc-4.9/gcc/config/mips/mips.md
new file mode 100644
index 000000000..1e3e9e659
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/mips.md
@@ -0,0 +1,7190 @@
+;; Mips.md Machine Description for MIPS-based processors
+;; Copyright (C) 1989-2014 Free Software Foundation, Inc.
+;; Contributed by A. Lichnewsky, lich@inria.inria.fr
+;; Changes by Michael Meissner, meissner@osf.org
+;; 64-bit r4000 support by Ian Lance Taylor, ian@cygnus.com, and
+;; Brendan Eich, brendan@microunity.com.
+
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+(define_enum "processor" [
+ r3000
+ 4kc
+ 4kp
+ 5kc
+ 5kf
+ 20kc
+ 24kc
+ 24kf2_1
+ 24kf1_1
+ 74kc
+ 74kf2_1
+ 74kf1_1
+ 74kf3_2
+ loongson_2e
+ loongson_2f
+ loongson_3a
+ m4k
+ octeon
+ octeon2
+ r3900
+ r6000
+ r4000
+ r4100
+ r4111
+ r4120
+ r4130
+ r4300
+ r4600
+ r4650
+ r4700
+ r5000
+ r5400
+ r5500
+ r5900
+ r7000
+ r8000
+ r9000
+ r10000
+ sb1
+ sb1a
+ sr71000
+ xlr
+ xlp
+])
+
+(define_c_enum "unspec" [
+ ;; Unaligned accesses.
+ UNSPEC_LOAD_LEFT
+ UNSPEC_LOAD_RIGHT
+ UNSPEC_STORE_LEFT
+ UNSPEC_STORE_RIGHT
+
+ ;; Integer operations that are too cumbersome to describe directly.
+ UNSPEC_WSBH
+ UNSPEC_DSBH
+ UNSPEC_DSHD
+
+ ;; Floating-point moves.
+ UNSPEC_LOAD_LOW
+ UNSPEC_LOAD_HIGH
+ UNSPEC_STORE_WORD
+ UNSPEC_MFHC1
+ UNSPEC_MTHC1
+
+ ;; Floating-point environment.
+ UNSPEC_GET_FCSR
+ UNSPEC_SET_FCSR
+
+ ;; HI/LO moves.
+ UNSPEC_MFHI
+ UNSPEC_MTHI
+ UNSPEC_SET_HILO
+
+ ;; GP manipulation.
+ UNSPEC_LOADGP
+ UNSPEC_COPYGP
+ UNSPEC_MOVE_GP
+ UNSPEC_POTENTIAL_CPRESTORE
+ UNSPEC_CPRESTORE
+ UNSPEC_RESTORE_GP
+ UNSPEC_EH_RETURN
+ UNSPEC_GP
+ UNSPEC_SET_GOT_VERSION
+ UNSPEC_UPDATE_GOT_VERSION
+
+ ;; Symbolic accesses.
+ UNSPEC_LOAD_CALL
+ UNSPEC_LOAD_GOT
+ UNSPEC_TLS_LDM
+ UNSPEC_TLS_GET_TP
+ UNSPEC_UNSHIFTED_HIGH
+
+ ;; MIPS16 constant pools.
+ UNSPEC_ALIGN
+ UNSPEC_CONSTTABLE_INT
+ UNSPEC_CONSTTABLE_FLOAT
+
+ ;; Blockage and synchronisation.
+ UNSPEC_BLOCKAGE
+ UNSPEC_CLEAR_HAZARD
+ UNSPEC_RDHWR
+ UNSPEC_SYNCI
+ UNSPEC_SYNC
+
+ ;; Cache manipulation.
+ UNSPEC_MIPS_CACHE
+ UNSPEC_R10K_CACHE_BARRIER
+
+ ;; Interrupt handling.
+ UNSPEC_ERET
+ UNSPEC_DERET
+ UNSPEC_DI
+ UNSPEC_EHB
+ UNSPEC_RDPGPR
+ UNSPEC_COP0
+
+ ;; Used in a call expression in place of args_size. It's present for PIC
+ ;; indirect calls where it contains args_size and the function symbol.
+ UNSPEC_CALL_ATTR
+
+ ;; MIPS16 casesi jump table dispatch.
+ UNSPEC_CASESI_DISPATCH
+
+ ;; Stack checking.
+ UNSPEC_PROBE_STACK_RANGE
+])
+
+(define_constants
+ [(TLS_GET_TP_REGNUM 3)
+ (GET_FCSR_REGNUM 2)
+ (SET_FCSR_REGNUM 4)
+ (MIPS16_T_REGNUM 24)
+ (PIC_FUNCTION_ADDR_REGNUM 25)
+ (RETURN_ADDR_REGNUM 31)
+ (CPRESTORE_SLOT_REGNUM 76)
+ (GOT_VERSION_REGNUM 79)
+
+ ;; PIC long branch sequences are never longer than 100 bytes.
+ (MAX_PIC_BRANCH_LENGTH 100)
+ ]
+)
+
+(include "predicates.md")
+(include "constraints.md")
+
+;; ....................
+;;
+;; Attributes
+;;
+;; ....................
+
+(define_attr "got" "unset,xgot_high,load"
+ (const_string "unset"))
+
+;; For jal instructions, this attribute is DIRECT when the target address
+;; is symbolic and INDIRECT when it is a register.
+(define_attr "jal" "unset,direct,indirect"
+ (const_string "unset"))
+
+;; This attribute is YES if the instruction is a jal macro (not a
+;; real jal instruction).
+;;
+;; jal is always a macro for TARGET_CALL_CLOBBERED_GP because it includes
+;; an instruction to restore $gp. Direct jals are also macros for
+;; !TARGET_ABSOLUTE_JUMPS because they first load the target address
+;; into a register.
+(define_attr "jal_macro" "no,yes"
+ (cond [(eq_attr "jal" "direct")
+ (symbol_ref "(TARGET_CALL_CLOBBERED_GP || !TARGET_ABSOLUTE_JUMPS
+ ? JAL_MACRO_YES : JAL_MACRO_NO)")
+ (eq_attr "jal" "indirect")
+ (symbol_ref "(TARGET_CALL_CLOBBERED_GP
+ ? JAL_MACRO_YES : JAL_MACRO_NO)")]
+ (const_string "no")))
+
+;; Classification of moves, extensions and truncations. Most values
+;; are as for "type" (see below) but there are also the following
+;; move-specific values:
+;;
+;; constN move an N-constraint integer into a MIPS16 register
+;; sll0 "sll DEST,SRC,0", which on 64-bit targets is guaranteed
+;; to produce a sign-extended DEST, even if SRC is not
+;; properly sign-extended
+;; ext_ins EXT, DEXT, INS or DINS instruction
+;; andi a single ANDI instruction
+;; loadpool move a constant into a MIPS16 register by loading it
+;; from the pool
+;; shift_shift a shift left followed by a shift right
+;;
+;; This attribute is used to determine the instruction's length and
+;; scheduling type. For doubleword moves, the attribute always describes
+;; the split instructions; in some cases, it is more appropriate for the
+;; scheduling type to be "multi" instead.
+(define_attr "move_type"
+ "unknown,load,fpload,store,fpstore,mtc,mfc,mtlo,mflo,imul,move,fmove,
+ const,constN,signext,ext_ins,logical,arith,sll0,andi,loadpool,
+ shift_shift"
+ (const_string "unknown"))
+
+(define_attr "alu_type" "unknown,add,sub,not,nor,and,or,xor"
+ (const_string "unknown"))
+
+;; Main data type used by the insn
+(define_attr "mode" "unknown,none,QI,HI,SI,DI,TI,SF,DF,TF,FPSW"
+ (const_string "unknown"))
+
+;; True if the main data type is twice the size of a word.
+(define_attr "dword_mode" "no,yes"
+ (cond [(and (eq_attr "mode" "DI,DF")
+ (not (match_test "TARGET_64BIT")))
+ (const_string "yes")
+
+ (and (eq_attr "mode" "TI,TF")
+ (match_test "TARGET_64BIT"))
+ (const_string "yes")]
+ (const_string "no")))
+
+;; Attributes describing a sync loop. These loops have the form:
+;;
+;; if (RELEASE_BARRIER == YES) sync
+;; 1: OLDVAL = *MEM
+;; if ((OLDVAL & INCLUSIVE_MASK) != REQUIRED_OLDVAL) goto 2
+;; CMP = 0 [delay slot]
+;; $TMP1 = OLDVAL & EXCLUSIVE_MASK
+;; $TMP2 = INSN1 (OLDVAL, INSN1_OP2)
+;; $TMP3 = INSN2 ($TMP2, INCLUSIVE_MASK)
+;; $AT |= $TMP1 | $TMP3
+;; if (!commit (*MEM = $AT)) goto 1.
+;; if (INSN1 != MOVE && INSN1 != LI) NEWVAL = $TMP3 [delay slot]
+;; CMP = 1
+;; if (ACQUIRE_BARRIER == YES) sync
+;; 2:
+;;
+;; where "$" values are temporaries and where the other values are
+;; specified by the attributes below. Values are specified as operand
+;; numbers and insns are specified as enums. If no operand number is
+;; specified, the following values are used instead:
+;;
+;; - OLDVAL: $AT
+;; - CMP: NONE
+;; - NEWVAL: $AT
+;; - INCLUSIVE_MASK: -1
+;; - REQUIRED_OLDVAL: OLDVAL & INCLUSIVE_MASK
+;; - EXCLUSIVE_MASK: 0
+;;
+;; MEM and INSN1_OP2 are required.
+;;
+;; Ideally, the operand attributes would be integers, with -1 meaning "none",
+;; but the gen* programs don't yet support that.
+(define_attr "sync_mem" "none,0,1,2,3,4,5" (const_string "none"))
+(define_attr "sync_oldval" "none,0,1,2,3,4,5" (const_string "none"))
+(define_attr "sync_cmp" "none,0,1,2,3,4,5" (const_string "none"))
+(define_attr "sync_newval" "none,0,1,2,3,4,5" (const_string "none"))
+(define_attr "sync_inclusive_mask" "none,0,1,2,3,4,5" (const_string "none"))
+(define_attr "sync_exclusive_mask" "none,0,1,2,3,4,5" (const_string "none"))
+(define_attr "sync_required_oldval" "none,0,1,2,3,4,5" (const_string "none"))
+(define_attr "sync_insn1_op2" "none,0,1,2,3,4,5" (const_string "none"))
+(define_attr "sync_insn1" "move,li,addu,addiu,subu,and,andi,or,ori,xor,xori"
+ (const_string "move"))
+(define_attr "sync_insn2" "nop,and,xor,not"
+ (const_string "nop"))
+;; Memory model specifier.
+;; "0"-"9" values specify the operand that stores the memory model value.
+;; "10" specifies MEMMODEL_ACQ_REL,
+;; "11" specifies MEMMODEL_ACQUIRE.
+(define_attr "sync_memmodel" "" (const_int 10))
+
+;; Accumulator operand for madd patterns.
+(define_attr "accum_in" "none,0,1,2,3,4,5" (const_string "none"))
+
+;; Classification of each insn.
+;; branch conditional branch
+;; jump unconditional jump
+;; call unconditional call
+;; load load instruction(s)
+;; fpload floating point load
+;; fpidxload floating point indexed load
+;; store store instruction(s)
+;; fpstore floating point store
+;; fpidxstore floating point indexed store
+;; prefetch memory prefetch (register + offset)
+;; prefetchx memory indexed prefetch (register + register)
+;; condmove conditional moves
+;; mtc transfer to coprocessor
+;; mfc transfer from coprocessor
+;; mthi transfer to a hi register
+;; mtlo transfer to a lo register
+;; mfhi transfer from a hi register
+;; mflo transfer from a lo register
+;; const load constant
+;; arith integer arithmetic instructions
+;; logical integer logical instructions
+;; shift integer shift instructions
+;; slt set less than instructions
+;; signext sign extend instructions
+;; clz the clz and clo instructions
+;; pop the pop instruction
+;; trap trap if instructions
+;; imul integer multiply 2 operands
+;; imul3 integer multiply 3 operands
+;; imul3nc integer multiply 3 operands without clobbering HI/LO
+;; imadd integer multiply-add
+;; idiv integer divide 2 operands
+;; idiv3 integer divide 3 operands
+;; move integer register move ({,D}ADD{,U} with rt = 0)
+;; fmove floating point register move
+;; fadd floating point add/subtract
+;; fmul floating point multiply
+;; fmadd floating point multiply-add
+;; fdiv floating point divide
+;; frdiv floating point reciprocal divide
+;; frdiv1 floating point reciprocal divide step 1
+;; frdiv2 floating point reciprocal divide step 2
+;; fabs floating point absolute value
+;; fneg floating point negation
+;; fcmp floating point compare
+;; fcvt floating point convert
+;; fsqrt floating point square root
+;; frsqrt floating point reciprocal square root
+;; frsqrt1 floating point reciprocal square root step1
+;; frsqrt2 floating point reciprocal square root step2
+;; dspmac DSP MAC instructions not saturating the accumulator
+;; dspmacsat DSP MAC instructions that saturate the accumulator
+;; accext DSP accumulator extract instructions
+;; accmod DSP accumulator modify instructions
+;; dspalu DSP ALU instructions not saturating the result
+;; dspalusat DSP ALU instructions that saturate the result
+;; multi multiword sequence (or user asm statements)
+;; atomic atomic memory update instruction
+;; syncloop memory atomic operation implemented as a sync loop
+;; nop no operation
+;; ghost an instruction that produces no real code
+;; multimem microMIPS multiword load and store
+(define_attr "type"
+ "unknown,branch,jump,call,load,fpload,fpidxload,store,fpstore,fpidxstore,
+ prefetch,prefetchx,condmove,mtc,mfc,mthi,mtlo,mfhi,mflo,const,arith,logical,
+ shift,slt,signext,clz,pop,trap,imul,imul3,imul3nc,imadd,idiv,idiv3,move,
+ fmove,fadd,fmul,fmadd,fdiv,frdiv,frdiv1,frdiv2,fabs,fneg,fcmp,fcvt,fsqrt,
+ frsqrt,frsqrt1,frsqrt2,dspmac,dspmacsat,accext,accmod,dspalu,dspalusat,
+ multi,atomic,syncloop,nop,ghost,multimem"
+ (cond [(eq_attr "jal" "!unset") (const_string "call")
+ (eq_attr "got" "load") (const_string "load")
+
+ (eq_attr "alu_type" "add,sub") (const_string "arith")
+
+ (eq_attr "alu_type" "not,nor,and,or,xor") (const_string "logical")
+
+ ;; If a doubleword move uses these expensive instructions,
+ ;; it is usually better to schedule them in the same way
+ ;; as the singleword form, rather than as "multi".
+ (eq_attr "move_type" "load") (const_string "load")
+ (eq_attr "move_type" "fpload") (const_string "fpload")
+ (eq_attr "move_type" "store") (const_string "store")
+ (eq_attr "move_type" "fpstore") (const_string "fpstore")
+ (eq_attr "move_type" "mtc") (const_string "mtc")
+ (eq_attr "move_type" "mfc") (const_string "mfc")
+ (eq_attr "move_type" "mtlo") (const_string "mtlo")
+ (eq_attr "move_type" "mflo") (const_string "mflo")
+
+ ;; These types of move are always single insns.
+ (eq_attr "move_type" "imul") (const_string "imul")
+ (eq_attr "move_type" "fmove") (const_string "fmove")
+ (eq_attr "move_type" "loadpool") (const_string "load")
+ (eq_attr "move_type" "signext") (const_string "signext")
+ (eq_attr "move_type" "ext_ins") (const_string "arith")
+ (eq_attr "move_type" "arith") (const_string "arith")
+ (eq_attr "move_type" "logical") (const_string "logical")
+ (eq_attr "move_type" "sll0") (const_string "shift")
+ (eq_attr "move_type" "andi") (const_string "logical")
+
+ ;; These types of move are always split.
+ (eq_attr "move_type" "constN,shift_shift")
+ (const_string "multi")
+
+ ;; These types of move are split for doubleword modes only.
+ (and (eq_attr "move_type" "move,const")
+ (eq_attr "dword_mode" "yes"))
+ (const_string "multi")
+ (eq_attr "move_type" "move") (const_string "move")
+ (eq_attr "move_type" "const") (const_string "const")
+ (eq_attr "sync_mem" "!none") (const_string "syncloop")]
+ (const_string "unknown")))
+
+;; Mode for conversion types (fcvt)
+;; I2S integer to float single (SI/DI to SF)
+;; I2D integer to float double (SI/DI to DF)
+;; S2I float to integer (SF to SI/DI)
+;; D2I float to integer (DF to SI/DI)
+;; D2S double to float single
+;; S2D float single to double
+
+(define_attr "cnv_mode" "unknown,I2S,I2D,S2I,D2I,D2S,S2D"
+ (const_string "unknown"))
+
+;; Is this an extended instruction in mips16 mode?
+(define_attr "extended_mips16" "no,yes"
+ (if_then_else (ior ;; In general, constant-pool loads are extended
+ ;; instructions. We don't yet optimize for 16-bit
+ ;; PC-relative references.
+ (eq_attr "move_type" "sll0,loadpool")
+ (eq_attr "jal" "direct")
+ (eq_attr "got" "load"))
+ (const_string "yes")
+ (const_string "no")))
+
+(define_attr "compression" "none,all,micromips"
+ (const_string "none"))
+
+(define_attr "enabled" "no,yes"
+ (if_then_else (ior (eq_attr "compression" "all,none")
+ (and (eq_attr "compression" "micromips")
+ (match_test "TARGET_MICROMIPS")))
+ (const_string "yes")
+ (const_string "no")))
+
+;; The number of individual instructions that a non-branch pattern generates,
+;; using units of BASE_INSN_LENGTH.
+(define_attr "insn_count" ""
+ (cond [;; "Ghost" instructions occupy no space.
+ (eq_attr "type" "ghost")
+ (const_int 0)
+
+ ;; Extended instructions count as 2.
+ (and (eq_attr "extended_mips16" "yes")
+ (match_test "TARGET_MIPS16"))
+ (const_int 2)
+
+ ;; A GOT load followed by an add of $gp. This is not used for MIPS16.
+ (eq_attr "got" "xgot_high")
+ (const_int 2)
+
+ ;; SHIFT_SHIFTs are decomposed into two separate instructions.
+ ;; They are extended instructions on MIPS16 targets.
+ (eq_attr "move_type" "shift_shift")
+ (if_then_else (match_test "TARGET_MIPS16")
+ (const_int 4)
+ (const_int 2))
+
+ ;; Check for doubleword moves that are decomposed into two
+ ;; instructions. The individual instructions are unextended
+ ;; MIPS16 ones.
+ (and (eq_attr "move_type" "mtc,mfc,mtlo,mflo,move")
+ (eq_attr "dword_mode" "yes"))
+ (const_int 2)
+
+ ;; Constants, loads and stores are handled by external routines.
+ (and (eq_attr "move_type" "const,constN")
+ (eq_attr "dword_mode" "yes"))
+ (symbol_ref "mips_split_const_insns (operands[1])")
+ (eq_attr "move_type" "const,constN")
+ (symbol_ref "mips_const_insns (operands[1])")
+ (eq_attr "move_type" "load,fpload")
+ (symbol_ref "mips_load_store_insns (operands[1], insn)")
+ (eq_attr "move_type" "store,fpstore")
+ (symbol_ref "mips_load_store_insns (operands[0], insn)
+ + (TARGET_FIX_24K ? 1 : 0)")
+
+ ;; In the worst case, a call macro will take 8 instructions:
+ ;;
+ ;; lui $25,%call_hi(FOO)
+ ;; addu $25,$25,$28
+ ;; lw $25,%call_lo(FOO)($25)
+ ;; nop
+ ;; jalr $25
+ ;; nop
+ ;; lw $gp,X($sp)
+ ;; nop
+ (eq_attr "jal_macro" "yes")
+ (const_int 8)
+
+ ;; Various VR4120 errata require a nop to be inserted after a macc
+ ;; instruction. The assembler does this for us, so account for
+ ;; the worst-case length here.
+ (and (eq_attr "type" "imadd")
+ (match_test "TARGET_FIX_VR4120"))
+ (const_int 2)
+
+ ;; VR4120 errata MD(4): if there are consecutive dmult instructions,
+ ;; the result of the second one is missed. The assembler should work
+ ;; around this by inserting a nop after the first dmult.
+ (and (eq_attr "type" "imul,imul3")
+ (eq_attr "mode" "DI")
+ (match_test "TARGET_FIX_VR4120"))
+ (const_int 2)
+
+ (eq_attr "type" "idiv,idiv3")
+ (symbol_ref "mips_idiv_insns ()")
+
+ (not (eq_attr "sync_mem" "none"))
+ (symbol_ref "mips_sync_loop_insns (insn, operands)")]
+ (const_int 1)))
+
+;; Length of instruction in bytes. The default is derived from "insn_count",
+;; but there are special cases for branches (which must be handled here)
+;; and for compressed single instructions.
+(define_attr "length" ""
+ (cond [(and (eq_attr "compression" "micromips,all")
+ (eq_attr "dword_mode" "no")
+ (match_test "TARGET_MICROMIPS"))
+ (const_int 2)
+
+ ;; Direct microMIPS branch instructions have a range of
+ ;; [-0x10000,0xfffe], otherwise the range is [-0x20000,0x1fffc].
+ ;; If a branch is outside this range, we have a choice of two
+ ;; sequences.
+ ;;
+ ;; For PIC, an out-of-range branch like:
+ ;;
+ ;; bne r1,r2,target
+ ;; dslot
+ ;;
+ ;; becomes the equivalent of:
+ ;;
+ ;; beq r1,r2,1f
+ ;; dslot
+ ;; la $at,target
+ ;; jr $at
+ ;; nop
+ ;; 1:
+ ;;
+ ;; The non-PIC case is similar except that we use a direct
+ ;; jump instead of an la/jr pair. Since the target of this
+;; jump is an absolute 28-bit address (the other bits
+ ;; coming from the address of the delay slot) this form cannot
+ ;; cross a 256MB boundary. We could provide the option of
+ ;; using la/jr in this case too, but we do not do so at
+ ;; present.
+ ;;
+ ;; The value we specify here does not account for the delay slot
+ ;; instruction, whose length is added separately. If the RTL
+ ;; pattern has no explicit delay slot, mips_adjust_insn_length
+ ;; will add the length of the implicit nop. The range of
+ ;; [-0x20000, 0x1fffc] from the address of the delay slot
+ ;; therefore translates to a range of:
+ ;;
+ ;; [-(0x20000 - sizeof (branch)), 0x1fffc - sizeof (slot)]
+ ;; == [-0x1fffc, 0x1fff8]
+ ;;
+ ;; from the shorten_branches reference address.
+ (and (eq_attr "type" "branch")
+ (not (match_test "TARGET_MIPS16")))
+ (cond [;; Any variant can handle the 17-bit range.
+ (and (le (minus (match_dup 0) (pc)) (const_int 65532))
+ (le (minus (pc) (match_dup 0)) (const_int 65534)))
+ (const_int 4)
+
+ ;; The 18-bit range is OK other than for microMIPS.
+ (and (not (match_test "TARGET_MICROMIPS"))
+ (and (le (minus (match_dup 0) (pc)) (const_int 131064))
+ (le (minus (pc) (match_dup 0)) (const_int 131068))))
+ (const_int 4)
+
+ ;; The non-PIC case: branch, first delay slot, and J.
+ (match_test "TARGET_ABSOLUTE_JUMPS")
+ (const_int 12)]
+
+ ;; Use MAX_PIC_BRANCH_LENGTH as a (gross) overestimate.
+ ;; mips_adjust_insn_length substitutes the correct length.
+ ;;
+ ;; Note that we can't simply use (symbol_ref ...) here
+ ;; because genattrtab needs to know the maximum length
+ ;; of an insn.
+ (const_int MAX_PIC_BRANCH_LENGTH))
+
+ ;; An unextended MIPS16 branch has a range of [-0x100, 0xfe]
+ ;; from the address of the following instruction, which leads
+ ;; to a range of:
+ ;;
+ ;; [-(0x100 - sizeof (branch)), 0xfe]
+ ;; == [-0xfe, 0xfe]
+ ;;
+ ;; from the shorten_branches reference address. Extended branches
+ ;; likewise have a range of [-0x10000, 0xfffe] from the address
+ ;; of the following instruction, which leads to a range of:
+ ;;
+ ;; [-(0x10000 - sizeof (branch)), 0xfffe]
+ ;; == [-0xfffc, 0xfffe]
+ ;;
+ ;; from the reference address.
+ ;;
+ ;; When a branch is out of range, mips_reorg splits it into a form
+ ;; that uses in-range branches. There are four basic sequences:
+ ;;
+ ;; (1) Absolute addressing with a readable text segment
+ ;; (32-bit addresses):
+ ;;
+ ;; b... foo 2 bytes
+ ;; move $1,$2 2 bytes
+ ;; lw $2,label 2 bytes
+ ;; jr $2 2 bytes
+ ;; move $2,$1 2 bytes
+ ;; .align 2 0 or 2 bytes
+ ;; label:
+ ;; .word target 4 bytes
+ ;; foo:
+ ;; (16 bytes in the worst case)
+ ;;
+ ;; (2) Absolute addressing with a readable text segment
+ ;; (64-bit addresses):
+ ;;
+ ;; b... foo 2 bytes
+ ;; move $1,$2 2 bytes
+ ;; ld $2,label 2 bytes
+ ;; jr $2 2 bytes
+ ;; move $2,$1 2 bytes
+ ;; .align 3 0 to 6 bytes
+ ;; label:
+ ;; .dword target 8 bytes
+ ;; foo:
+ ;; (24 bytes in the worst case)
+ ;;
+ ;; (3) Absolute addressing without a readable text segment
+ ;; (which requires 32-bit addresses at present):
+ ;;
+ ;; b... foo 2 bytes
+ ;; move $1,$2 2 bytes
+ ;; lui $2,%hi(target) 4 bytes
+ ;; sll $2,8 2 bytes
+ ;; sll $2,8 2 bytes
+ ;; addiu $2,%lo(target) 4 bytes
+ ;; jr $2 2 bytes
+ ;; move $2,$1 2 bytes
+ ;; foo:
+ ;; (20 bytes)
+ ;;
+ ;; (4) PIC addressing (which requires 32-bit addresses at present):
+ ;;
+ ;; b... foo 2 bytes
+ ;; move $1,$2 2 bytes
+ ;; lw $2,cprestore 0, 2 or 4 bytes
+ ;; lw $2,%got(target)($2) 4 bytes
+ ;; addiu $2,%lo(target) 4 bytes
+ ;; jr $2 2 bytes
+ ;; move $2,$1 2 bytes
+ ;; foo:
+ ;; (20 bytes in the worst case)
+ (and (eq_attr "type" "branch")
+ (match_test "TARGET_MIPS16"))
+ (cond [(and (le (minus (match_dup 0) (pc)) (const_int 254))
+ (le (minus (pc) (match_dup 0)) (const_int 254)))
+ (const_int 2)
+ (and (le (minus (match_dup 0) (pc)) (const_int 65534))
+ (le (minus (pc) (match_dup 0)) (const_int 65532)))
+ (const_int 4)
+ (and (match_test "TARGET_ABICALLS")
+ (not (match_test "TARGET_ABSOLUTE_ABICALLS")))
+ (const_int 20)
+ (match_test "Pmode == SImode")
+ (const_int 16)
+ ] (const_int 24))]
+ (symbol_ref "get_attr_insn_count (insn) * BASE_INSN_LENGTH")))
+
+;; Attribute describing the processor.
+(define_enum_attr "cpu" "processor"
+ (const (symbol_ref "mips_tune")))
+
+;; The type of hardware hazard associated with this instruction.
+;; DELAY means that the next instruction cannot read the result
+;; of this one. HILO means that the next two instructions cannot
+;; write to HI or LO.
+(define_attr "hazard" "none,delay,hilo"
+ (cond [(and (eq_attr "type" "load,fpload,fpidxload")
+ (match_test "ISA_HAS_LOAD_DELAY"))
+ (const_string "delay")
+
+ (and (eq_attr "type" "mfc,mtc")
+ (match_test "ISA_HAS_XFER_DELAY"))
+ (const_string "delay")
+
+ (and (eq_attr "type" "fcmp")
+ (match_test "ISA_HAS_FCMP_DELAY"))
+ (const_string "delay")
+
+ ;; The r4000 multiplication patterns include an mflo instruction.
+ (and (eq_attr "type" "imul")
+ (match_test "TARGET_FIX_R4000"))
+ (const_string "hilo")
+
+ (and (eq_attr "type" "mfhi,mflo")
+ (not (match_test "ISA_HAS_HILO_INTERLOCKS")))
+ (const_string "hilo")]
+ (const_string "none")))
+
+;; Can the instruction be put into a delay slot?
+(define_attr "can_delay" "no,yes"
+ (if_then_else (and (eq_attr "type" "!branch,call,jump")
+ (eq_attr "hazard" "none")
+ (match_test "get_attr_insn_count (insn) == 1"))
+ (const_string "yes")
+ (const_string "no")))
+
+;; Attribute defining whether or not we can use the branch-likely
+;; instructions.
+(define_attr "branch_likely" "no,yes"
+ (if_then_else (match_test "GENERATE_BRANCHLIKELY")
+ (const_string "yes")
+ (const_string "no")))
+
+;; True if an instruction might assign to hi or lo when reloaded.
+;; This is used by the TUNE_MACC_CHAINS code.
+(define_attr "may_clobber_hilo" "no,yes"
+ (if_then_else (eq_attr "type" "imul,imul3,imadd,idiv,mthi,mtlo")
+ (const_string "yes")
+ (const_string "no")))
+
+;; Describe a user's asm statement.
+(define_asm_attributes
+ [(set_attr "type" "multi")
+ (set_attr "can_delay" "no")])
+
+;; This mode iterator allows 32-bit and 64-bit GPR patterns to be generated
+;; from the same template.
+(define_mode_iterator GPR [SI (DI "TARGET_64BIT")])
+
+;; A copy of GPR that can be used when a pattern has two independent
+;; modes.
+(define_mode_iterator GPR2 [SI (DI "TARGET_64BIT")])
+
+(define_mode_iterator MOVEP1 [SI SF])
+(define_mode_iterator MOVEP2 [SI SF])
+
+;; This mode iterator allows :HILO to be used as the mode of the
+;; concatenated HI and LO registers.
+(define_mode_iterator HILO [(DI "!TARGET_64BIT") (TI "TARGET_64BIT")])
+
+;; This mode iterator allows :P to be used for patterns that operate on
+;; pointer-sized quantities. Exactly one of the two alternatives will match.
+(define_mode_iterator P [(SI "Pmode == SImode") (DI "Pmode == DImode")])
+
+;; This mode iterator allows :MOVECC to be used anywhere that a
+;; conditional-move-type condition is needed.
+(define_mode_iterator MOVECC [SI (DI "TARGET_64BIT")
+ (CC "TARGET_HARD_FLOAT
+ && !TARGET_LOONGSON_2EF
+ && !TARGET_MIPS5900")])
+
+;; 32-bit integer moves for which we provide move patterns.
+(define_mode_iterator IMOVE32
+ [SI
+ (V2HI "TARGET_DSP")
+ (V4QI "TARGET_DSP")
+ (V2HQ "TARGET_DSP")
+ (V2UHQ "TARGET_DSP")
+ (V2HA "TARGET_DSP")
+ (V2UHA "TARGET_DSP")
+ (V4QQ "TARGET_DSP")
+ (V4UQQ "TARGET_DSP")])
+
+;; 64-bit modes for which we provide move patterns.
+(define_mode_iterator MOVE64
+ [DI DF
+ (V2SF "TARGET_HARD_FLOAT && TARGET_PAIRED_SINGLE_FLOAT")
+ (V2SI "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS")
+ (V4HI "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS")
+ (V8QI "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS")])
+
+;; 128-bit modes for which we provide move patterns on 64-bit targets.
+(define_mode_iterator MOVE128 [TI TF])
+
+;; This mode iterator allows the QI and HI extension patterns to be
+;; defined from the same template.
+(define_mode_iterator SHORT [QI HI])
+
+;; Likewise the 64-bit truncate-and-shift patterns.
+(define_mode_iterator SUBDI [QI HI SI])
+
+;; This mode iterator allows :ANYF to be used wherever a scalar or vector
+;; floating-point mode is allowed.
+(define_mode_iterator ANYF [(SF "TARGET_HARD_FLOAT")
+ (DF "TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT")
+ (V2SF "TARGET_HARD_FLOAT && TARGET_PAIRED_SINGLE_FLOAT")])
+
+;; Like ANYF, but only applies to scalar modes.
+(define_mode_iterator SCALARF [(SF "TARGET_HARD_FLOAT")
+ (DF "TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT")])
+
+;; A floating-point mode for which moves involving FPRs may need to be split.
+(define_mode_iterator SPLITF
+ [(DF "!TARGET_64BIT && TARGET_DOUBLE_FLOAT")
+ (DI "!TARGET_64BIT && TARGET_DOUBLE_FLOAT")
+ (V2SF "!TARGET_64BIT && TARGET_PAIRED_SINGLE_FLOAT")
+ (V2SI "!TARGET_64BIT && TARGET_LOONGSON_VECTORS")
+ (V4HI "!TARGET_64BIT && TARGET_LOONGSON_VECTORS")
+ (V8QI "!TARGET_64BIT && TARGET_LOONGSON_VECTORS")
+ (TF "TARGET_64BIT && TARGET_FLOAT64")])
+
+;; In GPR templates, a string like "<d>subu" will expand to "subu" in the
+;; 32-bit version and "dsubu" in the 64-bit version.
+(define_mode_attr d [(SI "") (DI "d")
+ (QQ "") (HQ "") (SQ "") (DQ "d")
+ (UQQ "") (UHQ "") (USQ "") (UDQ "d")
+ (HA "") (SA "") (DA "d")
+ (UHA "") (USA "") (UDA "d")])
+
+;; Same as d but upper-case.
+(define_mode_attr D [(SI "") (DI "D")
+ (QQ "") (HQ "") (SQ "") (DQ "D")
+ (UQQ "") (UHQ "") (USQ "") (UDQ "D")
+ (HA "") (SA "") (DA "D")
+ (UHA "") (USA "") (UDA "D")])
+
+;; This attribute gives the length suffix for a load or store instruction.
+;; The same suffixes work for zero and sign extensions.
+(define_mode_attr size [(QI "b") (HI "h") (SI "w") (DI "d")])
+(define_mode_attr SIZE [(QI "B") (HI "H") (SI "W") (DI "D")])
+
+;; This attribute gives the mode mask of a SHORT.
+(define_mode_attr mask [(QI "0x00ff") (HI "0xffff")])
+
+;; Mode attributes for GPR loads.
+(define_mode_attr load [(SI "lw") (DI "ld")])
+;; Instruction names for stores.
+(define_mode_attr store [(QI "sb") (HI "sh") (SI "sw") (DI "sd")])
+
+;; Similarly for MIPS IV indexed FPR loads and stores.
+(define_mode_attr loadx [(SF "lwxc1") (DF "ldxc1") (V2SF "ldxc1")])
+(define_mode_attr storex [(SF "swxc1") (DF "sdxc1") (V2SF "sdxc1")])
+
+;; The unextended ranges of the MIPS16 addiu and daddiu instructions
+;; are different. Some forms of unextended addiu have an 8-bit immediate
+;; field but the equivalent daddiu has only a 5-bit field.
+(define_mode_attr si8_di5 [(SI "8") (DI "5")])
+
+;; This attribute gives the best constraint to use for registers of
+;; a given mode.
+(define_mode_attr reg [(SI "d") (DI "d") (CC "z")])
+
+;; This attribute gives the format suffix for floating-point operations.
+(define_mode_attr fmt [(SF "s") (DF "d") (V2SF "ps")])
+
+;; This attribute gives the upper-case mode name for one unit of a
+;; floating-point mode.
+(define_mode_attr UNITMODE [(SF "SF") (DF "DF") (V2SF "SF")])
+
+;; This attribute gives the integer mode that has the same size as a
+;; fixed-point mode.
+(define_mode_attr IMODE [(QQ "QI") (HQ "HI") (SQ "SI") (DQ "DI")
+ (UQQ "QI") (UHQ "HI") (USQ "SI") (UDQ "DI")
+ (HA "HI") (SA "SI") (DA "DI")
+ (UHA "HI") (USA "SI") (UDA "DI")
+ (V4UQQ "SI") (V2UHQ "SI") (V2UHA "SI")
+ (V2HQ "SI") (V2HA "SI")])
+
+;; This attribute gives the integer mode that has half the size of
+;; the controlling mode.
+(define_mode_attr HALFMODE [(DF "SI") (DI "SI") (V2SF "SI")
+ (V2SI "SI") (V4HI "SI") (V8QI "SI")
+ (TF "DI")])
+
+;; This attribute works around the early SB-1 rev2 core "F2" erratum:
+;;
+;; In certain cases, div.s and div.ps may have a rounding error
+;; and/or wrong inexact flag.
+;;
+;; Therefore, we only allow div.s if not working around SB-1 rev2
+;; errata or if a slight loss of precision is OK.
+(define_mode_attr divide_condition
+ [DF (SF "!TARGET_FIX_SB1 || flag_unsafe_math_optimizations")
+ (V2SF "TARGET_SB1 && (!TARGET_FIX_SB1 || flag_unsafe_math_optimizations)")])
+
+;; This attribute gives the conditions under which SQRT.fmt instructions
+;; can be used.
+(define_mode_attr sqrt_condition
+ [(SF "!ISA_MIPS1") (DF "!ISA_MIPS1") (V2SF "TARGET_SB1")])
+
+;; This code iterator allows signed and unsigned widening multiplications
+;; to use the same template.
+(define_code_iterator any_extend [sign_extend zero_extend])
+
+;; This code iterator allows the two right shift instructions to be
+;; generated from the same template.
+(define_code_iterator any_shiftrt [ashiftrt lshiftrt])
+
+;; This code iterator allows the three shift instructions to be generated
+;; from the same template.
+;; This code iterator allows the three shift instructions to be generated
+;; from the same template.
+(define_code_iterator any_shift [ashift ashiftrt lshiftrt])
+
+;; This code iterator allows unsigned and signed division to be generated
+;; from the same template.
+(define_code_iterator any_div [div udiv])
+
+;; This code iterator allows unsigned and signed modulus to be generated
+;; from the same template.
+(define_code_iterator any_mod [mod umod])
+
+;; This code iterator allows all native floating-point comparisons to be
+;; generated from the same template.
+(define_code_iterator fcond [unordered uneq unlt unle eq lt le])
+
+;; This code iterator is used for comparisons that can be implemented
+;; by swapping the operands.
+(define_code_iterator swapped_fcond [ge gt unge ungt])
+
+;; Equality operators.
+(define_code_iterator equality_op [eq ne])
+
+;; These code iterators allow the signed and unsigned scc operations to use
+;; the same template.
+(define_code_iterator any_gt [gt gtu])
+(define_code_iterator any_ge [ge geu])
+(define_code_iterator any_lt [lt ltu])
+(define_code_iterator any_le [le leu])
+
+;; This code iterator allows plain and simple returns to use the same
+;; template.
+(define_code_iterator any_return [return simple_return])
+
+;; <u> expands to an empty string when doing a signed operation and
+;; "u" when doing an unsigned operation.
+(define_code_attr u [(sign_extend "") (zero_extend "u")
+ (div "") (udiv "u")
+ (mod "") (umod "u")
+ (gt "") (gtu "u")
+ (ge "") (geu "u")
+ (lt "") (ltu "u")
+ (le "") (leu "u")])
+
+;; <U> is like <u> except uppercase.
+(define_code_attr U [(sign_extend "") (zero_extend "U")])
+
+;; <su> is like <u>, but the signed form expands to "s" rather than "".
+(define_code_attr su [(sign_extend "s") (zero_extend "u")])
+
+;; <optab> expands to the name of the optab for a particular code.
+(define_code_attr optab [(ashift "ashl")
+ (ashiftrt "ashr")
+ (lshiftrt "lshr")
+ (ior "ior")
+ (xor "xor")
+ (and "and")
+ (plus "add")
+ (minus "sub")
+ (return "return")
+ (simple_return "simple_return")])
+
+;; <insn> expands to the name of the insn that implements a particular code.
+(define_code_attr insn [(ashift "sll")
+ (ashiftrt "sra")
+ (lshiftrt "srl")
+ (ior "or")
+ (xor "xor")
+ (and "and")
+ (plus "addu")
+ (minus "subu")])
+
+;; <immediate_insn> expands to the name of the insn that implements
+;; a particular code to operate on immediate values.
+(define_code_attr immediate_insn [(ior "ori")
+ (xor "xori")
+ (and "andi")])
+
+;; <shift_compression> gives the "compression" attribute value used for each
+;; shift code: the logical shifts have compressed microMIPS forms, the
+;; arithmetic right shift does not.
+(define_code_attr shift_compression [(ashift "micromips")
+ (lshiftrt "micromips")
+ (ashiftrt "none")])
+
+;; <fcond> is the c.cond.fmt condition associated with a particular code.
+(define_code_attr fcond [(unordered "un")
+ (uneq "ueq")
+ (unlt "ult")
+ (unle "ule")
+ (eq "eq")
+ (lt "lt")
+ (le "le")])
+
+;; Similar, but for swapped conditions.
+(define_code_attr swapped_fcond [(ge "le")
+ (gt "lt")
+ (unge "ule")
+ (ungt "ult")])
+
+;; The value of the bit when the branch is taken for branch_bit patterns.
+;; Comparison is always against zero so this depends on the operator.
+(define_code_attr bbv [(eq "0") (ne "1")])
+
+;; This is the inverse value of bbv.
+(define_code_attr bbinv [(eq "1") (ne "0")])
+
+;; .........................
+;;
+;; Branch, call and jump delay slots
+;;
+;; .........................
+
+;; Each define_delay triple is [slot-eligibility, annul-if-true,
+;; annul-if-false].  Branches with likely variants may annul the delay
+;; slot when the branch is not taken.
+(define_delay (and (eq_attr "type" "branch")
+ (not (match_test "TARGET_MIPS16"))
+ (eq_attr "branch_likely" "yes"))
+ [(eq_attr "can_delay" "yes")
+ (nil)
+ (eq_attr "can_delay" "yes")])
+
+;; Branches that don't have likely variants do not annul on false.
+(define_delay (and (eq_attr "type" "branch")
+ (not (match_test "TARGET_MIPS16"))
+ (eq_attr "branch_likely" "no"))
+ [(eq_attr "can_delay" "yes")
+ (nil)
+ (nil)])
+
+(define_delay (eq_attr "type" "jump")
+ [(eq_attr "can_delay" "yes")
+ (nil)
+ (nil)])
+
+;; Only non-macro calls (jal_macro == "no") have a delay slot that we
+;; can fill ourselves.
+(define_delay (and (eq_attr "type" "call")
+ (eq_attr "jal_macro" "no"))
+ [(eq_attr "can_delay" "yes")
+ (nil)
+ (nil)])
+
+;; Pipeline descriptions.
+;;
+;; generic.md provides a fallback for processors without a specific
+;; pipeline description. It is derived from the old define_function_unit
+;; version and uses the "alu" and "imuldiv" units declared below.
+;;
+;; Some of the processor-specific files are also derived from old
+;; define_function_unit descriptions and simply override the parts of
+;; generic.md that don't apply. The other processor-specific files
+;; are self-contained.
+(define_automaton "alu,imuldiv")
+
+(define_cpu_unit "alu" "alu")
+(define_cpu_unit "imuldiv" "imuldiv")
+
+;; Ghost instructions produce no real code and introduce no hazards.
+;; They exist purely to express an effect on dataflow.
+(define_insn_reservation "ghost" 0
+ (eq_attr "type" "ghost")
+ "nothing")
+
+;; Processor-specific pipeline descriptions; generic.md provides the
+;; fallback described in the comment above.
+(include "4k.md")
+(include "5k.md")
+(include "20kc.md")
+(include "24k.md")
+(include "74k.md")
+(include "3000.md")
+(include "4000.md")
+(include "4100.md")
+(include "4130.md")
+(include "4300.md")
+(include "4600.md")
+(include "5000.md")
+(include "5400.md")
+(include "5500.md")
+(include "6000.md")
+(include "7000.md")
+(include "9000.md")
+(include "10000.md")
+(include "loongson2ef.md")
+(include "loongson3a.md")
+(include "octeon.md")
+(include "sb1.md")
+(include "sr71k.md")
+(include "xlr.md")
+(include "xlp.md")
+(include "generic.md")
+
+;;
+;; ....................
+;;
+;; CONDITIONAL TRAPS
+;;
+;; ....................
+;;
+
+;; Unconditional trap.  Use an always-true conditional trap (teq $0,$0)
+;; where the ISA has one, otherwise a breakpoint.  MIPS16 spells the
+;; breakpoint with an explicit code ("break 0").
+(define_insn "trap"
+ [(trap_if (const_int 1) (const_int 0))]
+ ""
+{
+ if (ISA_HAS_COND_TRAP)
+ return "teq\t$0,$0";
+ else if (TARGET_MIPS16)
+ return "break 0";
+ else
+ return "break";
+}
+ [(set_attr "type" "trap")])
+
+;; Conditional trap on a GPR comparison.  Only the comparison rtx
+;; (operand 0, whose arms are operands 1 and 2) needs to be passed to
+;; the expand helper.
+(define_expand "ctrap<mode>4"
+ [(trap_if (match_operator 0 "comparison_operator"
+ [(match_operand:GPR 1 "reg_or_0_operand")
+ (match_operand:GPR 2 "arith_operand")])
+ (match_operand 3 "const_0_operand"))]
+ "ISA_HAS_COND_TRAP"
+{
+ mips_expand_conditional_trap (operands[0]);
+ DONE;
+})
+
+;; t%C0 emits the conditional-trap mnemonic for the comparison in
+;; operand 0; %z1 substitutes $0 when operand 1 is zero.
+(define_insn "*conditional_trap<mode>"
+ [(trap_if (match_operator:GPR 0 "trap_comparison_operator"
+ [(match_operand:GPR 1 "reg_or_0_operand" "dJ")
+ (match_operand:GPR 2 "arith_operand" "dI")])
+ (const_int 0))]
+ "ISA_HAS_COND_TRAP"
+ "t%C0\t%z1,%2"
+ [(set_attr "type" "trap")])
+
+;;
+;; ....................
+;;
+;; ADDITION
+;;
+;; ....................
+;;
+
+;; Floating-point addition for all ANYF modes.
+(define_insn "add<mode>3"
+ [(set (match_operand:ANYF 0 "register_operand" "=f")
+ (plus:ANYF (match_operand:ANYF 1 "register_operand" "f")
+ (match_operand:ANYF 2 "register_operand" "f")))]
+ ""
+ "add.<fmt>\t%0,%1,%2"
+ [(set_attr "type" "fadd")
+ (set_attr "mode" "<UNITMODE>")])
+
+;; Integer addition; matched by *add<mode>3 or *add<mode>3_mips16 below.
+(define_expand "add<mode>3"
+ [(set (match_operand:GPR 0 "register_operand")
+ (plus:GPR (match_operand:GPR 1 "register_operand")
+ (match_operand:GPR 2 "arith_operand")))]
+ "")
+
+;; Alternatives 0 and 1 take register operands and use ADDU; the
+;; remaining alternatives take immediates and use ADDIU (see the
+;; which_alternative test below).
+(define_insn "*add<mode>3"
+ [(set (match_operand:GPR 0 "register_operand" "=!u,d,!u,!u,!ks,!d,d")
+ (plus:GPR (match_operand:GPR 1 "register_operand" "!u,d,!u,!ks,!ks,0,d")
+ (match_operand:GPR 2 "arith_operand" "!u,d,Uead,Uuw6,Uesp,Usb4,Q")))]
+ "!TARGET_MIPS16"
+{
+ if (which_alternative == 0
+ || which_alternative == 1)
+ return "<d>addu\t%0,%1,%2";
+ else
+ return "<d>addiu\t%0,%1,%2";
+}
+ [(set_attr "alu_type" "add")
+ (set_attr "compression" "micromips,*,micromips,micromips,micromips,micromips,*")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "*add<mode>3_mips16"
+ [(set (match_operand:GPR 0 "register_operand" "=ks,ks,d,d,d,d,d,d,d")
+ (plus:GPR (match_operand:GPR 1 "register_operand" "ks,ks,ks,ks,0,0,d,d,d")
+ (match_operand:GPR 2 "arith_operand" "Usd8,Q,Uuw<si8_di5>,Q,Usb<si8_di5>,Q,Usb4,O,d")))]
+ "TARGET_MIPS16"
+ "@
+ <d>addiu\t%0,%2
+ <d>addiu\t%0,%2
+ <d>addiu\t%0,%1,%2
+ <d>addiu\t%0,%1,%2
+ <d>addiu\t%0,%2
+ <d>addiu\t%0,%2
+ <d>addiu\t%0,%1,%2
+ <d>addiu\t%0,%1,%2
+ <d>addu\t%0,%1,%2"
+ [(set_attr "alu_type" "add")
+ (set_attr "mode" "<MODE>")
+ (set_attr "extended_mips16" "no,yes,no,yes,no,yes,no,yes,no")])
+
+;; On the mips16, we can sometimes split an add of a constant which is
+;; a 4 byte instruction into two adds which are both 2 byte
+;; instructions. There are two cases: one where we are adding a
+;; constant plus a register to another register, and one where we are
+;; simply adding a constant to a register.
+
+;; Two-operand SI form: split the constant into a maximal short-form
+;; chunk (0x7f / -0x80) plus the remainder.
+(define_split
+ [(set (match_operand:SI 0 "d_operand")
+ (plus:SI (match_dup 0)
+ (match_operand:SI 1 "const_int_operand")))]
+ "TARGET_MIPS16 && reload_completed && !TARGET_DEBUG_D_MODE
+ && ((INTVAL (operands[1]) > 0x7f
+ && INTVAL (operands[1]) <= 0x7f + 0x7f)
+ || (INTVAL (operands[1]) < - 0x80
+ && INTVAL (operands[1]) >= - 0x80 - 0x80))"
+ [(set (match_dup 0) (plus:SI (match_dup 0) (match_dup 1)))
+ (set (match_dup 0) (plus:SI (match_dup 0) (match_dup 2)))]
+{
+ HOST_WIDE_INT val = INTVAL (operands[1]);
+
+ if (val >= 0)
+ {
+ operands[1] = GEN_INT (0x7f);
+ operands[2] = GEN_INT (val - 0x7f);
+ }
+ else
+ {
+ operands[1] = GEN_INT (- 0x80);
+ operands[2] = GEN_INT (val + 0x80);
+ }
+})
+
+;; Three-operand SI form: the first add uses the smaller 0x7 / -0x8
+;; range available when the destination differs from the source.
+(define_split
+ [(set (match_operand:SI 0 "d_operand")
+ (plus:SI (match_operand:SI 1 "d_operand")
+ (match_operand:SI 2 "const_int_operand")))]
+ "TARGET_MIPS16 && reload_completed && !TARGET_DEBUG_D_MODE
+ && REGNO (operands[0]) != REGNO (operands[1])
+ && ((INTVAL (operands[2]) > 0x7
+ && INTVAL (operands[2]) <= 0x7 + 0x7f)
+ || (INTVAL (operands[2]) < - 0x8
+ && INTVAL (operands[2]) >= - 0x8 - 0x80))"
+ [(set (match_dup 0) (plus:SI (match_dup 1) (match_dup 2)))
+ (set (match_dup 0) (plus:SI (match_dup 0) (match_dup 3)))]
+{
+ HOST_WIDE_INT val = INTVAL (operands[2]);
+
+ if (val >= 0)
+ {
+ operands[2] = GEN_INT (0x7);
+ operands[3] = GEN_INT (val - 0x7);
+ }
+ else
+ {
+ operands[2] = GEN_INT (- 0x8);
+ operands[3] = GEN_INT (val + 0x8);
+ }
+})
+
+;; Corresponding splits for 64-bit additions, with the narrower DI
+;; short-form immediate ranges.
+(define_split
+ [(set (match_operand:DI 0 "d_operand")
+ (plus:DI (match_dup 0)
+ (match_operand:DI 1 "const_int_operand")))]
+ "TARGET_MIPS16 && TARGET_64BIT && reload_completed && !TARGET_DEBUG_D_MODE
+ && ((INTVAL (operands[1]) > 0xf
+ && INTVAL (operands[1]) <= 0xf + 0xf)
+ || (INTVAL (operands[1]) < - 0x10
+ && INTVAL (operands[1]) >= - 0x10 - 0x10))"
+ [(set (match_dup 0) (plus:DI (match_dup 0) (match_dup 1)))
+ (set (match_dup 0) (plus:DI (match_dup 0) (match_dup 2)))]
+{
+ HOST_WIDE_INT val = INTVAL (operands[1]);
+
+ if (val >= 0)
+ {
+ operands[1] = GEN_INT (0xf);
+ operands[2] = GEN_INT (val - 0xf);
+ }
+ else
+ {
+ operands[1] = GEN_INT (- 0x10);
+ operands[2] = GEN_INT (val + 0x10);
+ }
+})
+
+(define_split
+ [(set (match_operand:DI 0 "d_operand")
+ (plus:DI (match_operand:DI 1 "d_operand")
+ (match_operand:DI 2 "const_int_operand")))]
+ "TARGET_MIPS16 && TARGET_64BIT && reload_completed && !TARGET_DEBUG_D_MODE
+ && REGNO (operands[0]) != REGNO (operands[1])
+ && ((INTVAL (operands[2]) > 0x7
+ && INTVAL (operands[2]) <= 0x7 + 0xf)
+ || (INTVAL (operands[2]) < - 0x8
+ && INTVAL (operands[2]) >= - 0x8 - 0x10))"
+ [(set (match_dup 0) (plus:DI (match_dup 1) (match_dup 2)))
+ (set (match_dup 0) (plus:DI (match_dup 0) (match_dup 3)))]
+{
+ HOST_WIDE_INT val = INTVAL (operands[2]);
+
+ if (val >= 0)
+ {
+ operands[2] = GEN_INT (0x7);
+ operands[3] = GEN_INT (val - 0x7);
+ }
+ else
+ {
+ operands[2] = GEN_INT (- 0x8);
+ operands[3] = GEN_INT (val + 0x8);
+ }
+})
+
+;; Add two 32-bit values and sign-extend the 32-bit result to 64 bits.
+(define_insn "*addsi3_extended"
+ [(set (match_operand:DI 0 "register_operand" "=d,d")
+ (sign_extend:DI
+ (plus:SI (match_operand:SI 1 "register_operand" "d,d")
+ (match_operand:SI 2 "arith_operand" "d,Q"))))]
+ "TARGET_64BIT && !TARGET_MIPS16"
+ "@
+ addu\t%0,%1,%2
+ addiu\t%0,%1,%2"
+ [(set_attr "alu_type" "add")
+ (set_attr "mode" "SI")])
+
+;; Split this insn so that the addiu splitters can have a crack at it.
+;; Use a conservative length estimate until the split.
+(define_insn_and_split "*addsi3_extended_mips16"
+ [(set (match_operand:DI 0 "register_operand" "=d,d,d")
+ (sign_extend:DI
+ (plus:SI (match_operand:SI 1 "register_operand" "0,d,d")
+ (match_operand:SI 2 "arith_operand" "Q,O,d"))))]
+ "TARGET_64BIT && TARGET_MIPS16"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 3) (plus:SI (match_dup 1) (match_dup 2)))]
+ { operands[3] = gen_lowpart (SImode, operands[0]); }
+ [(set_attr "alu_type" "add")
+ (set_attr "mode" "SI")
+ (set_attr "extended_mips16" "yes")])
+
+;; Combiner patterns for unsigned byte-add.
+
+;; Big-endian form: the low byte of the SImode sum is subreg byte 3.
+(define_insn "*baddu_si_eb"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (zero_extend:SI
+ (subreg:QI
+ (plus:SI (match_operand:SI 1 "register_operand" "d")
+ (match_operand:SI 2 "register_operand" "d")) 3)))]
+ "ISA_HAS_BADDU && BYTES_BIG_ENDIAN"
+ "baddu\\t%0,%1,%2"
+ [(set_attr "alu_type" "add")])
+
+;; Little-endian form: the low byte is subreg byte 0.
+(define_insn "*baddu_si_el"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (zero_extend:SI
+ (subreg:QI
+ (plus:SI (match_operand:SI 1 "register_operand" "d")
+ (match_operand:SI 2 "register_operand" "d")) 0)))]
+ "ISA_HAS_BADDU && !BYTES_BIG_ENDIAN"
+ "baddu\\t%0,%1,%2"
+ [(set_attr "alu_type" "add")])
+
+(define_insn "*baddu_di<mode>"
+ [(set (match_operand:GPR 0 "register_operand" "=d")
+ (zero_extend:GPR
+ (truncate:QI
+ (plus:DI (match_operand:DI 1 "register_operand" "d")
+ (match_operand:DI 2 "register_operand" "d")))))]
+ "ISA_HAS_BADDU && TARGET_64BIT"
+ "baddu\\t%0,%1,%2"
+ [(set_attr "alu_type" "add")])
+
+;;
+;; ....................
+;;
+;; SUBTRACTION
+;;
+;; ....................
+;;
+
+;; Floating-point subtraction for all ANYF modes.
+(define_insn "sub<mode>3"
+ [(set (match_operand:ANYF 0 "register_operand" "=f")
+ (minus:ANYF (match_operand:ANYF 1 "register_operand" "f")
+ (match_operand:ANYF 2 "register_operand" "f")))]
+ ""
+ "sub.<fmt>\t%0,%1,%2"
+ [(set_attr "type" "fadd")
+ (set_attr "mode" "<UNITMODE>")])
+
+;; Integer subtraction; alternative 0 is the compressed microMIPS form.
+(define_insn "sub<mode>3"
+ [(set (match_operand:GPR 0 "register_operand" "=!u,d")
+ (minus:GPR (match_operand:GPR 1 "register_operand" "!u,d")
+ (match_operand:GPR 2 "register_operand" "!u,d")))]
+ ""
+ "<d>subu\t%0,%1,%2"
+ [(set_attr "alu_type" "sub")
+ (set_attr "compression" "micromips,*")
+ (set_attr "mode" "<MODE>")])
+
+;; Subtract two 32-bit values and sign-extend the result to 64 bits.
+(define_insn "*subsi3_extended"
+ [(set (match_operand:DI 0 "register_operand" "=d")
+ (sign_extend:DI
+ (minus:SI (match_operand:SI 1 "register_operand" "d")
+ (match_operand:SI 2 "register_operand" "d"))))]
+ "TARGET_64BIT"
+ "subu\t%0,%1,%2"
+ [(set_attr "alu_type" "sub")
+ (set_attr "mode" "DI")])
+
+;;
+;; ....................
+;;
+;; MULTIPLICATION
+;;
+;; ....................
+;;
+
+;; Floating-point multiplication.  The expander lets either the plain
+;; insn or the R4300 workaround insn below be selected.
+(define_expand "mul<mode>3"
+ [(set (match_operand:SCALARF 0 "register_operand")
+ (mult:SCALARF (match_operand:SCALARF 1 "register_operand")
+ (match_operand:SCALARF 2 "register_operand")))]
+ ""
+ "")
+
+(define_insn "*mul<mode>3"
+ [(set (match_operand:SCALARF 0 "register_operand" "=f")
+ (mult:SCALARF (match_operand:SCALARF 1 "register_operand" "f")
+ (match_operand:SCALARF 2 "register_operand" "f")))]
+ "!TARGET_4300_MUL_FIX"
+ "mul.<fmt>\t%0,%1,%2"
+ [(set_attr "type" "fmul")
+ (set_attr "mode" "<MODE>")])
+
+;; Early VR4300 silicon has a CPU bug where multiplies with certain
+;; operands may corrupt immediately following multiplies. This is a
+;; simple fix to insert NOPs.
+
+(define_insn "*mul<mode>3_r4300"
+ [(set (match_operand:SCALARF 0 "register_operand" "=f")
+ (mult:SCALARF (match_operand:SCALARF 1 "register_operand" "f")
+ (match_operand:SCALARF 2 "register_operand" "f")))]
+ "TARGET_4300_MUL_FIX"
+ "mul.<fmt>\t%0,%1,%2\;nop"
+ [(set_attr "type" "fmul")
+ (set_attr "mode" "<MODE>")
+ (set_attr "insn_count" "2")])
+
+(define_insn "mulv2sf3"
+ [(set (match_operand:V2SF 0 "register_operand" "=f")
+ (mult:V2SF (match_operand:V2SF 1 "register_operand" "f")
+ (match_operand:V2SF 2 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_PAIRED_SINGLE_FLOAT"
+ "mul.ps\t%0,%1,%2"
+ [(set_attr "type" "fmul")
+ (set_attr "mode" "SF")])
+
+;; The original R4000 has a cpu bug. If a double-word or a variable
+;; shift executes while an integer multiplication is in progress, the
+;; shift may give an incorrect result. Avoid this by keeping the mflo
+;; with the mult on the R4000.
+;;
+;; From "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
+;; (also valid for MIPS R4000MC processors):
+;;
+;; "16. R4000PC, R4000SC: Please refer to errata 28 for an update to
+;; this errata description.
+;; The following code sequence causes the R4000 to incorrectly
+;; execute the Double Shift Right Arithmetic 32 (dsra32)
+;; instruction. If the dsra32 instruction is executed during an
+;; integer multiply, the dsra32 will only shift by the amount in
+;; specified in the instruction rather than the amount plus 32
+;; bits.
+;; instruction 1: mult rs,rt integer multiply
+;; instruction 2-12: dsra32 rd,rt,rs doubleword shift
+;; right arithmetic + 32
+;; Workaround: A dsra32 instruction placed after an integer
+;; multiply should not be one of the 11 instructions after the
+;; multiply instruction."
+;;
+;; and:
+;;
+;; "28. R4000PC, R4000SC: The text from errata 16 should be replaced by
+;; the following description.
+;; All extended shifts (shift by n+32) and variable shifts (32 and
+;; 64-bit versions) may produce incorrect results under the
+;; following conditions:
+;; 1) An integer multiply is currently executing
+;; 2) These types of shift instructions are executed immediately
+;; following an integer divide instruction.
+;; Workaround:
+;; 1) Make sure no integer multiply is running when these
+;; instruction are executed. If this cannot be predicted at
+;; compile time, then insert a "mfhi" to R0 instruction
+;; immediately after the integer multiply instruction. This
+;; will cause the integer multiply to complete before the shift
+;; is executed.
+;; 2) Separate integer divide and these two classes of shift
+;; instructions by another instruction or a noop."
+;;
+;; These processors have PRId values of 0x00004220 and 0x00004300,
+;; respectively.
+
+;; Pick the best integer multiply implementation for the target:
+;; Loongson three-operand forms, generic three-operand MUL, the MIPS16
+;; LO-register form, the R4000 errata sequence, or a plain MULT.
+(define_expand "mul<mode>3"
+ [(set (match_operand:GPR 0 "register_operand")
+ (mult:GPR (match_operand:GPR 1 "register_operand")
+ (match_operand:GPR 2 "register_operand")))]
+ "ISA_HAS_<D>MULT"
+{
+ rtx lo;
+
+ if (TARGET_LOONGSON_2EF || TARGET_LOONGSON_3A)
+ emit_insn (gen_mul<mode>3_mul3_loongson (operands[0], operands[1],
+ operands[2]));
+ else if (ISA_HAS_<D>MUL3)
+ emit_insn (gen_mul<mode>3_mul3 (operands[0], operands[1], operands[2]));
+ else if (TARGET_MIPS16)
+ {
+ lo = gen_rtx_REG (<MODE>mode, LO_REGNUM);
+ emit_insn (gen_mul<mode>3_internal (lo, operands[1], operands[2]));
+ emit_move_insn (operands[0], lo);
+ }
+ else if (TARGET_FIX_R4000)
+ emit_insn (gen_mul<mode>3_r4000 (operands[0], operands[1], operands[2]));
+ else
+ emit_insn
+ (gen_mul<mode>3_internal (operands[0], operands[1], operands[2]));
+ DONE;
+})
+
+;; Loongson three-operand multiply; the mnemonic differs between
+;; Loongson 2E/2F and 3A.
+(define_insn "mul<mode>3_mul3_loongson"
+ [(set (match_operand:GPR 0 "register_operand" "=d")
+ (mult:GPR (match_operand:GPR 1 "register_operand" "d")
+ (match_operand:GPR 2 "register_operand" "d")))]
+ "TARGET_LOONGSON_2EF || TARGET_LOONGSON_3A"
+{
+ if (TARGET_LOONGSON_2EF)
+ return "<d>multu.g\t%0,%1,%2";
+ else
+ return "gs<d>multu\t%0,%1,%2";
+}
+ [(set_attr "type" "imul3nc")
+ (set_attr "mode" "<MODE>")])
+
+;; Three-operand multiply; alternative 1 puts the result in LO and so
+;; uses a plain MULT.
+(define_insn "mul<mode>3_mul3"
+ [(set (match_operand:GPR 0 "register_operand" "=d,l")
+ (mult:GPR (match_operand:GPR 1 "register_operand" "d,d")
+ (match_operand:GPR 2 "register_operand" "d,d")))
+ (clobber (match_scratch:GPR 3 "=l,X"))]
+ "ISA_HAS_<D>MUL3"
+{
+ if (which_alternative == 1)
+ return "<d>mult\t%1,%2";
+ if (<MODE>mode == SImode && (TARGET_MIPS3900 || TARGET_MIPS5900))
+ return "mult\t%0,%1,%2";
+ return "<d>mul\t%0,%1,%2";
+}
+ [(set_attr "type" "imul3,imul")
+ (set_attr "mode" "<MODE>")])
+
+;; If a register gets allocated to LO, and we spill to memory, the reload
+;; will include a move from LO to a GPR. Merge it into the multiplication
+;; if it can set the GPR directly.
+;;
+;; Operand 0: LO
+;; Operand 1: GPR (1st multiplication operand)
+;; Operand 2: GPR (2nd multiplication operand)
+;; Operand 3: GPR (destination)
+(define_peephole2
+ [(parallel
+ [(set (match_operand:SI 0 "lo_operand")
+ (mult:SI (match_operand:SI 1 "d_operand")
+ (match_operand:SI 2 "d_operand")))
+ (clobber (scratch:SI))])
+ (set (match_operand:SI 3 "d_operand")
+ (match_dup 0))]
+ "ISA_HAS_MUL3 && peep2_reg_dead_p (2, operands[0])"
+ [(parallel
+ [(set (match_dup 3)
+ (mult:SI (match_dup 1)
+ (match_dup 2)))
+ (clobber (match_dup 0))])])
+
+;; Plain MULT writing to LO.
+(define_insn "mul<mode>3_internal"
+ [(set (match_operand:GPR 0 "muldiv_target_operand" "=l")
+ (mult:GPR (match_operand:GPR 1 "register_operand" "d")
+ (match_operand:GPR 2 "register_operand" "d")))]
+ "ISA_HAS_<D>MULT && !TARGET_FIX_R4000"
+ "<d>mult\t%1,%2"
+ [(set_attr "type" "imul")
+ (set_attr "mode" "<MODE>")])
+
+;; Keep the MFLO next to the MULT to work around the R4000 errata
+;; described above.
+(define_insn "mul<mode>3_r4000"
+ [(set (match_operand:GPR 0 "register_operand" "=d")
+ (mult:GPR (match_operand:GPR 1 "register_operand" "d")
+ (match_operand:GPR 2 "register_operand" "d")))
+ (clobber (match_scratch:GPR 3 "=l"))]
+ "ISA_HAS_<D>MULT && TARGET_FIX_R4000"
+ "<d>mult\t%1,%2\;mflo\t%0"
+ [(set_attr "type" "imul")
+ (set_attr "mode" "<MODE>")
+ (set_attr "insn_count" "2")])
+
+;; On the VR4120 and VR4130, it is better to use "mtlo $0; macc" instead
+;; of "mult; mflo". They have the same latency, but the first form gives
+;; us an extra cycle to compute the operands.
+
+;; Operand 0: LO
+;; Operand 1: GPR (1st multiplication operand)
+;; Operand 2: GPR (2nd multiplication operand)
+;; Operand 3: GPR (destination)
+(define_peephole2
+ [(set (match_operand:SI 0 "lo_operand")
+ (mult:SI (match_operand:SI 1 "d_operand")
+ (match_operand:SI 2 "d_operand")))
+ (set (match_operand:SI 3 "d_operand")
+ (match_dup 0))]
+ "ISA_HAS_MACC && !ISA_HAS_MUL3"
+ [(set (match_dup 0)
+ (const_int 0))
+ (parallel
+ [(set (match_dup 0)
+ (plus:SI (mult:SI (match_dup 1)
+ (match_dup 2))
+ (match_dup 0)))
+ (set (match_dup 3)
+ (plus:SI (mult:SI (match_dup 1)
+ (match_dup 2))
+ (match_dup 0)))])])
+
+;; Multiply-accumulate patterns
+
+;; This pattern is first matched by combine, which tries to use the
+;; pattern wherever it can. We don't know until later whether it
+;; is actually profitable to use MADD over a "MUL; ADDIU" sequence,
+;; so we need to keep both options open.
+;;
+;; The second alternative has a "?" marker because it is generally
+;; one instruction more costly than the first alternative. This "?"
+;; marker is enough to convey the relative costs to the register
+;; allocator.
+;;
+;; However, reload counts reloads of operands 4 and 5 in the same way as
+;; reloads of the other operands, even though operands 4 and 5 need no
+;; copy instructions. Reload therefore thinks that the second alternative
+;; is two reloads more costly than the first. We add "*?*?" to the first
+;; alternative as a counterweight.
+(define_insn "*mul_acc_si"
+ [(set (match_operand:SI 0 "register_operand" "=l*?*?,d?")
+ (plus:SI (mult:SI (match_operand:SI 1 "register_operand" "d,d")
+ (match_operand:SI 2 "register_operand" "d,d"))
+ (match_operand:SI 3 "register_operand" "0,d")))
+ (clobber (match_scratch:SI 4 "=X,l"))
+ (clobber (match_scratch:SI 5 "=X,&d"))]
+ "GENERATE_MADD_MSUB && !TARGET_MIPS16"
+ "@
+ madd\t%1,%2
+ #"
+ [(set_attr "type" "imadd")
+ (set_attr "accum_in" "3")
+ (set_attr "mode" "SI")
+ (set_attr "insn_count" "1,2")])
+
+;; The same idea applies here. The middle alternative needs one less
+;; clobber than the final alternative, so we add "*?" as a counterweight.
+;; The R3900 MADD can also write the result to a GPR (middle alternative).
+(define_insn "*mul_acc_si_r3900"
+ [(set (match_operand:SI 0 "register_operand" "=l*?*?,d*?,d?")
+ (plus:SI (mult:SI (match_operand:SI 1 "register_operand" "d,d,d")
+ (match_operand:SI 2 "register_operand" "d,d,d"))
+ (match_operand:SI 3 "register_operand" "0,l,d")))
+ (clobber (match_scratch:SI 4 "=X,3,l"))
+ (clobber (match_scratch:SI 5 "=X,X,&d"))]
+ "TARGET_MIPS3900 && !TARGET_MIPS16"
+ "@
+ madd\t%1,%2
+ madd\t%0,%1,%2
+ #"
+ [(set_attr "type" "imadd")
+ (set_attr "accum_in" "3")
+ (set_attr "mode" "SI")
+ (set_attr "insn_count" "1,1,2")])
+
+;; Split *mul_acc_si if both the source and destination accumulator
+;; values are GPRs.
+(define_split
+ [(set (match_operand:SI 0 "d_operand")
+ (plus:SI (mult:SI (match_operand:SI 1 "d_operand")
+ (match_operand:SI 2 "d_operand"))
+ (match_operand:SI 3 "d_operand")))
+ (clobber (match_operand:SI 4 "lo_operand"))
+ (clobber (match_operand:SI 5 "d_operand"))]
+ "reload_completed"
+ [(parallel [(set (match_dup 5)
+ (mult:SI (match_dup 1) (match_dup 2)))
+ (clobber (match_dup 4))])
+ (set (match_dup 0) (plus:SI (match_dup 5) (match_dup 3)))]
+ "")
+
+;; Multiply-accumulate; alternative 0 only updates LO, alternative 1
+;; also writes the result to a GPR.
+(define_insn "*macc"
+ [(set (match_operand:SI 0 "register_operand" "=l,d")
+ (plus:SI (mult:SI (match_operand:SI 1 "register_operand" "d,d")
+ (match_operand:SI 2 "register_operand" "d,d"))
+ (match_operand:SI 3 "register_operand" "0,l")))
+ (clobber (match_scratch:SI 4 "=X,3"))]
+ "ISA_HAS_MACC"
+{
+ if (which_alternative == 1)
+ return "macc\t%0,%1,%2";
+ else if (TARGET_MIPS5500)
+ return "madd\t%1,%2";
+ else
+ /* The VR4130 assumes that there is a two-cycle latency between a macc
+ that "writes" to $0 and an instruction that reads from it. We avoid
+ this by assigning to $1 instead. */
+ return "%[macc\t%@,%1,%2%]";
+}
+ [(set_attr "type" "imadd")
+ (set_attr "accum_in" "3")
+ (set_attr "mode" "SI")])
+
+;; Multiply-subtract; as for *macc, alternative 0 only updates LO.
+(define_insn "*msac"
+ [(set (match_operand:SI 0 "register_operand" "=l,d")
+ (minus:SI (match_operand:SI 1 "register_operand" "0,l")
+ (mult:SI (match_operand:SI 2 "register_operand" "d,d")
+ (match_operand:SI 3 "register_operand" "d,d"))))
+ (clobber (match_scratch:SI 4 "=X,1"))]
+ "ISA_HAS_MSAC"
+{
+ if (which_alternative == 1)
+ return "msac\t%0,%2,%3";
+ else if (TARGET_MIPS5500)
+ return "msub\t%2,%3";
+ else
+ return "msac\t$0,%2,%3";
+}
+ [(set_attr "type" "imadd")
+ (set_attr "accum_in" "1")
+ (set_attr "mode" "SI")])
+
+;; An msac-like instruction implemented using negation and a macc.
+(define_insn_and_split "*msac_using_macc"
+ [(set (match_operand:SI 0 "register_operand" "=l,d")
+ (minus:SI (match_operand:SI 1 "register_operand" "0,l")
+ (mult:SI (match_operand:SI 2 "register_operand" "d,d")
+ (match_operand:SI 3 "register_operand" "d,d"))))
+ (clobber (match_scratch:SI 4 "=X,1"))
+ (clobber (match_scratch:SI 5 "=d,d"))]
+ "ISA_HAS_MACC && !ISA_HAS_MSAC"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 5)
+ (neg:SI (match_dup 3)))
+ (parallel
+ [(set (match_dup 0)
+ (plus:SI (mult:SI (match_dup 2)
+ (match_dup 5))
+ (match_dup 1)))
+ (clobber (match_dup 4))])]
+ ""
+ [(set_attr "type" "imadd")
+ (set_attr "accum_in" "1")
+ (set_attr "insn_count" "2")])
+
+;; Patterns generated by the define_peephole2 below.
+
+(define_insn "*macc2"
+ [(set (match_operand:SI 0 "muldiv_target_operand" "=l")
+ (plus:SI (mult:SI (match_operand:SI 1 "register_operand" "d")
+ (match_operand:SI 2 "register_operand" "d"))
+ (match_dup 0)))
+ (set (match_operand:SI 3 "register_operand" "=d")
+ (plus:SI (mult:SI (match_dup 1)
+ (match_dup 2))
+ (match_dup 0)))]
+ "ISA_HAS_MACC && reload_completed"
+ "macc\t%3,%1,%2"
+ [(set_attr "type" "imadd")
+ (set_attr "accum_in" "0")
+ (set_attr "mode" "SI")])
+
+(define_insn "*msac2"
+ [(set (match_operand:SI 0 "muldiv_target_operand" "=l")
+ (minus:SI (match_dup 0)
+ (mult:SI (match_operand:SI 1 "register_operand" "d")
+ (match_operand:SI 2 "register_operand" "d"))))
+ (set (match_operand:SI 3 "register_operand" "=d")
+ (minus:SI (match_dup 0)
+ (mult:SI (match_dup 1)
+ (match_dup 2))))]
+ "ISA_HAS_MSAC && reload_completed"
+ "msac\t%3,%1,%2"
+ [(set_attr "type" "imadd")
+ (set_attr "accum_in" "0")
+ (set_attr "mode" "SI")])
+
+;; Convert macc $0,<r1>,<r2> & mflo <r3> into macc <r3>,<r1>,<r2>
+;; Similarly msac.
+;;
+;; Operand 0: LO
+;; Operand 1: macc/msac
+;; Operand 2: GPR (destination)
+(define_peephole2
+ [(parallel
+ [(set (match_operand:SI 0 "lo_operand")
+ (match_operand:SI 1 "macc_msac_operand"))
+ (clobber (scratch:SI))])
+ (set (match_operand:SI 2 "d_operand")
+ (match_dup 0))]
+ ""
+ [(parallel [(set (match_dup 0)
+ (match_dup 1))
+ (set (match_dup 2)
+ (match_dup 1))])])
+
+;; When we have a three-address multiplication instruction, it should
+;; be faster to do a separate multiply and add, rather than moving
+;; something into LO in order to use a macc instruction.
+;;
+;; This peephole needs a scratch register to cater for the case when one
+;; of the multiplication operands is the same as the destination.
+;;
+;; Operand 0: GPR (scratch)
+;; Operand 1: LO
+;; Operand 2: GPR (addend)
+;; Operand 3: GPR (destination)
+;; Operand 4: macc/msac
+;; Operand 5: new multiplication
+;; Operand 6: new addition/subtraction
+(define_peephole2
+ [(match_scratch:SI 0 "d")
+ (set (match_operand:SI 1 "lo_operand")
+ (match_operand:SI 2 "d_operand"))
+ (match_dup 0)
+ (parallel
+ [(set (match_operand:SI 3 "d_operand")
+ (match_operand:SI 4 "macc_msac_operand"))
+ (clobber (match_dup 1))])]
+ "ISA_HAS_MUL3 && peep2_reg_dead_p (2, operands[1])"
+ [(parallel [(set (match_dup 0)
+ (match_dup 5))
+ (clobber (match_dup 1))])
+ (set (match_dup 3)
+ (match_dup 6))]
+{
+ operands[5] = XEXP (operands[4], GET_CODE (operands[4]) == PLUS ? 0 : 1);
+ operands[6] = gen_rtx_fmt_ee (GET_CODE (operands[4]), SImode,
+ operands[2], operands[0]);
+})
+
+;; Same as above, except LO is the initial target of the macc.
+;;
+;; Operand 0: GPR (scratch)
+;; Operand 1: LO
+;; Operand 2: GPR (addend)
+;; Operand 3: macc/msac
+;; Operand 4: GPR (destination)
+;; Operand 5: new multiplication
+;; Operand 6: new addition/subtraction
+(define_peephole2
+ [(match_scratch:SI 0 "d")
+ (set (match_operand:SI 1 "lo_operand")
+ (match_operand:SI 2 "d_operand"))
+ (match_dup 0)
+ (parallel
+ [(set (match_dup 1)
+ (match_operand:SI 3 "macc_msac_operand"))
+ (clobber (scratch:SI))])
+ (match_dup 0)
+ (set (match_operand:SI 4 "d_operand")
+ (match_dup 1))]
+ "ISA_HAS_MUL3 && peep2_reg_dead_p (3, operands[1])"
+ [(parallel [(set (match_dup 0)
+ (match_dup 5))
+ (clobber (match_dup 1))])
+ (set (match_dup 4)
+ (match_dup 6))]
+{
+ operands[5] = XEXP (operands[3], GET_CODE (operands[3]) == PLUS ? 0 : 1);
+ operands[6] = gen_rtx_fmt_ee (GET_CODE (operands[3]), SImode,
+ operands[2], operands[0]);
+})
+
+;; See the comment above *mul_acc_si for details.
+(define_insn "*mul_sub_si"
+ [(set (match_operand:SI 0 "register_operand" "=l*?*?,d?")
+ (minus:SI (match_operand:SI 1 "register_operand" "0,d")
+ (mult:SI (match_operand:SI 2 "register_operand" "d,d")
+ (match_operand:SI 3 "register_operand" "d,d"))))
+ (clobber (match_scratch:SI 4 "=X,l"))
+ (clobber (match_scratch:SI 5 "=X,&d"))]
+ "GENERATE_MADD_MSUB"
+ "@
+ msub\t%2,%3
+ #"
+ [(set_attr "type" "imadd")
+ (set_attr "accum_in" "1")
+ (set_attr "mode" "SI")
+ (set_attr "insn_count" "1,2")])
+
+;; Split *mul_sub_si if both the source and destination accumulator
+;; values are GPRs.
+(define_split
+ [(set (match_operand:SI 0 "d_operand")
+ (minus:SI (match_operand:SI 1 "d_operand")
+ (mult:SI (match_operand:SI 2 "d_operand")
+ (match_operand:SI 3 "d_operand"))))
+ (clobber (match_operand:SI 4 "lo_operand"))
+ (clobber (match_operand:SI 5 "d_operand"))]
+ "reload_completed"
+ [(parallel [(set (match_dup 5)
+ (mult:SI (match_dup 2) (match_dup 3)))
+ (clobber (match_dup 4))])
+ (set (match_dup 0) (minus:SI (match_dup 1) (match_dup 5)))]
+ "")
+
+;; Negated multiply; alternative 1 is the three-operand form.
+(define_insn "*muls"
+ [(set (match_operand:SI 0 "register_operand" "=l,d")
+ (neg:SI (mult:SI (match_operand:SI 1 "register_operand" "d,d")
+ (match_operand:SI 2 "register_operand" "d,d"))))
+ (clobber (match_scratch:SI 3 "=X,l"))]
+ "ISA_HAS_MULS"
+ "@
+ muls\t$0,%1,%2
+ muls\t%0,%1,%2"
+ [(set_attr "type" "imul,imul3")
+ (set_attr "mode" "SI")])
+
+(define_expand "<u>mulsidi3"
+ [(set (match_operand:DI 0 "register_operand")
+ (mult:DI (any_extend:DI (match_operand:SI 1 "register_operand"))
+ (any_extend:DI (match_operand:SI 2 "register_operand"))))]
+ "mips_mulsidi3_gen_fn (<CODE>) != NULL"
+{
+ mulsidi3_gen_fn fn = mips_mulsidi3_gen_fn (<CODE>);
+ emit_insn (fn (operands[0], operands[1], operands[2]));
+ DONE;
+})
+
+(define_expand "<u>mulsidi3_32bit_mips16"
+ [(set (match_operand:DI 0 "register_operand")
+ (mult:DI (any_extend:DI (match_operand:SI 1 "register_operand"))
+ (any_extend:DI (match_operand:SI 2 "register_operand"))))]
+ "!TARGET_64BIT && TARGET_MIPS16"
+{
+ rtx hilo;
+
+ hilo = gen_rtx_REG (DImode, MD_REG_FIRST);
+ emit_insn (gen_<u>mulsidi3_32bit (hilo, operands[1], operands[2]));
+ emit_move_insn (operands[0], hilo);
+ DONE;
+})
+
+;; As well as being named patterns, these instructions are used by the
+;; __builtin_mips_mult<u>() functions. We must always make those functions
+;; available if !TARGET_64BIT && ISA_HAS_DSP.
+(define_insn "<u>mulsidi3_32bit"
+ [(set (match_operand:DI 0 "muldiv_target_operand" "=ka")
+ (mult:DI (any_extend:DI (match_operand:SI 1 "register_operand" "d"))
+ (any_extend:DI (match_operand:SI 2 "register_operand" "d"))))]
+ "!TARGET_64BIT && (!TARGET_FIX_R4000 || ISA_HAS_DSP)"
+{
+ if (ISA_HAS_DSP_MULT)
+ return "mult<u>\t%q0,%1,%2";
+ else
+ return "mult<u>\t%1,%2";
+}
+ [(set_attr "type" "imul")
+ (set_attr "mode" "SI")])
+
+(define_insn "<u>mulsidi3_32bit_r4000"
+ [(set (match_operand:DI 0 "register_operand" "=d")
+ (mult:DI (any_extend:DI (match_operand:SI 1 "register_operand" "d"))
+ (any_extend:DI (match_operand:SI 2 "register_operand" "d"))))
+ (clobber (match_scratch:DI 3 "=x"))]
+ "!TARGET_64BIT && TARGET_FIX_R4000 && !ISA_HAS_DSP"
+ "mult<u>\t%1,%2\;mflo\t%L0\;mfhi\t%M0"
+ [(set_attr "type" "imul")
+ (set_attr "mode" "SI")
+ (set_attr "insn_count" "3")])
+
+;; 64-bit targets without a 3-operand dmul: keep the widening multiply as
+;; one insn until after reload, then split to the _64bit_split sequence
+;; below.  Clobbers the TI-mode HI/LO pair and a DI scratch GPR.
+(define_insn_and_split "<u>mulsidi3_64bit"
+ [(set (match_operand:DI 0 "register_operand" "=d")
+ (mult:DI (any_extend:DI (match_operand:SI 1 "register_operand" "d"))
+ (any_extend:DI (match_operand:SI 2 "register_operand" "d"))))
+ (clobber (match_scratch:TI 3 "=x"))
+ (clobber (match_scratch:DI 4 "=d"))]
+ "TARGET_64BIT && !TARGET_FIX_R4000 && !ISA_HAS_DMUL3 && !TARGET_MIPS16"
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+{
+ emit_insn (gen_<u>mulsidi3_64bit_split (operands[0], operands[1],
+ operands[2], operands[4]));
+ DONE;
+}
+ [(set_attr "type" "imul")
+ (set_attr "mode" "SI")
+ ;; Cost matches the split: 4 insns when EXT/INS can merge the halves,
+ ;; 7 when they must be combined with shifts and an OR.
+ (set (attr "insn_count")
+ (if_then_else (match_test "ISA_HAS_EXT_INS")
+ (const_int 4)
+ (const_int 7)))])
+
+;; MIPS16 has no match_scratch-friendly form here, so expand straight to
+;; the split sequence with a fresh DI pseudo as the scratch.
+(define_expand "<u>mulsidi3_64bit_mips16"
+ [(set (match_operand:DI 0 "register_operand")
+ (mult:DI (any_extend:DI (match_operand:SI 1 "register_operand"))
+ (any_extend:DI (match_operand:SI 2 "register_operand"))))]
+ "TARGET_64BIT && TARGET_MIPS16"
+{
+ emit_insn (gen_<u>mulsidi3_64bit_split (operands[0], operands[1],
+ operands[2], gen_reg_rtx (DImode)));
+ DONE;
+})
+
+;; Shared expansion: do the multiply into HI/LO, read LO into the result
+;; and HI into the scratch (operand 3), then glue the two 32-bit halves
+;; into one 64-bit value — with a single INS when available, otherwise
+;; with a zero-extend/shift/OR sequence.
+(define_expand "<u>mulsidi3_64bit_split"
+ [(set (match_operand:DI 0 "register_operand")
+ (mult:DI (any_extend:DI (match_operand:SI 1 "register_operand"))
+ (any_extend:DI (match_operand:SI 2 "register_operand"))))
+ (clobber (match_operand:DI 3 "register_operand"))]
+ ""
+{
+ rtx hilo;
+
+ hilo = gen_rtx_REG (TImode, MD_REG_FIRST);
+ emit_insn (gen_<u>mulsidi3_64bit_hilo (hilo, operands[1], operands[2]));
+
+ emit_move_insn (operands[0], gen_rtx_REG (DImode, LO_REGNUM));
+ emit_insn (gen_mfhidi_ti (operands[3], hilo));
+
+ if (ISA_HAS_EXT_INS)
+ emit_insn (gen_insvdi (operands[0], GEN_INT (32), GEN_INT (32),
+ operands[3]));
+ else
+ {
+ /* Zero-extend the low part.  */
+ mips_emit_binary (ASHIFT, operands[0], operands[0], GEN_INT (32));
+ mips_emit_binary (LSHIFTRT, operands[0], operands[0], GEN_INT (32));
+
+ /* Shift the high part into place.  */
+ mips_emit_binary (ASHIFT, operands[3], operands[3], GEN_INT (32));
+
+ /* OR the two halves together.  */
+ mips_emit_binary (IOR, operands[0], operands[0], operands[3]);
+ }
+ DONE;
+})
+
+;; The raw multiply step of the split above: the 64-bit product is written
+;; to the combined TI-mode HI/LO register, wrapped in UNSPEC_SET_HILO so
+;; the two halves can be extracted separately afterwards.
+(define_insn "<u>mulsidi3_64bit_hilo"
+ [(set (match_operand:TI 0 "muldiv_target_operand" "=x")
+ (unspec:TI
+ [(mult:DI
+ (any_extend:DI (match_operand:SI 1 "register_operand" "d"))
+ (any_extend:DI (match_operand:SI 2 "register_operand" "d")))]
+ UNSPEC_SET_HILO))]
+ "TARGET_64BIT && !TARGET_FIX_R4000"
+ "mult<u>\t%1,%2"
+ [(set_attr "type" "imul")
+ (set_attr "mode" "SI")])
+
+;; See comment before the ISA_HAS_DMUL3 case in mips_mulsidi3_gen_fn.
+;; Signed-only: dmul writes the full product to a GPR directly; LO is
+;; still clobbered as a side effect, hence the "=l" scratch.
+(define_insn "mulsidi3_64bit_dmul"
+ [(set (match_operand:DI 0 "register_operand" "=d")
+ (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "d"))
+ (sign_extend:DI (match_operand:SI 2 "register_operand" "d"))))
+ (clobber (match_scratch:DI 3 "=l"))]
+ "ISA_HAS_DMUL3"
+ "dmul\t%0,%1,%2"
+ [(set_attr "type" "imul3")
+ (set_attr "mode" "DI")])
+
+;; Widening multiply with negation.
+;; Writes -(a*b) to HI/LO; $0 as the muls destination discards the GPR
+;; result, leaving only the accumulator side effect.
+(define_insn "*muls<u>_di"
+ [(set (match_operand:DI 0 "muldiv_target_operand" "=x")
+ (neg:DI
+ (mult:DI
+ (any_extend:DI (match_operand:SI 1 "register_operand" "d"))
+ (any_extend:DI (match_operand:SI 2 "register_operand" "d")))))]
+ "!TARGET_64BIT && ISA_HAS_MULS"
+ "muls<u>\t$0,%1,%2"
+ [(set_attr "type" "imul")
+ (set_attr "mode" "SI")])
+
+;; As well as being named patterns, these instructions are used by the
+;; __builtin_mips_msub<u>() functions. We must always make those functions
+;; available if !TARGET_64BIT && ISA_HAS_DSP.
+;;
+;; This leads to a slight inconsistency. We honor any tuning overrides
+;; in GENERATE_MADD_MSUB for -mno-dsp, but always ignore them for -mdsp,
+;; even if !ISA_HAS_DSP_MULT.
+;;
+;; Multiply-subtract from an accumulator: acc -= a*b.  Operand 3 is tied
+;; to operand 0 ("0"), and accum_in records the accumulator input for
+;; scheduling.
+(define_insn "<u>msubsidi4"
+ [(set (match_operand:DI 0 "muldiv_target_operand" "=ka")
+ (minus:DI
+ (match_operand:DI 3 "muldiv_target_operand" "0")
+ (mult:DI
+ (any_extend:DI (match_operand:SI 1 "register_operand" "d"))
+ (any_extend:DI (match_operand:SI 2 "register_operand" "d")))))]
+ "!TARGET_64BIT && (ISA_HAS_MSAC || GENERATE_MADD_MSUB || ISA_HAS_DSP)"
+{
+ if (ISA_HAS_DSP_MULT)
+ return "msub<u>\t%q0,%1,%2";
+ else if (TARGET_MIPS5500 || GENERATE_MADD_MSUB)
+ return "msub<u>\t%1,%2";
+ else
+ return "msac<u>\t$0,%1,%2";
+}
+ [(set_attr "type" "imadd")
+ (set_attr "accum_in" "3")
+ (set_attr "mode" "SI")])
+
+;; _highpart patterns
+
+;; High 32 bits of a 32x32 widening multiply.  Dispatch on target:
+;; 3-operand mulhi when available, otherwise the MIPS16 or generic
+;; mult+mfhi sequences below.
+(define_expand "<su>mulsi3_highpart"
+ [(set (match_operand:SI 0 "register_operand")
+ (truncate:SI
+ (lshiftrt:DI
+ (mult:DI (any_extend:DI (match_operand:SI 1 "register_operand"))
+ (any_extend:DI (match_operand:SI 2 "register_operand")))
+ (const_int 32))))]
+ ""
+{
+ if (ISA_HAS_MULHI)
+ emit_insn (gen_<su>mulsi3_highpart_mulhi_internal (operands[0],
+ operands[1],
+ operands[2]));
+ else if (TARGET_MIPS16)
+ emit_insn (gen_<su>mulsi3_highpart_split (operands[0], operands[1],
+ operands[2]));
+ else
+ emit_insn (gen_<su>mulsi3_highpart_internal (operands[0], operands[1],
+ operands[2]));
+ DONE;
+})
+
+;; Generic form: emit mult+mfhi directly for -mfix-r4000 (the split must
+;; not be used there); otherwise stay as one insn and split after reload
+;; into the _split expansion.
+(define_insn_and_split "<su>mulsi3_highpart_internal"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (truncate:SI
+ (lshiftrt:DI
+ (mult:DI (any_extend:DI (match_operand:SI 1 "register_operand" "d"))
+ (any_extend:DI (match_operand:SI 2 "register_operand" "d")))
+ (const_int 32))))
+ (clobber (match_scratch:SI 3 "=l"))]
+ "!ISA_HAS_MULHI && !TARGET_MIPS16"
+ { return TARGET_FIX_R4000 ? "mult<u>\t%1,%2\n\tmfhi\t%0" : "#"; }
+ "&& reload_completed && !TARGET_FIX_R4000"
+ [(const_int 0)]
+{
+ emit_insn (gen_<su>mulsi3_highpart_split (operands[0], operands[1],
+ operands[2]));
+ DONE;
+}
+ [(set_attr "type" "imul")
+ (set_attr "mode" "SI")
+ (set_attr "insn_count" "2")])
+
+;; Split expansion: multiply into HI/LO (TI-mode pair on 64-bit targets,
+;; DI-mode pair on 32-bit ones) and read back the HI half.
+(define_expand "<su>mulsi3_highpart_split"
+ [(set (match_operand:SI 0 "register_operand")
+ (truncate:SI
+ (lshiftrt:DI
+ (mult:DI (any_extend:DI (match_operand:SI 1 "register_operand"))
+ (any_extend:DI (match_operand:SI 2 "register_operand")))
+ (const_int 32))))]
+ ""
+{
+ rtx hilo;
+
+ if (TARGET_64BIT)
+ {
+ hilo = gen_rtx_REG (TImode, MD_REG_FIRST);
+ emit_insn (gen_<u>mulsidi3_64bit_hilo (hilo, operands[1], operands[2]));
+ emit_insn (gen_mfhisi_ti (operands[0], hilo));
+ }
+ else
+ {
+ hilo = gen_rtx_REG (DImode, MD_REG_FIRST);
+ emit_insn (gen_<u>mulsidi3_32bit (hilo, operands[1], operands[2]));
+ emit_insn (gen_mfhisi_di (operands[0], hilo));
+ }
+ DONE;
+})
+
+;; 3-operand mulhi: high word of the product straight into a GPR.
+;; LO is still clobbered as a side effect ("=l" scratch).
+(define_insn "<su>mulsi3_highpart_mulhi_internal"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (truncate:SI
+ (lshiftrt:DI
+ (mult:DI
+ (any_extend:DI (match_operand:SI 1 "register_operand" "d"))
+ (any_extend:DI (match_operand:SI 2 "register_operand" "d")))
+ (const_int 32))))
+ (clobber (match_scratch:SI 3 "=l"))]
+ "ISA_HAS_MULHI"
+ "mulhi<u>\t%0,%1,%2"
+ [(set_attr "type" "imul3")
+ (set_attr "mode" "SI")])
+
+;; Negated variant: high word of -(a*b) via mulshi.
+(define_insn "*<su>mulsi3_highpart_neg_mulhi_internal"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (truncate:SI
+ (lshiftrt:DI
+ (neg:DI
+ (mult:DI
+ (any_extend:DI (match_operand:SI 1 "register_operand" "d"))
+ (any_extend:DI (match_operand:SI 2 "register_operand" "d"))))
+ (const_int 32))))
+ (clobber (match_scratch:SI 3 "=l"))]
+ "ISA_HAS_MULHI"
+ "mulshi<u>\t%0,%1,%2"
+ [(set_attr "type" "imul3")
+ (set_attr "mode" "SI")])
+
+;; Disable unsigned multiplication for -mfix-vr4120. This is for VR4120
+;; errata MD(0), which says that dmultu does not always produce the
+;; correct result.
+;;
+;; High 64 bits of a 64x64 widening multiply — the DI analogue of the
+;; SI highpart patterns above.
+(define_expand "<su>muldi3_highpart"
+ [(set (match_operand:DI 0 "register_operand")
+ (truncate:DI
+ (lshiftrt:TI
+ (mult:TI (any_extend:TI (match_operand:DI 1 "register_operand"))
+ (any_extend:TI (match_operand:DI 2 "register_operand")))
+ (const_int 64))))]
+ "ISA_HAS_DMULT && !(<CODE> == ZERO_EXTEND && TARGET_FIX_VR4120)"
+{
+ if (TARGET_MIPS16)
+ emit_insn (gen_<su>muldi3_highpart_split (operands[0], operands[1],
+ operands[2]));
+ else
+ emit_insn (gen_<su>muldi3_highpart_internal (operands[0], operands[1],
+ operands[2]));
+ DONE;
+})
+
+;; dmult+mfhi emitted directly under -mfix-r4000; otherwise split after
+;; reload into the _split expansion below.
+(define_insn_and_split "<su>muldi3_highpart_internal"
+ [(set (match_operand:DI 0 "register_operand" "=d")
+ (truncate:DI
+ (lshiftrt:TI
+ (mult:TI (any_extend:TI (match_operand:DI 1 "register_operand" "d"))
+ (any_extend:TI (match_operand:DI 2 "register_operand" "d")))
+ (const_int 64))))
+ (clobber (match_scratch:DI 3 "=l"))]
+ "ISA_HAS_DMULT
+ && !TARGET_MIPS16
+ && !(<CODE> == ZERO_EXTEND && TARGET_FIX_VR4120)"
+ { return TARGET_FIX_R4000 ? "dmult<u>\t%1,%2\n\tmfhi\t%0" : "#"; }
+ "&& reload_completed && !TARGET_FIX_R4000"
+ [(const_int 0)]
+{
+ emit_insn (gen_<su>muldi3_highpart_split (operands[0], operands[1],
+ operands[2]));
+ DONE;
+}
+ [(set_attr "type" "imul")
+ (set_attr "mode" "DI")
+ (set_attr "insn_count" "2")])
+
+;; Multiply into the TI-mode HI/LO pair, then read back the HI half.
+(define_expand "<su>muldi3_highpart_split"
+ [(set (match_operand:DI 0 "register_operand")
+ (truncate:DI
+ (lshiftrt:TI
+ (mult:TI (any_extend:TI (match_operand:DI 1 "register_operand"))
+ (any_extend:TI (match_operand:DI 2 "register_operand")))
+ (const_int 64))))]
+ ""
+{
+ rtx hilo;
+
+ hilo = gen_rtx_REG (TImode, MD_REG_FIRST);
+ emit_insn (gen_<u>mulditi3_internal (hilo, operands[1], operands[2]));
+ emit_insn (gen_mfhidi_ti (operands[0], hilo));
+ DONE;
+})
+
+;; Full 64x64->128 widening multiply.  MIPS16 goes through the HI/LO
+;; register pair explicitly; -mfix-r4000 uses the mflo/mfhi sequence;
+;; everything else targets HI/LO directly.  The VR4120 MD(0) erratum
+;; disables the unsigned (dmultu) forms, as noted above.
+(define_expand "<u>mulditi3"
+ [(set (match_operand:TI 0 "register_operand")
+ (mult:TI (any_extend:TI (match_operand:DI 1 "register_operand"))
+ (any_extend:TI (match_operand:DI 2 "register_operand"))))]
+ "ISA_HAS_DMULT && !(<CODE> == ZERO_EXTEND && TARGET_FIX_VR4120)"
+{
+ rtx hilo;
+
+ if (TARGET_MIPS16)
+ {
+ hilo = gen_rtx_REG (TImode, MD_REG_FIRST);
+ emit_insn (gen_<u>mulditi3_internal (hilo, operands[1], operands[2]));
+ emit_move_insn (operands[0], hilo);
+ }
+ else if (TARGET_FIX_R4000)
+ emit_insn (gen_<u>mulditi3_r4000 (operands[0], operands[1], operands[2]));
+ else
+ emit_insn (gen_<u>mulditi3_internal (operands[0], operands[1],
+ operands[2]));
+ DONE;
+})
+
+;; Single dmult with the TI result in HI/LO.
+(define_insn "<u>mulditi3_internal"
+ [(set (match_operand:TI 0 "muldiv_target_operand" "=x")
+ (mult:TI (any_extend:TI (match_operand:DI 1 "register_operand" "d"))
+ (any_extend:TI (match_operand:DI 2 "register_operand" "d"))))]
+ "ISA_HAS_DMULT
+ && !TARGET_FIX_R4000
+ && !(<CODE> == ZERO_EXTEND && TARGET_FIX_VR4120)"
+ "dmult<u>\t%1,%2"
+ [(set_attr "type" "imul")
+ (set_attr "mode" "DI")])
+
+;; -mfix-r4000 variant: drain HI/LO into a GPR pair immediately.
+(define_insn "<u>mulditi3_r4000"
+ [(set (match_operand:TI 0 "register_operand" "=d")
+ (mult:TI (any_extend:TI (match_operand:DI 1 "register_operand" "d"))
+ (any_extend:TI (match_operand:DI 2 "register_operand" "d"))))
+ (clobber (match_scratch:TI 3 "=x"))]
+ "ISA_HAS_DMULT
+ && TARGET_FIX_R4000
+ && !(<CODE> == ZERO_EXTEND && TARGET_FIX_VR4120)"
+ "dmult<u>\t%1,%2\;mflo\t%L0\;mfhi\t%M0"
+ [(set_attr "type" "imul")
+ (set_attr "mode" "DI")
+ (set_attr "insn_count" "3")])
+
+;; The R4650 supports a 32-bit multiply / 64-bit accumulate
+;; instruction. The HI/LO registers are used as a 64-bit accumulator.
+
+;; "+l": operand 0 is both read (the running accumulator, via match_dup)
+;; and written, and is fixed to the LO register.
+(define_insn "madsi"
+ [(set (match_operand:SI 0 "register_operand" "+l")
+ (plus:SI (mult:SI (match_operand:SI 1 "register_operand" "d")
+ (match_operand:SI 2 "register_operand" "d"))
+ (match_dup 0)))]
+ "TARGET_MAD"
+ "mad\t%1,%2"
+ [(set_attr "type" "imadd")
+ (set_attr "accum_in" "0")
+ (set_attr "mode" "SI")])
+
+;; See the comment above <u>msubsidi4 for the relationship between
+;; ISA_HAS_DSP and ISA_HAS_DSP_MULT.
+;;
+;; Multiply-accumulate: acc += a*b, choosing among mad/madd/macc forms
+;; by target feature.  Operand 3 is tied to the destination accumulator.
+(define_insn "<u>maddsidi4"
+ [(set (match_operand:DI 0 "muldiv_target_operand" "=ka")
+ (plus:DI
+ (mult:DI (any_extend:DI (match_operand:SI 1 "register_operand" "d"))
+ (any_extend:DI (match_operand:SI 2 "register_operand" "d")))
+ (match_operand:DI 3 "muldiv_target_operand" "0")))]
+ "(TARGET_MAD || ISA_HAS_MACC || GENERATE_MADD_MSUB || ISA_HAS_DSP)
+ && !TARGET_64BIT"
+{
+ if (TARGET_MAD)
+ return "mad<u>\t%1,%2";
+ else if (ISA_HAS_DSP_MULT)
+ return "madd<u>\t%q0,%1,%2";
+ else if (GENERATE_MADD_MSUB || TARGET_MIPS5500)
+ return "madd<u>\t%1,%2";
+ else
+ /* See comment in *macc.  */
+ return "%[macc<u>\t%@,%1,%2%]";
+}
+ [(set_attr "type" "imadd")
+ (set_attr "accum_in" "3")
+ (set_attr "mode" "SI")])
+
+;; Floating point multiply accumulate instructions.
+;; All of these are only used under -ffused-madd (TARGET_FUSED_MADD):
+;; they fuse a multiply and an add/subtract into one instruction.
+
+;; 4-operand fused madd: %0 = %1*%2 + %3.
+(define_insn "*madd4<mode>"
+ [(set (match_operand:ANYF 0 "register_operand" "=f")
+ (plus:ANYF (mult:ANYF (match_operand:ANYF 1 "register_operand" "f")
+ (match_operand:ANYF 2 "register_operand" "f"))
+ (match_operand:ANYF 3 "register_operand" "f")))]
+ "ISA_HAS_FP_MADD4_MSUB4 && TARGET_FUSED_MADD"
+ "madd.<fmt>\t%0,%3,%1,%2"
+ [(set_attr "type" "fmadd")
+ (set_attr "accum_in" "3")
+ (set_attr "mode" "<UNITMODE>")])
+
+;; 3-operand fused madd: the addend is tied to the destination ("0").
+(define_insn "*madd3<mode>"
+ [(set (match_operand:ANYF 0 "register_operand" "=f")
+ (plus:ANYF (mult:ANYF (match_operand:ANYF 1 "register_operand" "f")
+ (match_operand:ANYF 2 "register_operand" "f"))
+ (match_operand:ANYF 3 "register_operand" "0")))]
+ "ISA_HAS_FP_MADD3_MSUB3 && TARGET_FUSED_MADD"
+ "madd.<fmt>\t%0,%1,%2"
+ [(set_attr "type" "fmadd")
+ (set_attr "accum_in" "3")
+ (set_attr "mode" "<UNITMODE>")])
+
+;; 4-operand fused msub: %0 = %1*%2 - %3.
+(define_insn "*msub4<mode>"
+ [(set (match_operand:ANYF 0 "register_operand" "=f")
+ (minus:ANYF (mult:ANYF (match_operand:ANYF 1 "register_operand" "f")
+ (match_operand:ANYF 2 "register_operand" "f"))
+ (match_operand:ANYF 3 "register_operand" "f")))]
+ "ISA_HAS_FP_MADD4_MSUB4 && TARGET_FUSED_MADD"
+ "msub.<fmt>\t%0,%3,%1,%2"
+ [(set_attr "type" "fmadd")
+ (set_attr "accum_in" "3")
+ (set_attr "mode" "<UNITMODE>")])
+
+;; 3-operand fused msub: subtrahend tied to the destination.
+(define_insn "*msub3<mode>"
+ [(set (match_operand:ANYF 0 "register_operand" "=f")
+ (minus:ANYF (mult:ANYF (match_operand:ANYF 1 "register_operand" "f")
+ (match_operand:ANYF 2 "register_operand" "f"))
+ (match_operand:ANYF 3 "register_operand" "0")))]
+ "ISA_HAS_FP_MADD3_MSUB3 && TARGET_FUSED_MADD"
+ "msub.<fmt>\t%0,%1,%2"
+ [(set_attr "type" "fmadd")
+ (set_attr "accum_in" "3")
+ (set_attr "mode" "<UNITMODE>")])
+
+;; Negated fused madd: %0 = -(%1*%2 + %3).  The exact-negation RTL form
+;; is only valid when signed zeros are honored and NaNs are not; the
+;; _fastmath variants below cover the -ffast-math-style flag settings.
+(define_insn "*nmadd4<mode>"
+ [(set (match_operand:ANYF 0 "register_operand" "=f")
+ (neg:ANYF (plus:ANYF
+ (mult:ANYF (match_operand:ANYF 1 "register_operand" "f")
+ (match_operand:ANYF 2 "register_operand" "f"))
+ (match_operand:ANYF 3 "register_operand" "f"))))]
+ "ISA_HAS_NMADD4_NMSUB4
+ && TARGET_FUSED_MADD
+ && HONOR_SIGNED_ZEROS (<MODE>mode)
+ && !HONOR_NANS (<MODE>mode)"
+ "nmadd.<fmt>\t%0,%3,%1,%2"
+ [(set_attr "type" "fmadd")
+ (set_attr "accum_in" "3")
+ (set_attr "mode" "<UNITMODE>")])
+
+;; 3-operand form with the addend tied to the destination.
+(define_insn "*nmadd3<mode>"
+ [(set (match_operand:ANYF 0 "register_operand" "=f")
+ (neg:ANYF (plus:ANYF
+ (mult:ANYF (match_operand:ANYF 1 "register_operand" "f")
+ (match_operand:ANYF 2 "register_operand" "f"))
+ (match_operand:ANYF 3 "register_operand" "0"))))]
+ "ISA_HAS_NMADD3_NMSUB3
+ && TARGET_FUSED_MADD
+ && HONOR_SIGNED_ZEROS (<MODE>mode)
+ && !HONOR_NANS (<MODE>mode)"
+ "nmadd.<fmt>\t%0,%1,%2"
+ [(set_attr "type" "fmadd")
+ (set_attr "accum_in" "3")
+ (set_attr "mode" "<UNITMODE>")])
+
+;; Fast-math shape of nmadd: (-a)*b - c, matched when signed zeros are
+;; ignored so it is equivalent to -(a*b + c).
+(define_insn "*nmadd4<mode>_fastmath"
+ [(set (match_operand:ANYF 0 "register_operand" "=f")
+ (minus:ANYF
+ (mult:ANYF (neg:ANYF (match_operand:ANYF 1 "register_operand" "f"))
+ (match_operand:ANYF 2 "register_operand" "f"))
+ (match_operand:ANYF 3 "register_operand" "f")))]
+ "ISA_HAS_NMADD4_NMSUB4
+ && TARGET_FUSED_MADD
+ && !HONOR_SIGNED_ZEROS (<MODE>mode)
+ && !HONOR_NANS (<MODE>mode)"
+ "nmadd.<fmt>\t%0,%3,%1,%2"
+ [(set_attr "type" "fmadd")
+ (set_attr "accum_in" "3")
+ (set_attr "mode" "<UNITMODE>")])
+
+;; Fast-math 3-operand nmadd, subtrahend tied to the destination.
+(define_insn "*nmadd3<mode>_fastmath"
+ [(set (match_operand:ANYF 0 "register_operand" "=f")
+ (minus:ANYF
+ (mult:ANYF (neg:ANYF (match_operand:ANYF 1 "register_operand" "f"))
+ (match_operand:ANYF 2 "register_operand" "f"))
+ (match_operand:ANYF 3 "register_operand" "0")))]
+ "ISA_HAS_NMADD3_NMSUB3
+ && TARGET_FUSED_MADD
+ && !HONOR_SIGNED_ZEROS (<MODE>mode)
+ && !HONOR_NANS (<MODE>mode)"
+ "nmadd.<fmt>\t%0,%1,%2"
+ [(set_attr "type" "fmadd")
+ (set_attr "accum_in" "3")
+ (set_attr "mode" "<UNITMODE>")])
+
+;; Negated fused msub: %0 = -(%2*%3 - %1).  Note the operand numbering:
+;; operand 1 is the subtracted-from value, matching the nmsub assembly
+;; operand order %0,%1,%2,%3.
+(define_insn "*nmsub4<mode>"
+ [(set (match_operand:ANYF 0 "register_operand" "=f")
+ (neg:ANYF (minus:ANYF
+ (mult:ANYF (match_operand:ANYF 2 "register_operand" "f")
+ (match_operand:ANYF 3 "register_operand" "f"))
+ (match_operand:ANYF 1 "register_operand" "f"))))]
+ "ISA_HAS_NMADD4_NMSUB4
+ && TARGET_FUSED_MADD
+ && HONOR_SIGNED_ZEROS (<MODE>mode)
+ && !HONOR_NANS (<MODE>mode)"
+ "nmsub.<fmt>\t%0,%1,%2,%3"
+ [(set_attr "type" "fmadd")
+ (set_attr "accum_in" "1")
+ (set_attr "mode" "<UNITMODE>")])
+
+;; 3-operand form, operand 1 tied to the destination.
+(define_insn "*nmsub3<mode>"
+ [(set (match_operand:ANYF 0 "register_operand" "=f")
+ (neg:ANYF (minus:ANYF
+ (mult:ANYF (match_operand:ANYF 2 "register_operand" "f")
+ (match_operand:ANYF 3 "register_operand" "f"))
+ (match_operand:ANYF 1 "register_operand" "0"))))]
+ "ISA_HAS_NMADD3_NMSUB3
+ && TARGET_FUSED_MADD
+ && HONOR_SIGNED_ZEROS (<MODE>mode)
+ && !HONOR_NANS (<MODE>mode)"
+ "nmsub.<fmt>\t%0,%1,%2"
+ [(set_attr "type" "fmadd")
+ (set_attr "accum_in" "1")
+ (set_attr "mode" "<UNITMODE>")])
+
+;; Fast-math shape of nmsub: %1 - %2*%3, matched when signed zeros are
+;; ignored so it is equivalent to -(%2*%3 - %1).
+(define_insn "*nmsub4<mode>_fastmath"
+ [(set (match_operand:ANYF 0 "register_operand" "=f")
+ (minus:ANYF
+ (match_operand:ANYF 1 "register_operand" "f")
+ (mult:ANYF (match_operand:ANYF 2 "register_operand" "f")
+ (match_operand:ANYF 3 "register_operand" "f"))))]
+ "ISA_HAS_NMADD4_NMSUB4
+ && TARGET_FUSED_MADD
+ && !HONOR_SIGNED_ZEROS (<MODE>mode)
+ && !HONOR_NANS (<MODE>mode)"
+ "nmsub.<fmt>\t%0,%1,%2,%3"
+ [(set_attr "type" "fmadd")
+ (set_attr "accum_in" "1")
+ (set_attr "mode" "<UNITMODE>")])
+
+;; Fast-math 3-operand nmsub, multiplier tied to the destination.
+(define_insn "*nmsub3<mode>_fastmath"
+ [(set (match_operand:ANYF 0 "register_operand" "=f")
+ (minus:ANYF
+ (match_operand:ANYF 1 "register_operand" "f")
+ (mult:ANYF (match_operand:ANYF 2 "register_operand" "f")
+ (match_operand:ANYF 3 "register_operand" "0"))))]
+ "ISA_HAS_NMADD3_NMSUB3
+ && TARGET_FUSED_MADD
+ && !HONOR_SIGNED_ZEROS (<MODE>mode)
+ && !HONOR_NANS (<MODE>mode)"
+ "nmsub.<fmt>\t%0,%1,%2"
+ [(set_attr "type" "fmadd")
+ (set_attr "accum_in" "1")
+ (set_attr "mode" "<UNITMODE>")])
+
+;;
+;; ....................
+;;
+;; DIVISION and REMAINDER
+;;
+;; ....................
+;;
+
+;; FP division.  Operand 1 may be the constant 1 so that 1/x can match
+;; the recip patterns; force it into a register unless recip/rsqrt are
+;; available and unsafe-math allows using them.
+(define_expand "div<mode>3"
+ [(set (match_operand:ANYF 0 "register_operand")
+ (div:ANYF (match_operand:ANYF 1 "reg_or_1_operand")
+ (match_operand:ANYF 2 "register_operand")))]
+ "<divide_condition>"
+{
+ if (const_1_operand (operands[1], <MODE>mode))
+ if (!(ISA_HAS_FP_RECIP_RSQRT (<MODE>mode)
+ && flag_unsafe_math_optimizations))
+ operands[1] = force_reg (<MODE>mode, operands[1]);
+})
+
+;; These patterns work around the early SB-1 rev2 core "F1" erratum:
+;;
+;; If an mfc1 or dmfc1 happens to access the floating point register
+;; file at the same time a long latency operation (div, sqrt, recip,
+;; rsqrt) iterates an intermediate result back through the floating
+;; point register file bypass, then instead of returning the correct
+;; register value the mfc1 or dmfc1 operation returns the intermediate
+;; result of the long latency operation.
+;;
+;; The workaround is to insert an unconditional 'mov' from/to the
+;; long latency op destination register.
+
+;; FP divide, with the SB-1 mov.<fmt> workaround appended under
+;; -mfix-sb1 (insn_count reflects the extra instruction).
+(define_insn "*div<mode>3"
+ [(set (match_operand:ANYF 0 "register_operand" "=f")
+ (div:ANYF (match_operand:ANYF 1 "register_operand" "f")
+ (match_operand:ANYF 2 "register_operand" "f")))]
+ "<divide_condition>"
+{
+ if (TARGET_FIX_SB1)
+ return "div.<fmt>\t%0,%1,%2\;mov.<fmt>\t%0,%0";
+ else
+ return "div.<fmt>\t%0,%1,%2";
+}
+ [(set_attr "type" "fdiv")
+ (set_attr "mode" "<UNITMODE>")
+ (set (attr "insn_count")
+ (if_then_else (match_test "TARGET_FIX_SB1")
+ (const_int 2)
+ (const_int 1)))])
+
+;; 1/x via the hardware reciprocal; unsafe-math only, since recip is
+;; not a correctly-rounded division.  Same SB-1 workaround.
+(define_insn "*recip<mode>3"
+ [(set (match_operand:ANYF 0 "register_operand" "=f")
+ (div:ANYF (match_operand:ANYF 1 "const_1_operand" "")
+ (match_operand:ANYF 2 "register_operand" "f")))]
+ "ISA_HAS_FP_RECIP_RSQRT (<MODE>mode) && flag_unsafe_math_optimizations"
+{
+ if (TARGET_FIX_SB1)
+ return "recip.<fmt>\t%0,%2\;mov.<fmt>\t%0,%0";
+ else
+ return "recip.<fmt>\t%0,%2";
+}
+ [(set_attr "type" "frdiv")
+ (set_attr "mode" "<UNITMODE>")
+ (set (attr "insn_count")
+ (if_then_else (match_test "TARGET_FIX_SB1")
+ (const_int 2)
+ (const_int 1)))])
+
+;; VR4120 errata MD(A1): signed division instructions do not work correctly
+;; with negative operands. We use special libgcc functions instead.
+;;
+;; Combined signed div+mod: quotient in LO, remainder from HI.  MIPS16
+;; is routed to a variant that exposes the LO register explicitly.
+(define_expand "divmod<mode>4"
+ [(parallel
+ [(set (match_operand:GPR 0 "register_operand")
+ (div:GPR (match_operand:GPR 1 "register_operand")
+ (match_operand:GPR 2 "register_operand")))
+ (set (match_operand:GPR 3 "register_operand")
+ (mod:GPR (match_dup 1)
+ (match_dup 2)))])]
+ "ISA_HAS_<D>DIV && !TARGET_FIX_VR4120"
+{
+ if (TARGET_MIPS16)
+ {
+ rtx lo = gen_rtx_REG (<MODE>mode, LO_REGNUM);
+ emit_insn (gen_divmod<mode>4_mips16 (operands[0], operands[1],
+ operands[2], operands[3], lo));
+ DONE;
+ }
+})
+
+;; Non-MIPS16 form: quotient constrained to LO ("=l"); split after reload
+;; into the div + mfhi sequence of divmod<mode>4_split.
+(define_insn_and_split "*divmod<mode>4"
+ [(set (match_operand:GPR 0 "register_operand" "=l")
+ (div:GPR (match_operand:GPR 1 "register_operand" "d")
+ (match_operand:GPR 2 "register_operand" "d")))
+ (set (match_operand:GPR 3 "register_operand" "=d")
+ (mod:GPR (match_dup 1)
+ (match_dup 2)))]
+ "ISA_HAS_<D>DIV && !TARGET_FIX_VR4120 && !TARGET_MIPS16"
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+{
+ emit_insn (gen_divmod<mode>4_split (operands[3], operands[1], operands[2]));
+ DONE;
+}
+ [(set_attr "type" "idiv")
+ (set_attr "mode" "<MODE>")
+ (set_attr "insn_count" "2")])
+
+;; Expand generates divmod instructions for individual division and modulus
+;; operations. We then rely on CSE to reuse earlier divmods where possible.
+;; This means that, when generating MIPS16 code, it is better not to expose
+;; the fixed LO register until after CSE has finished. However, it's still
+;; better to split before register allocation, so that we don't allocate
+;; one of the scarce MIPS16 registers to an unused result.
+(define_insn_and_split "divmod<mode>4_mips16"
+ [(set (match_operand:GPR 0 "register_operand" "=d")
+ (div:GPR (match_operand:GPR 1 "register_operand" "d")
+ (match_operand:GPR 2 "register_operand" "d")))
+ (set (match_operand:GPR 3 "register_operand" "=d")
+ (mod:GPR (match_dup 1)
+ (match_dup 2)))
+ (clobber (match_operand:GPR 4 "lo_operand" "=l"))]
+ "ISA_HAS_<D>DIV && !TARGET_FIX_VR4120 && TARGET_MIPS16"
+ "#"
+ "&& cse_not_expected"
+ [(const_int 0)]
+{
+ emit_insn (gen_divmod<mode>4_split (operands[3], operands[1], operands[2]));
+ emit_move_insn (operands[0], operands[4]);
+ DONE;
+}
+ [(set_attr "type" "idiv")
+ (set_attr "mode" "<MODE>")
+ (set_attr "insn_count" "3")])
+
+;; Unsigned counterpart of divmod<mode>4.  The VR4120 MD(A1) erratum is
+;; signed-only, so only the named expander keeps the !TARGET_FIX_VR4120
+;; test (matching the signed pattern's shape); the insns below do not
+;; need it.
+(define_expand "udivmod<mode>4"
+ [(parallel
+ [(set (match_operand:GPR 0 "register_operand")
+ (udiv:GPR (match_operand:GPR 1 "register_operand")
+ (match_operand:GPR 2 "register_operand")))
+ (set (match_operand:GPR 3 "register_operand")
+ (umod:GPR (match_dup 1)
+ (match_dup 2)))])]
+ "ISA_HAS_<D>DIV && !TARGET_FIX_VR4120"
+{
+ if (TARGET_MIPS16)
+ {
+ rtx lo = gen_rtx_REG (<MODE>mode, LO_REGNUM);
+ emit_insn (gen_udivmod<mode>4_mips16 (operands[0], operands[1],
+ operands[2], operands[3], lo));
+ DONE;
+ }
+})
+
+;; Non-MIPS16 unsigned form; split after reload like *divmod<mode>4.
+;; NOTE(review): the split condition here is "reload_completed" without
+;; the leading "&&" that the signed pattern uses — confirm against
+;; upstream mips.md before normalizing.
+(define_insn_and_split "*udivmod<mode>4"
+ [(set (match_operand:GPR 0 "register_operand" "=l")
+ (udiv:GPR (match_operand:GPR 1 "register_operand" "d")
+ (match_operand:GPR 2 "register_operand" "d")))
+ (set (match_operand:GPR 3 "register_operand" "=d")
+ (umod:GPR (match_dup 1)
+ (match_dup 2)))]
+ "ISA_HAS_<D>DIV && !TARGET_MIPS16"
+ "#"
+ "reload_completed"
+ [(const_int 0)]
+{
+ emit_insn (gen_udivmod<mode>4_split (operands[3], operands[1], operands[2]));
+ DONE;
+}
+ [(set_attr "type" "idiv")
+ (set_attr "mode" "<MODE>")
+ (set_attr "insn_count" "2")])
+
+;; See the comment above "divmod<mode>4_mips16" for the split timing.
+(define_insn_and_split "udivmod<mode>4_mips16"
+ [(set (match_operand:GPR 0 "register_operand" "=d")
+ (udiv:GPR (match_operand:GPR 1 "register_operand" "d")
+ (match_operand:GPR 2 "register_operand" "d")))
+ (set (match_operand:GPR 3 "register_operand" "=d")
+ (umod:GPR (match_dup 1)
+ (match_dup 2)))
+ (clobber (match_operand:GPR 4 "lo_operand" "=l"))]
+ "ISA_HAS_<D>DIV && TARGET_MIPS16"
+ "#"
+ "cse_not_expected"
+ [(const_int 0)]
+{
+ emit_insn (gen_udivmod<mode>4_split (operands[3], operands[1], operands[2]));
+ emit_move_insn (operands[0], operands[4]);
+ DONE;
+}
+ [(set_attr "type" "idiv")
+ (set_attr "mode" "<MODE>")
+ (set_attr "insn_count" "3")])
+
+;; Shared div/mod expansion: issue the divide into the combined HI/LO
+;; pair (TImode on 64-bit targets, DImode on 32-bit) and read the
+;; remainder back from HI.  The quotient stays in LO for the caller.
+(define_expand "<u>divmod<mode>4_split"
+ [(set (match_operand:GPR 0 "register_operand")
+ (any_mod:GPR (match_operand:GPR 1 "register_operand")
+ (match_operand:GPR 2 "register_operand")))]
+ ""
+{
+ rtx hilo;
+
+ if (TARGET_64BIT)
+ {
+ hilo = gen_rtx_REG (TImode, MD_REG_FIRST);
+ emit_insn (gen_<u>divmod<mode>4_hilo_ti (hilo, operands[1],
+ operands[2]));
+ emit_insn (gen_mfhi<mode>_ti (operands[0], hilo));
+ }
+ else
+ {
+ hilo = gen_rtx_REG (DImode, MD_REG_FIRST);
+ emit_insn (gen_<u>divmod<mode>4_hilo_di (hilo, operands[1],
+ operands[2]));
+ emit_insn (gen_mfhi<mode>_di (operands[0], hilo));
+ }
+ DONE;
+})
+
+;; The divide instruction itself, writing HI/LO as one unit via
+;; UNSPEC_SET_HILO.  mips_output_division adds any required
+;; divide-by-zero check around the raw mnemonic.
+(define_insn "<u>divmod<GPR:mode>4_hilo_<HILO:mode>"
+ [(set (match_operand:HILO 0 "muldiv_target_operand" "=x")
+ (unspec:HILO
+ [(any_div:GPR (match_operand:GPR 1 "register_operand" "d")
+ (match_operand:GPR 2 "register_operand" "d"))]
+ UNSPEC_SET_HILO))]
+ "ISA_HAS_<GPR:D>DIV"
+ { return mips_output_division ("<GPR:d>div<u>\t%.,%1,%2", operands); }
+ [(set_attr "type" "idiv")
+ (set_attr "mode" "<GPR:MODE>")])
+
+;;
+;; ....................
+;;
+;; SQUARE ROOT
+;;
+;; ....................
+
+;; These patterns work around the early SB-1 rev2 core "F1" erratum (see
+;; "*div[sd]f3" comment for details).
+
+;; FP square root, with the SB-1 mov workaround under -mfix-sb1.
+(define_insn "sqrt<mode>2"
+ [(set (match_operand:ANYF 0 "register_operand" "=f")
+ (sqrt:ANYF (match_operand:ANYF 1 "register_operand" "f")))]
+ "<sqrt_condition>"
+{
+ if (TARGET_FIX_SB1)
+ return "sqrt.<fmt>\t%0,%1\;mov.<fmt>\t%0,%0";
+ else
+ return "sqrt.<fmt>\t%0,%1";
+}
+ [(set_attr "type" "fsqrt")
+ (set_attr "mode" "<UNITMODE>")
+ (set (attr "insn_count")
+ (if_then_else (match_test "TARGET_FIX_SB1")
+ (const_int 2)
+ (const_int 1)))])
+
+;; 1/sqrt(x) in the form (1 / sqrt x); unsafe-math only.
+(define_insn "*rsqrt<mode>a"
+ [(set (match_operand:ANYF 0 "register_operand" "=f")
+ (div:ANYF (match_operand:ANYF 1 "const_1_operand" "")
+ (sqrt:ANYF (match_operand:ANYF 2 "register_operand" "f"))))]
+ "ISA_HAS_FP_RECIP_RSQRT (<MODE>mode) && flag_unsafe_math_optimizations"
+{
+ if (TARGET_FIX_SB1)
+ return "rsqrt.<fmt>\t%0,%2\;mov.<fmt>\t%0,%0";
+ else
+ return "rsqrt.<fmt>\t%0,%2";
+}
+ [(set_attr "type" "frsqrt")
+ (set_attr "mode" "<UNITMODE>")
+ (set (attr "insn_count")
+ (if_then_else (match_test "TARGET_FIX_SB1")
+ (const_int 2)
+ (const_int 1)))])
+
+;; Same operation in the algebraically-equivalent form sqrt(1 / x).
+(define_insn "*rsqrt<mode>b"
+ [(set (match_operand:ANYF 0 "register_operand" "=f")
+ (sqrt:ANYF (div:ANYF (match_operand:ANYF 1 "const_1_operand" "")
+ (match_operand:ANYF 2 "register_operand" "f"))))]
+ "ISA_HAS_FP_RECIP_RSQRT (<MODE>mode) && flag_unsafe_math_optimizations"
+{
+ if (TARGET_FIX_SB1)
+ return "rsqrt.<fmt>\t%0,%2\;mov.<fmt>\t%0,%0";
+ else
+ return "rsqrt.<fmt>\t%0,%2";
+}
+ [(set_attr "type" "frsqrt")
+ (set_attr "mode" "<UNITMODE>")
+ (set (attr "insn_count")
+ (if_then_else (match_test "TARGET_FIX_SB1")
+ (const_int 2)
+ (const_int 1)))])
+
+;;
+;; ....................
+;;
+;; ABSOLUTE VALUE
+;;
+;; ....................
+
+;; Do not use the integer abs macro instruction, since that signals an
+;; exception on -2147483648 (sigh).
+
+;; The "legacy" (as opposed to "2008") form of ABS.fmt is an arithmetic
+;; instruction that treats all NaN inputs as invalid; it does not clear
+;; their sign bit. We therefore can't use that form if the signs of
+;; NaNs matter.
+
+(define_insn "abs<mode>2"
+ [(set (match_operand:ANYF 0 "register_operand" "=f")
+ (abs:ANYF (match_operand:ANYF 1 "register_operand" "f")))]
+ "mips_abs == MIPS_IEEE_754_2008 || !HONOR_NANS (<MODE>mode)"
+ "abs.<fmt>\t%0,%1"
+ [(set_attr "type" "fabs")
+ (set_attr "mode" "<UNITMODE>")])
+
+;;
+;; ...................
+;;
+;; Count leading zeroes.
+;;
+;; ...................
+;;
+
+;; clz/dclz by mode via the <d> prefix.
+(define_insn "clz<mode>2"
+ [(set (match_operand:GPR 0 "register_operand" "=d")
+ (clz:GPR (match_operand:GPR 1 "register_operand" "d")))]
+ "ISA_HAS_CLZ_CLO"
+ "<d>clz\t%0,%1"
+ [(set_attr "type" "clz")
+ (set_attr "mode" "<MODE>")])
+
+;;
+;; ...................
+;;
+;; Count number of set bits.
+;;
+;; ...................
+;;
+
+;; pop/dpop by mode via the <d> prefix.
+(define_insn "popcount<mode>2"
+ [(set (match_operand:GPR 0 "register_operand" "=d")
+ (popcount:GPR (match_operand:GPR 1 "register_operand" "d")))]
+ "ISA_HAS_POP"
+ "<d>pop\t%0,%1"
+ [(set_attr "type" "pop")
+ (set_attr "mode" "<MODE>")])
+
+;; The POP instruction is special as it does not take into account the upper
+;; 32 bits and is documented that way, so the SI popcount of a truncated
+;; DI value can use pop on the untruncated register directly.
+(define_insn "*popcountdi2_trunc"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (popcount:SI (truncate:SI (match_operand:DI 1 "register_operand" "d"))))]
+ "ISA_HAS_POP && TARGET_64BIT"
+ "pop\t%0,%1"
+ [(set_attr "type" "pop")
+ (set_attr "mode" "SI")])
+
+;;
+;; ....................
+;;
+;; NEGATION and ONE'S COMPLEMENT
+;;
+;; ....................
+
+;; Integer negate: MIPS16 has a real neg; otherwise subtract from $0
+;; (%. prints the zero register — presumably; confirm in
+;; mips_print_operand).
+(define_insn "negsi2"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (neg:SI (match_operand:SI 1 "register_operand" "d")))]
+ ""
+{
+ if (TARGET_MIPS16)
+ return "neg\t%0,%1";
+ else
+ return "subu\t%0,%.,%1";
+}
+ [(set_attr "alu_type" "sub")
+ (set_attr "mode" "SI")])
+
+;; 64-bit negate; no MIPS16 form here.
+(define_insn "negdi2"
+ [(set (match_operand:DI 0 "register_operand" "=d")
+ (neg:DI (match_operand:DI 1 "register_operand" "d")))]
+ "TARGET_64BIT && !TARGET_MIPS16"
+ "dsubu\t%0,%.,%1"
+ [(set_attr "alu_type" "sub")
+ (set_attr "mode" "DI")])
+
+;; The "legacy" (as opposed to "2008") form of NEG.fmt is an arithmetic
+;; instruction that treats all NaN inputs as invalid; it does not flip
+;; their sign bit. We therefore can't use that form if the signs of
+;; NaNs matter.
+
+(define_insn "neg<mode>2"
+ [(set (match_operand:ANYF 0 "register_operand" "=f")
+ (neg:ANYF (match_operand:ANYF 1 "register_operand" "f")))]
+ "mips_abs == MIPS_IEEE_754_2008 || !HONOR_NANS (<MODE>mode)"
+ "neg.<fmt>\t%0,%1"
+ [(set_attr "type" "fneg")
+ (set_attr "mode" "<UNITMODE>")])
+
+;; One's complement: MIPS16 not, otherwise nor with $0.  The "!u"
+;; alternative allows the compressed microMIPS encoding.
+(define_insn "one_cmpl<mode>2"
+ [(set (match_operand:GPR 0 "register_operand" "=!u,d")
+ (not:GPR (match_operand:GPR 1 "register_operand" "!u,d")))]
+ ""
+{
+ if (TARGET_MIPS16)
+ return "not\t%0,%1";
+ else
+ return "nor\t%0,%.,%1";
+}
+ [(set_attr "alu_type" "not")
+ (set_attr "compression" "micromips,*")
+ (set_attr "mode" "<MODE>")])
+
+;;
+;; ....................
+;;
+;; LOGICAL
+;;
+;; ....................
+;;
+
+;; Many of these instructions use trivial define_expands, because we
+;; want to use a different set of constraints when TARGET_MIPS16.
+
+(define_expand "and<mode>3"
+ [(set (match_operand:GPR 0 "register_operand")
+ (and:GPR (match_operand:GPR 1 "register_operand")
+ (match_operand:GPR 2 "and_reg_operand")))])
+
+;; The middle-end is not allowed to convert ANDing with 0xffff_ffff into a
+;; zero_extendsidi2 because of TRULY_NOOP_TRUNCATION, so handle these here.
+;; Note that this variant does not trigger for SI mode because we require
+;; a 64-bit HOST_WIDE_INT and 0xffff_ffff wouldn't be a canonical
+;; sign-extended SImode value.
+;;
+;; These are possible combinations for operand 1 and 2. The table
+;; includes both MIPS and MIPS16 cases. (r=register, mem=memory,
+;; 16=MIPS16, x=match, S=split):
+;;
+;; \ op1 r/EXT r/!EXT mem r/16 mem/16
+;; op2
+;;
+;; andi x x
+;; 0xff x x x x
+;; 0xffff x x x x
+;; 0xffff_ffff x S x S x
+;; low-bitmask x
+;; register x x
+;; register =op1 x
+
+;; AND with: byte/half/word masks folded into zero-extending loads
+;; (alternatives 0-2), immediates (3-4), a low bitmask via EXT (5),
+;; a split (6), or a plain register AND (7-8).
+(define_insn "*and<mode>3"
+ [(set (match_operand:GPR 0 "register_operand" "=d,d,d,!u,d,d,d,!u,d")
+ (and:GPR (match_operand:GPR 1 "nonimmediate_operand" "o,o,W,!u,d,d,d,0,d")
+ (match_operand:GPR 2 "and_operand" "Yb,Yh,Yw,Uean,K,Yx,Yw,!u,d")))]
+ "!TARGET_MIPS16 && and_operands_ok (<MODE>mode, operands[1], operands[2])"
+{
+ int len;
+
+ switch (which_alternative)
+ {
+ case 0:
+ operands[1] = gen_lowpart (QImode, operands[1]);
+ return "lbu\t%0,%1";
+ case 1:
+ operands[1] = gen_lowpart (HImode, operands[1]);
+ return "lhu\t%0,%1";
+ case 2:
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ return "lwu\t%0,%1";
+ case 3:
+ case 4:
+ return "andi\t%0,%1,%x2";
+ case 5:
+ len = low_bitmask_len (<MODE>mode, INTVAL (operands[2]));
+ operands[2] = GEN_INT (len);
+ return "<d>ext\t%0,%1,0,%2";
+ case 6:
+ return "#";
+ case 7:
+ case 8:
+ return "and\t%0,%1,%2";
+ default:
+ gcc_unreachable ();
+ }
+}
+ [(set_attr "move_type" "load,load,load,andi,andi,ext_ins,shift_shift,logical,logical")
+ (set_attr "compression" "*,*,*,micromips,*,*,*,micromips,*")
+ (set_attr "mode" "<MODE>")])
+
+;; MIPS16 AND: same load-folding idea as *and<mode>3 but with the
+;; restricted MIPS16 alternatives — no andi/ext, and the two-operand
+;; register form requires operand 1 tied to the destination.
+(define_insn "*and<mode>3_mips16"
+ [(set (match_operand:GPR 0 "register_operand" "=d,d,d,d,d")
+ (and:GPR (match_operand:GPR 1 "nonimmediate_operand" "%W,W,W,d,0")
+ (match_operand:GPR 2 "and_operand" "Yb,Yh,Yw,Yw,d")))]
+ "TARGET_MIPS16 && and_operands_ok (<MODE>mode, operands[1], operands[2])"
+{
+ switch (which_alternative)
+ {
+ case 0:
+ operands[1] = gen_lowpart (QImode, operands[1]);
+ return "lbu\t%0,%1";
+ case 1:
+ operands[1] = gen_lowpart (HImode, operands[1]);
+ return "lhu\t%0,%1";
+ case 2:
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ return "lwu\t%0,%1";
+ case 3:
+ return "#";
+ case 4:
+ return "and\t%0,%2";
+ default:
+ gcc_unreachable ();
+ }
+}
+ [(set_attr "move_type" "load,load,load,shift_shift,logical")
+ (set_attr "mode" "<MODE>")])
+
+;; IOR.  MIPS16 has no ori with a two-operand register form here, so the
+;; expander forces constant operands into a register for MIPS16.
+(define_expand "ior<mode>3"
+  [(set (match_operand:GPR 0 "register_operand")
+ (ior:GPR (match_operand:GPR 1 "register_operand")
+ (match_operand:GPR 2 "uns_arith_operand")))]
+  ""
+{
+  if (TARGET_MIPS16)
+    operands[2] = force_reg (<MODE>mode, operands[2]);
+})
+
+(define_insn "*ior<mode>3"
+  [(set (match_operand:GPR 0 "register_operand" "=!u,d,d")
+ (ior:GPR (match_operand:GPR 1 "register_operand" "%0,d,d")
+ (match_operand:GPR 2 "uns_arith_operand" "!u,d,K")))]
+  "!TARGET_MIPS16"
+  "@
+   or\t%0,%1,%2
+   or\t%0,%1,%2
+   ori\t%0,%1,%x2"
+  [(set_attr "alu_type" "or")
+   (set_attr "compression" "micromips,*,*")
+   (set_attr "mode" "<MODE>")])
+
+(define_insn "*ior<mode>3_mips16"
+  [(set (match_operand:GPR 0 "register_operand" "=d")
+ (ior:GPR (match_operand:GPR 1 "register_operand" "%0")
+ (match_operand:GPR 2 "register_operand" "d")))]
+  "TARGET_MIPS16"
+  "or\t%0,%2"
+  [(set_attr "alu_type" "or")
+   (set_attr "mode" "<MODE>")])
+
+;; XOR.  Unlike ior<mode>3, constants are allowed through even for MIPS16:
+;; the MIPS16 insn below handles them via cmpi (see its alternatives).
+(define_expand "xor<mode>3"
+  [(set (match_operand:GPR 0 "register_operand")
+ (xor:GPR (match_operand:GPR 1 "register_operand")
+ (match_operand:GPR 2 "uns_arith_operand")))]
+  ""
+  "")
+
+(define_insn "*xor<mode>3"
+  [(set (match_operand:GPR 0 "register_operand" "=!u,d,d")
+ (xor:GPR (match_operand:GPR 1 "register_operand" "%0,d,d")
+ (match_operand:GPR 2 "uns_arith_operand" "!u,d,K")))]
+  "!TARGET_MIPS16"
+  "@
+   xor\t%0,%1,%2
+   xor\t%0,%1,%2
+   xori\t%0,%1,%x2"
+  [(set_attr "alu_type" "xor")
+   (set_attr "compression" "micromips,*,*")
+   (set_attr "mode" "<MODE>")])
+
+;; NOTE(review): alternatives 1-3 emit cmp/cmpi, which presumably place the
+;; XOR result in the MIPS16 condition register selected by the "t"
+;; constraint -- confirm against the MIPS16 ASE manual.
+(define_insn "*xor<mode>3_mips16"
+  [(set (match_operand:GPR 0 "register_operand" "=d,t,t,t")
+ (xor:GPR (match_operand:GPR 1 "register_operand" "%0,d,d,d")
+ (match_operand:GPR 2 "uns_arith_operand" "d,Uub8,K,d")))]
+  "TARGET_MIPS16"
+  "@
+   xor\t%0,%2
+   cmpi\t%1,%2
+   cmpi\t%1,%2
+   cmp\t%1,%2"
+  [(set_attr "alu_type" "xor")
+   (set_attr "mode" "<MODE>")
+   (set_attr "extended_mips16" "no,no,yes,no")])
+
+;; NOR, written in its canonical RTL form (and (not x) (not y)).
+(define_insn "*nor<mode>3"
+  [(set (match_operand:GPR 0 "register_operand" "=d")
+ (and:GPR (not:GPR (match_operand:GPR 1 "register_operand" "d"))
+ (not:GPR (match_operand:GPR 2 "register_operand" "d"))))]
+  "!TARGET_MIPS16"
+  "nor\t%0,%1,%2"
+  [(set_attr "alu_type" "nor")
+   (set_attr "mode" "<MODE>")])
+
+;;
+;;  ....................
+;;
+;; TRUNCATION
+;;
+;;  ....................
+
+
+
+(define_insn "truncdfsf2"
+  [(set (match_operand:SF 0 "register_operand" "=f")
+ (float_truncate:SF (match_operand:DF 1 "register_operand" "f")))]
+  "TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT"
+  "cvt.s.d\t%0,%1"
+  [(set_attr "type" "fcvt")
+   (set_attr "cnv_mode" "D2S")
+   (set_attr "mode" "SF")])
+
+;; Integer truncation patterns.  Truncating SImode values to smaller
+;; modes is a no-op, as it is for most other GCC ports.  Truncating
+;; DImode values to SImode is not a no-op for TARGET_64BIT since we
+;; need to make sure that the lower 32 bits are properly sign-extended
+;; (see TRULY_NOOP_TRUNCATION).  Truncating DImode values into modes
+;; smaller than SImode is equivalent to two separate truncations:
+;;
+;;                        A        B
+;;    DI ---> HI  ==  DI ---> SI ---> HI
+;;    DI ---> QI  ==  DI ---> SI ---> QI
+;;
+;; Step A needs a real instruction but step B does not.
+
+;; Alternative 0 re-canonicalizes with "sll %0,%1,0" (sign-extends the
+;; low 32 bits); alternative 1 stores the narrow low part directly.
+(define_insn "truncdi<mode>2"
+  [(set (match_operand:SUBDI 0 "nonimmediate_operand" "=d,m")
+ (truncate:SUBDI (match_operand:DI 1 "register_operand" "d,d")))]
+  "TARGET_64BIT"
+  "@
+    sll\t%0,%1,0
+    <store>\t%1,%0"
+  [(set_attr "move_type" "sll0,store")
+   (set_attr "mode" "SI")])
+
+;; Combiner patterns to optimize shift/truncate combinations.
+
+(define_insn "*ashr_trunc<mode>"
+  [(set (match_operand:SUBDI 0 "register_operand" "=d")
+        (truncate:SUBDI
+   (ashiftrt:DI (match_operand:DI 1 "register_operand" "d")
+         (match_operand:DI 2 "const_arith_operand" ""))))]
+  "TARGET_64BIT && !TARGET_MIPS16 && IN_RANGE (INTVAL (operands[2]), 32, 63)"
+  "dsra\t%0,%1,%2"
+  [(set_attr "type" "shift")
+   (set_attr "mode" "<MODE>")])
+
+;; NOTE(review): dsra (not dsrl) is used even though the RTL is a logical
+;; shift -- the low 32 bits of a 32-bit shift are the same either way, and
+;; dsra presumably keeps the result canonically sign-extended as the
+;; TRULY_NOOP_TRUNCATION comment above requires; confirm.
+(define_insn "*lshr32_trunc<mode>"
+  [(set (match_operand:SUBDI 0 "register_operand" "=d")
+        (truncate:SUBDI
+   (lshiftrt:DI (match_operand:DI 1 "register_operand" "d")
+         (const_int 32))))]
+  "TARGET_64BIT && !TARGET_MIPS16"
+  "dsra\t%0,%1,32"
+  [(set_attr "type" "shift")
+   (set_attr "mode" "<MODE>")])
+
+;; Logical shift by more than 32 results in proper SI values so truncation is
+;; removed by the middle end.  Note that a logical shift by 32 is handled by
+;; the previous pattern.
+(define_insn "*<optab>_trunc<mode>_exts"
+  [(set (match_operand:SUBDI 0 "register_operand" "=d")
+        (truncate:SUBDI
+   (any_shiftrt:DI (match_operand:DI 1 "register_operand" "d")
+     (match_operand:DI 2 "const_arith_operand" ""))))]
+  "ISA_HAS_EXTS && TARGET_64BIT && UINTVAL (operands[2]) < 32"
+  "exts\t%0,%1,%2,31"
+  [(set_attr "type" "arith")
+   (set_attr "mode" "<MODE>")])
+
+;;
+;;  ....................
+;;
+;; ZERO EXTENSION
+;;
+;;  ....................
+
+;; Extension insns.
+
+(define_expand "zero_extendsidi2"
+  [(set (match_operand:DI 0 "register_operand")
+ (zero_extend:DI (match_operand:SI 1 "nonimmediate_operand")))]
+  "TARGET_64BIT")
+
+;; Without dext, a register source is split after reload into a left/right
+;; shift pair by 32; a memory source uses lwu directly.
+(define_insn_and_split "*zero_extendsidi2"
+  [(set (match_operand:DI 0 "register_operand" "=d,d")
+ (zero_extend:DI (match_operand:SI 1 "nonimmediate_operand" "d,W")))]
+  "TARGET_64BIT && !ISA_HAS_EXT_INS"
+  "@
+   #
+   lwu\t%0,%1"
+  "&& reload_completed && REG_P (operands[1])"
+  [(set (match_dup 0)
+        (ashift:DI (match_dup 1) (const_int 32)))
+   (set (match_dup 0)
+        (lshiftrt:DI (match_dup 0) (const_int 32)))]
+  { operands[1] = gen_lowpart (DImode, operands[1]); }
+  [(set_attr "move_type" "shift_shift,load")
+   (set_attr "mode" "DI")])
+
+(define_insn "*zero_extendsidi2_dext"
+  [(set (match_operand:DI 0 "register_operand" "=d,d")
+ (zero_extend:DI (match_operand:SI 1 "nonimmediate_operand" "d,W")))]
+  "TARGET_64BIT && ISA_HAS_EXT_INS"
+  "@
+   dext\t%0,%1,0,32
+   lwu\t%0,%1"
+  [(set_attr "move_type" "arith,load")
+   (set_attr "mode" "DI")])
+
+;; See the comment before the *and<mode>3 pattern why this is generated by
+;; combine.
+
+(define_split
+  [(set (match_operand:DI 0 "register_operand")
+ (and:DI (match_operand:DI 1 "register_operand")
+ (const_int 4294967295)))]
+  "TARGET_64BIT && !ISA_HAS_EXT_INS && reload_completed"
+  [(set (match_dup 0)
+        (ashift:DI (match_dup 1) (const_int 32)))
+   (set (match_dup 0)
+        (lshiftrt:DI (match_dup 0) (const_int 32)))])
+
+;; For non-MIPS16E MIPS16, a register source is zero-extended via an AND
+;; with the mode mask (forced into a register, since MIPS16 has no andi).
+(define_expand "zero_extend<SHORT:mode><GPR:mode>2"
+  [(set (match_operand:GPR 0 "register_operand")
+ (zero_extend:GPR (match_operand:SHORT 1 "nonimmediate_operand")))]
+  ""
+{
+  if (TARGET_MIPS16 && !GENERATE_MIPS16E
+      && !memory_operand (operands[1], <SHORT:MODE>mode))
+    {
+      emit_insn (gen_and<GPR:mode>3 (operands[0],
+         gen_lowpart (<GPR:MODE>mode, operands[1]),
+         force_reg (<GPR:MODE>mode,
+      GEN_INT (<SHORT:mask>))));
+      DONE;
+    }
+})
+
+(define_insn "*zero_extend<SHORT:mode><GPR:mode>2"
+  [(set (match_operand:GPR 0 "register_operand" "=!u,d,d")
+        (zero_extend:GPR
+      (match_operand:SHORT 1 "nonimmediate_operand" "!u,d,m")))]
+  "!TARGET_MIPS16"
+  "@
+   andi\t%0,%1,<SHORT:mask>
+   andi\t%0,%1,<SHORT:mask>
+   l<SHORT:size>u\t%0,%1"
+  [(set_attr "move_type" "andi,andi,load")
+   (set_attr "compression" "micromips,*,*")
+   (set_attr "mode" "<GPR:MODE>")])
+
+(define_insn "*zero_extend<SHORT:mode><GPR:mode>2_mips16e"
+  [(set (match_operand:GPR 0 "register_operand" "=d")
+ (zero_extend:GPR (match_operand:SHORT 1 "register_operand" "0")))]
+  "GENERATE_MIPS16E"
+  "ze<SHORT:size>\t%0"
+  ;; This instruction is effectively a special encoding of ANDI.
+  [(set_attr "move_type" "andi")
+   (set_attr "mode" "<GPR:MODE>")])
+
+(define_insn "*zero_extend<SHORT:mode><GPR:mode>2_mips16"
+  [(set (match_operand:GPR 0 "register_operand" "=d")
+ (zero_extend:GPR (match_operand:SHORT 1 "memory_operand" "m")))]
+  "TARGET_MIPS16"
+  "l<SHORT:size>u\t%0,%1"
+  [(set_attr "move_type" "load")
+   (set_attr "mode" "<GPR:MODE>")])
+
+;; QI -> HI zero extension is done as a QI -> SI extension on MIPS16 for a
+;; register source; the HI destination is accessed as its SImode lowpart.
+(define_expand "zero_extendqihi2"
+  [(set (match_operand:HI 0 "register_operand")
+ (zero_extend:HI (match_operand:QI 1 "nonimmediate_operand")))]
+  ""
+{
+  if (TARGET_MIPS16 && !memory_operand (operands[1], QImode))
+    {
+      emit_insn (gen_zero_extendqisi2 (gen_lowpart (SImode, operands[0]),
+           operands[1]));
+      DONE;
+    }
+})
+
+(define_insn "*zero_extendqihi2"
+  [(set (match_operand:HI 0 "register_operand" "=d,d")
+        (zero_extend:HI (match_operand:QI 1 "nonimmediate_operand" "d,m")))]
+  "!TARGET_MIPS16"
+  "@
+   andi\t%0,%1,0x00ff
+   lbu\t%0,%1"
+  [(set_attr "move_type" "andi,load")
+   (set_attr "mode" "HI")])
+
+(define_insn "*zero_extendqihi2_mips16"
+  [(set (match_operand:HI 0 "register_operand" "=d")
+        (zero_extend:HI (match_operand:QI 1 "memory_operand" "m")))]
+  "TARGET_MIPS16"
+  "lbu\t%0,%1"
+  [(set_attr "move_type" "load")
+   (set_attr "mode" "HI")])
+
+;; Combiner patterns to optimize truncate/zero_extend combinations.
+;; The truncate+zero_extend pair collapses to a single andi with the
+;; narrow mode's mask.
+
+(define_insn "*zero_extend<GPR:mode>_trunc<SHORT:mode>"
+  [(set (match_operand:GPR 0 "register_operand" "=d")
+        (zero_extend:GPR
+     (truncate:SHORT (match_operand:DI 1 "register_operand" "d"))))]
+  "TARGET_64BIT && !TARGET_MIPS16"
+{
+  operands[2] = GEN_INT (GET_MODE_MASK (<SHORT:MODE>mode));
+  return "andi\t%0,%1,%x2";
+}
+  [(set_attr "alu_type" "and")
+   (set_attr "mode" "<GPR:MODE>")])
+
+(define_insn "*zero_extendhi_truncqi"
+  [(set (match_operand:HI 0 "register_operand" "=d")
+        (zero_extend:HI
+     (truncate:QI (match_operand:DI 1 "register_operand" "d"))))]
+  "TARGET_64BIT && !TARGET_MIPS16"
+  "andi\t%0,%1,0xff"
+  [(set_attr "alu_type" "and")
+   (set_attr "mode" "HI")])
+
+;;
+;;  ....................
+;;
+;; SIGN EXTENSION
+;;
+;;  ....................
+
+;; Extension insns.
+;; Those for integer source operand are ordered widest source type first.
+
+;; When TARGET_64BIT, all SImode integer and accumulator registers
+;; should already be in sign-extended form (see TRULY_NOOP_TRUNCATION
+;; and truncdisi2).  We can therefore get rid of register->register
+;; instructions if we constrain the source to be in the same register as
+;; the destination.
+;;
+;; Only the pre-reload scheduler sees the type of the register alternatives;
+;; we split them into nothing before the post-reload scheduler runs.
+;; These alternatives therefore have type "move" in order to reflect
+;; what happens if the two pre-reload operands cannot be tied, and are
+;; instead allocated two separate GPRs.  We don't distinguish between
+;; the GPR and LO cases because we don't usually know during pre-reload
+;; scheduling whether an operand will be LO or not.
+(define_insn_and_split "extendsidi2"
+  [(set (match_operand:DI 0 "register_operand" "=d,l,d")
+ (sign_extend:DI (match_operand:SI 1 "nonimmediate_operand" "0,0,m")))]
+  "TARGET_64BIT"
+  "@
+   #
+   #
+   lw\t%0,%1"
+  "&& reload_completed && register_operand (operands[1], VOIDmode)"
+  [(const_int 0)]
+{
+  ;; Register alternatives split to nothing: the tied source is already
+  ;; sign-extended (see the comment above).
+  emit_note (NOTE_INSN_DELETED);
+  DONE;
+}
+  [(set_attr "move_type" "move,move,load")
+   (set_attr "mode" "DI")])
+
+(define_expand "extend<SHORT:mode><GPR:mode>2"
+  [(set (match_operand:GPR 0 "register_operand")
+ (sign_extend:GPR (match_operand:SHORT 1 "nonimmediate_operand")))]
+  "")
+
+(define_insn "*extend<SHORT:mode><GPR:mode>2_mips16e"
+  [(set (match_operand:GPR 0 "register_operand" "=d,d")
+ (sign_extend:GPR (match_operand:SHORT 1 "nonimmediate_operand" "0,m")))]
+  "GENERATE_MIPS16E"
+  "@
+   se<SHORT:size>\t%0
+   l<SHORT:size>\t%0,%1"
+  [(set_attr "move_type" "signext,load")
+   (set_attr "mode" "<GPR:MODE>")])
+
+;; Without seb/seh, a register source is split after reload into a shift
+;; left/arithmetic shift right pair by the mode-size difference.
+(define_insn_and_split "*extend<SHORT:mode><GPR:mode>2"
+  [(set (match_operand:GPR 0 "register_operand" "=d,d")
+        (sign_extend:GPR
+     (match_operand:SHORT 1 "nonimmediate_operand" "d,m")))]
+  "!ISA_HAS_SEB_SEH && !GENERATE_MIPS16E"
+  "@
+   #
+   l<SHORT:size>\t%0,%1"
+  "&& reload_completed && REG_P (operands[1])"
+  [(set (match_dup 0) (ashift:GPR (match_dup 1) (match_dup 2)))
+   (set (match_dup 0) (ashiftrt:GPR (match_dup 0) (match_dup 2)))]
+{
+  operands[1] = gen_lowpart (<GPR:MODE>mode, operands[1]);
+  operands[2] = GEN_INT (GET_MODE_BITSIZE (<GPR:MODE>mode)
+    - GET_MODE_BITSIZE (<SHORT:MODE>mode));
+}
+  [(set_attr "move_type" "shift_shift,load")
+   (set_attr "mode" "<GPR:MODE>")])
+
+(define_insn "*extend<SHORT:mode><GPR:mode>2_se<SHORT:size>"
+  [(set (match_operand:GPR 0 "register_operand" "=d,d")
+        (sign_extend:GPR
+     (match_operand:SHORT 1 "nonimmediate_operand" "d,m")))]
+  "ISA_HAS_SEB_SEH"
+  "@
+   se<SHORT:size>\t%0,%1
+   l<SHORT:size>\t%0,%1"
+  [(set_attr "move_type" "signext,load")
+   (set_attr "mode" "<GPR:MODE>")])
+
+(define_expand "extendqihi2"
+  [(set (match_operand:HI 0 "register_operand")
+ (sign_extend:HI (match_operand:QI 1 "nonimmediate_operand")))]
+  "")
+
+(define_insn "*extendqihi2_mips16e"
+  [(set (match_operand:HI 0 "register_operand" "=d,d")
+ (sign_extend:HI (match_operand:QI 1 "nonimmediate_operand" "0,m")))]
+  "GENERATE_MIPS16E"
+  "@
+   seb\t%0
+   lb\t%0,%1"
+  [(set_attr "move_type" "signext,load")
+   (set_attr "mode" "SI")])
+
+;; The QI -> HI shifts are done in SImode (note the gen_lowpart calls and
+;; the "SI" mode attribute): HImode arithmetic is performed on the full
+;; SImode register.
+(define_insn_and_split "*extendqihi2"
+  [(set (match_operand:HI 0 "register_operand" "=d,d")
+        (sign_extend:HI
+     (match_operand:QI 1 "nonimmediate_operand" "d,m")))]
+  "!ISA_HAS_SEB_SEH && !GENERATE_MIPS16E"
+  "@
+   #
+   lb\t%0,%1"
+  "&& reload_completed && REG_P (operands[1])"
+  [(set (match_dup 0) (ashift:SI (match_dup 1) (match_dup 2)))
+   (set (match_dup 0) (ashiftrt:SI (match_dup 0) (match_dup 2)))]
+{
+  operands[0] = gen_lowpart (SImode, operands[0]);
+  operands[1] = gen_lowpart (SImode, operands[1]);
+  operands[2] = GEN_INT (GET_MODE_BITSIZE (SImode)
+    - GET_MODE_BITSIZE (QImode));
+}
+  [(set_attr "move_type" "shift_shift,load")
+   (set_attr "mode" "SI")])
+
+(define_insn "*extendqihi2_seb"
+  [(set (match_operand:HI 0 "register_operand" "=d,d")
+        (sign_extend:HI
+     (match_operand:QI 1 "nonimmediate_operand" "d,m")))]
+  "ISA_HAS_SEB_SEH"
+  "@
+   seb\t%0,%1
+   lb\t%0,%1"
+  [(set_attr "move_type" "signext,load")
+   (set_attr "mode" "SI")])
+
+;; Combiner patterns for truncate/sign_extend combinations.  The SI versions
+;; use the shift/truncate patterns.
+
+;; Without exts, the pair splits into dsll/dsra by (64 - narrow width),
+;; staged through the DImode lowpart of the destination.
+(define_insn_and_split "*extenddi_truncate<mode>"
+  [(set (match_operand:DI 0 "register_operand" "=d")
+ (sign_extend:DI
+     (truncate:SHORT (match_operand:DI 1 "register_operand" "d"))))]
+  "TARGET_64BIT && !TARGET_MIPS16 && !ISA_HAS_EXTS"
+  "#"
+  "&& reload_completed"
+  [(set (match_dup 2)
+ (ashift:DI (match_dup 1)
+     (match_dup 3)))
+   (set (match_dup 0)
+ (ashiftrt:DI (match_dup 2)
+       (match_dup 3)))]
+{
+  operands[2] = gen_lowpart (DImode, operands[0]);
+  operands[3] = GEN_INT (BITS_PER_WORD - GET_MODE_BITSIZE (<MODE>mode));
+}
+  [(set_attr "move_type" "shift_shift")
+   (set_attr "mode" "DI")])
+
+(define_insn_and_split "*extendsi_truncate<mode>"
+  [(set (match_operand:SI 0 "register_operand" "=d")
+ (sign_extend:SI
+     (truncate:SHORT (match_operand:DI 1 "register_operand" "d"))))]
+  "TARGET_64BIT && !TARGET_MIPS16 && !ISA_HAS_EXTS"
+  "#"
+  "&& reload_completed"
+  [(set (match_dup 2)
+ (ashift:DI (match_dup 1)
+     (match_dup 3)))
+   (set (match_dup 0)
+ (truncate:SI (ashiftrt:DI (match_dup 2)
+       (match_dup 3))))]
+{
+  operands[2] = gen_lowpart (DImode, operands[0]);
+  operands[3] = GEN_INT (BITS_PER_WORD - GET_MODE_BITSIZE (<MODE>mode));
+}
+  [(set_attr "move_type" "shift_shift")
+   (set_attr "mode" "SI")])
+
+(define_insn_and_split "*extendhi_truncateqi"
+  [(set (match_operand:HI 0 "register_operand" "=d")
+ (sign_extend:HI
+     (truncate:QI (match_operand:DI 1 "register_operand" "d"))))]
+  "TARGET_64BIT && !TARGET_MIPS16 && !ISA_HAS_EXTS"
+  "#"
+  "&& reload_completed"
+  [(set (match_dup 2)
+ (ashift:DI (match_dup 1)
+     (const_int 56)))
+   (set (match_dup 0)
+ (truncate:HI (ashiftrt:DI (match_dup 2)
+       (const_int 56))))]
+{
+  operands[2] = gen_lowpart (DImode, operands[0]);
+}
+  [(set_attr "move_type" "shift_shift")
+   (set_attr "mode" "SI")])
+
+(define_insn "*extend<GPR:mode>_truncate<SHORT:mode>_exts"
+  [(set (match_operand:GPR 0 "register_operand" "=d")
+ (sign_extend:GPR
+     (truncate:SHORT (match_operand:DI 1 "register_operand" "d"))))]
+  "TARGET_64BIT && !TARGET_MIPS16 && ISA_HAS_EXTS"
+{
+  operands[2] = GEN_INT (GET_MODE_BITSIZE (<SHORT:MODE>mode));
+  return "exts\t%0,%1,0,%m2";
+}
+  [(set_attr "type" "arith")
+   (set_attr "mode" "<GPR:MODE>")])
+
+(define_insn "*extendhi_truncateqi_exts"
+  [(set (match_operand:HI 0 "register_operand" "=d")
+ (sign_extend:HI
+     (truncate:QI (match_operand:DI 1 "register_operand" "d"))))]
+  "TARGET_64BIT && !TARGET_MIPS16 && ISA_HAS_EXTS"
+  "exts\t%0,%1,0,7"
+  [(set_attr "type" "arith")
+   (set_attr "mode" "SI")])
+
+(define_insn "extendsfdf2"
+  [(set (match_operand:DF 0 "register_operand" "=f")
+ (float_extend:DF (match_operand:SF 1 "register_operand" "f")))]
+  "TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT"
+  "cvt.d.s\t%0,%1"
+  [(set_attr "type" "fcvt")
+   (set_attr "cnv_mode" "S2D")
+   (set_attr "mode" "DF")])
+
+;;
+;;  ....................
+;;
+;; CONVERSIONS
+;;
+;;  ....................
+
+;; Float -> signed int truncation.  When the ISA lacks a trunc.w
+;; instruction the _macro variants below expand to the assembler's
+;; multi-instruction trunc macro instead.
+(define_expand "fix_truncdfsi2"
+  [(set (match_operand:SI 0 "register_operand")
+ (fix:SI (match_operand:DF 1 "register_operand")))]
+  "TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT"
+{
+  if (!ISA_HAS_TRUNC_W)
+    {
+      emit_insn (gen_fix_truncdfsi2_macro (operands[0], operands[1]));
+      DONE;
+    }
+})
+
+(define_insn "fix_truncdfsi2_insn"
+  [(set (match_operand:SI 0 "register_operand" "=f")
+ (fix:SI (match_operand:DF 1 "register_operand" "f")))]
+  "TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT && ISA_HAS_TRUNC_W"
+  "trunc.w.d %0,%1"
+  [(set_attr "type" "fcvt")
+   (set_attr "mode" "DF")
+   (set_attr "cnv_mode" "D2I")])
+
+;; NOTE(review): the scratch is DFmode but constrained to "d" (a GPR); it
+;; is the temporary the assembler's trunc macro needs.  insn_count 9
+;; accounts for the macro's expansion.
+(define_insn "fix_truncdfsi2_macro"
+  [(set (match_operand:SI 0 "register_operand" "=f")
+ (fix:SI (match_operand:DF 1 "register_operand" "f")))
+   (clobber (match_scratch:DF 2 "=d"))]
+  "TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT && !ISA_HAS_TRUNC_W"
+{
+  ;; Re-enable assembler macros around the macro insn if they are
+  ;; currently suppressed by .set nomacro.
+  if (mips_nomacro.nesting_level > 0)
+    return ".set\tmacro\;trunc.w.d %0,%1,%2\;.set\tnomacro";
+  else
+    return "trunc.w.d %0,%1,%2";
+}
+  [(set_attr "type" "fcvt")
+   (set_attr "mode" "DF")
+   (set_attr "cnv_mode" "D2I")
+   (set_attr "insn_count" "9")])
+
+(define_expand "fix_truncsfsi2"
+  [(set (match_operand:SI 0 "register_operand")
+ (fix:SI (match_operand:SF 1 "register_operand")))]
+  "TARGET_HARD_FLOAT"
+{
+  if (!ISA_HAS_TRUNC_W)
+    {
+      emit_insn (gen_fix_truncsfsi2_macro (operands[0], operands[1]));
+      DONE;
+    }
+})
+
+(define_insn "fix_truncsfsi2_insn"
+  [(set (match_operand:SI 0 "register_operand" "=f")
+ (fix:SI (match_operand:SF 1 "register_operand" "f")))]
+  "TARGET_HARD_FLOAT && ISA_HAS_TRUNC_W"
+  "trunc.w.s %0,%1"
+  [(set_attr "type" "fcvt")
+   (set_attr "mode" "SF")
+   (set_attr "cnv_mode" "S2I")])
+
+(define_insn "fix_truncsfsi2_macro"
+  [(set (match_operand:SI 0 "register_operand" "=f")
+ (fix:SI (match_operand:SF 1 "register_operand" "f")))
+   (clobber (match_scratch:SF 2 "=d"))]
+  "TARGET_HARD_FLOAT && !ISA_HAS_TRUNC_W"
+{
+  if (mips_nomacro.nesting_level > 0)
+    return ".set\tmacro\;trunc.w.s %0,%1,%2\;.set\tnomacro";
+  else
+    return "trunc.w.s %0,%1,%2";
+}
+  [(set_attr "type" "fcvt")
+   (set_attr "mode" "SF")
+   (set_attr "cnv_mode" "S2I")
+   (set_attr "insn_count" "9")])
+
+
+(define_insn "fix_truncdfdi2"
+  [(set (match_operand:DI 0 "register_operand" "=f")
+ (fix:DI (match_operand:DF 1 "register_operand" "f")))]
+  "TARGET_HARD_FLOAT && TARGET_FLOAT64 && TARGET_DOUBLE_FLOAT"
+  "trunc.l.d %0,%1"
+  [(set_attr "type" "fcvt")
+   (set_attr "mode" "DF")
+   (set_attr "cnv_mode" "D2I")])
+
+
+(define_insn "fix_truncsfdi2"
+  [(set (match_operand:DI 0 "register_operand" "=f")
+ (fix:DI (match_operand:SF 1 "register_operand" "f")))]
+  "TARGET_HARD_FLOAT && TARGET_FLOAT64 && TARGET_DOUBLE_FLOAT"
+  "trunc.l.s %0,%1"
+  [(set_attr "type" "fcvt")
+   (set_attr "mode" "SF")
+   (set_attr "cnv_mode" "S2I")])
+
+
+;; Signed int -> float conversions via cvt.{d,s}.{w,l}.
+(define_insn "floatsidf2"
+  [(set (match_operand:DF 0 "register_operand" "=f")
+ (float:DF (match_operand:SI 1 "register_operand" "f")))]
+  "TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT"
+  "cvt.d.w\t%0,%1"
+  [(set_attr "type" "fcvt")
+   (set_attr "mode" "DF")
+   (set_attr "cnv_mode" "I2D")])
+
+
+(define_insn "floatdidf2"
+  [(set (match_operand:DF 0 "register_operand" "=f")
+ (float:DF (match_operand:DI 1 "register_operand" "f")))]
+  "TARGET_HARD_FLOAT && TARGET_FLOAT64 && TARGET_DOUBLE_FLOAT"
+  "cvt.d.l\t%0,%1"
+  [(set_attr "type" "fcvt")
+   (set_attr "mode" "DF")
+   (set_attr "cnv_mode" "I2D")])
+
+
+(define_insn "floatsisf2"
+  [(set (match_operand:SF 0 "register_operand" "=f")
+ (float:SF (match_operand:SI 1 "register_operand" "f")))]
+  "TARGET_HARD_FLOAT"
+  "cvt.s.w\t%0,%1"
+  [(set_attr "type" "fcvt")
+   (set_attr "mode" "SF")
+   (set_attr "cnv_mode" "I2S")])
+
+
+(define_insn "floatdisf2"
+  [(set (match_operand:SF 0 "register_operand" "=f")
+ (float:SF (match_operand:DI 1 "register_operand" "f")))]
+  "TARGET_HARD_FLOAT && TARGET_FLOAT64 && TARGET_DOUBLE_FLOAT"
+  "cvt.s.l\t%0,%1"
+  [(set_attr "type" "fcvt")
+   (set_attr "mode" "SF")
+   (set_attr "cnv_mode" "I2S")])
+
+
+;; Unsigned fix expanders.  There is no unsigned-truncate instruction, so
+;; each expander branches on value >= 2^(N-1): below the threshold a plain
+;; signed fix suffices; at or above it, subtract 2^(N-1) first, do the
+;; signed fix, then OR the high bit (BITMASK_HIGH) back into the result.
+(define_expand "fixuns_truncdfsi2"
+  [(set (match_operand:SI 0 "register_operand")
+ (unsigned_fix:SI (match_operand:DF 1 "register_operand")))]
+  "TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT"
+{
+  rtx reg1 = gen_reg_rtx (DFmode);
+  rtx reg2 = gen_reg_rtx (DFmode);
+  rtx reg3 = gen_reg_rtx (SImode);
+  rtx label1 = gen_label_rtx ();
+  rtx label2 = gen_label_rtx ();
+  rtx test;
+  REAL_VALUE_TYPE offset;
+
+  real_2expN (&offset, 31, DFmode);
+
+  if (reg1)   /* Turn off complaints about unreached code.  */
+    {
+      mips_emit_move (reg1, CONST_DOUBLE_FROM_REAL_VALUE (offset, DFmode));
+      do_pending_stack_adjust ();
+
+      test = gen_rtx_GE (VOIDmode, operands[1], reg1);
+      emit_jump_insn (gen_cbranchdf4 (test, operands[1], reg1, label1));
+
+      emit_insn (gen_fix_truncdfsi2 (operands[0], operands[1]));
+      emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
+       gen_rtx_LABEL_REF (VOIDmode, label2)));
+      emit_barrier ();
+
+      emit_label (label1);
+      mips_emit_move (reg2, gen_rtx_MINUS (DFmode, operands[1], reg1));
+      mips_emit_move (reg3, GEN_INT (trunc_int_for_mode
+         (BITMASK_HIGH, SImode)));
+
+      emit_insn (gen_fix_truncdfsi2 (operands[0], reg2));
+      emit_insn (gen_iorsi3 (operands[0], operands[0], reg3));
+
+      emit_label (label2);
+
+      /* Allow REG_NOTES to be set on last insn (labels don't have enough
+  fields, and can't be used for REG_NOTES anyway).  */
+      emit_use (stack_pointer_rtx);
+      DONE;
+    }
+})
+
+
+;; DImode version: the high-bit constant is built as BITMASK_HIGH << 32.
+(define_expand "fixuns_truncdfdi2"
+  [(set (match_operand:DI 0 "register_operand")
+ (unsigned_fix:DI (match_operand:DF 1 "register_operand")))]
+  "TARGET_HARD_FLOAT && TARGET_64BIT && TARGET_DOUBLE_FLOAT"
+{
+  rtx reg1 = gen_reg_rtx (DFmode);
+  rtx reg2 = gen_reg_rtx (DFmode);
+  rtx reg3 = gen_reg_rtx (DImode);
+  rtx label1 = gen_label_rtx ();
+  rtx label2 = gen_label_rtx ();
+  rtx test;
+  REAL_VALUE_TYPE offset;
+
+  real_2expN (&offset, 63, DFmode);
+
+  mips_emit_move (reg1, CONST_DOUBLE_FROM_REAL_VALUE (offset, DFmode));
+  do_pending_stack_adjust ();
+
+  test = gen_rtx_GE (VOIDmode, operands[1], reg1);
+  emit_jump_insn (gen_cbranchdf4 (test, operands[1], reg1, label1));
+
+  emit_insn (gen_fix_truncdfdi2 (operands[0], operands[1]));
+  emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
+          gen_rtx_LABEL_REF (VOIDmode, label2)));
+  emit_barrier ();
+
+  emit_label (label1);
+  mips_emit_move (reg2, gen_rtx_MINUS (DFmode, operands[1], reg1));
+  mips_emit_move (reg3, GEN_INT (BITMASK_HIGH));
+  emit_insn (gen_ashldi3 (reg3, reg3, GEN_INT (32)));
+
+  emit_insn (gen_fix_truncdfdi2 (operands[0], reg2));
+  emit_insn (gen_iordi3 (operands[0], operands[0], reg3));
+
+  emit_label (label2);
+
+  /* Allow REG_NOTES to be set on last insn (labels don't have enough
+     fields, and can't be used for REG_NOTES anyway).  */
+  emit_use (stack_pointer_rtx);
+  DONE;
+})
+
+
+(define_expand "fixuns_truncsfsi2"
+  [(set (match_operand:SI 0 "register_operand")
+ (unsigned_fix:SI (match_operand:SF 1 "register_operand")))]
+  "TARGET_HARD_FLOAT"
+{
+  rtx reg1 = gen_reg_rtx (SFmode);
+  rtx reg2 = gen_reg_rtx (SFmode);
+  rtx reg3 = gen_reg_rtx (SImode);
+  rtx label1 = gen_label_rtx ();
+  rtx label2 = gen_label_rtx ();
+  rtx test;
+  REAL_VALUE_TYPE offset;
+
+  real_2expN (&offset, 31, SFmode);
+
+  mips_emit_move (reg1, CONST_DOUBLE_FROM_REAL_VALUE (offset, SFmode));
+  do_pending_stack_adjust ();
+
+  test = gen_rtx_GE (VOIDmode, operands[1], reg1);
+  emit_jump_insn (gen_cbranchsf4 (test, operands[1], reg1, label1));
+
+  emit_insn (gen_fix_truncsfsi2 (operands[0], operands[1]));
+  emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
+          gen_rtx_LABEL_REF (VOIDmode, label2)));
+  emit_barrier ();
+
+  emit_label (label1);
+  mips_emit_move (reg2, gen_rtx_MINUS (SFmode, operands[1], reg1));
+  mips_emit_move (reg3, GEN_INT (trunc_int_for_mode
+     (BITMASK_HIGH, SImode)));
+
+  emit_insn (gen_fix_truncsfsi2 (operands[0], reg2));
+  emit_insn (gen_iorsi3 (operands[0], operands[0], reg3));
+
+  emit_label (label2);
+
+  /* Allow REG_NOTES to be set on last insn (labels don't have enough
+     fields, and can't be used for REG_NOTES anyway).  */
+  emit_use (stack_pointer_rtx);
+  DONE;
+})
+
+
+(define_expand "fixuns_truncsfdi2"
+  [(set (match_operand:DI 0 "register_operand")
+ (unsigned_fix:DI (match_operand:SF 1 "register_operand")))]
+  "TARGET_HARD_FLOAT && TARGET_64BIT && TARGET_DOUBLE_FLOAT"
+{
+  rtx reg1 = gen_reg_rtx (SFmode);
+  rtx reg2 = gen_reg_rtx (SFmode);
+  rtx reg3 = gen_reg_rtx (DImode);
+  rtx label1 = gen_label_rtx ();
+  rtx label2 = gen_label_rtx ();
+  rtx test;
+  REAL_VALUE_TYPE offset;
+
+  real_2expN (&offset, 63, SFmode);
+
+  mips_emit_move (reg1, CONST_DOUBLE_FROM_REAL_VALUE (offset, SFmode));
+  do_pending_stack_adjust ();
+
+  test = gen_rtx_GE (VOIDmode, operands[1], reg1);
+  emit_jump_insn (gen_cbranchsf4 (test, operands[1], reg1, label1));
+
+  emit_insn (gen_fix_truncsfdi2 (operands[0], operands[1]));
+  emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
+          gen_rtx_LABEL_REF (VOIDmode, label2)));
+  emit_barrier ();
+
+  emit_label (label1);
+  mips_emit_move (reg2, gen_rtx_MINUS (SFmode, operands[1], reg1));
+  mips_emit_move (reg3, GEN_INT (BITMASK_HIGH));
+  emit_insn (gen_ashldi3 (reg3, reg3, GEN_INT (32)));
+
+  emit_insn (gen_fix_truncsfdi2 (operands[0], reg2));
+  emit_insn (gen_iordi3 (operands[0], operands[0], reg3));
+
+  emit_label (label2);
+
+  /* Allow REG_NOTES to be set on last insn (labels don't have enough
+     fields, and can't be used for REG_NOTES anyway).  */
+  emit_use (stack_pointer_rtx);
+  DONE;
+})
+
+;;
+;;  ....................
+;;
+;; DATA MOVEMENT
+;;
+;;  ....................
+
+;; Bit field extract patterns which use lwl/lwr or ldl/ldr.
+
+(define_expand "extvmisalign<mode>"
+  [(set (match_operand:GPR 0 "register_operand")
+ (sign_extract:GPR (match_operand:BLK 1 "memory_operand")
+     (match_operand 2 "const_int_operand")
+     (match_operand 3 "const_int_operand")))]
+  "!TARGET_MIPS16"
+{
+  if (mips_expand_ext_as_unaligned_load (operands[0], operands[1],
+      INTVAL (operands[2]),
+      INTVAL (operands[3]),
+      /*unsigned=*/ false))
+    DONE;
+  else
+    FAIL;
+})
+
+;; Register sign-extract needs the exts instruction; the expander FAILs
+;; for widths above 32, matching the insn's <= 32 condition below.
+(define_expand "extv<mode>"
+  [(set (match_operand:GPR 0 "register_operand")
+ (sign_extract:GPR (match_operand:GPR 1 "register_operand")
+     (match_operand 2 "const_int_operand")
+     (match_operand 3 "const_int_operand")))]
+  "ISA_HAS_EXTS"
+{
+  if (UINTVAL (operands[2]) > 32)
+    FAIL;
+})
+
+(define_insn "*extv<mode>"
+  [(set (match_operand:GPR 0 "register_operand" "=d")
+        (sign_extract:GPR (match_operand:GPR 1 "register_operand" "d")
+     (match_operand 2 "const_int_operand" "")
+     (match_operand 3 "const_int_operand" "")))]
+  "ISA_HAS_EXTS && UINTVAL (operands[2]) <= 32"
+  "exts\t%0,%1,%3,%m2"
+  [(set_attr "type" "arith")
+   (set_attr "mode" "<MODE>")])
+
+(define_expand "extzvmisalign<mode>"
+  [(set (match_operand:GPR 0 "register_operand")
+ (zero_extract:GPR (match_operand:BLK 1 "memory_operand")
+     (match_operand 2 "const_int_operand")
+     (match_operand 3 "const_int_operand")))]
+  "!TARGET_MIPS16"
+{
+  if (mips_expand_ext_as_unaligned_load (operands[0], operands[1],
+      INTVAL (operands[2]),
+      INTVAL (operands[3]),
+      /*unsigned=*/ true))
+    DONE;
+  else
+    FAIL;
+})
+
+(define_expand "extzv<mode>"
+  [(set (match_operand:GPR 0 "register_operand")
+ (zero_extract:GPR (match_operand:GPR 1 "register_operand")
+     (match_operand 2 "const_int_operand")
+     (match_operand 3 "const_int_operand")))]
+  ""
+{
+  if (!mips_use_ins_ext_p (operands[1], INTVAL (operands[2]),
+      INTVAL (operands[3])))
+    FAIL;
+})
+
+(define_insn "*extzv<mode>"
+  [(set (match_operand:GPR 0 "register_operand" "=d")
+ (zero_extract:GPR (match_operand:GPR 1 "register_operand" "d")
+     (match_operand 2 "const_int_operand" "")
+     (match_operand 3 "const_int_operand" "")))]
+  "mips_use_ins_ext_p (operands[1], INTVAL (operands[2]),
+         INTVAL (operands[3]))"
+  "<d>ext\t%0,%1,%3,%2"
+  [(set_attr "type" "arith")
+   (set_attr "mode" "<MODE>")])
+
+(define_insn "*extzv_truncsi_exts"
+  [(set (match_operand:SI 0 "register_operand" "=d")
+        (truncate:SI
+  (zero_extract:DI (match_operand:DI 1 "register_operand" "d")
+     (match_operand 2 "const_int_operand" "")
+     (match_operand 3 "const_int_operand" ""))))]
+  "ISA_HAS_EXTS && TARGET_64BIT && IN_RANGE (INTVAL (operands[2]), 32, 63)"
+  "exts\t%0,%1,%3,31"
+  [(set_attr "type" "arith")
+   (set_attr "mode" "SI")])
+
+
+(define_expand "insvmisalign<mode>"
+  [(set (zero_extract:GPR (match_operand:BLK 0 "memory_operand")
+     (match_operand 1 "const_int_operand")
+     (match_operand 2 "const_int_operand"))
+ (match_operand:GPR 3 "reg_or_0_operand"))]
+  "!TARGET_MIPS16"
+{
+  if (mips_expand_ins_as_unaligned_store (operands[0], operands[3],
+       INTVAL (operands[1]),
+       INTVAL (operands[2])))
+    DONE;
+  else
+    FAIL;
+})
+
+(define_expand "insv<mode>"
+  [(set (zero_extract:GPR (match_operand:GPR 0 "register_operand")
+     (match_operand 1 "const_int_operand")
+     (match_operand 2 "const_int_operand"))
+ (match_operand:GPR 3 "reg_or_0_operand"))]
+  ""
+{
+  if (!mips_use_ins_ext_p (operands[0], INTVAL (operands[1]),
+      INTVAL (operands[2])))
+    FAIL;
+})
+
+(define_insn "*insv<mode>"
+  [(set (zero_extract:GPR (match_operand:GPR 0 "register_operand" "+d")
+     (match_operand:SI 1 "const_int_operand" "")
+     (match_operand:SI 2 "const_int_operand" ""))
+ (match_operand:GPR 3 "reg_or_0_operand" "dJ"))]
+  "mips_use_ins_ext_p (operands[0], INTVAL (operands[1]),
+         INTVAL (operands[2]))"
+  "<d>ins\t%0,%z3,%2,%1"
+  [(set_attr "type" "arith")
+   (set_attr "mode" "<MODE>")])
+
+;; Combiner pattern for cins (clear and insert bit field).  We can
+;; implement mask-and-shift-left operation with this.  Note that if
+;; the upper bit of the mask is set in an SImode operation, the mask
+;; itself will be sign-extended.  mask_low_and_shift_len will
+;; therefore be greater than our threshold of 32.
+
+(define_insn "*cins<mode>"
+  [(set (match_operand:GPR 0 "register_operand" "=d")
+ (and:GPR
+  (ashift:GPR (match_operand:GPR 1 "register_operand" "d")
+       (match_operand:GPR 2 "const_int_operand" ""))
+  (match_operand:GPR 3 "const_int_operand" "")))]
+  "ISA_HAS_CINS
+   && mask_low_and_shift_p (<MODE>mode, operands[3], operands[2], 32)"
+{
+  ;; Rewrite the mask as the field length expected by cins.
+  operands[3] =
+    GEN_INT (mask_low_and_shift_len (<MODE>mode, operands[3], operands[2]));
+  return "cins\t%0,%1,%2,%m3";
+}
+  [(set_attr "type" "shift")
+   (set_attr "mode" "<MODE>")])
+
+;; Unaligned word moves generated by the bit field patterns.
+;;
+;; As far as the rtl is concerned, both the left-part and right-part
+;; instructions can access the whole field.  However, the real operand
+;; refers to just the first or the last byte (depending on endianness).
+;; We therefore use two memory operands to each instruction, one to
+;; describe the rtl effect and one to use in the assembly output.
+;;
+;; Operands 0 and 1 are the rtl-level target and source respectively.
+;; This allows us to use the standard length calculations for the "load"
+;; and "store" type attributes.
+
+;; lwl/ldl: operand 1 is the rtl-level BLK, operand 2 (constraint "ZC")
+;; is the single-byte operand actually printed in the assembly.
+(define_insn "mov_<load>l"
+  [(set (match_operand:GPR 0 "register_operand" "=d")
+ (unspec:GPR [(match_operand:BLK 1 "memory_operand" "m")
+       (match_operand:QI 2 "memory_operand" "ZC")]
+      UNSPEC_LOAD_LEFT))]
+  "!TARGET_MIPS16 && mips_mem_fits_mode_p (<MODE>mode, operands[1])"
+  "<load>l\t%0,%2"
+  [(set_attr "move_type" "load")
+   (set_attr "mode" "<MODE>")])
+
+;; lwr/ldr: operand 3 ties the partially-loaded destination as an input.
+(define_insn "mov_<load>r"
+  [(set (match_operand:GPR 0 "register_operand" "=d")
+ (unspec:GPR [(match_operand:BLK 1 "memory_operand" "m")
+       (match_operand:QI 2 "memory_operand" "ZC")
+       (match_operand:GPR 3 "register_operand" "0")]
+      UNSPEC_LOAD_RIGHT))]
+  "!TARGET_MIPS16 && mips_mem_fits_mode_p (<MODE>mode, operands[1])"
+  "<load>r\t%0,%2"
+  [(set_attr "move_type" "load")
+   (set_attr "mode" "<MODE>")])
+
+(define_insn "mov_<store>l"
+  [(set (match_operand:BLK 0 "memory_operand" "=m")
+ (unspec:BLK [(match_operand:GPR 1 "reg_or_0_operand" "dJ")
+       (match_operand:QI 2 "memory_operand" "ZC")]
+      UNSPEC_STORE_LEFT))]
+  "!TARGET_MIPS16 && mips_mem_fits_mode_p (<MODE>mode, operands[0])"
+  "<store>l\t%z1,%2"
+  [(set_attr "move_type" "store")
+   (set_attr "mode" "<MODE>")])
+
+;; swr/sdr: (match_dup 0) records that the store only partially updates
+;; the destination, hence the "+m" constraint.
+(define_insn "mov_<store>r"
+  [(set (match_operand:BLK 0 "memory_operand" "+m")
+ (unspec:BLK [(match_operand:GPR 1 "reg_or_0_operand" "dJ")
+       (match_operand:QI 2 "memory_operand" "ZC")
+       (match_dup 0)]
+      UNSPEC_STORE_RIGHT))]
+  "!TARGET_MIPS16 && mips_mem_fits_mode_p (<MODE>mode, operands[0])"
+  "<store>r\t%z1,%2"
+  [(set_attr "move_type" "store")
+   (set_attr "mode" "<MODE>")])
+
+;; An instruction to calculate the high part of a 64-bit SYMBOL_ABSOLUTE.
+;; The required value is:
+;;
+;; (%highest(op1) << 48) + (%higher(op1) << 32) + (%hi(op1) << 16)
+;;
+;; which translates to:
+;;
+;; lui op0,%highest(op1)
+;; daddiu op0,op0,%higher(op1)
+;; dsll op0,op0,16
+;; daddiu op0,op0,%hi(op1)
+;; dsll op0,op0,16
+;;
+;; The split is deferred until after flow2 to allow the peephole2 below
+;; to take effect.
+(define_insn_and_split "*lea_high64"
+ [(set (match_operand:DI 0 "register_operand" "=d")
+ (high:DI (match_operand:DI 1 "absolute_symbolic_operand" "")))]
+ "TARGET_EXPLICIT_RELOCS && ABI_HAS_64BIT_SYMBOLS"
+ "#"
+ "&& epilogue_completed"
+ [(set (match_dup 0) (high:DI (match_dup 2)))
+ (set (match_dup 0) (lo_sum:DI (match_dup 0) (match_dup 2)))
+ (set (match_dup 0) (ashift:DI (match_dup 0) (const_int 16)))
+ (set (match_dup 0) (lo_sum:DI (match_dup 0) (match_dup 3)))
+ (set (match_dup 0) (ashift:DI (match_dup 0) (const_int 16)))]
+{
+ operands[2] = mips_unspec_address (operands[1], SYMBOL_64_HIGH);
+ operands[3] = mips_unspec_address (operands[1], SYMBOL_64_MID);
+}
+ [(set_attr "insn_count" "5")])
+
+;; Use a scratch register to reduce the latency of the above pattern
+;; on superscalar machines. The optimized sequence is:
+;;
+;; lui op1,%highest(op2)
+;; lui op0,%hi(op2)
+;; daddiu op1,op1,%higher(op2)
+;; dsll32 op1,op1,0
+;; daddu op1,op1,op0
+;; Operand 1 is the register set by the *lea_high64 pattern above;
+;; operand 0 is a scratch GPR used to overlap the two LUIs so the
+;; dependent chain is shorter on superscalar cores.
+(define_peephole2
+ [(set (match_operand:DI 1 "d_operand")
+ (high:DI (match_operand:DI 2 "absolute_symbolic_operand")))
+ (match_scratch:DI 0 "d")]
+ "TARGET_EXPLICIT_RELOCS && ABI_HAS_64BIT_SYMBOLS"
+ [(set (match_dup 1) (high:DI (match_dup 3)))
+ (set (match_dup 0) (high:DI (match_dup 4)))
+ (set (match_dup 1) (lo_sum:DI (match_dup 1) (match_dup 3)))
+ (set (match_dup 1) (ashift:DI (match_dup 1) (const_int 32)))
+ (set (match_dup 1) (plus:DI (match_dup 1) (match_dup 0)))]
+{
+ operands[3] = mips_unspec_address (operands[2], SYMBOL_64_HIGH);
+ operands[4] = mips_unspec_address (operands[2], SYMBOL_64_LOW);
+})
+
+;; On most targets, the expansion of (lo_sum (high X) X) for a 64-bit
+;; SYMBOL_ABSOLUTE X will take 6 cycles. This next pattern allows combine
+;; to merge the HIGH and LO_SUM parts of a move if the HIGH part is only
+;; used once. We can then use the sequence:
+;;
+;; lui op0,%highest(op1)
+;; lui op2,%hi(op1)
+;; daddiu op0,op0,%higher(op1)
+;; daddiu op2,op2,%lo(op1)
+;; dsll32 op0,op0,0
+;; daddu op0,op0,op2
+;;
+;; which takes 4 cycles on most superscalar targets.
+(define_insn_and_split "*lea64"
+ [(set (match_operand:DI 0 "register_operand" "=d")
+ (match_operand:DI 1 "absolute_symbolic_operand" ""))
+ (clobber (match_scratch:DI 2 "=&d"))]
+ "TARGET_EXPLICIT_RELOCS && ABI_HAS_64BIT_SYMBOLS && cse_not_expected"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 0) (high:DI (match_dup 3)))
+ (set (match_dup 2) (high:DI (match_dup 4)))
+ (set (match_dup 0) (lo_sum:DI (match_dup 0) (match_dup 3)))
+ (set (match_dup 2) (lo_sum:DI (match_dup 2) (match_dup 4)))
+ (set (match_dup 0) (ashift:DI (match_dup 0) (const_int 32)))
+ (set (match_dup 0) (plus:DI (match_dup 0) (match_dup 2)))]
+{
+ operands[3] = mips_unspec_address (operands[1], SYMBOL_64_HIGH);
+ operands[4] = mips_unspec_address (operands[1], SYMBOL_64_LOW);
+}
+ [(set_attr "insn_count" "6")])
+
+;; Split HIGHs into:
+;;
+;; li op0,%hi(sym)
+;; sll op0,16
+;;
+;; on MIPS16 targets.
+;; MIPS16 has no LUI, so materialise HIGH as an unshifted %hi load
+;; followed by an explicit shift by 16.
+(define_split
+ [(set (match_operand:P 0 "d_operand")
+ (high:P (match_operand:P 1 "symbolic_operand_with_high")))]
+ "TARGET_MIPS16 && reload_completed"
+ [(set (match_dup 0) (unspec:P [(match_dup 1)] UNSPEC_UNSHIFTED_HIGH))
+ (set (match_dup 0) (ashift:P (match_dup 0) (const_int 16)))])
+
+;; The "li op0,%h1" half of the split above.  Marked extended because
+;; the immediate does not fit in a short MIPS16 encoding.
+(define_insn "*unshifted_high"
+ [(set (match_operand:P 0 "d_operand" "=d")
+ (unspec:P [(match_operand:P 1 "symbolic_operand_with_high")]
+ UNSPEC_UNSHIFTED_HIGH))]
+ ""
+ "li\t%0,%h1"
+ [(set_attr "extended_mips16" "yes")])
+
+;; Insns to fetch a symbol from a big GOT.
+
+(define_insn_and_split "*xgot_hi<mode>"
+ [(set (match_operand:P 0 "register_operand" "=d")
+ (high:P (match_operand:P 1 "got_disp_operand" "")))]
+ "TARGET_EXPLICIT_RELOCS && TARGET_XGOT"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 0) (high:P (match_dup 2)))
+ (set (match_dup 0) (plus:P (match_dup 0) (match_dup 3)))]
+{
+ operands[2] = mips_unspec_address (operands[1], SYMBOL_GOTOFF_DISP);
+ operands[3] = pic_offset_table_rtx;
+}
+ [(set_attr "got" "xgot_high")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn_and_split "*xgot_lo<mode>"
+ [(set (match_operand:P 0 "register_operand" "=d")
+ (lo_sum:P (match_operand:P 1 "register_operand" "d")
+ (match_operand:P 2 "got_disp_operand" "")))]
+ "TARGET_EXPLICIT_RELOCS && TARGET_XGOT"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 0)
+ (unspec:P [(match_dup 1) (match_dup 3)] UNSPEC_LOAD_GOT))]
+ { operands[3] = mips_unspec_address (operands[2], SYMBOL_GOTOFF_DISP); }
+ [(set_attr "got" "load")
+ (set_attr "mode" "<MODE>")])
+
+;; Insns to fetch a symbol from a normal GOT.
+
+(define_insn_and_split "*got_disp<mode>"
+ [(set (match_operand:P 0 "register_operand" "=d")
+ (match_operand:P 1 "got_disp_operand" ""))]
+ "TARGET_EXPLICIT_RELOCS && !mips_split_p[SYMBOL_GOT_DISP]"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 0) (match_dup 2))]
+ { operands[2] = mips_got_load (NULL, operands[1], SYMBOL_GOTOFF_DISP); }
+ [(set_attr "got" "load")
+ (set_attr "mode" "<MODE>")])
+
+;; Insns for loading the "page" part of a page/ofst address from the GOT.
+
+(define_insn_and_split "*got_page<mode>"
+ [(set (match_operand:P 0 "register_operand" "=d")
+ (high:P (match_operand:P 1 "got_page_ofst_operand" "")))]
+ "TARGET_EXPLICIT_RELOCS && !mips_split_hi_p[SYMBOL_GOT_PAGE_OFST]"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 0) (match_dup 2))]
+ { operands[2] = mips_got_load (NULL, operands[1], SYMBOL_GOTOFF_PAGE); }
+ [(set_attr "got" "load")
+ (set_attr "mode" "<MODE>")])
+
+;; Convenience expander that generates the rhs of a load_got<mode> insn.
+(define_expand "unspec_got_<mode>"
+ [(unspec:P [(match_operand:P 0)
+ (match_operand:P 1)] UNSPEC_LOAD_GOT)])
+
+;; Lower-level instructions for loading an address from the GOT.
+;; We could use MEMs, but an unspec gives more optimization
+;; opportunities.
+
+(define_insn "load_got<mode>"
+ [(set (match_operand:P 0 "register_operand" "=d")
+ (unspec:P [(match_operand:P 1 "register_operand" "d")
+ (match_operand:P 2 "immediate_operand" "")]
+ UNSPEC_LOAD_GOT))]
+ ""
+ "<load>\t%0,%R2(%1)"
+ [(set_attr "got" "load")
+ (set_attr "mode" "<MODE>")])
+
+;; Instructions for adding the low 16 bits of an address to a register.
+;; Operand 2 is the address: mips_print_operand works out which relocation
+;; should be applied.
+
+(define_insn "*low<mode>"
+ [(set (match_operand:P 0 "register_operand" "=d")
+ (lo_sum:P (match_operand:P 1 "register_operand" "d")
+ (match_operand:P 2 "immediate_operand" "")))]
+ "!TARGET_MIPS16"
+ "<d>addiu\t%0,%1,%R2"
+ [(set_attr "alu_type" "add")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "*low<mode>_mips16"
+ [(set (match_operand:P 0 "register_operand" "=d")
+ (lo_sum:P (match_operand:P 1 "register_operand" "0")
+ (match_operand:P 2 "immediate_operand" "")))]
+ "TARGET_MIPS16"
+ "<d>addiu\t%0,%R2"
+ [(set_attr "alu_type" "add")
+ (set_attr "mode" "<MODE>")
+ (set_attr "extended_mips16" "yes")])
+
+;; Expose MIPS16 uses of the global pointer after reload if the function
+;; is responsible for setting up the register itself.
+;; Rewrites the UNSPEC_GP constant as a plain use of the GOT register.
+(define_split
+ [(set (match_operand:GPR 0 "d_operand")
+ (const:GPR (unspec:GPR [(const_int 0)] UNSPEC_GP)))]
+ "TARGET_MIPS16 && TARGET_USE_GOT && reload_completed"
+ [(set (match_dup 0) (match_dup 1))]
+ { operands[1] = pic_offset_table_rtx; })
+
+;; Allow combine to split complex const_int load sequences, using operand 2
+;; to store the intermediate results. See move_operand for details.
+;; mips_move_integer takes (temp, dest, value), so operand 2 is the
+;; scratch and operand 0 the final destination.
+(define_split
+ [(set (match_operand:GPR 0 "register_operand")
+ (match_operand:GPR 1 "splittable_const_int_operand"))
+ (clobber (match_operand:GPR 2 "register_operand"))]
+ ""
+ [(const_int 0)]
+{
+ mips_move_integer (operands[2], operands[0], INTVAL (operands[1]));
+ DONE;
+})
+
+;; Likewise, for symbolic operands.
+;; The split condition reuses mips_split_symbol as a predicate (NULL
+;; low-out); the body calls it again to obtain the lo_sum in operand 3.
+(define_split
+ [(set (match_operand:P 0 "register_operand")
+ (match_operand:P 1))
+ (clobber (match_operand:P 2 "register_operand"))]
+ "mips_split_symbol (operands[2], operands[1], MAX_MACHINE_MODE, NULL)"
+ [(set (match_dup 0) (match_dup 3))]
+{
+ mips_split_symbol (operands[2], operands[1],
+ MAX_MACHINE_MODE, &operands[3]);
+})
+
+;; 64-bit integer moves
+
+;; Unlike most other insns, the move insns can't be split with
+;; different predicates, because register spilling and other parts of
+;; the compiler have already memoized the insn number.
+
+(define_expand "movdi"
+ [(set (match_operand:DI 0 "")
+ (match_operand:DI 1 ""))]
+ ""
+{
+ if (mips_legitimize_move (DImode, operands[0], operands[1]))
+ DONE;
+})
+
+;; For mips16, we need a special case to handle storing $31 into
+;; memory, since we don't have a constraint to match $31. This
+;; instruction can be generated by save_restore_insns.
+
+(define_insn "*mov<mode>_ra"
+ [(set (match_operand:GPR 0 "stack_operand" "=m")
+ (reg:GPR RETURN_ADDR_REGNUM))]
+ "TARGET_MIPS16"
+ "<store>\t$31,%0"
+ [(set_attr "move_type" "store")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "*movdi_32bit"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=d,d,d,m,*a,*a,*d,*f,*f,*d,*m,*B*C*D,*B*C*D,*d,*m")
+ (match_operand:DI 1 "move_operand" "d,i,m,d,*J,*d,*a,*J*d,*m,*f,*f,*d,*m,*B*C*D,*B*C*D"))]
+ "!TARGET_64BIT && !TARGET_MIPS16
+ && (register_operand (operands[0], DImode)
+ || reg_or_0_operand (operands[1], DImode))"
+ { return mips_output_move (operands[0], operands[1]); }
+ [(set_attr "move_type" "move,const,load,store,imul,mtlo,mflo,mtc,fpload,mfc,fpstore,mtc,fpload,mfc,fpstore")
+ (set (attr "mode")
+ (if_then_else (eq_attr "move_type" "imul")
+ (const_string "SI")
+ (const_string "DI")))])
+
+(define_insn "*movdi_32bit_mips16"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=d,y,d,d,d,d,m,*d")
+ (match_operand:DI 1 "move_operand" "d,d,y,K,N,m,d,*x"))]
+ "!TARGET_64BIT && TARGET_MIPS16
+ && (register_operand (operands[0], DImode)
+ || register_operand (operands[1], DImode))"
+ { return mips_output_move (operands[0], operands[1]); }
+ [(set_attr "move_type" "move,move,move,const,constN,load,store,mflo")
+ (set_attr "mode" "DI")])
+
+(define_insn "*movdi_64bit"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=d,d,e,d,m,*f,*f,*d,*m,*a,*d,*B*C*D,*B*C*D,*d,*m")
+ (match_operand:DI 1 "move_operand" "d,Yd,Yf,m,dJ,*d*J,*m,*f,*f,*J*d,*a,*d,*m,*B*C*D,*B*C*D"))]
+ "TARGET_64BIT && !TARGET_MIPS16
+ && (register_operand (operands[0], DImode)
+ || reg_or_0_operand (operands[1], DImode))"
+ { return mips_output_move (operands[0], operands[1]); }
+ [(set_attr "move_type" "move,const,const,load,store,mtc,fpload,mfc,fpstore,mtlo,mflo,mtc,fpload,mfc,fpstore")
+ (set_attr "mode" "DI")])
+
+(define_insn "*movdi_64bit_mips16"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=d,y,d,d,d,d,d,d,m,*d")
+ (match_operand:DI 1 "move_operand" "d,d,y,K,N,Yd,kf,m,d,*a"))]
+ "TARGET_64BIT && TARGET_MIPS16
+ && (register_operand (operands[0], DImode)
+ || register_operand (operands[1], DImode))"
+ { return mips_output_move (operands[0], operands[1]); }
+ [(set_attr "move_type" "move,move,move,const,constN,const,loadpool,load,store,mflo")
+ (set_attr "mode" "DI")])
+
+;; On the mips16, we can split ld $r,N($r) into an add and a load,
+;; when the original load is a 4 byte instruction but the add and the
+;; load are two 2-byte instructions.
+
+;; The conditions accept offsets that do not fit a short MIPS16 LD:
+;; small negatives, large positives, or unaligned ones.  The body
+;; rebalances the offset between the ADD (operand 1) and the LD
+;; (operand 2) so both fit short encodings.
+(define_split
+ [(set (match_operand:DI 0 "d_operand")
+ (mem:DI (plus:DI (match_dup 0)
+ (match_operand:DI 1 "const_int_operand"))))]
+ "TARGET_64BIT && TARGET_MIPS16 && reload_completed
+ && !TARGET_DEBUG_D_MODE
+ && ((INTVAL (operands[1]) < 0
+ && INTVAL (operands[1]) >= -0x10)
+ || (INTVAL (operands[1]) >= 32 * 8
+ && INTVAL (operands[1]) <= 31 * 8 + 0x8)
+ || (INTVAL (operands[1]) >= 0
+ && INTVAL (operands[1]) < 32 * 8
+ && (INTVAL (operands[1]) & 7) != 0))"
+ [(set (match_dup 0) (plus:DI (match_dup 0) (match_dup 1)))
+ (set (match_dup 0) (mem:DI (plus:DI (match_dup 0) (match_dup 2))))]
+{
+ HOST_WIDE_INT val = INTVAL (operands[1]);
+
+ if (val < 0)
+ operands[2] = const0_rtx;
+ else if (val >= 32 * 8)
+ {
+ int off = val & 7;
+
+ operands[1] = GEN_INT (0x8 + off);
+ operands[2] = GEN_INT (val - off - 0x8);
+ }
+ else
+ {
+ int off = val & 7;
+
+ operands[1] = GEN_INT (off);
+ operands[2] = GEN_INT (val - off);
+ }
+})
+
+;; 32-bit Integer moves
+
+;; Unlike most other insns, the move insns can't be split with
+;; different predicates, because register spilling and other parts of
+;; the compiler have already memoized the insn number.
+
+(define_expand "mov<mode>"
+ [(set (match_operand:IMOVE32 0 "")
+ (match_operand:IMOVE32 1 ""))]
+ ""
+{
+ if (mips_legitimize_move (<MODE>mode, operands[0], operands[1]))
+ DONE;
+})
+
+;; The difference between these two is whether or not ints are allowed
+;; in FP registers (off by default, use -mdebugh to enable).
+
+(define_insn "*mov<mode>_internal"
+ [(set (match_operand:IMOVE32 0 "nonimmediate_operand" "=d,!u,!u,d,e,!u,!ks,d,ZS,ZT,m,*f,*f,*d,*m,*d,*z,*a,*d,*B*C*D,*B*C*D,*d,*m")
+ (match_operand:IMOVE32 1 "move_operand" "d,J,Udb7,Yd,Yf,ZT,ZS,m,!ks,!u,dJ,*d*J,*m,*f,*f,*z,*d,*J*d,*a,*d,*m,*B*C*D,*B*C*D"))]
+ "!TARGET_MIPS16
+ && (register_operand (operands[0], <MODE>mode)
+ || reg_or_0_operand (operands[1], <MODE>mode))"
+ { return mips_output_move (operands[0], operands[1]); }
+ [(set_attr "move_type" "move,move,const,const,const,load,load,load,store,store,store,mtc,fpload,mfc,fpstore,mfc,mtc,mtlo,mflo,mtc,fpload,mfc,fpstore")
+ (set_attr "compression" "all,micromips,micromips,*,*,micromips,micromips,*,micromips,micromips,*,*,*,*,*,*,*,*,*,*,*,*,*")
+ (set_attr "mode" "SI")])
+
+(define_insn "*mov<mode>_mips16"
+ [(set (match_operand:IMOVE32 0 "nonimmediate_operand" "=d,y,d,d,d,d,d,d,m,*d")
+ (match_operand:IMOVE32 1 "move_operand" "d,d,y,K,N,Yd,kf,m,d,*a"))]
+ "TARGET_MIPS16
+ && (register_operand (operands[0], <MODE>mode)
+ || register_operand (operands[1], <MODE>mode))"
+ { return mips_output_move (operands[0], operands[1]); }
+ [(set_attr "move_type" "move,move,move,const,constN,const,loadpool,load,store,mflo")
+ (set_attr "mode" "SI")])
+
+;; On the mips16, we can split lw $r,N($r) into an add and a load,
+;; when the original load is a 4 byte instruction but the add and the
+;; load are two 2-byte instructions.
+
+;; SImode analogue of the DI split above: word granularity (4 bytes),
+;; so the rebalancing constants are 0x7c and the alignment mask 3.
+(define_split
+ [(set (match_operand:SI 0 "d_operand")
+ (mem:SI (plus:SI (match_dup 0)
+ (match_operand:SI 1 "const_int_operand"))))]
+ "TARGET_MIPS16 && reload_completed && !TARGET_DEBUG_D_MODE
+ && ((INTVAL (operands[1]) < 0
+ && INTVAL (operands[1]) >= -0x80)
+ || (INTVAL (operands[1]) >= 32 * 4
+ && INTVAL (operands[1]) <= 31 * 4 + 0x7c)
+ || (INTVAL (operands[1]) >= 0
+ && INTVAL (operands[1]) < 32 * 4
+ && (INTVAL (operands[1]) & 3) != 0))"
+ [(set (match_dup 0) (plus:SI (match_dup 0) (match_dup 1)))
+ (set (match_dup 0) (mem:SI (plus:SI (match_dup 0) (match_dup 2))))]
+{
+ HOST_WIDE_INT val = INTVAL (operands[1]);
+
+ if (val < 0)
+ operands[2] = const0_rtx;
+ else if (val >= 32 * 4)
+ {
+ int off = val & 3;
+
+ operands[1] = GEN_INT (0x7c + off);
+ operands[2] = GEN_INT (val - off - 0x7c);
+ }
+ else
+ {
+ int off = val & 3;
+
+ operands[1] = GEN_INT (off);
+ operands[2] = GEN_INT (val - off);
+ }
+})
+
+;; On the mips16, we can split a load of certain constants into a load
+;; and an add. This turns a 4 byte instruction into two 2-byte
+;; instructions.
+
+;; Constants in (0x100, 0xff + 0x7f] become "li 0xff" plus a short
+;; ADDIU of the remainder.
+(define_split
+ [(set (match_operand:SI 0 "d_operand")
+ (match_operand:SI 1 "const_int_operand"))]
+ "TARGET_MIPS16 && reload_completed && !TARGET_DEBUG_D_MODE
+ && INTVAL (operands[1]) >= 0x100
+ && INTVAL (operands[1]) <= 0xff + 0x7f"
+ [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 0) (plus:SI (match_dup 0) (match_dup 2)))]
+{
+ int val = INTVAL (operands[1]);
+
+ operands[1] = GEN_INT (0xff);
+ operands[2] = GEN_INT (val - 0xff);
+})
+
+;; MIPS4 supports loading and storing a floating point register from
+;; the sum of two general registers. We use two versions for each of
+;; these four instructions: one where the two general registers are
+;; SImode, and one where they are DImode. This is because general
+;; registers will be in SImode when they hold 32-bit values, but,
+;; since the 32-bit values are always sign extended, the [ls][wd]xc1
+;; instructions will still work correctly.
+
+;; ??? Perhaps it would be better to support these instructions by
+;; modifying TARGET_LEGITIMATE_ADDRESS_P and friends. However, since
+;; these instructions can only be used to load and store floating
+;; point registers, that would probably cause trouble in reload.
+
+(define_insn "*<ANYF:loadx>_<P:mode>"
+ [(set (match_operand:ANYF 0 "register_operand" "=f")
+ (mem:ANYF (plus:P (match_operand:P 1 "register_operand" "d")
+ (match_operand:P 2 "register_operand" "d"))))]
+ "ISA_HAS_LXC1_SXC1"
+ "<ANYF:loadx>\t%0,%1(%2)"
+ [(set_attr "type" "fpidxload")
+ (set_attr "mode" "<ANYF:UNITMODE>")])
+
+(define_insn "*<ANYF:storex>_<P:mode>"
+ [(set (mem:ANYF (plus:P (match_operand:P 1 "register_operand" "d")
+ (match_operand:P 2 "register_operand" "d")))
+ (match_operand:ANYF 0 "register_operand" "f"))]
+ "ISA_HAS_LXC1_SXC1"
+ "<ANYF:storex>\t%0,%1(%2)"
+ [(set_attr "type" "fpidxstore")
+ (set_attr "mode" "<ANYF:UNITMODE>")])
+
+;; Scaled indexed address load.
+;; Per md.texi, we only need to look for a pattern with multiply in the
+;; address expression, not shift.
+
+(define_insn "*lwxs"
+ [(set (match_operand:IMOVE32 0 "register_operand" "=d")
+ (mem:IMOVE32
+ (plus:P (mult:P (match_operand:P 1 "register_operand" "d")
+ (const_int 4))
+ (match_operand:P 2 "register_operand" "d"))))]
+ "ISA_HAS_LWXS"
+ "lwxs\t%0,%1(%2)"
+ [(set_attr "type" "load")
+ (set_attr "mode" "SI")])
+
+;; 16-bit Integer moves
+
+;; Unlike most other insns, the move insns can't be split with
+;; different predicates, because register spilling and other parts of
+;; the compiler have already memoized the insn number.
+;; Unsigned loads are used because LOAD_EXTEND_OP returns ZERO_EXTEND.
+
+(define_expand "movhi"
+ [(set (match_operand:HI 0 "")
+ (match_operand:HI 1 ""))]
+ ""
+{
+ if (mips_legitimize_move (HImode, operands[0], operands[1]))
+ DONE;
+})
+
+(define_insn "*movhi_internal"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=d,!u,d,!u,d,ZU,m,*a,*d")
+ (match_operand:HI 1 "move_operand" "d,J,I,ZU,m,!u,dJ,*d*J,*a"))]
+ "!TARGET_MIPS16
+ && (register_operand (operands[0], HImode)
+ || reg_or_0_operand (operands[1], HImode))"
+ { return mips_output_move (operands[0], operands[1]); }
+ [(set_attr "move_type" "move,const,const,load,load,store,store,mtlo,mflo")
+ (set_attr "compression" "all,micromips,*,micromips,*,micromips,*,*,*")
+ (set_attr "mode" "HI")])
+
+(define_insn "*movhi_mips16"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=d,y,d,d,d,d,m,*d")
+ (match_operand:HI 1 "move_operand" "d,d,y,K,N,m,d,*a"))]
+ "TARGET_MIPS16
+ && (register_operand (operands[0], HImode)
+ || register_operand (operands[1], HImode))"
+ { return mips_output_move (operands[0], operands[1]); }
+ [(set_attr "move_type" "move,move,move,const,constN,load,store,mflo")
+ (set_attr "mode" "HI")])
+
+;; On the mips16, we can split lh $r,N($r) into an add and a load,
+;; when the original load is a 4 byte instruction but the add and the
+;; load are two 2-byte instructions.
+
+;; Halfword analogue of the SI split: granularity 2 bytes, so the
+;; rebalancing constant is 0x7e and the alignment mask 1.
+(define_split
+ [(set (match_operand:HI 0 "d_operand")
+ (mem:HI (plus:SI (match_dup 0)
+ (match_operand:SI 1 "const_int_operand"))))]
+ "TARGET_MIPS16 && reload_completed && !TARGET_DEBUG_D_MODE
+ && ((INTVAL (operands[1]) < 0
+ && INTVAL (operands[1]) >= -0x80)
+ || (INTVAL (operands[1]) >= 32 * 2
+ && INTVAL (operands[1]) <= 31 * 2 + 0x7e)
+ || (INTVAL (operands[1]) >= 0
+ && INTVAL (operands[1]) < 32 * 2
+ && (INTVAL (operands[1]) & 1) != 0))"
+ [(set (match_dup 0) (plus:SI (match_dup 0) (match_dup 1)))
+ (set (match_dup 0) (mem:HI (plus:SI (match_dup 0) (match_dup 2))))]
+{
+ HOST_WIDE_INT val = INTVAL (operands[1]);
+
+ if (val < 0)
+ operands[2] = const0_rtx;
+ else if (val >= 32 * 2)
+ {
+ int off = val & 1;
+
+ operands[1] = GEN_INT (0x7e + off);
+ operands[2] = GEN_INT (val - off - 0x7e);
+ }
+ else
+ {
+ int off = val & 1;
+
+ operands[1] = GEN_INT (off);
+ operands[2] = GEN_INT (val - off);
+ }
+})
+
+;; 8-bit Integer moves
+
+;; Unlike most other insns, the move insns can't be split with
+;; different predicates, because register spilling and other parts of
+;; the compiler have already memoized the insn number.
+;; Unsigned loads are used because LOAD_EXTEND_OP returns ZERO_EXTEND.
+
+(define_expand "movqi"
+ [(set (match_operand:QI 0 "")
+ (match_operand:QI 1 ""))]
+ ""
+{
+ if (mips_legitimize_move (QImode, operands[0], operands[1]))
+ DONE;
+})
+
+(define_insn "*movqi_internal"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=d,!u,d,!u,d,ZV,m,*a,*d")
+ (match_operand:QI 1 "move_operand" "d,J,I,ZW,m,!u,dJ,*d*J,*a"))]
+ "!TARGET_MIPS16
+ && (register_operand (operands[0], QImode)
+ || reg_or_0_operand (operands[1], QImode))"
+ { return mips_output_move (operands[0], operands[1]); }
+ [(set_attr "move_type" "move,const,const,load,load,store,store,mtlo,mflo")
+ (set_attr "compression" "all,micromips,*,micromips,*,micromips,*,*,*")
+ (set_attr "mode" "QI")])
+
+(define_insn "*movqi_mips16"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=d,y,d,d,d,d,m,*d")
+ (match_operand:QI 1 "move_operand" "d,d,y,K,N,m,d,*a"))]
+ "TARGET_MIPS16
+ && (register_operand (operands[0], QImode)
+ || register_operand (operands[1], QImode))"
+ { return mips_output_move (operands[0], operands[1]); }
+ [(set_attr "move_type" "move,move,move,const,constN,load,store,mflo")
+ (set_attr "mode" "QI")])
+
+;; On the mips16, we can split lb $r,N($r) into an add and a load,
+;; when the original load is a 4 byte instruction but the add and the
+;; load are two 2-byte instructions.
+
+;; Byte loads have no alignment case, so only the negative and
+;; out-of-range offsets are rebalanced.
+(define_split
+ [(set (match_operand:QI 0 "d_operand")
+ (mem:QI (plus:SI (match_dup 0)
+ (match_operand:SI 1 "const_int_operand"))))]
+ "TARGET_MIPS16 && reload_completed && !TARGET_DEBUG_D_MODE
+ && ((INTVAL (operands[1]) < 0
+ && INTVAL (operands[1]) >= -0x80)
+ || (INTVAL (operands[1]) >= 32
+ && INTVAL (operands[1]) <= 31 + 0x7f))"
+ [(set (match_dup 0) (plus:SI (match_dup 0) (match_dup 1)))
+ (set (match_dup 0) (mem:QI (plus:SI (match_dup 0) (match_dup 2))))]
+{
+ HOST_WIDE_INT val = INTVAL (operands[1]);
+
+ if (val < 0)
+ operands[2] = const0_rtx;
+ else
+ {
+ operands[1] = GEN_INT (0x7f);
+ operands[2] = GEN_INT (val - 0x7f);
+ }
+})
+
+;; 32-bit floating point moves
+
+(define_expand "movsf"
+ [(set (match_operand:SF 0 "")
+ (match_operand:SF 1 ""))]
+ ""
+{
+ if (mips_legitimize_move (SFmode, operands[0], operands[1]))
+ DONE;
+})
+
+(define_insn "*movsf_hardfloat"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=f,f,f,m,m,*f,*d,*d,*d,*m")
+ (match_operand:SF 1 "move_operand" "f,G,m,f,G,*d,*f,*G*d,*m,*d"))]
+ "TARGET_HARD_FLOAT
+ && (register_operand (operands[0], SFmode)
+ || reg_or_0_operand (operands[1], SFmode))"
+ { return mips_output_move (operands[0], operands[1]); }
+ [(set_attr "move_type" "fmove,mtc,fpload,fpstore,store,mtc,mfc,move,load,store")
+ (set_attr "mode" "SF")])
+
+(define_insn "*movsf_softfloat"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=d,d,m")
+ (match_operand:SF 1 "move_operand" "Gd,m,d"))]
+ "TARGET_SOFT_FLOAT && !TARGET_MIPS16
+ && (register_operand (operands[0], SFmode)
+ || reg_or_0_operand (operands[1], SFmode))"
+ { return mips_output_move (operands[0], operands[1]); }
+ [(set_attr "move_type" "move,load,store")
+ (set_attr "mode" "SF")])
+
+(define_insn "*movsf_mips16"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=d,y,d,d,m")
+ (match_operand:SF 1 "move_operand" "d,d,y,m,d"))]
+ "TARGET_MIPS16
+ && (register_operand (operands[0], SFmode)
+ || register_operand (operands[1], SFmode))"
+ { return mips_output_move (operands[0], operands[1]); }
+ [(set_attr "move_type" "move,move,move,load,store")
+ (set_attr "mode" "SF")])
+
+;; 64-bit floating point moves
+
+(define_expand "movdf"
+ [(set (match_operand:DF 0 "")
+ (match_operand:DF 1 ""))]
+ ""
+{
+ if (mips_legitimize_move (DFmode, operands[0], operands[1]))
+ DONE;
+})
+
+(define_insn "*movdf_hardfloat"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=f,f,f,m,m,*f,*d,*d,*d,*m")
+ (match_operand:DF 1 "move_operand" "f,G,m,f,G,*d,*f,*d*G,*m,*d"))]
+ "TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
+ && (register_operand (operands[0], DFmode)
+ || reg_or_0_operand (operands[1], DFmode))"
+ { return mips_output_move (operands[0], operands[1]); }
+ [(set_attr "move_type" "fmove,mtc,fpload,fpstore,store,mtc,mfc,move,load,store")
+ (set_attr "mode" "DF")])
+
+(define_insn "*movdf_softfloat"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=d,d,m")
+ (match_operand:DF 1 "move_operand" "dG,m,dG"))]
+ "(TARGET_SOFT_FLOAT || TARGET_SINGLE_FLOAT) && !TARGET_MIPS16
+ && (register_operand (operands[0], DFmode)
+ || reg_or_0_operand (operands[1], DFmode))"
+ { return mips_output_move (operands[0], operands[1]); }
+ [(set_attr "move_type" "move,load,store")
+ (set_attr "mode" "DF")])
+
+(define_insn "*movdf_mips16"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=d,y,d,d,m")
+ (match_operand:DF 1 "move_operand" "d,d,y,m,d"))]
+ "TARGET_MIPS16
+ && (register_operand (operands[0], DFmode)
+ || register_operand (operands[1], DFmode))"
+ { return mips_output_move (operands[0], operands[1]); }
+ [(set_attr "move_type" "move,move,move,load,store")
+ (set_attr "mode" "DF")])
+
+;; 128-bit integer moves
+
+(define_expand "movti"
+ [(set (match_operand:TI 0)
+ (match_operand:TI 1))]
+ "TARGET_64BIT"
+{
+ if (mips_legitimize_move (TImode, operands[0], operands[1]))
+ DONE;
+})
+
+(define_insn "*movti"
+ [(set (match_operand:TI 0 "nonimmediate_operand" "=d,d,d,m,*a,*a,*d")
+ (match_operand:TI 1 "move_operand" "d,i,m,dJ,*J,*d,*a"))]
+ "TARGET_64BIT
+ && !TARGET_MIPS16
+ && (register_operand (operands[0], TImode)
+ || reg_or_0_operand (operands[1], TImode))"
+ { return mips_output_move (operands[0], operands[1]); }
+ [(set_attr "move_type" "move,const,load,store,imul,mtlo,mflo")
+ (set (attr "mode")
+ (if_then_else (eq_attr "move_type" "imul")
+ (const_string "SI")
+ (const_string "TI")))])
+
+(define_insn "*movti_mips16"
+ [(set (match_operand:TI 0 "nonimmediate_operand" "=d,y,d,d,d,d,m,*d")
+ (match_operand:TI 1 "move_operand" "d,d,y,K,N,m,d,*a"))]
+ "TARGET_64BIT
+ && TARGET_MIPS16
+ && (register_operand (operands[0], TImode)
+ || register_operand (operands[1], TImode))"
+ "#"
+ [(set_attr "move_type" "move,move,move,const,constN,load,store,mflo")
+ (set_attr "mode" "TI")])
+
+;; 128-bit floating point moves
+
+(define_expand "movtf"
+ [(set (match_operand:TF 0)
+ (match_operand:TF 1))]
+ "TARGET_64BIT"
+{
+ if (mips_legitimize_move (TFmode, operands[0], operands[1]))
+ DONE;
+})
+
+;; This pattern handles both hard- and soft-float cases.
+(define_insn "*movtf"
+ [(set (match_operand:TF 0 "nonimmediate_operand" "=d,d,m,f,d,f,m")
+ (match_operand:TF 1 "move_operand" "dG,m,dG,dG,f,m,f"))]
+ "TARGET_64BIT
+ && !TARGET_MIPS16
+ && (register_operand (operands[0], TFmode)
+ || reg_or_0_operand (operands[1], TFmode))"
+ "#"
+ [(set_attr "move_type" "move,load,store,mtc,mfc,fpload,fpstore")
+ (set_attr "mode" "TF")])
+
+(define_insn "*movtf_mips16"
+ [(set (match_operand:TF 0 "nonimmediate_operand" "=d,y,d,d,m")
+ (match_operand:TF 1 "move_operand" "d,d,y,m,d"))]
+ "TARGET_64BIT
+ && TARGET_MIPS16
+ && (register_operand (operands[0], TFmode)
+ || register_operand (operands[1], TFmode))"
+ "#"
+ [(set_attr "move_type" "move,move,move,load,store")
+ (set_attr "mode" "TF")])
+
+;; Post-reload split of doubleword moves into word-size pieces when
+;; mips_split_move_insn_p says the move cannot be done in one insn.
+(define_split
+ [(set (match_operand:MOVE64 0 "nonimmediate_operand")
+ (match_operand:MOVE64 1 "move_operand"))]
+ "reload_completed && mips_split_move_insn_p (operands[0], operands[1], insn)"
+ [(const_int 0)]
+{
+ mips_split_move_insn (operands[0], operands[1], curr_insn);
+ DONE;
+})
+
+;; Same, for 128-bit (TI/TF) moves.
+(define_split
+ [(set (match_operand:MOVE128 0 "nonimmediate_operand")
+ (match_operand:MOVE128 1 "move_operand"))]
+ "reload_completed && mips_split_move_insn_p (operands[0], operands[1], insn)"
+ [(const_int 0)]
+{
+ mips_split_move_insn (operands[0], operands[1], curr_insn);
+ DONE;
+})
+
+;; When generating mips16 code, split moves of negative constants into
+;; a positive "li" followed by a negation.
+(define_split
+ [(set (match_operand 0 "d_operand")
+ (match_operand 1 "const_int_operand"))]
+ "TARGET_MIPS16 && reload_completed && INTVAL (operands[1]) < 0"
+ [(set (match_dup 2)
+ (match_dup 3))
+ (set (match_dup 2)
+ (neg:SI (match_dup 2)))]
+{
+ operands[2] = gen_lowpart (SImode, operands[0]);
+ operands[3] = GEN_INT (-INTVAL (operands[1]));
+})
+
+;; 64-bit paired-single floating point moves
+
+(define_expand "movv2sf"
+ [(set (match_operand:V2SF 0)
+ (match_operand:V2SF 1))]
+ "TARGET_HARD_FLOAT && TARGET_PAIRED_SINGLE_FLOAT"
+{
+ if (mips_legitimize_move (V2SFmode, operands[0], operands[1]))
+ DONE;
+})
+
+(define_insn "*movv2sf"
+ [(set (match_operand:V2SF 0 "nonimmediate_operand" "=f,f,f,m,m,*f,*d,*d,*d,*m")
+ (match_operand:V2SF 1 "move_operand" "f,YG,m,f,YG,*d,*f,*d*YG,*m,*d"))]
+ "TARGET_HARD_FLOAT
+ && TARGET_PAIRED_SINGLE_FLOAT
+ && (register_operand (operands[0], V2SFmode)
+ || reg_or_0_operand (operands[1], V2SFmode))"
+ { return mips_output_move (operands[0], operands[1]); }
+ [(set_attr "move_type" "fmove,mtc,fpload,fpstore,store,mtc,mfc,move,load,store")
+ (set_attr "mode" "DF")])
+
+;; Extract the high part of a HI/LO value. See mips_hard_regno_mode_ok_p
+;; for the reason why we can't just use (reg:GPR HI_REGNUM).
+;;
+;; When generating VR4120 or VR4130 code, we use MACCHI and DMACCHI
+;; instead of MFHI. This avoids both the normal MIPS III hi/lo hazards
+;; and the errata related to -mfix-vr4130.
+(define_insn "mfhi<GPR:mode>_<HILO:mode>"
+ [(set (match_operand:GPR 0 "register_operand" "=d")
+ (unspec:GPR [(match_operand:HILO 1 "hilo_operand" "x")]
+ UNSPEC_MFHI))]
+ ""
+ { return ISA_HAS_MACCHI ? "<GPR:d>macchi\t%0,%.,%." : "mfhi\t%0"; }
+ [(set_attr "type" "mfhi")
+ (set_attr "mode" "<GPR:MODE>")])
+
+;; Set the high part of a HI/LO value, given that the low part has
+;; already been set. See mips_hard_regno_mode_ok_p for the reason
+;; why we can't just use (reg:GPR HI_REGNUM).
+(define_insn "mthi<GPR:mode>_<HILO:mode>"
+ [(set (match_operand:HILO 0 "register_operand" "=x")
+ (unspec:HILO [(match_operand:GPR 1 "reg_or_0_operand" "dJ")
+ (match_operand:GPR 2 "register_operand" "l")]
+ UNSPEC_MTHI))]
+ ""
+ "mthi\t%z1"
+ [(set_attr "type" "mthi")
+ (set_attr "mode" "SI")])
+
+;; Emit a doubleword move in which exactly one of the operands is
+;; a floating-point register. We can't just emit two normal moves
+;; because of the constraints imposed by the FPU register model;
+;; see mips_cannot_change_mode_class for details. Instead, we keep
+;; the FPR whole and use special patterns to refer to each word of
+;; the other operand.
+
+(define_expand "move_doubleword_fpr<mode>"
+ [(set (match_operand:SPLITF 0)
+ (match_operand:SPLITF 1))]
+ ""
+{
+ if (FP_REG_RTX_P (operands[0]))
+ {
+ rtx low = mips_subword (operands[1], 0);
+ rtx high = mips_subword (operands[1], 1);
+ emit_insn (gen_load_low<mode> (operands[0], low));
+ if (TARGET_FLOAT64 && !TARGET_64BIT)
+ emit_insn (gen_mthc1<mode> (operands[0], high, operands[0]));
+ else
+ emit_insn (gen_load_high<mode> (operands[0], high, operands[0]));
+ }
+ else
+ {
+ rtx low = mips_subword (operands[0], 0);
+ rtx high = mips_subword (operands[0], 1);
+ emit_insn (gen_store_word<mode> (low, operands[1], const0_rtx));
+ if (TARGET_FLOAT64 && !TARGET_64BIT)
+ emit_insn (gen_mfhc1<mode> (high, operands[1]));
+ else
+ emit_insn (gen_store_word<mode> (high, operands[1], const1_rtx));
+ }
+ DONE;
+})
+
+;; Load the low word of operand 0 with operand 1.
+(define_insn "load_low<mode>"
+ [(set (match_operand:SPLITF 0 "register_operand" "=f,f")
+ (unspec:SPLITF [(match_operand:<HALFMODE> 1 "general_operand" "dJ,m")]
+ UNSPEC_LOAD_LOW))]
+ "TARGET_HARD_FLOAT"
+{
+ operands[0] = mips_subword (operands[0], 0);
+ return mips_output_move (operands[0], operands[1]);
+}
+ [(set_attr "move_type" "mtc,fpload")
+ (set_attr "mode" "<HALFMODE>")])
+
+;; Load the high word of operand 0 from operand 1, preserving the value
+;; in the low word.
+(define_insn "load_high<mode>"
+ [(set (match_operand:SPLITF 0 "register_operand" "=f,f")
+ (unspec:SPLITF [(match_operand:<HALFMODE> 1 "general_operand" "dJ,m")
+ (match_operand:SPLITF 2 "register_operand" "0,0")]
+ UNSPEC_LOAD_HIGH))]
+ "TARGET_HARD_FLOAT"
+{
+ operands[0] = mips_subword (operands[0], 1);
+ return mips_output_move (operands[0], operands[1]);
+}
+ [(set_attr "move_type" "mtc,fpload")
+ (set_attr "mode" "<HALFMODE>")])
+
+;; Store one word of operand 1 in operand 0. Operand 2 is 1 to store the
+;; high word and 0 to store the low word.
+(define_insn "store_word<mode>"
+ [(set (match_operand:<HALFMODE> 0 "nonimmediate_operand" "=d,m")
+ (unspec:<HALFMODE> [(match_operand:SPLITF 1 "register_operand" "f,f")
+ (match_operand 2 "const_int_operand")]
+ UNSPEC_STORE_WORD))]
+ "TARGET_HARD_FLOAT"
+{
+ operands[1] = mips_subword (operands[1], INTVAL (operands[2]));
+ return mips_output_move (operands[0], operands[1]);
+}
+ [(set_attr "move_type" "mfc,fpstore")
+ (set_attr "mode" "<HALFMODE>")])
+
+;; Move operand 1 to the high word of operand 0 using mthc1, preserving the
+;; value in the low word.
+(define_insn "mthc1<mode>"
+ [(set (match_operand:SPLITF 0 "register_operand" "=f")
+ (unspec:SPLITF [(match_operand:<HALFMODE> 1 "reg_or_0_operand" "dJ")
+ (match_operand:SPLITF 2 "register_operand" "0")]
+ UNSPEC_MTHC1))]
+ "TARGET_HARD_FLOAT && ISA_HAS_MXHC1"
+ "mthc1\t%z1,%0"
+ [(set_attr "move_type" "mtc")
+ (set_attr "mode" "<HALFMODE>")])
+
+;; Move high word of operand 1 to operand 0 using mfhc1.
+(define_insn "mfhc1<mode>"
+ [(set (match_operand:<HALFMODE> 0 "register_operand" "=d")
+ (unspec:<HALFMODE> [(match_operand:SPLITF 1 "register_operand" "f")]
+ UNSPEC_MFHC1))]
+ "TARGET_HARD_FLOAT && ISA_HAS_MXHC1"
+ "mfhc1\t%0,%1"
+ [(set_attr "move_type" "mfc")
+ (set_attr "mode" "<HALFMODE>")])
+
+;; Move a constant that satisfies CONST_GP_P into operand 0.
+(define_expand "load_const_gp_<mode>"
+ [(set (match_operand:P 0 "register_operand" "=d")
+ (const:P (unspec:P [(const_int 0)] UNSPEC_GP)))])
+
+;; Insn to initialize $gp for n32/n64 abicalls. Operand 0 is the offset
+;; of _gp from the start of this function. Operand 1 is the incoming
+;; function address.
+(define_insn_and_split "loadgp_newabi_<mode>"
+ [(set (match_operand:P 0 "register_operand" "=&d")
+ (unspec:P [(match_operand:P 1)
+ (match_operand:P 2 "register_operand" "d")]
+ UNSPEC_LOADGP))]
+ "mips_current_loadgp_style () == LOADGP_NEWABI"
+ { return mips_must_initialize_gp_p () ? "#" : ""; }
+ "&& mips_must_initialize_gp_p ()"
+ [(set (match_dup 0) (match_dup 3))
+ (set (match_dup 0) (match_dup 4))
+ (set (match_dup 0) (match_dup 5))]
+{
+ operands[3] = gen_rtx_HIGH (Pmode, operands[1]);
+ operands[4] = gen_rtx_PLUS (Pmode, operands[0], operands[2]);
+ operands[5] = gen_rtx_LO_SUM (Pmode, operands[0], operands[1]);
+}
+ [(set_attr "type" "ghost")])
+
+;; Likewise, for -mno-shared code. Operand 0 is the __gnu_local_gp symbol.
+(define_insn_and_split "loadgp_absolute_<mode>"
+ [(set (match_operand:P 0 "register_operand" "=d")
+ (unspec:P [(match_operand:P 1)] UNSPEC_LOADGP))]
+ "mips_current_loadgp_style () == LOADGP_ABSOLUTE"
+ { return mips_must_initialize_gp_p () ? "#" : ""; }
+ "&& mips_must_initialize_gp_p ()"
+ [(const_int 0)]
+{
+ mips_emit_move (operands[0], operands[1]);
+ DONE;
+}
+ [(set_attr "type" "ghost")])
+
+;; This blockage instruction prevents the gp load from being
+;; scheduled after an implicit use of gp. It also prevents
+;; the load from being deleted as dead.
+(define_insn "loadgp_blockage"
+ [(unspec_volatile [(reg:SI 28)] UNSPEC_BLOCKAGE)]
+ ""
+ ""
+ [(set_attr "type" "ghost")])
+
+;; Initialize $gp for RTP PIC. Operand 0 is the __GOTT_BASE__ symbol
+;; and operand 1 is the __GOTT_INDEX__ symbol.
+(define_insn_and_split "loadgp_rtp_<mode>"
+ [(set (match_operand:P 0 "register_operand" "=d")
+ (unspec:P [(match_operand:P 1 "symbol_ref_operand")
+ (match_operand:P 2 "symbol_ref_operand")]
+ UNSPEC_LOADGP))]
+ "mips_current_loadgp_style () == LOADGP_RTP"
+ { return mips_must_initialize_gp_p () ? "#" : ""; }
+ "&& mips_must_initialize_gp_p ()"
+ [(set (match_dup 0) (high:P (match_dup 3)))
+ (set (match_dup 0) (unspec:P [(match_dup 0)
+ (match_dup 3)] UNSPEC_LOAD_GOT))
+ (set (match_dup 0) (unspec:P [(match_dup 0)
+ (match_dup 4)] UNSPEC_LOAD_GOT))]
+{
+ operands[3] = mips_unspec_address (operands[1], SYMBOL_ABSOLUTE);
+ operands[4] = mips_unspec_address (operands[2], SYMBOL_HALF);
+}
+ [(set_attr "type" "ghost")])
+
+;; Initialize the global pointer for MIPS16 code. Operand 0 is the
+;; global pointer and operand 1 is the MIPS16 register that holds
+;; the required value.
+(define_insn_and_split "copygp_mips16_<mode>"
+ [(set (match_operand:P 0 "register_operand" "=y")
+ (unspec:P [(match_operand:P 1 "register_operand" "d")]
+ UNSPEC_COPYGP))]
+ "TARGET_MIPS16"
+ { return mips_must_initialize_gp_p () ? "#" : ""; }
+ "&& mips_must_initialize_gp_p ()"
+ [(set (match_dup 0) (match_dup 1))]
+ ""
+ [(set_attr "type" "ghost")])
+
+;; A placeholder for where the cprestore instruction should go,
+;; if we decide we need one. Operand 0 and operand 1 are as for
+;; "cprestore". Operand 2 is a register that holds the gp value.
+;;
+;; The "cprestore" pattern requires operand 2 to be pic_offset_table_rtx,
+;; otherwise any register that holds the correct value will do.
+(define_insn_and_split "potential_cprestore_<mode>"
+ [(set (match_operand:P 0 "cprestore_save_slot_operand" "=X,X")
+ (unspec:P [(match_operand:P 1 "const_int_operand" "I,i")
+ (match_operand:P 2 "register_operand" "d,d")]
+ UNSPEC_POTENTIAL_CPRESTORE))
+ (clobber (match_operand:P 3 "scratch_operand" "=X,&d"))]
+ "!TARGET_CPRESTORE_DIRECTIVE || operands[2] == pic_offset_table_rtx"
+ { return mips_must_initialize_gp_p () ? "#" : ""; }
+ "mips_must_initialize_gp_p ()"
+ [(const_int 0)]
+{
+ mips_save_gp_to_cprestore_slot (operands[0], operands[1],
+ operands[2], operands[3]);
+ DONE;
+}
+ [(set_attr "type" "ghost")])
+
+;; Emit a .cprestore directive, which normally expands to a single store
+;; instruction. Operand 0 is a (possibly illegitimate) sp-based MEM
+;; for the cprestore slot. Operand 1 is the offset of the slot from
+;; the stack pointer. (This is redundant with operand 0, but it makes
+;; things a little simpler.)
+(define_insn "cprestore_<mode>"
+ [(set (match_operand:P 0 "cprestore_save_slot_operand" "=X,X")
+ (unspec:P [(match_operand:P 1 "const_int_operand" "I,i")
+ (reg:P 28)]
+ UNSPEC_CPRESTORE))]
+ "TARGET_CPRESTORE_DIRECTIVE"
+{
+ if (mips_nomacro.nesting_level > 0 && which_alternative == 1)
+ return ".set\tmacro\;.cprestore\t%1\;.set\tnomacro";
+ else
+ return ".cprestore\t%1";
+}
+ [(set_attr "type" "store")
+ (set_attr "insn_count" "1,3")])
+
+(define_insn "use_cprestore_<mode>"
+ [(set (reg:P CPRESTORE_SLOT_REGNUM)
+ (match_operand:P 0 "cprestore_load_slot_operand"))]
+ ""
+ ""
+ [(set_attr "type" "ghost")])
+
+;; Expand in-line code to clear the instruction cache between operand[0] and
+;; operand[1].
+(define_expand "clear_cache"
+ [(match_operand 0 "pmode_register_operand")
+ (match_operand 1 "pmode_register_operand")]
+ ""
+ "
+{
+ if (TARGET_SYNCI)
+ {
+ mips_expand_synci_loop (operands[0], operands[1]);
+ emit_insn (gen_sync ());
+ emit_insn (PMODE_INSN (gen_clear_hazard, ()));
+ }
+ else if (mips_cache_flush_func && mips_cache_flush_func[0])
+ {
+ rtx len = gen_reg_rtx (Pmode);
+ emit_insn (gen_sub3_insn (len, operands[1], operands[0]));
+ MIPS_ICACHE_SYNC (operands[0], len);
+ }
+ DONE;
+}")
+
+;; Full memory barrier.  The exact assembly (SYNC variant or workaround
+;; sequence) is chosen by mips_output_sync.
+(define_insn "sync"
+ [(unspec_volatile [(const_int 0)] UNSPEC_SYNC)]
+ "GENERATE_SYNC"
+ { return mips_output_sync (); })
+
+;; Synchronize the instruction cache for the cache line containing the
+;; address in operand 0.
+(define_insn "synci"
+ [(unspec_volatile [(match_operand 0 "pmode_register_operand" "d")]
+ UNSPEC_SYNCI)]
+ "TARGET_SYNCI"
+ "synci\t0(%0)")
+
+;; Read hardware register $1 (SYNCI_Step, hence the (const_int 1)) into
+;; operand 0; it gives the address increment for a SYNCI loop.
+(define_insn "rdhwr_synci_step_<mode>"
+ [(set (match_operand:P 0 "register_operand" "=d")
+ (unspec_volatile [(const_int 1)]
+ UNSPEC_RDHWR))]
+ "ISA_HAS_SYNCI"
+ "rdhwr\t%0,$1")
+
+;; Clear all execution and instruction hazards.  BAL loads the return
+;; address into $31, which we then advance past this five-instruction
+;; sequence (12 bytes after the label) and use for a hazard-barrier
+;; return, JR.HB.  $31 (RETURN_ADDR_REGNUM) is clobbered in the process.
+(define_insn "clear_hazard_<mode>"
+ [(unspec_volatile [(const_int 0)] UNSPEC_CLEAR_HAZARD)
+ (clobber (reg:P RETURN_ADDR_REGNUM))]
+ "ISA_HAS_SYNCI"
+{
+ return "%(%<bal\t1f\n"
+ "\tnop\n"
+ "1:\t<d>addiu\t$31,$31,12\n"
+ "\tjr.hb\t$31\n"
+ "\tnop%>%)";
+}
+ [(set_attr "insn_count" "5")])
+
+;; Cache operations for R4000-style caches.
+(define_insn "mips_cache"
+ [(set (mem:BLK (scratch))
+ (unspec:BLK [(match_operand:SI 0 "const_int_operand")
+ (match_operand:QI 1 "address_operand" "p")]
+ UNSPEC_MIPS_CACHE))]
+ "ISA_HAS_CACHE"
+ "cache\t%X0,%a1")
+
+;; Similar, but with the operands hard-coded to an R10K cache barrier
+;; operation. We keep the pattern distinct so that we can identify
+;; cache operations inserted by -mr10k-cache-barrier=, and so that
+;; the operation is never inserted into a delay slot.
+(define_insn "r10k_cache_barrier"
+ [(set (mem:BLK (scratch))
+ (unspec:BLK [(const_int 0)] UNSPEC_R10K_CACHE_BARRIER))]
+ "ISA_HAS_CACHE"
+ "cache\t0x14,0(%$)"
+ [(set_attr "can_delay" "no")])
+
+;; Block moves, see mips.c for more details.
+;; Argument 0 is the destination
+;; Argument 1 is the source
+;; Argument 2 is the length
+;; Argument 3 is the alignment
+
+(define_expand "movmemsi"
+ [(parallel [(set (match_operand:BLK 0 "general_operand")
+ (match_operand:BLK 1 "general_operand"))
+ (use (match_operand:SI 2 ""))
+ (use (match_operand:SI 3 "const_int_operand"))])]
+ "!TARGET_MIPS16 && !TARGET_MEMCPY"
+{
+ if (mips_expand_block_move (operands[0], operands[1], operands[2]))
+ DONE;
+ else
+ FAIL;
+})
+
+;;
+;; ....................
+;;
+;; SHIFTS
+;;
+;; ....................
+
+(define_expand "<optab><mode>3"
+ [(set (match_operand:GPR 0 "register_operand")
+ (any_shift:GPR (match_operand:GPR 1 "register_operand")
+ (match_operand:SI 2 "arith_operand")))]
+ ""
+{
+ /* On the mips16, a shift of more than 8 is a four byte instruction,
+ so, for a shift between 8 and 16, it is just as fast to do two
+ shifts of 8 or less. If there is a lot of shifting going on, we
+ may win in CSE. Otherwise combine will put the shifts back
+ together again. This can be called by mips_function_arg, so we must
+ be careful not to allocate a new register if we've reached the
+ reload pass. */
+ if (TARGET_MIPS16
+ && optimize
+ && CONST_INT_P (operands[2])
+ && INTVAL (operands[2]) > 8
+ && INTVAL (operands[2]) <= 16
+ && !reload_in_progress
+ && !reload_completed)
+ {
+ rtx temp = gen_reg_rtx (<MODE>mode);
+
+ emit_insn (gen_<optab><mode>3 (temp, operands[1], GEN_INT (8)));
+ emit_insn (gen_<optab><mode>3 (operands[0], temp,
+ GEN_INT (INTVAL (operands[2]) - 8)));
+ DONE;
+ }
+})
+
+(define_insn "*<optab><mode>3"
+ [(set (match_operand:GPR 0 "register_operand" "=!u,d")
+ (any_shift:GPR (match_operand:GPR 1 "register_operand" "!u,d")
+ (match_operand:SI 2 "arith_operand" "Uib3,dI")))]
+ "!TARGET_MIPS16"
+{
+ if (CONST_INT_P (operands[2]))
+ operands[2] = GEN_INT (INTVAL (operands[2])
+ & (GET_MODE_BITSIZE (<MODE>mode) - 1));
+
+ return "<d><insn>\t%0,%1,%2";
+}
+ [(set_attr "type" "shift")
+ (set_attr "compression" "<shift_compression>,none")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "*<optab>si3_extend"
+ [(set (match_operand:DI 0 "register_operand" "=d")
+ (sign_extend:DI
+ (any_shift:SI (match_operand:SI 1 "register_operand" "d")
+ (match_operand:SI 2 "arith_operand" "dI"))))]
+ "TARGET_64BIT && !TARGET_MIPS16"
+{
+ if (CONST_INT_P (operands[2]))
+ operands[2] = GEN_INT (INTVAL (operands[2]) & 0x1f);
+
+ return "<insn>\t%0,%1,%2";
+}
+ [(set_attr "type" "shift")
+ (set_attr "mode" "SI")])
+
+(define_insn "*<optab>si3_mips16"
+ [(set (match_operand:SI 0 "register_operand" "=d,d,d")
+ (any_shift:SI (match_operand:SI 1 "register_operand" "0,d,d")
+ (match_operand:SI 2 "arith_operand" "d,Uib3,I")))]
+ "TARGET_MIPS16"
+{
+ if (which_alternative == 0)
+ return "<insn>\t%0,%2";
+
+ operands[2] = GEN_INT (INTVAL (operands[2]) & 0x1f);
+ return "<insn>\t%0,%1,%2";
+}
+ [(set_attr "type" "shift")
+ (set_attr "mode" "SI")
+ (set_attr "extended_mips16" "no,no,yes")])
+
+;; We need separate DImode MIPS16 patterns because of the irregularity
+;; of right shifts.
+(define_insn "*ashldi3_mips16"
+ [(set (match_operand:DI 0 "register_operand" "=d,d,d")
+ (ashift:DI (match_operand:DI 1 "register_operand" "0,d,d")
+ (match_operand:SI 2 "arith_operand" "d,Uib3,I")))]
+ "TARGET_64BIT && TARGET_MIPS16"
+{
+ if (which_alternative == 0)
+ return "dsll\t%0,%2";
+
+ operands[2] = GEN_INT (INTVAL (operands[2]) & 0x3f);
+ return "dsll\t%0,%1,%2";
+}
+ [(set_attr "type" "shift")
+ (set_attr "mode" "DI")
+ (set_attr "extended_mips16" "no,no,yes")])
+
+(define_insn "*ashrdi3_mips16"
+ [(set (match_operand:DI 0 "register_operand" "=d,d,d")
+ (ashiftrt:DI (match_operand:DI 1 "register_operand" "0,0,0")
+ (match_operand:SI 2 "arith_operand" "d,Uib3,I")))]
+ "TARGET_64BIT && TARGET_MIPS16"
+{
+ if (CONST_INT_P (operands[2]))
+ operands[2] = GEN_INT (INTVAL (operands[2]) & 0x3f);
+
+ return "dsra\t%0,%2";
+}
+ [(set_attr "type" "shift")
+ (set_attr "mode" "DI")
+ (set_attr "extended_mips16" "no,no,yes")])
+
+(define_insn "*lshrdi3_mips16"
+ [(set (match_operand:DI 0 "register_operand" "=d,d,d")
+ (lshiftrt:DI (match_operand:DI 1 "register_operand" "0,0,0")
+ (match_operand:SI 2 "arith_operand" "d,Uib3,I")))]
+ "TARGET_64BIT && TARGET_MIPS16"
+{
+ if (CONST_INT_P (operands[2]))
+ operands[2] = GEN_INT (INTVAL (operands[2]) & 0x3f);
+
+ return "dsrl\t%0,%2";
+}
+ [(set_attr "type" "shift")
+ (set_attr "mode" "DI")
+ (set_attr "extended_mips16" "no,no,yes")])
+
+;; On the mips16, we can split a 4 byte shift into 2 2 byte shifts.
+
+(define_split
+ [(set (match_operand:GPR 0 "d_operand")
+ (any_shift:GPR (match_operand:GPR 1 "d_operand")
+ (match_operand:GPR 2 "const_int_operand")))]
+ "TARGET_MIPS16 && reload_completed && !TARGET_DEBUG_D_MODE
+ && INTVAL (operands[2]) > 8
+ && INTVAL (operands[2]) <= 16"
+ [(set (match_dup 0) (any_shift:GPR (match_dup 1) (const_int 8)))
+ (set (match_dup 0) (any_shift:GPR (match_dup 0) (match_dup 2)))]
+ { operands[2] = GEN_INT (INTVAL (operands[2]) - 8); })
+
+;; If we load a byte on the mips16 as a bitfield, the resulting
+;; sequence of instructions is too complicated for combine, because it
+;; involves four instructions: a load, a shift, a constant load into a
+;; register, and an and (the key problem here is that the mips16 does
+;; not have and immediate). We recognize a shift of a load in order
+;; to make it simple enough for combine to understand.
+;;
+;; The instruction count here is the worst case.
+(define_insn_and_split ""
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (lshiftrt:SI (match_operand:SI 1 "memory_operand" "m")
+ (match_operand:SI 2 "immediate_operand" "I")))]
+ "TARGET_MIPS16"
+ "#"
+ ""
+ [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 0) (lshiftrt:SI (match_dup 0) (match_dup 2)))]
+ ""
+ [(set_attr "type" "load")
+ (set_attr "mode" "SI")
+ (set (attr "insn_count")
+ (symbol_ref "mips_load_store_insns (operands[1], insn) + 2"))])
+
+;; Rotate right by a register or immediate amount.  For a constant
+;; amount, assert that it has already been reduced modulo the word size;
+;; the hardware field cannot encode anything larger.
+(define_insn "rotr<mode>3"
+ [(set (match_operand:GPR 0 "register_operand" "=d")
+ (rotatert:GPR (match_operand:GPR 1 "register_operand" "d")
+ (match_operand:SI 2 "arith_operand" "dI")))]
+ "ISA_HAS_ROR"
+{
+ if (CONST_INT_P (operands[2]))
+ gcc_assert (INTVAL (operands[2]) >= 0
+ && INTVAL (operands[2]) < GET_MODE_BITSIZE (<MODE>mode));
+
+ return "<d>ror\t%0,%1,%2";
+}
+ [(set_attr "type" "shift")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "bswaphi2"
+ [(set (match_operand:HI 0 "register_operand" "=d")
+ (bswap:HI (match_operand:HI 1 "register_operand" "d")))]
+ "ISA_HAS_WSBH"
+ "wsbh\t%0,%1"
+ [(set_attr "type" "shift")])
+
+(define_insn_and_split "bswapsi2"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (bswap:SI (match_operand:SI 1 "register_operand" "d")))]
+ "ISA_HAS_WSBH && ISA_HAS_ROR"
+ "#"
+ ""
+ [(set (match_dup 0) (unspec:SI [(match_dup 1)] UNSPEC_WSBH))
+ (set (match_dup 0) (rotatert:SI (match_dup 0) (const_int 16)))]
+ ""
+ [(set_attr "insn_count" "2")])
+
+(define_insn_and_split "bswapdi2"
+ [(set (match_operand:DI 0 "register_operand" "=d")
+ (bswap:DI (match_operand:DI 1 "register_operand" "d")))]
+ "TARGET_64BIT && ISA_HAS_WSBH"
+ "#"
+ ""
+ [(set (match_dup 0) (unspec:DI [(match_dup 1)] UNSPEC_DSBH))
+ (set (match_dup 0) (unspec:DI [(match_dup 0)] UNSPEC_DSHD))]
+ ""
+ [(set_attr "insn_count" "2")])
+
+(define_insn "wsbh"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (unspec:SI [(match_operand:SI 1 "register_operand" "d")] UNSPEC_WSBH))]
+ "ISA_HAS_WSBH"
+ "wsbh\t%0,%1"
+ [(set_attr "type" "shift")])
+
+(define_insn "dsbh"
+ [(set (match_operand:DI 0 "register_operand" "=d")
+ (unspec:DI [(match_operand:DI 1 "register_operand" "d")] UNSPEC_DSBH))]
+ "TARGET_64BIT && ISA_HAS_WSBH"
+ "dsbh\t%0,%1"
+ [(set_attr "type" "shift")])
+
+(define_insn "dshd"
+ [(set (match_operand:DI 0 "register_operand" "=d")
+ (unspec:DI [(match_operand:DI 1 "register_operand" "d")] UNSPEC_DSHD))]
+ "TARGET_64BIT && ISA_HAS_WSBH"
+ "dshd\t%0,%1"
+ [(set_attr "type" "shift")])
+
+;;
+;; ....................
+;;
+;; CONDITIONAL BRANCHES
+;;
+;; ....................
+
+;; Conditional branches on floating-point equality tests.
+
+(define_insn "*branch_fp"
+ [(set (pc)
+ (if_then_else
+ (match_operator 1 "equality_operator"
+ [(match_operand:CC 2 "register_operand" "z")
+ (const_int 0)])
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ "TARGET_HARD_FLOAT"
+{
+ return mips_output_conditional_branch (insn, operands,
+ MIPS_BRANCH ("b%F1", "%Z2%0"),
+ MIPS_BRANCH ("b%W1", "%Z2%0"));
+}
+ [(set_attr "type" "branch")])
+
+(define_insn "*branch_fp_inverted"
+ [(set (pc)
+ (if_then_else
+ (match_operator 1 "equality_operator"
+ [(match_operand:CC 2 "register_operand" "z")
+ (const_int 0)])
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ "TARGET_HARD_FLOAT"
+{
+ return mips_output_conditional_branch (insn, operands,
+ MIPS_BRANCH ("b%W1", "%Z2%0"),
+ MIPS_BRANCH ("b%F1", "%Z2%0"));
+}
+ [(set_attr "type" "branch")])
+
+;; Conditional branches on ordered comparisons with zero.
+
+(define_insn "*branch_order<mode>"
+ [(set (pc)
+ (if_then_else
+ (match_operator 1 "order_operator"
+ [(match_operand:GPR 2 "register_operand" "d")
+ (const_int 0)])
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ "!TARGET_MIPS16"
+ { return mips_output_order_conditional_branch (insn, operands, false); }
+ [(set_attr "type" "branch")])
+
+(define_insn "*branch_order<mode>_inverted"
+ [(set (pc)
+ (if_then_else
+ (match_operator 1 "order_operator"
+ [(match_operand:GPR 2 "register_operand" "d")
+ (const_int 0)])
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ "!TARGET_MIPS16"
+ { return mips_output_order_conditional_branch (insn, operands, true); }
+ [(set_attr "type" "branch")])
+
+;; Conditional branch on equality comparison.
+
+(define_insn "*branch_equality<mode>"
+ [(set (pc)
+ (if_then_else
+ (match_operator 1 "equality_operator"
+ [(match_operand:GPR 2 "register_operand" "d")
+ (match_operand:GPR 3 "reg_or_0_operand" "dJ")])
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ "!TARGET_MIPS16"
+{
+ /* For a simple BNEZ or BEQZ microMIPS branch. */
+ if (TARGET_MICROMIPS
+ && operands[3] == const0_rtx
+ && get_attr_length (insn) <= 8)
+ return mips_output_conditional_branch (insn, operands,
+ "%*b%C1z%:\t%2,%0",
+ "%*b%N1z%:\t%2,%0");
+
+ return mips_output_conditional_branch (insn, operands,
+ MIPS_BRANCH ("b%C1", "%2,%z3,%0"),
+ MIPS_BRANCH ("b%N1", "%2,%z3,%0"));
+}
+ [(set_attr "type" "branch")])
+
+;; Inverted conditional branch on equality: branch to the label when the
+;; comparison is FALSE.  Operand 0 is the label, operand 1 the equality
+;; operator, operands 2 and 3 the compared values.
+;;
+;; Note: the microMIPS templates must use the same operand numbering as
+;; the non-inverted pattern above: %N1z/%C1z apply the (negated)
+;; condition escape to operand 1 (the operator) and the branch target is
+;; operand 0.  The previous %N0z/%C0z ... %2,%1 forms indexed the label
+;; as the condition and printed the operator as the target.
+(define_insn "*branch_equality<mode>_inverted"
+ [(set (pc)
+ (if_then_else
+ (match_operator 1 "equality_operator"
+ [(match_operand:GPR 2 "register_operand" "d")
+ (match_operand:GPR 3 "reg_or_0_operand" "dJ")])
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ "!TARGET_MIPS16"
+{
+ /* For a simple BNEZ or BEQZ microMIPS branch.  */
+ if (TARGET_MICROMIPS
+ && operands[3] == const0_rtx
+ && get_attr_length (insn) <= 8)
+ return mips_output_conditional_branch (insn, operands,
+ "%*b%N1z%:\t%2,%0",
+ "%*b%C1z%:\t%2,%0");
+
+ return mips_output_conditional_branch (insn, operands,
+ MIPS_BRANCH ("b%N1", "%2,%z3,%0"),
+ MIPS_BRANCH ("b%C1", "%2,%z3,%0"));
+}
+ [(set_attr "type" "branch")])
+
+;; MIPS16 branches
+
+(define_insn "*branch_equality<mode>_mips16"
+ [(set (pc)
+ (if_then_else
+ (match_operator 1 "equality_operator"
+ [(match_operand:GPR 2 "register_operand" "d,t")
+ (const_int 0)])
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ "TARGET_MIPS16"
+ "@
+ b%C1z\t%2,%0
+ bt%C1z\t%0"
+ [(set_attr "type" "branch")])
+
+(define_insn "*branch_equality<mode>_mips16_inverted"
+ [(set (pc)
+ (if_then_else
+ (match_operator 1 "equality_operator"
+ [(match_operand:GPR 2 "register_operand" "d,t")
+ (const_int 0)])
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ "TARGET_MIPS16"
+ "@
+ b%N1z\t%2,%0
+ bt%N1z\t%0"
+ [(set_attr "type" "branch")])
+
+;; Integer compare-and-branch.  Operand 0 is the comparison operator,
+;; operands 1 and 2 the values, operand 3 the label.  All the work is
+;; done by mips_expand_conditional_branch.
+(define_expand "cbranch<mode>4"
+ [(set (pc)
+ (if_then_else (match_operator 0 "comparison_operator"
+ [(match_operand:GPR 1 "register_operand")
+ (match_operand:GPR 2 "nonmemory_operand")])
+ (label_ref (match_operand 3 ""))
+ (pc)))]
+ ""
+{
+ mips_expand_conditional_branch (operands);
+ DONE;
+})
+
+;; Floating-point compare-and-branch; same operand layout as the integer
+;; version, but both compared values must be FPRs.
+(define_expand "cbranch<mode>4"
+ [(set (pc)
+ (if_then_else (match_operator 0 "comparison_operator"
+ [(match_operand:SCALARF 1 "register_operand")
+ (match_operand:SCALARF 2 "register_operand")])
+ (label_ref (match_operand 3 ""))
+ (pc)))]
+ ""
+{
+ mips_expand_conditional_branch (operands);
+ DONE;
+})
+
+;; Used to implement built-in functions.
+(define_expand "condjump"
+ [(set (pc)
+ (if_then_else (match_operand 0)
+ (label_ref (match_operand 1))
+ (pc)))])
+
+;; Branch if bit is set/clear.
+
+(define_insn "*branch_bit<bbv><mode>"
+ [(set (pc)
+ (if_then_else
+ (equality_op (zero_extract:GPR
+ (match_operand:GPR 1 "register_operand" "d")
+ (const_int 1)
+ (match_operand 2 "const_int_operand" ""))
+ (const_int 0))
+ (label_ref (match_operand 0 ""))
+ (pc)))]
+ "ISA_HAS_BBIT && UINTVAL (operands[2]) < GET_MODE_BITSIZE (<MODE>mode)"
+{
+ return
+ mips_output_conditional_branch (insn, operands,
+ MIPS_BRANCH ("bbit<bbv>", "%1,%2,%0"),
+ MIPS_BRANCH ("bbit<bbinv>", "%1,%2,%0"));
+}
+ [(set_attr "type" "branch")
+ (set_attr "branch_likely" "no")])
+
+(define_insn "*branch_bit<bbv><mode>_inverted"
+ [(set (pc)
+ (if_then_else
+ (equality_op (zero_extract:GPR
+ (match_operand:GPR 1 "register_operand" "d")
+ (const_int 1)
+ (match_operand 2 "const_int_operand" ""))
+ (const_int 0))
+ (pc)
+ (label_ref (match_operand 0 ""))))]
+ "ISA_HAS_BBIT && UINTVAL (operands[2]) < GET_MODE_BITSIZE (<MODE>mode)"
+{
+ return
+ mips_output_conditional_branch (insn, operands,
+ MIPS_BRANCH ("bbit<bbinv>", "%1,%2,%0"),
+ MIPS_BRANCH ("bbit<bbv>", "%1,%2,%0"));
+}
+ [(set_attr "type" "branch")
+ (set_attr "branch_likely" "no")])
+
+;;
+;; ....................
+;;
+;; SETTING A REGISTER FROM A COMPARISON
+;;
+;; ....................
+
+;; Destination is always set in SI mode.
+
+(define_expand "cstore<mode>4"
+ [(set (match_operand:SI 0 "register_operand")
+ (match_operator:SI 1 "mips_cstore_operator"
+ [(match_operand:GPR 2 "register_operand")
+ (match_operand:GPR 3 "nonmemory_operand")]))]
+ ""
+{
+ mips_expand_scc (operands);
+ DONE;
+})
+
+(define_insn "*seq_zero_<GPR:mode><GPR2:mode>"
+ [(set (match_operand:GPR2 0 "register_operand" "=d")
+ (eq:GPR2 (match_operand:GPR 1 "register_operand" "d")
+ (const_int 0)))]
+ "!TARGET_MIPS16 && !ISA_HAS_SEQ_SNE"
+ "sltu\t%0,%1,1"
+ [(set_attr "type" "slt")
+ (set_attr "mode" "<GPR:MODE>")])
+
+(define_insn "*seq_zero_<GPR:mode><GPR2:mode>_mips16"
+ [(set (match_operand:GPR2 0 "register_operand" "=t")
+ (eq:GPR2 (match_operand:GPR 1 "register_operand" "d")
+ (const_int 0)))]
+ "TARGET_MIPS16 && !ISA_HAS_SEQ_SNE"
+ "sltu\t%1,1"
+ [(set_attr "type" "slt")
+ (set_attr "mode" "<GPR:MODE>")])
+
+;; Generate sltiu unless using seq results in better code.
+(define_insn "*seq_<GPR:mode><GPR2:mode>_seq"
+ [(set (match_operand:GPR2 0 "register_operand" "=d,d,d")
+ (eq:GPR2 (match_operand:GPR 1 "register_operand" "%d,d,d")
+ (match_operand:GPR 2 "reg_imm10_operand" "d,J,YB")))]
+ "ISA_HAS_SEQ_SNE"
+ "@
+ seq\t%0,%1,%2
+ sltiu\t%0,%1,1
+ seqi\t%0,%1,%2"
+ [(set_attr "type" "slt")
+ (set_attr "mode" "<GPR:MODE>")])
+
+(define_insn "*sne_zero_<GPR:mode><GPR2:mode>"
+ [(set (match_operand:GPR2 0 "register_operand" "=d")
+ (ne:GPR2 (match_operand:GPR 1 "register_operand" "d")
+ (const_int 0)))]
+ "!TARGET_MIPS16 && !ISA_HAS_SEQ_SNE"
+ "sltu\t%0,%.,%1"
+ [(set_attr "type" "slt")
+ (set_attr "mode" "<GPR:MODE>")])
+
+;; Generate sltu unless using sne results in better code.
+(define_insn "*sne_<GPR:mode><GPR2:mode>_sne"
+ [(set (match_operand:GPR2 0 "register_operand" "=d,d,d")
+ (ne:GPR2 (match_operand:GPR 1 "register_operand" "%d,d,d")
+ (match_operand:GPR 2 "reg_imm10_operand" "d,J,YB")))]
+ "ISA_HAS_SEQ_SNE"
+ "@
+ sne\t%0,%1,%2
+ sltu\t%0,%.,%1
+ snei\t%0,%1,%2"
+ [(set_attr "type" "slt")
+ (set_attr "mode" "<GPR:MODE>")])
+
+;; x > y is computed as y < x: SLT/SLTU with the operands swapped
+;; (%z2 prints $0 for a zero operand).
+(define_insn "*sgt<u>_<GPR:mode><GPR2:mode>"
+ [(set (match_operand:GPR2 0 "register_operand" "=d")
+ (any_gt:GPR2 (match_operand:GPR 1 "register_operand" "d")
+ (match_operand:GPR 2 "reg_or_0_operand" "dJ")))]
+ "!TARGET_MIPS16"
+ "slt<u>\t%0,%z2,%1"
+ [(set_attr "type" "slt")
+ (set_attr "mode" "<GPR:MODE>")])
+
+(define_insn "*sgt<u>_<GPR:mode><GPR2:mode>_mips16"
+ [(set (match_operand:GPR2 0 "register_operand" "=t")
+ (any_gt:GPR2 (match_operand:GPR 1 "register_operand" "d")
+ (match_operand:GPR 2 "register_operand" "d")))]
+ "TARGET_MIPS16"
+ "slt<u>\t%2,%1"
+ [(set_attr "type" "slt")
+ (set_attr "mode" "<GPR:MODE>")])
+
+;; x >= 1 is equivalent to 0 < x, so it can be done with a single
+;; SLT/SLTU against $0 (%. prints the zero register).
+(define_insn "*sge<u>_<GPR:mode><GPR2:mode>"
+ [(set (match_operand:GPR2 0 "register_operand" "=d")
+ (any_ge:GPR2 (match_operand:GPR 1 "register_operand" "d")
+ (const_int 1)))]
+ "!TARGET_MIPS16"
+ "slt<u>\t%0,%.,%1"
+ [(set_attr "type" "slt")
+ (set_attr "mode" "<GPR:MODE>")])
+
+(define_insn "*slt<u>_<GPR:mode><GPR2:mode>"
+ [(set (match_operand:GPR2 0 "register_operand" "=d")
+ (any_lt:GPR2 (match_operand:GPR 1 "register_operand" "d")
+ (match_operand:GPR 2 "arith_operand" "dI")))]
+ "!TARGET_MIPS16"
+ "slt<u>\t%0,%1,%2"
+ [(set_attr "type" "slt")
+ (set_attr "mode" "<GPR:MODE>")])
+
+(define_insn "*slt<u>_<GPR:mode><GPR2:mode>_mips16"
+ [(set (match_operand:GPR2 0 "register_operand" "=t,t,t")
+ (any_lt:GPR2 (match_operand:GPR 1 "register_operand" "d,d,d")
+ (match_operand:GPR 2 "arith_operand" "d,Uub8,I")))]
+ "TARGET_MIPS16"
+ "slt<u>\t%1,%2"
+ [(set_attr "type" "slt")
+ (set_attr "mode" "<GPR:MODE>")
+ (set_attr "extended_mips16" "no,no,yes")])
+
+(define_insn "*sle<u>_<GPR:mode><GPR2:mode>"
+ [(set (match_operand:GPR2 0 "register_operand" "=d")
+ (any_le:GPR2 (match_operand:GPR 1 "register_operand" "d")
+ (match_operand:GPR 2 "sle_operand" "")))]
+ "!TARGET_MIPS16"
+{
+ operands[2] = GEN_INT (INTVAL (operands[2]) + 1);
+ return "slt<u>\t%0,%1,%2";
+}
+ [(set_attr "type" "slt")
+ (set_attr "mode" "<GPR:MODE>")])
+
+(define_insn "*sle<u>_<GPR:mode><GPR2:mode>_mips16"
+ [(set (match_operand:GPR2 0 "register_operand" "=t,t")
+ (any_le:GPR2 (match_operand:GPR 1 "register_operand" "d,d")
+ (match_operand:GPR 2 "sle_operand" "Udb8,i")))]
+ "TARGET_MIPS16"
+{
+ operands[2] = GEN_INT (INTVAL (operands[2]) + 1);
+ return "slt<u>\t%1,%2";
+}
+ [(set_attr "type" "slt")
+ (set_attr "mode" "<GPR:MODE>")
+ (set_attr "extended_mips16" "no,yes")])
+
+;;
+;; ....................
+;;
+;; FLOATING POINT COMPARISONS
+;;
+;; ....................
+
+;; Set condition-code register 0 from a floating-point comparison whose
+;; condition maps directly onto a C.cond.fmt encoding.
+(define_insn "s<code>_<mode>"
+ [(set (match_operand:CC 0 "register_operand" "=z")
+ (fcond:CC (match_operand:SCALARF 1 "register_operand" "f")
+ (match_operand:SCALARF 2 "register_operand" "f")))]
+ ""
+ "c.<fcond>.<fmt>\t%Z0%1,%2"
+ [(set_attr "type" "fcmp")
+ (set_attr "mode" "FPSW")])
+
+;; Likewise for conditions that only exist in C.cond.fmt with the
+;; operands the other way around; note the swapped %2,%1 in the output.
+(define_insn "s<code>_<mode>"
+ [(set (match_operand:CC 0 "register_operand" "=z")
+ (swapped_fcond:CC (match_operand:SCALARF 1 "register_operand" "f")
+ (match_operand:SCALARF 2 "register_operand" "f")))]
+ ""
+ "c.<swapped_fcond>.<fmt>\t%Z0%2,%1"
+ [(set_attr "type" "fcmp")
+ (set_attr "mode" "FPSW")])
+
+;;
+;; ....................
+;;
+;; UNCONDITIONAL BRANCHES
+;;
+;; ....................
+
+;; Unconditional branches.
+
+;; Unconditional jump to a label; matched by one of the *jump_* insns
+;; below depending on the target configuration.
+(define_expand "jump"
+ [(set (pc)
+ (label_ref (match_operand 0)))])
+
+(define_insn "*jump_absolute"
+ [(set (pc)
+ (label_ref (match_operand 0)))]
+ "!TARGET_MIPS16 && TARGET_ABSOLUTE_JUMPS"
+{
+ /* Use a branch for microMIPS. The assembler will choose
+ a 16-bit branch, a 32-bit branch, or a 32-bit jump. */
+ if (TARGET_MICROMIPS && !TARGET_ABICALLS_PIC2)
+ return "%*b\t%l0%/";
+ else
+ return MIPS_ABSOLUTE_JUMP ("%*j\t%l0%/");
+}
+ [(set_attr "type" "jump")])
+
+(define_insn "*jump_pic"
+ [(set (pc)
+ (label_ref (match_operand 0)))]
+ "!TARGET_MIPS16 && !TARGET_ABSOLUTE_JUMPS"
+{
+ if (get_attr_length (insn) <= 8)
+ return "%*b\t%l0%/";
+ else
+ {
+ mips_output_load_label (operands[0]);
+ return "%*jr\t%@%/%]";
+ }
+}
+ [(set_attr "type" "branch")])
+
+;; We need a different insn for the mips16, because a mips16 branch
+;; does not have a delay slot.
+
+(define_insn "*jump_mips16"
+ [(set (pc)
+ (label_ref (match_operand 0 "" "")))]
+ "TARGET_MIPS16"
+ "b\t%l0"
+ [(set_attr "type" "branch")
+ (set (attr "length")
+ ;; This calculation is like the normal branch one, but the
+ ;; range of the unextended instruction is [-0x800, 0x7fe] rather
+ ;; than [-0x100, 0xfe]. This translates to a range of:
+ ;;
+ ;; [-(0x800 - sizeof (branch)), 0x7fe]
+ ;; == [-0x7fe, 0x7fe]
+ ;;
+ ;; from the shorten_branches reference address. Long-branch
+ ;; sequences will replace this one, so the minimum length
+ ;; is one instruction shorter than for conditional branches.
+ (cond [(and (le (minus (match_dup 0) (pc)) (const_int 2046))
+ (le (minus (pc) (match_dup 0)) (const_int 2046)))
+ (const_int 2)
+ (and (le (minus (match_dup 0) (pc)) (const_int 65534))
+ (le (minus (pc) (match_dup 0)) (const_int 65532)))
+ (const_int 4)
+ (and (match_test "TARGET_ABICALLS")
+ (not (match_test "TARGET_ABSOLUTE_ABICALLS")))
+ (const_int 18)
+ (match_test "Pmode == SImode")
+ (const_int 14)
+ ] (const_int 22)))])
+
+;; Indirect jumps. The expander forces the target into a Pmode register
+;; and dispatches to the mode-parameterized insn below.
+(define_expand "indirect_jump"
+ [(set (pc) (match_operand 0 "register_operand"))]
+ ""
+{
+ operands[0] = force_reg (Pmode, operands[0]);
+ emit_jump_insn (PMODE_INSN (gen_indirect_jump, (operands[0])));
+ DONE;
+})
+
+;; Register-indirect jump; microMIPS uses "jr" (no delay-slot fill here),
+;; other targets use "j" with delay-slot handling.
+(define_insn "indirect_jump_<mode>"
+ [(set (pc) (match_operand:P 0 "register_operand" "d"))]
+ ""
+{
+ if (TARGET_MICROMIPS)
+ return "%*jr%:\t%0";
+ else
+ return "%*j\t%0%/";
+}
+ [(set_attr "type" "jump")
+ (set_attr "mode" "none")])
+
+;; A combined jump-and-move instruction, used for MIPS16 long-branch
+;; sequences. Having a dedicated pattern is more convenient than
+;; creating a SEQUENCE for this special case.
+;; The template emits "jr" through operand 1 followed by a "move" of
+;; operand 2 into operand 0, as a single extended sequence.
+(define_insn "indirect_jump_and_restore_<mode>"
+ [(set (pc) (match_operand:P 1 "register_operand" "d"))
+ (set (match_operand:P 0 "register_operand" "=d")
+ (match_operand:P 2 "register_operand" "y"))]
+ ""
+ "%(%<jr\t%1\;move\t%0,%2%>%)"
+ [(set_attr "type" "multi")
+ (set_attr "extended_mips16" "yes")])
+
+;; Jump-table dispatch. For -mgpword tables the entries are $gp-relative,
+;; so add $gp to the loaded entry; for RTP PIC they are function-relative.
+(define_expand "tablejump"
+ [(set (pc)
+ (match_operand 0 "register_operand"))
+ (use (label_ref (match_operand 1 "")))]
+ "!TARGET_MIPS16_SHORT_JUMP_TABLES"
+{
+ if (TARGET_GPWORD)
+ operands[0] = expand_binop (Pmode, add_optab, operands[0],
+ pic_offset_table_rtx, 0, 0, OPTAB_WIDEN)
+ else if (TARGET_RTP_PIC)
+ {
+ /* When generating RTP PIC, we use case table entries that are relative
+ to the start of the function. Add the function's address to the
+ value we loaded. */
+ rtx start = get_hard_reg_initial_val (Pmode, PIC_FUNCTION_ADDR_REGNUM);
+ operands[0] = expand_binop (ptr_mode, add_optab, operands[0],
+ start, 0, 0, OPTAB_WIDEN);
+ }
+
+ emit_jump_insn (PMODE_INSN (gen_tablejump, (operands[0], operands[1])));
+ DONE;
+})
+
+;; The actual table dispatch: an indirect jump that also marks the jump
+;; table label as used.
+(define_insn "tablejump_<mode>"
+ [(set (pc)
+ (match_operand:P 0 "register_operand" "d"))
+ (use (label_ref (match_operand 1 "" "")))]
+ ""
+{
+ if (TARGET_MICROMIPS)
+ return "%*jr%:\t%0";
+ else
+ return "%*j\t%0%/";
+}
+ [(set_attr "type" "jump")
+ (set_attr "mode" "none")])
+
+;; For MIPS16, we don't know whether a given jump table will use short or
+;; word-sized offsets until late in compilation, when we are able to determine
+;; the sizes of the insns which comprise the containing function. This
+;; necessitates the use of the casesi rather than the tablejump pattern, since
+;; the latter tries to calculate the index of the offset to jump through early
+;; in compilation, i.e. at expand time, when nothing is known about the
+;; eventual function layout.
+
+(define_expand "casesi"
+ [(match_operand:SI 0 "register_operand" "") ; index to jump on
+ (match_operand:SI 1 "const_int_operand" "") ; lower bound
+ (match_operand:SI 2 "const_int_operand" "") ; total range
+ (match_operand 3 "" "") ; table label
+ (match_operand 4 "" "")] ; out of range label
+ "TARGET_MIPS16_SHORT_JUMP_TABLES"
+{
+ /* Normalize the index so the table is effectively zero-based.  */
+ if (operands[1] != const0_rtx)
+ {
+ rtx reg = gen_reg_rtx (SImode);
+ rtx offset = gen_int_mode (-INTVAL (operands[1]), SImode);
+
+ if (!arith_operand (offset, SImode))
+ offset = force_reg (SImode, offset);
+
+ emit_insn (gen_addsi3 (reg, operands[0], offset));
+ operands[0] = reg;
+ }
+
+ if (!arith_operand (operands[0], SImode))
+ operands[0] = force_reg (SImode, operands[0]);
+
+ /* The internal pattern compares with LEU, so widen the range by one
+    to make the bound inclusive.  */
+ operands[2] = GEN_INT (INTVAL (operands[2]) + 1);
+
+ emit_jump_insn (PMODE_INSN (gen_casesi_internal_mips16,
+ (operands[0], operands[2],
+ operands[3], operands[4])));
+
+ DONE;
+})
+
+;; MIPS16 case dispatch: bounds-check the index, load the table entry
+;; (half-word or word, chosen from the ADDR_DIFF_VEC mode), add it to the
+;; table base and jump.  Falls through to operand 3 when out of range.
+(define_insn "casesi_internal_mips16_<mode>"
+ [(set (pc)
+ (if_then_else
+ (leu (match_operand:SI 0 "register_operand" "d")
+ (match_operand:SI 1 "arith_operand" "dI"))
+ (unspec:P
+ [(match_dup 0)
+ (label_ref (match_operand 2 "" ""))]
+ UNSPEC_CASESI_DISPATCH)
+ (label_ref (match_operand 3 "" ""))))
+ (clobber (match_scratch:P 4 "=d"))
+ (clobber (match_scratch:P 5 "=d"))
+ (clobber (reg:SI MIPS16_T_REGNUM))]
+ "TARGET_MIPS16_SHORT_JUMP_TABLES"
+{
+ rtx diff_vec = PATTERN (NEXT_INSN (operands[2]));
+
+ gcc_assert (GET_CODE (diff_vec) == ADDR_DIFF_VEC);
+
+ /* Range check: sets T, then branch to the default label if out of
+    bounds.  */
+ output_asm_insn ("sltu\t%0, %1", operands);
+ output_asm_insn ("bteqz\t%3", operands);
+
+ /* Scale the index by the entry size, add the table base, and load
+    the (base-relative) entry.  */
+ switch (GET_MODE (diff_vec))
+ {
+ case HImode:
+ output_asm_insn ("sll\t%5, %0, 1", operands);
+ output_asm_insn ("la\t%4, %2", operands);
+ output_asm_insn ("<d>addu\t%5, %4, %5", operands);
+ output_asm_insn ("lh\t%5, 0(%5)", operands);
+ break;
+
+ case SImode:
+ output_asm_insn ("sll\t%5, %0, 2", operands);
+ output_asm_insn ("la\t%4, %2", operands);
+ output_asm_insn ("<d>addu\t%5, %4, %5", operands);
+ output_asm_insn ("lw\t%5, 0(%5)", operands);
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ /* Entries are relative to the table base; form the absolute target.  */
+ output_asm_insn ("addu\t%4, %4, %5", operands);
+
+ return "j\t%4";
+}
+ [(set_attr "insn_count" "16")])
+
+;; For TARGET_USE_GOT, we save the gp in the jmp_buf as well.
+;; While it is possible to either pull it off the stack (in the
+;; o32 case) or recalculate it given t9 and our target label,
+;; it takes 3 or 4 insns to do so.
+
+;; Store $gp into the fourth word-sized slot of the jmp_buf
+;; (operand 0 points at the buffer).
+(define_expand "builtin_setjmp_setup"
+ [(use (match_operand 0 "register_operand"))]
+ "TARGET_USE_GOT"
+{
+ rtx addr;
+
+ addr = plus_constant (Pmode, operands[0], GET_MODE_SIZE (Pmode) * 3);
+ mips_emit_move (gen_rtx_MEM (Pmode, addr), pic_offset_table_rtx);
+ DONE;
+})
+
+;; Restore the gp that we saved above. Despite the earlier comment, it seems
+;; that older code did recalculate the gp from $25. Continue to jump through
+;; $25 for compatibility (we lose nothing by doing so).
+
+(define_expand "builtin_longjmp"
+ [(use (match_operand 0 "register_operand"))]
+ "TARGET_USE_GOT"
+{
+ /* The elements of the buffer are, in order: frame pointer, target
+     label, stack pointer, and (our addition) $gp.  */
+ int W = GET_MODE_SIZE (Pmode);
+ rtx fp = gen_rtx_MEM (Pmode, operands[0]);
+ rtx lab = gen_rtx_MEM (Pmode, plus_constant (Pmode, operands[0], 1*W));
+ rtx stack = gen_rtx_MEM (Pmode, plus_constant (Pmode, operands[0], 2*W));
+ rtx gpv = gen_rtx_MEM (Pmode, plus_constant (Pmode, operands[0], 3*W));
+ rtx pv = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
+ /* Use gen_raw_REG to avoid being given pic_offset_table_rtx.
+ The target is bound to be using $28 as the global pointer
+ but the current function might not be. */
+ rtx gp = gen_raw_REG (Pmode, GLOBAL_POINTER_REGNUM);
+
+ /* This bit is similar to expand_builtin_longjmp except that it
+ restores $gp as well. */
+ mips_emit_move (hard_frame_pointer_rtx, fp);
+ mips_emit_move (pv, lab);
+ emit_stack_restore (SAVE_NONLOCAL, stack);
+ mips_emit_move (gp, gpv);
+ /* Keep the restored registers live up to the jump.  */
+ emit_use (hard_frame_pointer_rtx);
+ emit_use (stack_pointer_rtx);
+ emit_use (gp);
+ emit_indirect_jump (pv);
+ DONE;
+})
+
+;;
+;; ....................
+;;
+;; Function prologue/epilogue
+;;
+;; ....................
+;;
+
+;; Prologue expansion is done entirely in C; see mips_expand_prologue.
+(define_expand "prologue"
+ [(const_int 1)]
+ ""
+{
+ mips_expand_prologue ();
+ DONE;
+})
+
+;; Block any insns from being moved before this point, since the
+;; profiling call to mcount can use various registers that aren't
+;; saved or used to pass arguments.
+
+;; Emits no code ("ghost"); exists purely as a scheduling barrier.
+(define_insn "blockage"
+ [(unspec_volatile [(const_int 0)] UNSPEC_BLOCKAGE)]
+ ""
+ ""
+ [(set_attr "type" "ghost")
+ (set_attr "mode" "none")])
+
+;; Probe the stack between operand 1 (start) and operand 2 (end);
+;; output is produced by mips_output_probe_stack_range.
+(define_insn "probe_stack_range_<P:mode>"
+ [(set (match_operand:P 0 "register_operand" "=d")
+ (unspec_volatile:P [(match_operand:P 1 "register_operand" "0")
+ (match_operand:P 2 "register_operand" "d")]
+ UNSPEC_PROBE_STACK_RANGE))]
+ ""
+ { return mips_output_probe_stack_range (operands[0], operands[2]); }
+ [(set_attr "type" "unknown")
+ (set_attr "can_delay" "no")
+ (set_attr "mode" "<MODE>")])
+
+;; Epilogue expansion is done in C; the boolean argument distinguishes
+;; a sibcall epilogue (true) from a normal one (false).
+(define_expand "epilogue"
+ [(const_int 2)]
+ ""
+{
+ mips_expand_epilogue (false);
+ DONE;
+})
+
+(define_expand "sibcall_epilogue"
+ [(const_int 2)]
+ ""
+{
+ mips_expand_epilogue (true);
+ DONE;
+})
+
+;; Trivial return. Make it look like a normal return insn as that
+;; allows jump optimizations to work better.
+
+(define_expand "return"
+ [(simple_return)]
+ "mips_can_use_return_insn ()"
+ { mips_expand_before_return (); })
+
+(define_expand "simple_return"
+ [(simple_return)]
+ ""
+ { mips_expand_before_return (); })
+
+;; Jump through $31 (the return address register).
+;; NOTE(review): any_return presumably iterates over return/simple_return
+;; via a code iterator defined elsewhere in this file -- confirm.
+(define_insn "*<optab>"
+ [(any_return)]
+ ""
+ {
+ if (TARGET_MICROMIPS)
+ return "%*jr%:\t$31";
+ else
+ return "%*j\t$31%/";
+ }
+ [(set_attr "type" "jump")
+ (set_attr "mode" "none")])
+
+;; Normal return.
+
+;; Return through an explicit register operand rather than the
+;; hard-wired $31 used by the pattern above.
+(define_insn "<optab>_internal"
+ [(any_return)
+ (use (match_operand 0 "pmode_register_operand" ""))]
+ ""
+{
+ if (TARGET_MICROMIPS)
+ return "%*jr%:\t%0";
+ else
+ return "%*j\t%0%/";
+}
+ [(set_attr "type" "jump")
+ (set_attr "mode" "none")])
+
+;; Exception return.
+(define_insn "mips_eret"
+ [(return)
+ (unspec_volatile [(const_int 0)] UNSPEC_ERET)]
+ ""
+ "eret"
+ [(set_attr "type" "trap")
+ (set_attr "mode" "none")])
+
+;; Debug exception return.
+(define_insn "mips_deret"
+ [(return)
+ (unspec_volatile [(const_int 0)] UNSPEC_DERET)]
+ ""
+ "deret"
+ [(set_attr "type" "trap")
+ (set_attr "mode" "none")])
+
+;; Disable interrupts.
+(define_insn "mips_di"
+ [(unspec_volatile [(const_int 0)] UNSPEC_DI)]
+ ""
+ "di"
+ [(set_attr "type" "trap")
+ (set_attr "mode" "none")])
+
+;; Execution hazard barrier.
+(define_insn "mips_ehb"
+ [(unspec_volatile [(const_int 0)] UNSPEC_EHB)]
+ ""
+ "ehb"
+ [(set_attr "type" "trap")
+ (set_attr "mode" "none")])
+
+;; Read GPR from previous shadow register set.
+(define_insn "mips_rdpgpr"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (unspec_volatile:SI [(match_operand:SI 1 "register_operand" "d")]
+ UNSPEC_RDPGPR))]
+ ""
+ "rdpgpr\t%0,%1"
+ [(set_attr "type" "move")
+ (set_attr "mode" "SI")])
+
+;; Move involving COP0 registers.  The two constraint alternatives cover
+;; GPR->COP0 (mtc) and COP0->GPR (mfc) directions.
+(define_insn "cop0_move"
+ [(set (match_operand:SI 0 "register_operand" "=B,d")
+ (unspec_volatile:SI [(match_operand:SI 1 "register_operand" "d,B")]
+ UNSPEC_COP0))]
+ ""
+{ return mips_output_move (operands[0], operands[1]); }
+ [(set_attr "type" "mtc,mfc")
+ (set_attr "mode" "SI")])
+
+;; This is used in compiling the unwind routines.
+(define_expand "eh_return"
+ [(use (match_operand 0 "general_operand"))]
+ ""
+{
+ /* Widen/narrow the handler address to word_mode before storing it.  */
+ if (GET_MODE (operands[0]) != word_mode)
+ operands[0] = convert_to_mode (word_mode, operands[0], 0);
+ if (TARGET_64BIT)
+ emit_insn (gen_eh_set_lr_di (operands[0]));
+ else
+ emit_insn (gen_eh_set_lr_si (operands[0]));
+ DONE;
+})
+
+;; Clobber the return address on the stack. We can't expand this
+;; until we know where it will be put in the stack frame.
+
+(define_insn "eh_set_lr_si"
+ [(unspec [(match_operand:SI 0 "register_operand" "d")] UNSPEC_EH_RETURN)
+ (clobber (match_scratch:SI 1 "=&d"))]
+ "! TARGET_64BIT"
+ "#")
+
+(define_insn "eh_set_lr_di"
+ [(unspec [(match_operand:DI 0 "register_operand" "d")] UNSPEC_EH_RETURN)
+ (clobber (match_scratch:DI 1 "=&d"))]
+ "TARGET_64BIT"
+ "#")
+
+;; Split the placeholder above once the frame layout is known.
+(define_split
+ [(unspec [(match_operand 0 "register_operand")] UNSPEC_EH_RETURN)
+ (clobber (match_scratch 1))]
+ "reload_completed"
+ [(const_int 0)]
+{
+ mips_set_return_address (operands[0], operands[1]);
+ DONE;
+})
+
+;; Code emitted at the target of EH edges: refresh the GOT version and,
+;; if $gp is call-clobbered, reload it from its save slot.
+(define_expand "exception_receiver"
+ [(const_int 0)]
+ "TARGET_USE_GOT"
+{
+ /* See the comment above load_call<mode> for details. */
+ emit_insn (gen_set_got_version ());
+
+ /* If we have a call-clobbered $gp, restore it from its save slot. */
+ if (HAVE_restore_gp_si)
+ emit_insn (gen_restore_gp_si ());
+ else if (HAVE_restore_gp_di)
+ emit_insn (gen_restore_gp_di ());
+ DONE;
+})
+
+;; Same idea for nonlocal-goto targets, but no $gp reload is needed here.
+(define_expand "nonlocal_goto_receiver"
+ [(const_int 0)]
+ "TARGET_USE_GOT"
+{
+ /* See the comment above load_call<mode> for details. */
+ emit_insn (gen_set_got_version ());
+ DONE;
+})
+
+;; Restore $gp from its .cprestore stack slot. The instruction remains
+;; volatile until all uses of $28 are exposed.
+(define_insn_and_split "restore_gp_<mode>"
+ [(set (reg:P 28)
+ (unspec_volatile:P [(const_int 0)] UNSPEC_RESTORE_GP))
+ (clobber (match_scratch:P 0 "=&d"))]
+ "TARGET_CALL_CLOBBERED_GP"
+ "#"
+ "&& epilogue_completed"
+ [(const_int 0)]
+{
+ mips_restore_gp_from_cprestore_slot (operands[0]);
+ DONE;
+}
+ [(set_attr "type" "ghost")])
+
+;; Move between $gp and its register save slot.  Emits nothing unless
+;; the function really needs $gp initialized.
+(define_insn_and_split "move_gp<mode>"
+ [(set (match_operand:GPR 0 "nonimmediate_operand" "=d,m")
+ (unspec:GPR [(match_operand:GPR 1 "move_operand" "m,d")]
+ UNSPEC_MOVE_GP))]
+ ""
+ { return mips_must_initialize_gp_p () ? "#" : ""; }
+ "mips_must_initialize_gp_p ()"
+ [(const_int 0)]
+{
+ mips_emit_move (operands[0], operands[1]);
+ DONE;
+}
+ [(set_attr "type" "ghost")])
+
+;;
+;; ....................
+;;
+;; FUNCTION CALLS
+;;
+;; ....................
+
+;; Instructions to load a call address from the GOT. The address might
+;; point to a function or to a lazy binding stub. In the latter case,
+;; the stub will use the dynamic linker to resolve the function, which
+;; in turn will change the GOT entry to point to the function's real
+;; address.
+;;
+;; This means that every call, even pure and constant ones, can
+;; potentially modify the GOT entry. And once a stub has been called,
+;; we must not call it again.
+;;
+;; We represent this restriction using an imaginary, fixed, call-saved
+;; register called GOT_VERSION_REGNUM. The idea is to make the register
+;; live throughout the function and to change its value after every
+;; potential call site. This stops any rtx value that uses the register
+;; from being computed before an earlier call. To do this, we:
+;;
+;; - Ensure that the register is live on entry to the function,
+;; so that it is never thought to be used uninitialized.
+;;
+;; - Ensure that the register is live on exit from the function,
+;; so that it is live throughout.
+;;
+;; - Make each call (lazily-bound or not) use the current value
+;; of GOT_VERSION_REGNUM, so that updates of the register are
+;; not moved across call boundaries.
+;;
+;; - Add "ghost" definitions of the register to the beginning of
+;; blocks reached by EH and ABNORMAL_CALL edges, because those
+;; edges may involve calls that normal paths don't. (E.g. the
+;; unwinding code that handles a non-call exception may change
+;; lazily-bound GOT entries.) We do this by making the
+;; exception_receiver and nonlocal_goto_receiver expanders emit
+;; a set_got_version instruction.
+;;
+;; - After each call (lazily-bound or not), use a "ghost"
+;; update_got_version instruction to change the register's value.
+;; This instruction mimics the _possible_ effect of the dynamic
+;; resolver during the call and it remains live even if the call
+;; itself becomes dead.
+;;
+;; - Leave GOT_VERSION_REGNUM out of all register classes.
+;; The register is therefore not a valid register_operand
+;; and cannot be moved to or from other registers.
+
+;; Load a call address from offset %R2 of the GOT entry addressed by
+;; operand 1; depends on the current GOT version (see above).
+(define_insn "load_call<mode>"
+ [(set (match_operand:P 0 "register_operand" "=d")
+ (unspec:P [(match_operand:P 1 "register_operand" "d")
+ (match_operand:P 2 "immediate_operand" "")
+ (reg:SI GOT_VERSION_REGNUM)] UNSPEC_LOAD_CALL))]
+ "TARGET_USE_GOT"
+ "<load>\t%0,%R2(%1)"
+ [(set_attr "got" "load")
+ (set_attr "mode" "<MODE>")])
+
+;; Ghost definition of the GOT version register (emits no code).
+(define_insn "set_got_version"
+ [(set (reg:SI GOT_VERSION_REGNUM)
+ (unspec_volatile:SI [(const_int 0)] UNSPEC_SET_GOT_VERSION))]
+ "TARGET_USE_GOT"
+ ""
+ [(set_attr "type" "ghost")])
+
+;; Ghost update of the GOT version after a call (emits no code).
+(define_insn "update_got_version"
+ [(set (reg:SI GOT_VERSION_REGNUM)
+ (unspec:SI [(reg:SI GOT_VERSION_REGNUM)] UNSPEC_UPDATE_GOT_VERSION))]
+ "TARGET_USE_GOT"
+ ""
+ [(set_attr "type" "ghost")])
+
+;; Sibling calls. All these patterns use jump instructions.
+
+;; If TARGET_SIBCALLS, call_insn_operand will only accept constant
+;; addresses if a direct jump is acceptable. Since the 'S' constraint
+;; is defined in terms of call_insn_operand, the same is true of the
+;; constraints.
+
+;; When we use an indirect jump, we need a register that will be
+;; preserved by the epilogue. Since TARGET_USE_PIC_FN_ADDR_REG forces
+;; us to use $25 for this purpose -- and $25 is never clobbered by the
+;; epilogue -- we might as well use it for !TARGET_USE_PIC_FN_ADDR_REG
+;; as well.
+
+(define_expand "sibcall"
+ [(parallel [(call (match_operand 0 "")
+ (match_operand 1 ""))
+ (use (match_operand 2 "")) ;; next_arg_reg
+ (use (match_operand 3 ""))])] ;; struct_value_size_rtx
+ "TARGET_SIBCALLS"
+{
+ mips_expand_call (MIPS_CALL_SIBCALL, NULL_RTX, XEXP (operands[0], 0),
+ operands[1], operands[2], false);
+ DONE;
+})
+
+;; Sibcall with no return value; alternatives are indirect (j) and
+;; direct (symbolic) jumps.
+(define_insn "sibcall_internal"
+ [(call (mem:SI (match_operand 0 "call_insn_operand" "j,S"))
+ (match_operand 1 "" ""))]
+ "TARGET_SIBCALLS && SIBLING_CALL_P (insn)"
+{
+ if (TARGET_MICROMIPS)
+ return MICROMIPS_J ("j", operands, 0);
+ else
+ return MIPS_CALL ("j", operands, 0, 1);
+}
+ [(set_attr "jal" "indirect,direct")
+ (set_attr "jal_macro" "no")])
+
+(define_expand "sibcall_value"
+ [(parallel [(set (match_operand 0 "")
+ (call (match_operand 1 "")
+ (match_operand 2 "")))
+ (use (match_operand 3 ""))])] ;; next_arg_reg
+ "TARGET_SIBCALLS"
+{
+ mips_expand_call (MIPS_CALL_SIBCALL, operands[0], XEXP (operands[1], 0),
+ operands[2], operands[3], false);
+ DONE;
+})
+
+;; Sibcall returning a single value.
+(define_insn "sibcall_value_internal"
+ [(set (match_operand 0 "register_operand" "")
+ (call (mem:SI (match_operand 1 "call_insn_operand" "j,S"))
+ (match_operand 2 "" "")))]
+ "TARGET_SIBCALLS && SIBLING_CALL_P (insn)"
+{
+ if (TARGET_MICROMIPS)
+ return MICROMIPS_J ("j", operands, 1);
+ else
+ return MIPS_CALL ("j", operands, 1, 2);
+}
+ [(set_attr "jal" "indirect,direct")
+ (set_attr "jal_macro" "no")])
+
+;; Sibcall whose result is returned in two separate locations
+;; (the pattern contains two parallel calls to the same address).
+(define_insn "sibcall_value_multiple_internal"
+ [(set (match_operand 0 "register_operand" "")
+ (call (mem:SI (match_operand 1 "call_insn_operand" "j,S"))
+ (match_operand 2 "" "")))
+ (set (match_operand 3 "register_operand" "")
+ (call (mem:SI (match_dup 1))
+ (match_dup 2)))]
+ "TARGET_SIBCALLS && SIBLING_CALL_P (insn)"
+{
+ if (TARGET_MICROMIPS)
+ return MICROMIPS_J ("j", operands, 1);
+ else
+ return MIPS_CALL ("j", operands, 1, 2);
+}
+ [(set_attr "jal" "indirect,direct")
+ (set_attr "jal_macro" "no")])
+
+(define_expand "call"
+ [(parallel [(call (match_operand 0 "")
+ (match_operand 1 ""))
+ (use (match_operand 2 "")) ;; next_arg_reg
+ (use (match_operand 3 ""))])] ;; struct_value_size_rtx
+ ""
+{
+ mips_expand_call (MIPS_CALL_NORMAL, NULL_RTX, XEXP (operands[0], 0),
+ operands[1], operands[2], false);
+ DONE;
+})
+
+;; This instruction directly corresponds to an assembly-language "jal".
+;; There are four cases:
+;;
+;; - -mno-abicalls:
+;; Both symbolic and register destinations are OK. The pattern
+;; always expands to a single mips instruction.
+;;
+;; - -mabicalls/-mno-explicit-relocs:
+;; Again, both symbolic and register destinations are OK.
+;; The call is treated as a multi-instruction black box.
+;;
+;; - -mabicalls/-mexplicit-relocs with n32 or n64:
+;; Only "jal $25" is allowed. This expands to a single "jalr $25"
+;; instruction.
+;;
+;; - -mabicalls/-mexplicit-relocs with o32 or o64:
+;; Only "jal $25" is allowed. The call is actually two instructions:
+;; "jalr $25" followed by an insn to reload $gp.
+;;
+;; In the last case, we can generate the individual instructions with
+;; a define_split. There are several things to be wary of:
+;;
+;; - We can't expose the load of $gp before reload. If we did,
+;; it might get removed as dead, but reload can introduce new
+;; uses of $gp by rematerializing constants.
+;;
+;; - We shouldn't restore $gp after calls that never return.
+;; It isn't valid to insert instructions between a noreturn
+;; call and the following barrier.
+;;
+;; - The splitter deliberately changes the liveness of $gp. The unsplit
+;; instruction preserves $gp and so has no effect on its liveness.
+;; But once we generate the separate insns, it becomes obvious that
+;; $gp is not live on entry to the call.
+;;
+(define_insn_and_split "call_internal"
+ [(call (mem:SI (match_operand 0 "call_insn_operand" "c,S"))
+ (match_operand 1 "" ""))
+ (clobber (reg:SI RETURN_ADDR_REGNUM))]
+ ""
+ { return TARGET_SPLIT_CALLS ? "#" : MIPS_CALL ("jal", operands, 0, 1); }
+ "reload_completed && TARGET_SPLIT_CALLS"
+ [(const_int 0)]
+{
+ mips_split_call (curr_insn, gen_call_split (operands[0], operands[1]));
+ DONE;
+}
+ [(set_attr "jal" "indirect,direct")])
+
+;; The post-split form: the call plus an explicit clobber of $28 ($gp).
+(define_insn "call_split"
+ [(call (mem:SI (match_operand 0 "call_insn_operand" "c,S"))
+ (match_operand 1 "" ""))
+ (clobber (reg:SI RETURN_ADDR_REGNUM))
+ (clobber (reg:SI 28))]
+ "TARGET_SPLIT_CALLS"
+ { return MIPS_CALL ("jal", operands, 0, 1); }
+ [(set_attr "jal" "indirect,direct")
+ (set_attr "jal_macro" "no")])
+
+;; A pattern for calls that must be made directly. It is used for
+;; MIPS16 calls that the linker may need to redirect to a hard-float
+;; stub; the linker relies on the call relocation type to detect when
+;; such redirection is needed.
+(define_insn_and_split "call_internal_direct"
+ [(call (mem:SI (match_operand 0 "const_call_insn_operand"))
+ (match_operand 1))
+ (const_int 1)
+ (clobber (reg:SI RETURN_ADDR_REGNUM))]
+ ""
+ { return TARGET_SPLIT_CALLS ? "#" : MIPS_CALL ("jal", operands, 0, -1); }
+ "reload_completed && TARGET_SPLIT_CALLS"
+ [(const_int 0)]
+{
+ mips_split_call (curr_insn,
+ gen_call_direct_split (operands[0], operands[1]));
+ DONE;
+}
+ [(set_attr "jal" "direct")])
+
+;; Post-split form of call_internal_direct, with the $28 clobber exposed.
+(define_insn "call_direct_split"
+ [(call (mem:SI (match_operand 0 "const_call_insn_operand"))
+ (match_operand 1))
+ (const_int 1)
+ (clobber (reg:SI RETURN_ADDR_REGNUM))
+ (clobber (reg:SI 28))]
+ "TARGET_SPLIT_CALLS"
+ { return MIPS_CALL ("jal", operands, 0, -1); }
+ [(set_attr "jal" "direct")
+ (set_attr "jal_macro" "no")])
+
+(define_expand "call_value"
+ [(parallel [(set (match_operand 0 "")
+ (call (match_operand 1 "")
+ (match_operand 2 "")))
+ (use (match_operand 3 ""))])] ;; next_arg_reg
+ ""
+{
+ mips_expand_call (MIPS_CALL_NORMAL, operands[0], XEXP (operands[1], 0),
+ operands[2], operands[3], false);
+ DONE;
+})
+
+;; See comment for call_internal.
+(define_insn_and_split "call_value_internal"
+ [(set (match_operand 0 "register_operand" "")
+ (call (mem:SI (match_operand 1 "call_insn_operand" "c,S"))
+ (match_operand 2 "" "")))
+ (clobber (reg:SI RETURN_ADDR_REGNUM))]
+ ""
+ { return TARGET_SPLIT_CALLS ? "#" : MIPS_CALL ("jal", operands, 1, 2); }
+ "reload_completed && TARGET_SPLIT_CALLS"
+ [(const_int 0)]
+{
+ mips_split_call (curr_insn,
+ gen_call_value_split (operands[0], operands[1],
+ operands[2]));
+ DONE;
+}
+ [(set_attr "jal" "indirect,direct")])
+
+;; Post-split form of call_value_internal, with the $28 clobber exposed.
+(define_insn "call_value_split"
+ [(set (match_operand 0 "register_operand" "")
+ (call (mem:SI (match_operand 1 "call_insn_operand" "c,S"))
+ (match_operand 2 "" "")))
+ (clobber (reg:SI RETURN_ADDR_REGNUM))
+ (clobber (reg:SI 28))]
+ "TARGET_SPLIT_CALLS"
+ { return MIPS_CALL ("jal", operands, 1, 2); }
+ [(set_attr "jal" "indirect,direct")
+ (set_attr "jal_macro" "no")])
+
+;; See call_internal_direct.
+(define_insn_and_split "call_value_internal_direct"
+ [(set (match_operand 0 "register_operand")
+ (call (mem:SI (match_operand 1 "const_call_insn_operand"))
+ (match_operand 2)))
+ (const_int 1)
+ (clobber (reg:SI RETURN_ADDR_REGNUM))]
+ ""
+ { return TARGET_SPLIT_CALLS ? "#" : MIPS_CALL ("jal", operands, 1, -1); }
+ "reload_completed && TARGET_SPLIT_CALLS"
+ [(const_int 0)]
+{
+ mips_split_call (curr_insn,
+ gen_call_value_direct_split (operands[0], operands[1],
+ operands[2]));
+ DONE;
+}
+ [(set_attr "jal" "direct")])
+
+;; Post-split form of call_value_internal_direct.
+(define_insn "call_value_direct_split"
+ [(set (match_operand 0 "register_operand")
+ (call (mem:SI (match_operand 1 "const_call_insn_operand"))
+ (match_operand 2)))
+ (const_int 1)
+ (clobber (reg:SI RETURN_ADDR_REGNUM))
+ (clobber (reg:SI 28))]
+ "TARGET_SPLIT_CALLS"
+ { return MIPS_CALL ("jal", operands, 1, -1); }
+ [(set_attr "jal" "direct")
+ (set_attr "jal_macro" "no")])
+
+;; See comment for call_internal.
+;; This variant returns its result in two separate locations
+;; (two parallel calls to the same address in the pattern).
+(define_insn_and_split "call_value_multiple_internal"
+ [(set (match_operand 0 "register_operand" "")
+ (call (mem:SI (match_operand 1 "call_insn_operand" "c,S"))
+ (match_operand 2 "" "")))
+ (set (match_operand 3 "register_operand" "")
+ (call (mem:SI (match_dup 1))
+ (match_dup 2)))
+ (clobber (reg:SI RETURN_ADDR_REGNUM))]
+ ""
+ { return TARGET_SPLIT_CALLS ? "#" : MIPS_CALL ("jal", operands, 1, 2); }
+ "reload_completed && TARGET_SPLIT_CALLS"
+ [(const_int 0)]
+{
+ mips_split_call (curr_insn,
+ gen_call_value_multiple_split (operands[0], operands[1],
+ operands[2], operands[3]));
+ DONE;
+}
+ [(set_attr "jal" "indirect,direct")])
+
+;; Post-split form of call_value_multiple_internal.
+(define_insn "call_value_multiple_split"
+ [(set (match_operand 0 "register_operand" "")
+ (call (mem:SI (match_operand 1 "call_insn_operand" "c,S"))
+ (match_operand 2 "" "")))
+ (set (match_operand 3 "register_operand" "")
+ (call (mem:SI (match_dup 1))
+ (match_dup 2)))
+ (clobber (reg:SI RETURN_ADDR_REGNUM))
+ (clobber (reg:SI 28))]
+ "TARGET_SPLIT_CALLS"
+ { return MIPS_CALL ("jal", operands, 1, 2); }
+ [(set_attr "jal" "indirect,direct")
+ (set_attr "jal_macro" "no")])
+
+;; Call subroutine returning any type.
+
+;; Emit the call, copy each result location requested by operand 2,
+;; and emit a blockage so the copies are not scheduled away.
+(define_expand "untyped_call"
+ [(parallel [(call (match_operand 0 "")
+ (const_int 0))
+ (match_operand 1 "")
+ (match_operand 2 "")])]
+ ""
+{
+ int i;
+
+ emit_call_insn (GEN_CALL (operands[0], const0_rtx, NULL, const0_rtx));
+
+ for (i = 0; i < XVECLEN (operands[2], 0); i++)
+ {
+ rtx set = XVECEXP (operands[2], 0, i);
+ mips_emit_move (SET_DEST (set), SET_SRC (set));
+ }
+
+ emit_insn (gen_blockage ());
+ DONE;
+})
+
+;;
+;; ....................
+;;
+;; MISC.
+;;
+;; ....................
+;;
+
+
+(define_insn "prefetch"
+ [(prefetch (match_operand:QI 0 "address_operand" "ZD")
+ (match_operand 1 "const_int_operand" "n")
+ (match_operand 2 "const_int_operand" "n"))]
+ "ISA_HAS_PREFETCH && TARGET_EXPLICIT_RELOCS"
+{
+ if (TARGET_LOONGSON_2EF || TARGET_LOONGSON_3A)
+ {
+ /* Loongson 2[ef] and Loongson 3a use load to $0 for prefetching. */
+ if (TARGET_64BIT)
+ return "ld\t$0,%a0";
+ else
+ return "lw\t$0,%a0";
+ }
+ /* Fold the rw/locality operands into a single "pref" hint value.  */
+ operands[1] = mips_prefetch_cookie (operands[1], operands[2]);
+ return "pref\t%1,%a0";
+}
+ [(set_attr "type" "prefetch")])
+
+;; Indexed (base + index register) prefetch, using the FPU "prefx" form.
+(define_insn "*prefetch_indexed_<mode>"
+ [(prefetch (plus:P (match_operand:P 0 "register_operand" "d")
+ (match_operand:P 1 "register_operand" "d"))
+ (match_operand 2 "const_int_operand" "n")
+ (match_operand 3 "const_int_operand" "n"))]
+ "ISA_HAS_PREFETCHX && TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT"
+{
+ operands[2] = mips_prefetch_cookie (operands[2], operands[3]);
+ return "prefx\t%2,%1(%0)";
+}
+ [(set_attr "type" "prefetchx")])
+
+(define_insn "nop"
+ [(const_int 0)]
+ ""
+ "%(nop%)"
+ [(set_attr "type" "nop")
+ (set_attr "mode" "none")])
+
+;; Like nop, but commented out when outside a .set noreorder block.
+(define_insn "hazard_nop"
+ [(const_int 1)]
+ ""
+ {
+ if (mips_noreorder.nesting_level > 0)
+ return "nop";
+ else
+ return "#nop";
+ }
+ [(set_attr "type" "nop")])
+
+;; MIPS4 Conditional move instructions.
+
+;; Conditional GPR move on a comparison of a register against zero.
+;; NOTE(review): %T4/%t4 presumably print the condition and its inverse
+;; (movz/movn etc.) -- confirm against mips_print_operand.
+(define_insn "*mov<GPR:mode>_on_<MOVECC:mode>"
+ [(set (match_operand:GPR 0 "register_operand" "=d,d")
+ (if_then_else:GPR
+ (match_operator 4 "equality_operator"
+ [(match_operand:MOVECC 1 "register_operand" "<MOVECC:reg>,<MOVECC:reg>")
+ (const_int 0)])
+ (match_operand:GPR 2 "reg_or_0_operand" "dJ,0")
+ (match_operand:GPR 3 "reg_or_0_operand" "0,dJ")))]
+ "ISA_HAS_CONDMOVE"
+ "@
+ mov%T4\t%0,%z2,%1
+ mov%t4\t%0,%z3,%1"
+ [(set_attr "type" "condmove")
+ (set_attr "mode" "<GPR:MODE>")])
+
+;; Conditional move where the condition is simply a register being
+;; non-zero (no explicit comparison operator).
+(define_insn "*mov<GPR:mode>_on_<GPR2:mode>_ne"
+ [(set (match_operand:GPR 0 "register_operand" "=d,d")
+ (if_then_else:GPR
+ (match_operand:GPR2 1 "register_operand" "<GPR2:reg>,<GPR2:reg>")
+ (match_operand:GPR 2 "reg_or_0_operand" "dJ,0")
+ (match_operand:GPR 3 "reg_or_0_operand" "0,dJ")))]
+ "ISA_HAS_CONDMOVE"
+ "@
+ movn\t%0,%z2,%1
+ movz\t%0,%z3,%1"
+ [(set_attr "type" "condmove")
+ (set_attr "mode" "<GPR:MODE>")])
+
+;; Floating-point conditional move (mov[nt].fmt etc.).
+(define_insn "*mov<SCALARF:mode>_on_<MOVECC:mode>"
+ [(set (match_operand:SCALARF 0 "register_operand" "=f,f")
+ (if_then_else:SCALARF
+ (match_operator 4 "equality_operator"
+ [(match_operand:MOVECC 1 "register_operand" "<MOVECC:reg>,<MOVECC:reg>")
+ (const_int 0)])
+ (match_operand:SCALARF 2 "register_operand" "f,0")
+ (match_operand:SCALARF 3 "register_operand" "0,f")))]
+ "ISA_HAS_FP_CONDMOVE"
+ "@
+ mov%T4.<fmt>\t%0,%2,%1
+ mov%t4.<fmt>\t%0,%3,%1"
+ [(set_attr "type" "condmove")
+ (set_attr "mode" "<SCALARF:MODE>")])
+
+;; These are the main define_expand's used to make conditional moves.
+
+(define_expand "mov<mode>cc"
+ [(set (match_dup 4) (match_operand 1 "comparison_operator"))
+ (set (match_operand:GPR 0 "register_operand")
+ (if_then_else:GPR (match_dup 5)
+ (match_operand:GPR 2 "reg_or_0_operand")
+ (match_operand:GPR 3 "reg_or_0_operand")))]
+ "ISA_HAS_CONDMOVE"
+{
+ mips_expand_conditional_move (operands);
+ DONE;
+})
+
+(define_expand "mov<mode>cc"
+ [(set (match_dup 4) (match_operand 1 "comparison_operator"))
+ (set (match_operand:SCALARF 0 "register_operand")
+ (if_then_else:SCALARF (match_dup 5)
+ (match_operand:SCALARF 2 "register_operand")
+ (match_operand:SCALARF 3 "register_operand")))]
+ "ISA_HAS_FP_CONDMOVE"
+{
+ mips_expand_conditional_move (operands);
+ DONE;
+})
+
+;;
+;; ....................
+;;
+;; mips16 inline constant tables
+;;
+;; ....................
+;;
+
+;; Emit a TLS relocation directive into the constant table; operand 1
+;; is the entry's size in bytes (used as the insn length).
+(define_insn "consttable_tls_reloc"
+ [(unspec_volatile [(match_operand 0 "tls_reloc_operand" "")
+ (match_operand 1 "const_int_operand" "")]
+ UNSPEC_CONSTTABLE_INT)]
+ "TARGET_MIPS16_PCREL_LOADS"
+ { return mips_output_tls_reloc_directive (&operands[0]); }
+ [(set (attr "length") (symbol_ref "INTVAL (operands[1])"))])
+
+;; Emit an integer constant-table entry; operand 1 is its size in bytes.
+(define_insn "consttable_int"
+ [(unspec_volatile [(match_operand 0 "consttable_operand" "")
+ (match_operand 1 "const_int_operand" "")]
+ UNSPEC_CONSTTABLE_INT)]
+ "TARGET_MIPS16"
+{
+ assemble_integer (mips_strip_unspec_address (operands[0]),
+ INTVAL (operands[1]),
+ BITS_PER_UNIT * INTVAL (operands[1]), 1);
+ return "";
+}
+ [(set (attr "length") (symbol_ref "INTVAL (operands[1])"))])
+
+;; Emit a floating-point constant-table entry; the length is the size
+;; of the constant's mode.
+(define_insn "consttable_float"
+ [(unspec_volatile [(match_operand 0 "consttable_operand" "")]
+ UNSPEC_CONSTTABLE_FLOAT)]
+ "TARGET_MIPS16"
+{
+ REAL_VALUE_TYPE d;
+
+ gcc_assert (GET_CODE (operands[0]) == CONST_DOUBLE);
+ REAL_VALUE_FROM_CONST_DOUBLE (d, operands[0]);
+ assemble_real (d, GET_MODE (operands[0]),
+ GET_MODE_BITSIZE (GET_MODE (operands[0])));
+ return "";
+}
+ [(set (attr "length")
+ (symbol_ref "GET_MODE_SIZE (GET_MODE (operands[0]))"))])
+
+;; Alignment directive; worst-case padding is used as the length.
+(define_insn "align"
+ [(unspec_volatile [(match_operand 0 "const_int_operand" "")] UNSPEC_ALIGN)]
+ ""
+ ".align\t%0"
+ [(set (attr "length") (symbol_ref "(1 << INTVAL (operands[0])) - 1"))])
+
+;; Rewrite small-data references after reload.
+(define_split
+ [(match_operand 0 "small_data_pattern")]
+ "reload_completed"
+ [(match_dup 0)]
+ { operands[0] = mips_rewrite_small_data (operands[0]); })
+
+;;
+;; ....................
+;;
+;; MIPS16e Save/Restore
+;;
+;; ....................
+;;
+
+;; Matches a MIPS16e SAVE/RESTORE parallel: a stack-pointer adjustment
+;; plus register saves/restores, validated by
+;; mips16e_save_restore_pattern_p.
+(define_insn "*mips16e_save_restore"
+ [(match_parallel 0 ""
+ [(set (match_operand:SI 1 "register_operand")
+ (plus:SI (match_dup 1)
+ (match_operand:SI 2 "const_int_operand")))])]
+ "operands[1] == stack_pointer_rtx
+ && mips16e_save_restore_pattern_p (operands[0], INTVAL (operands[2]), NULL)"
+ { return mips16e_output_save_restore (operands[0], INTVAL (operands[2])); }
+ [(set_attr "type" "arith")
+ (set_attr "extended_mips16" "yes")])
+
+;; Thread-Local Storage
+
+;; The TLS base pointer is accessed via "rdhwr $3, $29". No current
+;; MIPS architecture defines this register, and no current
+;; implementation provides it; instead, any OS which supports TLS is
+;; expected to trap and emulate this instruction. rdhwr is part of the
+;; MIPS 32r2 specification, but we use it on any architecture because
+;; we expect it to be emulated. Use .set to force the assembler to
+;; accept it.
+;;
+;; We do not use a constraint to force the destination to be $3
+;; because $3 can appear explicitly as a function return value.
+;; If we leave the use of $3 implicit in the constraints until
+;; reload, we may end up making a $3 return value live across
+;; the instruction, leading to a spill failure when reloading it.
+(define_insn_and_split "tls_get_tp_<mode>"
+ [(set (match_operand:P 0 "register_operand" "=d")
+ (unspec:P [(const_int 0)] UNSPEC_TLS_GET_TP))
+ (clobber (reg:P TLS_GET_TP_REGNUM))]
+ "HAVE_AS_TLS && !TARGET_MIPS16"
+ "#"
+ "&& reload_completed"
+ [(set (reg:P TLS_GET_TP_REGNUM)
+ (unspec:P [(const_int 0)] UNSPEC_TLS_GET_TP))
+ (set (match_dup 0) (reg:P TLS_GET_TP_REGNUM))]
+ ""
+ [(set_attr "type" "unknown")
+ (set_attr "mode" "<MODE>")
+ (set_attr "insn_count" "2")])
+
+;; The post-split rdhwr itself, wrapped in .set push/pop so the
+;; assembler accepts it regardless of the selected architecture.
+(define_insn "*tls_get_tp_<mode>_split"
+ [(set (reg:P TLS_GET_TP_REGNUM)
+ (unspec:P [(const_int 0)] UNSPEC_TLS_GET_TP))]
+ "HAVE_AS_TLS && !TARGET_MIPS16"
+ ".set\tpush\;.set\tmips32r2\t\;rdhwr\t$3,$29\;.set\tpop"
+ [(set_attr "type" "unknown")
+ ; Since rdhwr always generates a trap for now, putting it in a delay
+ ; slot would make the kernel's emulation of it much slower.
+ (set_attr "can_delay" "no")
+ (set_attr "mode" "<MODE>")])
+
+;; In MIPS16 mode, the TLS base pointer is accessed by a
+;; libgcc helper function __mips16_rdhwr(), as 'rdhwr' is not
+;; accessible in MIPS16.
+;;
+;; This is not represented as a call insn, to avoid the
+;; unnecessary clobbering of caller-save registers by a
+;; function consisting only of: "rdhwr $3,$29; j $31; nop;"
+;;
+;; A $25 clobber is added to cater for a $25 load stub added by the
+;; linker to __mips16_rdhwr when the call is made from non-PIC code.
+
+(define_insn_and_split "tls_get_tp_mips16_<mode>"
+ [(set (match_operand:P 0 "register_operand" "=d")
+ (unspec:P [(match_operand:P 1 "call_insn_operand" "dS")]
+ UNSPEC_TLS_GET_TP))
+ (clobber (reg:P TLS_GET_TP_REGNUM))
+ (clobber (reg:P PIC_FUNCTION_ADDR_REGNUM))
+ (clobber (reg:P RETURN_ADDR_REGNUM))]
+ "HAVE_AS_TLS && TARGET_MIPS16"
+ "#"
+ "&& reload_completed"
+ [(parallel [(set (reg:P TLS_GET_TP_REGNUM)
+ (unspec:P [(match_dup 1)] UNSPEC_TLS_GET_TP))
+ (clobber (reg:P PIC_FUNCTION_ADDR_REGNUM))
+ (clobber (reg:P RETURN_ADDR_REGNUM))])
+ (set (match_dup 0) (reg:P TLS_GET_TP_REGNUM))]
+ ""
+ [(set_attr "type" "multi")
+ (set_attr "insn_count" "4")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "*tls_get_tp_mips16_call_<mode>"
+ [(set (reg:P TLS_GET_TP_REGNUM)
+ (unspec:P [(match_operand:P 0 "call_insn_operand" "dS")]
+ UNSPEC_TLS_GET_TP))
+ (clobber (reg:P PIC_FUNCTION_ADDR_REGNUM))
+ (clobber (reg:P RETURN_ADDR_REGNUM))]
+ "HAVE_AS_TLS && TARGET_MIPS16"
+ { return MIPS_CALL ("jal", operands, 0, -1); }
+ [(set_attr "type" "call")
+ (set_attr "insn_count" "3")
+ (set_attr "mode" "<MODE>")])
+
+;; Named pattern for expanding thread pointer reference.
+(define_expand "get_thread_pointer<mode>"
+ [(match_operand:P 0 "register_operand" "=d")]
+ "HAVE_AS_TLS"
+{
+ mips_expand_thread_pointer (operands[0]);
+ DONE;
+})
+
+;; __builtin_mips_get_fcsr: move the FCSR into operand 0.
+(define_expand "mips_get_fcsr"
+ [(set (match_operand:SI 0 "register_operand")
+ (unspec_volatile [(const_int 0)] UNSPEC_GET_FCSR))]
+ "TARGET_HARD_FLOAT_ABI"
+{
+ if (TARGET_MIPS16)
+ {
+ mips16_expand_get_fcsr (operands[0]);
+ DONE;
+ }
+})
+
+(define_insn "*mips_get_fcsr"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (unspec_volatile [(const_int 0)] UNSPEC_GET_FCSR))]
+ "TARGET_HARD_FLOAT"
+ "cfc1\t%0,$31")
+
+;; See tls_get_tp_mips16_<mode> for why this form is used.
+(define_insn "mips_get_fcsr_mips16_<mode>"
+ [(set (reg:SI GET_FCSR_REGNUM)
+ (unspec:SI [(match_operand:P 0 "call_insn_operand" "dS")]
+ UNSPEC_GET_FCSR))
+ (clobber (reg:P PIC_FUNCTION_ADDR_REGNUM))
+ (clobber (reg:P RETURN_ADDR_REGNUM))]
+ "TARGET_HARD_FLOAT_ABI && TARGET_MIPS16"
+ { return MIPS_CALL ("jal", operands, 0, -1); }
+ [(set_attr "type" "call")
+ (set_attr "insn_count" "3")])
+
+;; __builtin_mips_set_fcsr: move operand 0 into the FCSR.
+(define_expand "mips_set_fcsr"
+ [(unspec_volatile [(match_operand:SI 0 "register_operand")]
+ UNSPEC_SET_FCSR)]
+ "TARGET_HARD_FLOAT_ABI"
+{
+ if (TARGET_MIPS16)
+ {
+ mips16_expand_set_fcsr (operands[0]);
+ DONE;
+ }
+})
+
+(define_insn "*mips_set_fcsr"
+ [(unspec_volatile [(match_operand:SI 0 "register_operand" "d")]
+ UNSPEC_SET_FCSR)]
+ "TARGET_HARD_FLOAT"
+ "ctc1\t%0,$31")
+
+;; See tls_get_tp_mips16_<mode> for why this form is used.
+(define_insn "mips_set_fcsr_mips16_<mode>"
+ [(unspec_volatile:SI [(match_operand:P 0 "call_insn_operand" "dS")
+ (reg:SI SET_FCSR_REGNUM)] UNSPEC_SET_FCSR)
+ (clobber (reg:P PIC_FUNCTION_ADDR_REGNUM))
+ (clobber (reg:P RETURN_ADDR_REGNUM))]
+ "TARGET_HARD_FLOAT_ABI && TARGET_MIPS16"
+ { return MIPS_CALL ("jal", operands, 0, -1); }
+ [(set_attr "type" "call")
+ (set_attr "insn_count" "3")])
+
+;; Synchronization instructions.
+
+(include "sync.md")
+
+; The MIPS Paired-Single Floating Point and MIPS-3D Instructions.
+
+(include "mips-ps-3d.md")
+
+; The MIPS DSP Instructions.
+
+(include "mips-dsp.md")
+
+; The MIPS DSP REV 2 Instructions.
+
+(include "mips-dspr2.md")
+
+; MIPS fixed-point instructions.
+(include "mips-fixed.md")
+
+; microMIPS patterns.
+(include "micromips.md")
+
+; ST-Microelectronics Loongson-2E/2F-specific patterns.
+(include "loongson.md")
+
+(define_c_enum "unspec" [
+ UNSPEC_ADDRESS_FIRST
+])
diff --git a/gcc-4.9/gcc/config/mips/mips.opt b/gcc-4.9/gcc/config/mips/mips.opt
new file mode 100644
index 000000000..6ee539837
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/mips.opt
@@ -0,0 +1,404 @@
+; Options for the MIPS port of the compiler
+;
+; Copyright (C) 2005-2014 Free Software Foundation, Inc.
+;
+; This file is part of GCC.
+;
+; GCC is free software; you can redistribute it and/or modify it under
+; the terms of the GNU General Public License as published by the Free
+; Software Foundation; either version 3, or (at your option) any later
+; version.
+;
+; GCC is distributed in the hope that it will be useful, but WITHOUT
+; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+; License for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with GCC; see the file COPYING3. If not see
+; <http://www.gnu.org/licenses/>.
+
+HeaderInclude
+config/mips/mips-opts.h
+
+EB
+Driver
+
+EL
+Driver
+
+mabi=
+Target RejectNegative Joined Enum(mips_abi) Var(mips_abi) Init(MIPS_ABI_DEFAULT)
+-mabi=ABI Generate code that conforms to the given ABI
+
+Enum
+Name(mips_abi) Type(int)
+Known MIPS ABIs (for use with the -mabi= option):
+
+EnumValue
+Enum(mips_abi) String(32) Value(ABI_32)
+
+EnumValue
+Enum(mips_abi) String(o64) Value(ABI_O64)
+
+EnumValue
+Enum(mips_abi) String(n32) Value(ABI_N32)
+
+EnumValue
+Enum(mips_abi) String(64) Value(ABI_64)
+
+EnumValue
+Enum(mips_abi) String(eabi) Value(ABI_EABI)
+
+mabicalls
+Target Report Mask(ABICALLS)
+Generate code that can be used in SVR4-style dynamic objects
+
+mmad
+Target Report Var(TARGET_MAD)
+Use PMC-style 'mad' instructions
+
+mimadd
+Target Report Mask(IMADD)
+Use integer madd/msub instructions
+
+march=
+Target RejectNegative Joined Var(mips_arch_option) ToLower Enum(mips_arch_opt_value)
+-march=ISA Generate code for the given ISA
+
+mbranch-cost=
+Target RejectNegative Joined UInteger Var(mips_branch_cost)
+-mbranch-cost=COST Set the cost of branches to roughly COST instructions
+
+mbranch-likely
+Target Report Mask(BRANCHLIKELY)
+Use Branch Likely instructions, overriding the architecture default
+
+mflip-mips16
+Target Report Var(TARGET_FLIP_MIPS16)
+Switch on/off MIPS16 ASE on alternating functions for compiler testing
+
+mcheck-zero-division
+Target Report Mask(CHECK_ZERO_DIV)
+Trap on integer divide by zero
+
+mcode-readable=
+Target RejectNegative Joined Enum(mips_code_readable_setting) Var(mips_code_readable) Init(CODE_READABLE_YES)
+-mcode-readable=SETTING Specify when instructions are allowed to access code
+
+Enum
+Name(mips_code_readable_setting) Type(enum mips_code_readable_setting)
+Valid arguments to -mcode-readable=:
+
+EnumValue
+Enum(mips_code_readable_setting) String(yes) Value(CODE_READABLE_YES)
+
+EnumValue
+Enum(mips_code_readable_setting) String(pcrel) Value(CODE_READABLE_PCREL)
+
+EnumValue
+Enum(mips_code_readable_setting) String(no) Value(CODE_READABLE_NO)
+
+mdivide-breaks
+Target Report RejectNegative Mask(DIVIDE_BREAKS)
+Use branch-and-break sequences to check for integer divide by zero
+
+mdivide-traps
+Target Report RejectNegative InverseMask(DIVIDE_BREAKS, DIVIDE_TRAPS)
+Use trap instructions to check for integer divide by zero
+
+mdmx
+Target Report RejectNegative Var(TARGET_MDMX)
+Allow the use of MDMX instructions
+
+mdouble-float
+Target Report RejectNegative InverseMask(SINGLE_FLOAT, DOUBLE_FLOAT)
+Allow hardware floating-point instructions to cover both 32-bit and 64-bit operations
+
+mdsp
+Target Report Var(TARGET_DSP)
+Use MIPS-DSP instructions
+
+mdspr2
+Target Report Var(TARGET_DSPR2)
+Use MIPS-DSP REV 2 instructions
+
+mdebug
+Target Var(TARGET_DEBUG_MODE) Undocumented
+
+mdebugd
+Target Var(TARGET_DEBUG_D_MODE) Undocumented
+
+meb
+Target Report RejectNegative Mask(BIG_ENDIAN)
+Use big-endian byte order
+
+mel
+Target Report RejectNegative InverseMask(BIG_ENDIAN, LITTLE_ENDIAN)
+Use little-endian byte order
+
+membedded-data
+Target Report Var(TARGET_EMBEDDED_DATA)
+Use ROM instead of RAM
+
+meva
+Target Report Var(TARGET_EVA)
+Use Enhanced Virtual Addressing instructions
+
+mexplicit-relocs
+Target Report Mask(EXPLICIT_RELOCS)
+Use NewABI-style %reloc() assembly operators
+
+mextern-sdata
+Target Report Var(TARGET_EXTERN_SDATA) Init(1)
+Use -G for data that is not defined by the current object
+
+mfix-24k
+Target Report Var(TARGET_FIX_24K)
+Work around certain 24K errata
+
+mfix-r4000
+Target Report Mask(FIX_R4000)
+Work around certain R4000 errata
+
+mfix-r4400
+Target Report Mask(FIX_R4400)
+Work around certain R4400 errata
+
+mfix-rm7000
+Target Report Var(TARGET_FIX_RM7000)
+Work around certain RM7000 errata
+
+mfix-r10000
+Target Report Mask(FIX_R10000)
+Work around certain R10000 errata
+
+mfix-sb1
+Target Report Var(TARGET_FIX_SB1)
+Work around errata for early SB-1 revision 2 cores
+
+mfix-vr4120
+Target Report Var(TARGET_FIX_VR4120)
+Work around certain VR4120 errata
+
+mfix-vr4130
+Target Report Var(TARGET_FIX_VR4130)
+Work around VR4130 mflo/mfhi errata
+
+mfix4300
+Target Report Var(TARGET_4300_MUL_FIX)
+Work around an early 4300 hardware bug
+
+mfp-exceptions
+Target Report Var(TARGET_FP_EXCEPTIONS) Init(1)
+FP exceptions are enabled
+
+mfp32
+Target Report RejectNegative InverseMask(FLOAT64)
+Use 32-bit floating-point registers
+
+mfp64
+Target Report RejectNegative Mask(FLOAT64)
+Use 64-bit floating-point registers
+
+mflush-func=
+Target RejectNegative Joined Var(mips_cache_flush_func) Init(CACHE_FLUSH_FUNC)
+-mflush-func=FUNC Use FUNC to flush the cache before calling stack trampolines
+
+mfused-madd
+Target Report Var(TARGET_FUSED_MADD) Init(1)
+Generate floating-point multiply-add instructions
+
+mabs=
+Target RejectNegative Joined Enum(mips_ieee_754_value) Var(mips_abs) Init(MIPS_IEEE_754_DEFAULT)
+-mabs=MODE Select the IEEE 754 ABS/NEG instruction execution mode
+
+mnan=
+Target RejectNegative Joined Enum(mips_ieee_754_value) Var(mips_nan) Init(MIPS_IEEE_754_DEFAULT)
+-mnan=ENCODING Select the IEEE 754 NaN data encoding
+
+Enum
+Name(mips_ieee_754_value) Type(int)
+Known MIPS IEEE 754 settings (for use with the -mabs= and -mnan= options):
+
+EnumValue
+Enum(mips_ieee_754_value) String(2008) Value(MIPS_IEEE_754_2008)
+
+EnumValue
+Enum(mips_ieee_754_value) String(legacy) Value(MIPS_IEEE_754_LEGACY)
+
+mgp32
+Target Report RejectNegative InverseMask(64BIT)
+Use 32-bit general registers
+
+mgp64
+Target Report RejectNegative Mask(64BIT)
+Use 64-bit general registers
+
+mgpopt
+Target Report Var(TARGET_GPOPT) Init(1)
+Use GP-relative addressing to access small data
+
+mplt
+Target Report Var(TARGET_PLT)
+When generating -mabicalls code, allow executables to use PLTs and copy relocations
+
+mhard-float
+Target Report RejectNegative InverseMask(SOFT_FLOAT_ABI, HARD_FLOAT_ABI)
+Allow the use of hardware floating-point ABI and instructions
+
+minterlink-compressed
+Target Report Var(TARGET_INTERLINK_COMPRESSED) Init(0)
+Generate code that is link-compatible with MIPS16 and microMIPS code.
+
+minterlink-mips16
+Target Report Var(TARGET_INTERLINK_COMPRESSED) Init(0)
+An alias for minterlink-compressed provided for backward-compatibility.
+
+mips
+Target RejectNegative Joined ToLower Enum(mips_mips_opt_value) Var(mips_isa_option)
+-mipsN Generate code for ISA level N
+
+mips16
+Target Report RejectNegative Mask(MIPS16)
+Generate MIPS16 code
+
+mips3d
+Target Report RejectNegative Var(TARGET_MIPS3D)
+Use MIPS-3D instructions
+
+mllsc
+Target Report Mask(LLSC)
+Use ll, sc and sync instructions
+
+mlocal-sdata
+Target Report Var(TARGET_LOCAL_SDATA) Init(1)
+Use -G for object-local data
+
+mlong-calls
+Target Report Var(TARGET_LONG_CALLS)
+Use indirect calls
+
+mlong32
+Target Report RejectNegative InverseMask(LONG64, LONG32)
+Use a 32-bit long type
+
+mlong64
+Target Report RejectNegative Mask(LONG64)
+Use a 64-bit long type
+
+mmcount-ra-address
+Target Report Var(TARGET_MCOUNT_RA_ADDRESS)
+Pass the address of the ra save location to _mcount in $12
+
+mmemcpy
+Target Report Mask(MEMCPY)
+Don't optimize block moves
+
+mmicromips
+Target Report Mask(MICROMIPS)
+Use microMIPS instructions
+
+mmt
+Target Report Var(TARGET_MT)
+Allow the use of MT instructions
+
+mno-float
+Target Report RejectNegative Var(TARGET_NO_FLOAT) Condition(TARGET_SUPPORTS_NO_FLOAT)
+Prevent the use of all floating-point operations
+
+mmcu
+Target Report Var(TARGET_MCU)
+Use MCU instructions
+
+mno-flush-func
+Target RejectNegative
+Do not use a cache-flushing function before calling stack trampolines
+
+mno-mdmx
+Target Report RejectNegative Var(TARGET_MDMX, 0)
+Do not use MDMX instructions
+
+mno-mips16
+Target Report RejectNegative InverseMask(MIPS16)
+Generate normal-mode code
+
+mno-mips3d
+Target Report RejectNegative Var(TARGET_MIPS3D, 0)
+Do not use MIPS-3D instructions
+
+mpaired-single
+Target Report Mask(PAIRED_SINGLE_FLOAT)
+Use paired-single floating-point instructions
+
+mr10k-cache-barrier=
+Target Joined RejectNegative Enum(mips_r10k_cache_barrier_setting) Var(mips_r10k_cache_barrier) Init(R10K_CACHE_BARRIER_NONE)
+-mr10k-cache-barrier=SETTING Specify when r10k cache barriers should be inserted
+
+Enum
+Name(mips_r10k_cache_barrier_setting) Type(enum mips_r10k_cache_barrier_setting)
+Valid arguments to -mr10k-cache-barrier=:
+
+EnumValue
+Enum(mips_r10k_cache_barrier_setting) String(load-store) Value(R10K_CACHE_BARRIER_LOAD_STORE)
+
+EnumValue
+Enum(mips_r10k_cache_barrier_setting) String(store) Value(R10K_CACHE_BARRIER_STORE)
+
+EnumValue
+Enum(mips_r10k_cache_barrier_setting) String(none) Value(R10K_CACHE_BARRIER_NONE)
+
+mrelax-pic-calls
+Target Report Mask(RELAX_PIC_CALLS)
+Try to allow the linker to turn PIC calls into direct calls
+
+mshared
+Target Report Var(TARGET_SHARED) Init(1)
+When generating -mabicalls code, make the code suitable for use in shared libraries
+
+msingle-float
+Target Report RejectNegative Mask(SINGLE_FLOAT)
+Restrict the use of hardware floating-point instructions to 32-bit operations
+
+msmartmips
+Target Report Mask(SMARTMIPS)
+Use SmartMIPS instructions
+
+msoft-float
+Target Report RejectNegative Mask(SOFT_FLOAT_ABI)
+Prevent the use of all hardware floating-point instructions
+
+msplit-addresses
+Target Report Mask(SPLIT_ADDRESSES)
+Optimize lui/addiu address loads
+
+msym32
+Target Report Var(TARGET_SYM32)
+Assume all symbols have 32-bit values
+
+msynci
+Target Report Mask(SYNCI)
+Use synci instruction to invalidate i-cache
+
+mtune=
+Target RejectNegative Joined Var(mips_tune_option) ToLower Enum(mips_arch_opt_value)
+-mtune=PROCESSOR Optimize the output for PROCESSOR
+
+muninit-const-in-rodata
+Target Report Var(TARGET_UNINIT_CONST_IN_RODATA)
+Put uninitialized constants in ROM (needs -membedded-data)
+
+mvirt
+Target Report Var(TARGET_VIRT)
+Use Virtualization Application Specific instructions
+
+mvr4130-align
+Target Report Mask(VR4130_ALIGN)
+Perform VR4130-specific alignment optimizations
+
+mxgot
+Target Report Var(TARGET_XGOT)
+Lift restrictions on GOT size
+
+noasmopt
+Driver
diff --git a/gcc-4.9/gcc/config/mips/mti-elf.h b/gcc-4.9/gcc/config/mips/mti-elf.h
new file mode 100644
index 000000000..76d289eae
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/mti-elf.h
@@ -0,0 +1,42 @@
+/* Target macros for mips*-mti-elf targets.
+ Copyright (C) 2012-2014 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#undef DRIVER_SELF_SPECS
+#define DRIVER_SELF_SPECS \
+ /* Make sure a -mips option is present. This helps us to pick \
+ the right multilib, and also makes the later specs easier \
+ to write. */ \
+ MIPS_ISA_LEVEL_SPEC, \
+ \
+ /* Infer the default float setting from -march. */ \
+ MIPS_ARCH_FLOAT_SPEC, \
+ \
+ /* Infer the -msynci setting from -march if not explicitly set. */ \
+ MIPS_ISA_SYNCI_SPEC, \
+ \
+ /* If no ABI option is specified, infer one from the ISA level \
+ or -mgp setting. */ \
+ "%{!mabi=*: %{" MIPS_32BIT_OPTION_SPEC ": -mabi=32;: -mabi=n32}}", \
+ \
+ /* Make sure that an endian option is always present. This makes \
+ things like LINK_SPEC easier to write. */ \
+ "%{!EB:%{!EL:%(endian_spec)}}", \
+ \
+ /* Configuration-independent MIPS rules. */ \
+ BASE_DRIVER_SELF_SPECS
diff --git a/gcc-4.9/gcc/config/mips/mti-linux.h b/gcc-4.9/gcc/config/mips/mti-linux.h
new file mode 100644
index 000000000..db9896b40
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/mti-linux.h
@@ -0,0 +1,46 @@
+/* Target macros for mips*-mti-linux* targets.
+ Copyright (C) 2012-2014 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* This target is a multilib target, specify the sysroot paths. */
+#undef SYSROOT_SUFFIX_SPEC
+#define SYSROOT_SUFFIX_SPEC \
+ "%{mips32:/mips32}%{mips64:/mips64}%{mips64r2:/mips64r2}%{mips16:/mips16}%{mmicromips:/micromips}%{mabi=64:/64}%{mel|EL:/el}%{msoft-float:/sof}%{mfp64:/fp64}%{mnan=2008:/nan2008}"
+
+#undef DRIVER_SELF_SPECS
+#define DRIVER_SELF_SPECS \
+ /* Make sure a -mips option is present. This helps us to pick \
+ the right multilib, and also makes the later specs easier \
+ to write. */ \
+ MIPS_ISA_LEVEL_SPEC, \
+ \
+ /* Infer the default float setting from -march. */ \
+ MIPS_ARCH_FLOAT_SPEC, \
+ \
+ /* Infer the -msynci setting from -march if not explicitly set. */ \
+ MIPS_ISA_SYNCI_SPEC, \
+ \
+ /* If no ABI option is specified, infer one from the ISA level \
+ or -mgp setting. */ \
+ "%{!mabi=*: %{" MIPS_32BIT_OPTION_SPEC ": -mabi=32;: -mabi=n32}}", \
+ \
+ /* Base SPECs. */ \
+ BASE_DRIVER_SELF_SPECS \
+ \
+ /* Use the standard linux specs for everything else. */ \
+ LINUX64_DRIVER_SELF_SPECS
diff --git a/gcc-4.9/gcc/config/mips/n32-elf.h b/gcc-4.9/gcc/config/mips/n32-elf.h
new file mode 100644
index 000000000..cb0cbbf95
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/n32-elf.h
@@ -0,0 +1,35 @@
+/* Definitions of target machine for GNU compiler.
+ n32 for embedded systems.
+ Copyright (C) 2003-2014 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* Use standard ELF-style local labels (not '$' as on early Irix). */
+#undef LOCAL_LABEL_PREFIX
+#define LOCAL_LABEL_PREFIX "."
+
+/* Use periods rather than dollar signs in special g++ assembler names. */
+#define NO_DOLLAR_IN_LABEL
+
+/* Force n32 to use 64-bit long doubles. */
+#undef LONG_DOUBLE_TYPE_SIZE
+#define LONG_DOUBLE_TYPE_SIZE 64
+
+#ifdef IN_LIBGCC2
+#undef LIBGCC2_LONG_DOUBLE_TYPE_SIZE
+#define LIBGCC2_LONG_DOUBLE_TYPE_SIZE 64
+#endif
diff --git a/gcc-4.9/gcc/config/mips/netbsd.h b/gcc-4.9/gcc/config/mips/netbsd.h
new file mode 100644
index 000000000..0313345fd
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/netbsd.h
@@ -0,0 +1,179 @@
+/* Definitions of target machine for GNU compiler, for MIPS NetBSD systems.
+ Copyright (C) 1993-2014 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+
+/* Define default target values. */
+
+#define TARGET_OS_CPP_BUILTINS() \
+ do \
+ { \
+ NETBSD_OS_CPP_BUILTINS_ELF(); \
+ builtin_define ("__NO_LEADING_UNDERSCORES__"); \
+ builtin_define ("__GP_SUPPORT__"); \
+ if (TARGET_LONG64) \
+ builtin_define ("__LONG64"); \
+ \
+ if (TARGET_ABICALLS) \
+ builtin_define ("__ABICALLS__"); \
+ \
+ if (mips_abi == ABI_EABI) \
+ builtin_define ("__mips_eabi"); \
+ else if (mips_abi == ABI_N32) \
+ builtin_define ("__mips_n32"); \
+ else if (mips_abi == ABI_64) \
+ builtin_define ("__mips_n64"); \
+ else if (mips_abi == ABI_O64) \
+ builtin_define ("__mips_o64"); \
+ } \
+ while (0)
+
+/* The generic MIPS TARGET_CPU_CPP_BUILTINS are incorrect for NetBSD.
+ Specifically, they define too many namespace-invasive macros. Override
+ them here. Note this is structured for easy comparison to the version
+ in mips.h.
+
+ FIXME: This probably isn't the best solution. But in the absence
+ of something better, it will have to do, for now. */
+
+#undef TARGET_CPU_CPP_BUILTINS
+#define TARGET_CPU_CPP_BUILTINS() \
+ do \
+ { \
+ builtin_assert ("cpu=mips"); \
+ builtin_define ("__mips__"); \
+ builtin_define ("_mips"); \
+ \
+ /* No _R3000 or _R4000. */ \
+ if (TARGET_64BIT) \
+ builtin_define ("__mips64"); \
+ \
+ if (TARGET_FLOAT64) \
+ builtin_define ("__mips_fpr=64"); \
+ else \
+ builtin_define ("__mips_fpr=32"); \
+ \
+ if (TARGET_MIPS16) \
+ builtin_define ("__mips16"); \
+ \
+ MIPS_CPP_SET_PROCESSOR ("_MIPS_ARCH", mips_arch_info); \
+ MIPS_CPP_SET_PROCESSOR ("_MIPS_TUNE", mips_tune_info); \
+ \
+ if (ISA_MIPS1) \
+ builtin_define ("__mips=1"); \
+ else if (ISA_MIPS2) \
+ builtin_define ("__mips=2"); \
+ else if (ISA_MIPS3) \
+ builtin_define ("__mips=3"); \
+ else if (ISA_MIPS4) \
+ builtin_define ("__mips=4"); \
+ else if (ISA_MIPS32) \
+ { \
+ builtin_define ("__mips=32"); \
+ builtin_define ("__mips_isa_rev=1"); \
+ } \
+ else if (ISA_MIPS32R2) \
+ { \
+ builtin_define ("__mips=32"); \
+ builtin_define ("__mips_isa_rev=2"); \
+ } \
+ else if (ISA_MIPS64) \
+ { \
+ builtin_define ("__mips=64"); \
+ builtin_define ("__mips_isa_rev=1"); \
+ } \
+ \
+ if (TARGET_HARD_FLOAT) \
+ builtin_define ("__mips_hard_float"); \
+ else if (TARGET_SOFT_FLOAT) \
+ builtin_define ("__mips_soft_float"); \
+ \
+ if (TARGET_SINGLE_FLOAT) \
+ builtin_define ("__mips_single_float"); \
+ \
+ if (TARGET_BIG_ENDIAN) \
+ builtin_define ("__MIPSEB__"); \
+ else \
+ builtin_define ("__MIPSEL__"); \
+ \
+ /* No language dialect defines. */ \
+ \
+ /* ABIs handled in TARGET_OS_CPP_BUILTINS. */ \
+ } \
+ while (0)
+
+
+/* Extra specs we need. */
+#undef SUBTARGET_EXTRA_SPECS
+#define SUBTARGET_EXTRA_SPECS \
+ { "netbsd_cpp_spec", NETBSD_CPP_SPEC }, \
+ { "netbsd_link_spec", NETBSD_LINK_SPEC_ELF }, \
+ { "netbsd_entry_point", NETBSD_ENTRY_POINT },
+
+/* Provide a SUBTARGET_CPP_SPEC appropriate for NetBSD. */
+
+#undef SUBTARGET_CPP_SPEC
+#define SUBTARGET_CPP_SPEC "%(netbsd_cpp_spec)"
+
+/* Provide a LINK_SPEC appropriate for a NetBSD/mips target.
+ This is a copy of LINK_SPEC from <netbsd-elf.h> tweaked for
+ the MIPS target. */
+
+#undef LINK_SPEC
+#define LINK_SPEC \
+ "%{EL:-m elf32lmip} \
+ %{EB:-m elf32bmip} \
+ %(endian_spec) \
+ %{G*} %{mips1} %{mips2} %{mips3} %{mips4} %{mips32} %{mips32r2} %{mips64} \
+ %(netbsd_link_spec)"
+
+#define NETBSD_ENTRY_POINT "__start"
+
+#undef SUBTARGET_ASM_SPEC
+#define SUBTARGET_ASM_SPEC \
+ "%{!mno-abicalls: \
+ %{!fno-PIC:%{!fno-pic:-KPIC}}}"
+
+
+/* -G is incompatible with -KPIC which is the default, so only allow objects
+ in the small data section if the user explicitly asks for it. */
+
+#undef MIPS_DEFAULT_GVALUE
+#define MIPS_DEFAULT_GVALUE 0
+
+
+#undef ASM_FINAL_SPEC
+#undef SET_ASM_OP
+
+
+/* NetBSD hasn't historically provided _flush_cache(), but rather
+ _cacheflush(), which takes the same arguments as the former. */
+#undef CACHE_FLUSH_FUNC
+#define CACHE_FLUSH_FUNC "_cacheflush"
+
+
+/* Make gcc agree with <machine/ansi.h> */
+
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "int"
+
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE 32
+
+#undef WINT_TYPE
+#define WINT_TYPE "int"
diff --git a/gcc-4.9/gcc/config/mips/octeon.md b/gcc-4.9/gcc/config/mips/octeon.md
new file mode 100644
index 000000000..1d6251c40
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/octeon.md
@@ -0,0 +1,136 @@
+;; Octeon pipeline description.
+;; Copyright (C) 2008-2014 Free Software Foundation, Inc.
+
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+;; Copyright (C) 2004, 2005, 2006 Cavium Networks.
+
+
+;; Octeon is a dual-issue processor that can issue all instructions on
+;; pipe0 and a subset on pipe1.
+
+(define_automaton "octeon_main, octeon_mult")
+
+(define_cpu_unit "octeon_pipe0" "octeon_main")
+(define_cpu_unit "octeon_pipe1" "octeon_main")
+(define_cpu_unit "octeon_mult" "octeon_mult")
+
+(define_insn_reservation "octeon_arith" 1
+ (and (eq_attr "cpu" "octeon,octeon2")
+ (eq_attr "type" "arith,const,logical,move,shift,signext,slt,nop"))
+ "octeon_pipe0 | octeon_pipe1")
+
+(define_insn_reservation "octeon_condmove" 2
+ (and (eq_attr "cpu" "octeon,octeon2")
+ (eq_attr "type" "condmove"))
+ "octeon_pipe0 | octeon_pipe1")
+
+(define_insn_reservation "octeon_load_o1" 2
+ (and (eq_attr "cpu" "octeon")
+ (eq_attr "type" "load,prefetch,mtc,mfc"))
+ "octeon_pipe0")
+
+(define_insn_reservation "octeon_load_o2" 3
+ (and (eq_attr "cpu" "octeon2")
+ (eq_attr "type" "load,prefetch"))
+ "octeon_pipe0")
+
+;; ??? memory-related cop0 reads are pipe0 with 3-cycle latency.
+;; Front-end-related ones are 1-cycle on pipe1. Assume front-end for now.
+(define_insn_reservation "octeon_cop_o2" 1
+ (and (eq_attr "cpu" "octeon2")
+ (eq_attr "type" "mtc,mfc"))
+ "octeon_pipe1")
+
+(define_insn_reservation "octeon_store" 1
+ (and (eq_attr "cpu" "octeon,octeon2")
+ (eq_attr "type" "store"))
+ "octeon_pipe0")
+
+(define_insn_reservation "octeon_brj_o1" 1
+ (and (eq_attr "cpu" "octeon")
+ (eq_attr "type" "branch,jump,call,trap"))
+ "octeon_pipe0")
+
+(define_insn_reservation "octeon_brj_o2" 2
+ (and (eq_attr "cpu" "octeon2")
+ (eq_attr "type" "branch,jump,call,trap"))
+ "octeon_pipe1")
+
+(define_insn_reservation "octeon_imul3_o1" 5
+ (and (eq_attr "cpu" "octeon")
+ (eq_attr "type" "imul3,pop,clz"))
+ "(octeon_pipe0 | octeon_pipe1) + octeon_mult")
+
+(define_insn_reservation "octeon_imul3_o2" 6
+ (and (eq_attr "cpu" "octeon2")
+ (eq_attr "type" "imul3,pop,clz"))
+ "octeon_pipe1 + octeon_mult")
+
+(define_insn_reservation "octeon_imul_o1" 2
+ (and (eq_attr "cpu" "octeon")
+ (eq_attr "type" "imul,mthi,mtlo"))
+ "(octeon_pipe0 | octeon_pipe1) + octeon_mult, octeon_mult")
+
+(define_insn_reservation "octeon_imul_o2" 1
+ (and (eq_attr "cpu" "octeon2")
+ (eq_attr "type" "imul,mthi,mtlo"))
+ "octeon_pipe1 + octeon_mult")
+
+(define_insn_reservation "octeon_mfhilo_o1" 5
+ (and (eq_attr "cpu" "octeon")
+ (eq_attr "type" "mfhi,mflo"))
+ "(octeon_pipe0 | octeon_pipe1) + octeon_mult")
+
+(define_insn_reservation "octeon_mfhilo_o2" 6
+ (and (eq_attr "cpu" "octeon2")
+ (eq_attr "type" "mfhi,mflo"))
+ "octeon_pipe1 + octeon_mult")
+
+(define_insn_reservation "octeon_imadd_o1" 4
+ (and (eq_attr "cpu" "octeon")
+ (eq_attr "type" "imadd"))
+ "(octeon_pipe0 | octeon_pipe1) + octeon_mult, octeon_mult*3")
+
+(define_insn_reservation "octeon_imadd_o2" 1
+ (and (eq_attr "cpu" "octeon2")
+ (eq_attr "type" "imadd"))
+ "octeon_pipe1 + octeon_mult")
+
+(define_insn_reservation "octeon_idiv_o1" 72
+ (and (eq_attr "cpu" "octeon")
+ (eq_attr "type" "idiv"))
+ "(octeon_pipe0 | octeon_pipe1) + octeon_mult, octeon_mult*71")
+
+(define_insn_reservation "octeon_idiv_o2_si" 18
+ (and (eq_attr "cpu" "octeon2")
+ (eq_attr "mode" "SI")
+ (eq_attr "type" "idiv"))
+ "octeon_pipe1 + octeon_mult, octeon_mult*17")
+
+(define_insn_reservation "octeon_idiv_o2_di" 35
+ (and (eq_attr "cpu" "octeon2")
+ (eq_attr "mode" "DI")
+ (eq_attr "type" "idiv"))
+ "octeon_pipe1 + octeon_mult, octeon_mult*34")
+
+;; Assume both pipes are needed for unknown and multiple-instruction
+;; patterns.
+
+(define_insn_reservation "octeon_unknown" 1
+ (and (eq_attr "cpu" "octeon,octeon2")
+ (eq_attr "type" "unknown,multi,atomic,syncloop"))
+ "octeon_pipe0 + octeon_pipe1")
diff --git a/gcc-4.9/gcc/config/mips/predicates.md b/gcc-4.9/gcc/config/mips/predicates.md
new file mode 100644
index 000000000..8ac8e0b6a
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/predicates.md
@@ -0,0 +1,494 @@
+;; Predicate definitions for MIPS.
+;; Copyright (C) 2004-2014 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+(define_predicate "const_uns_arith_operand"
+ (and (match_code "const_int")
+ (match_test "SMALL_OPERAND_UNSIGNED (INTVAL (op))")))
+
+(define_predicate "uns_arith_operand"
+ (ior (match_operand 0 "const_uns_arith_operand")
+ (match_operand 0 "register_operand")))
+
+(define_predicate "const_arith_operand"
+ (and (match_code "const_int")
+ (match_test "SMALL_OPERAND (INTVAL (op))")))
+
+(define_predicate "arith_operand"
+ (ior (match_operand 0 "const_arith_operand")
+ (match_operand 0 "register_operand")))
+
+(define_predicate "const_uimm6_operand"
+ (and (match_code "const_int")
+ (match_test "UIMM6_OPERAND (INTVAL (op))")))
+
+(define_predicate "const_imm10_operand"
+ (and (match_code "const_int")
+ (match_test "IMM10_OPERAND (INTVAL (op))")))
+
+(define_predicate "reg_imm10_operand"
+ (ior (match_operand 0 "const_imm10_operand")
+ (match_operand 0 "register_operand")))
+
+(define_predicate "sle_operand"
+ (and (match_code "const_int")
+ (match_test "SMALL_OPERAND (INTVAL (op) + 1)")))
+
+(define_predicate "sleu_operand"
+ (and (match_operand 0 "sle_operand")
+ (match_test "INTVAL (op) + 1 != 0")))
+
+(define_predicate "const_0_operand"
+ (and (match_code "const_int,const_double,const_vector")
+ (match_test "op == CONST0_RTX (GET_MODE (op))")))
+
+(define_predicate "reg_or_0_operand"
+ (ior (and (match_operand 0 "const_0_operand")
+ (not (match_test "TARGET_MIPS16")))
+ (match_operand 0 "register_operand")))
+
+(define_predicate "const_1_operand"
+ (and (match_code "const_int,const_double,const_vector")
+ (match_test "op == CONST1_RTX (GET_MODE (op))")))
+
+(define_predicate "reg_or_1_operand"
+ (ior (match_operand 0 "const_1_operand")
+ (match_operand 0 "register_operand")))
+
+;; This is used for indexing into vectors, and hence only accepts const_int.
+(define_predicate "const_0_or_1_operand"
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (INTVAL (op), 0, 1)")))
+
+(define_predicate "const_2_or_3_operand"
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (INTVAL (op), 2, 3)")))
+
+(define_predicate "const_0_to_3_operand"
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (INTVAL (op), 0, 3)")))
+
+(define_predicate "qi_mask_operand"
+ (and (match_code "const_int")
+ (match_test "UINTVAL (op) == 0xff")))
+
+(define_predicate "hi_mask_operand"
+ (and (match_code "const_int")
+ (match_test "UINTVAL (op) == 0xffff")))
+
+(define_predicate "si_mask_operand"
+ (and (match_code "const_int")
+ (match_test "UINTVAL (op) == 0xffffffff")))
+
+(define_predicate "and_load_operand"
+ (ior (match_operand 0 "qi_mask_operand")
+ (match_operand 0 "hi_mask_operand")
+ (match_operand 0 "si_mask_operand")))
+
+(define_predicate "low_bitmask_operand"
+ (and (match_test "ISA_HAS_EXT_INS")
+ (match_code "const_int")
+ (match_test "low_bitmask_len (mode, INTVAL (op)) > 16")))
+
+(define_predicate "and_reg_operand"
+ (ior (match_operand 0 "register_operand")
+ (and (not (match_test "TARGET_MIPS16"))
+ (match_operand 0 "const_uns_arith_operand"))
+ (match_operand 0 "low_bitmask_operand")
+ (match_operand 0 "si_mask_operand")))
+
+(define_predicate "and_operand"
+ (ior (match_operand 0 "and_load_operand")
+ (match_operand 0 "and_reg_operand")))
+
+(define_predicate "d_operand"
+ (and (match_code "reg")
+ (match_test "TARGET_MIPS16
+ ? M16_REG_P (REGNO (op))
+ : GP_REG_P (REGNO (op))")))
+
+(define_predicate "lwsp_swsp_operand"
+ (and (match_code "mem")
+ (match_test "lwsp_swsp_address_p (XEXP (op, 0), mode)")))
+
+(define_predicate "lw16_sw16_operand"
+ (and (match_code "mem")
+ (match_test "m16_based_address_p (XEXP (op, 0), mode, uw4_operand)")))
+
+(define_predicate "lhu16_sh16_operand"
+ (and (match_code "mem")
+ (match_test "m16_based_address_p (XEXP (op, 0), mode, uh4_operand)")))
+
+(define_predicate "lbu16_operand"
+ (and (match_code "mem")
+ (match_test "m16_based_address_p (XEXP (op, 0), mode, db4_operand)")))
+
+(define_predicate "sb16_operand"
+ (and (match_code "mem")
+ (match_test "m16_based_address_p (XEXP (op, 0), mode, ub4_operand)")))
+
+(define_predicate "db4_operand"
+ (and (match_code "const_int")
+ (match_test "mips_unsigned_immediate_p (INTVAL (op) + 1, 4, 0)")))
+
+(define_predicate "db7_operand"
+ (and (match_code "const_int")
+ (match_test "mips_unsigned_immediate_p (INTVAL (op) + 1, 7, 0)")))
+
+(define_predicate "db8_operand"
+ (and (match_code "const_int")
+ (match_test "mips_unsigned_immediate_p (INTVAL (op) + 1, 8, 0)")))
+
+(define_predicate "ib3_operand"
+ (and (match_code "const_int")
+ (match_test "mips_unsigned_immediate_p (INTVAL (op) - 1, 3, 0)")))
+
+(define_predicate "sb4_operand"
+ (and (match_code "const_int")
+ (match_test "mips_signed_immediate_p (INTVAL (op), 4, 0)")))
+
+(define_predicate "sb5_operand"
+ (and (match_code "const_int")
+ (match_test "mips_signed_immediate_p (INTVAL (op), 5, 0)")))
+
+(define_predicate "sb8_operand"
+ (and (match_code "const_int")
+ (match_test "mips_signed_immediate_p (INTVAL (op), 8, 0)")))
+
+(define_predicate "sd8_operand"
+ (and (match_code "const_int")
+ (match_test "mips_signed_immediate_p (INTVAL (op), 8, 3)")))
+
+(define_predicate "ub4_operand"
+ (and (match_code "const_int")
+ (match_test "mips_unsigned_immediate_p (INTVAL (op), 4, 0)")))
+
+(define_predicate "ub8_operand"
+ (and (match_code "const_int")
+ (match_test "mips_unsigned_immediate_p (INTVAL (op), 8, 0)")))
+
+(define_predicate "uh4_operand"
+ (and (match_code "const_int")
+ (match_test "mips_unsigned_immediate_p (INTVAL (op), 4, 1)")))
+
+(define_predicate "uw4_operand"
+ (and (match_code "const_int")
+ (match_test "mips_unsigned_immediate_p (INTVAL (op), 4, 2)")))
+
+(define_predicate "uw5_operand"
+ (and (match_code "const_int")
+ (match_test "mips_unsigned_immediate_p (INTVAL (op), 5, 2)")))
+
+(define_predicate "uw6_operand"
+ (and (match_code "const_int")
+ (match_test "mips_unsigned_immediate_p (INTVAL (op), 6, 2)")))
+
+(define_predicate "uw8_operand"
+ (and (match_code "const_int")
+ (match_test "mips_unsigned_immediate_p (INTVAL (op), 8, 2)")))
+
+(define_predicate "addiur2_operand"
+ (and (match_code "const_int")
+ (ior (match_test "INTVAL (op) == -1")
+ (match_test "INTVAL (op) == 1")
+ (match_test "INTVAL (op) == 4")
+ (match_test "INTVAL (op) == 8")
+ (match_test "INTVAL (op) == 12")
+ (match_test "INTVAL (op) == 16")
+ (match_test "INTVAL (op) == 20")
+ (match_test "INTVAL (op) == 24"))))
+
+(define_predicate "addiusp_operand"
+ (and (match_code "const_int")
+ (ior (match_test "(IN_RANGE (INTVAL (op), 2, 257))")
+ (match_test "(IN_RANGE (INTVAL (op), -258, -3))"))))
+
+(define_predicate "andi16_operand"
+ (and (match_code "const_int")
+ (ior (match_test "IN_RANGE (INTVAL (op), 1, 4)")
+ (match_test "IN_RANGE (INTVAL (op), 7, 8)")
+ (match_test "IN_RANGE (INTVAL (op), 15, 16)")
+ (match_test "IN_RANGE (INTVAL (op), 31, 32)")
+ (match_test "IN_RANGE (INTVAL (op), 63, 64)")
+ (match_test "INTVAL (op) == 255")
+ (match_test "INTVAL (op) == 32768")
+ (match_test "INTVAL (op) == 65535"))))
+
+(define_predicate "movep_src_register"
+ (and (match_code "reg")
+ (ior (match_test ("IN_RANGE (REGNO (op), 2, 3)"))
+ (match_test ("IN_RANGE (REGNO (op), 16, 20)")))))
+
+(define_predicate "movep_src_operand"
+ (ior (match_operand 0 "const_0_operand")
+ (match_operand 0 "movep_src_register")))
+
+(define_predicate "lo_operand"
+ (and (match_code "reg")
+ (match_test "REGNO (op) == LO_REGNUM")))
+
+(define_predicate "hilo_operand"
+ (and (match_code "reg")
+ (match_test "MD_REG_P (REGNO (op))")))
+
+(define_predicate "fcc_reload_operand"
+ (and (match_code "reg,subreg")
+ (match_test "ST_REG_P (true_regnum (op))")))
+
+(define_predicate "muldiv_target_operand"
+ (if_then_else (match_test "TARGET_MIPS16")
+ (match_operand 0 "hilo_operand")
+ (match_operand 0 "register_operand")))
+
+(define_predicate "const_call_insn_operand"
+ (match_code "const,symbol_ref,label_ref")
+{
+ enum mips_symbol_type symbol_type;
+
+ if (!mips_symbolic_constant_p (op, SYMBOL_CONTEXT_CALL, &symbol_type))
+ return false;
+
+ switch (symbol_type)
+ {
+ case SYMBOL_ABSOLUTE:
+ /* We can only use direct calls if we're sure that the target
+ function does not need $25 to be valid on entry. */
+ if (mips_use_pic_fn_addr_reg_p (op))
+ return false;
+
+ /* If -mlong-calls or if this function has an explicit long_call
+ attribute, we must use register addressing. The
+ SYMBOL_FLAG_LONG_CALL bit is set by mips_encode_section_info. */
+ return !(GET_CODE (op) == SYMBOL_REF && SYMBOL_REF_LONG_CALL_P (op));
+
+ case SYMBOL_GOT_DISP:
+ /* Without explicit relocs, there is no special syntax for
+ loading the address of a call destination into a register.
+ Using "la $25,foo; jal $25" would prevent the lazy binding
+ of "foo", so keep the address of global symbols with the
+ jal macro. */
+ return !TARGET_EXPLICIT_RELOCS;
+
+ default:
+ return false;
+ }
+})
+
+(define_predicate "call_insn_operand"
+ (ior (match_operand 0 "const_call_insn_operand")
+ (match_operand 0 "register_operand")))
+
+;; A legitimate CONST_INT operand that takes more than one instruction
+;; to load.
+(define_predicate "splittable_const_int_operand"
+ (match_code "const_int")
+{
+ /* When generating mips16 code, TARGET_LEGITIMATE_CONSTANT_P rejects
+ CONST_INTs that can't be loaded using simple insns. */
+ if (TARGET_MIPS16)
+ return false;
+
+ /* Don't handle multi-word moves this way; we don't want to introduce
+ the individual word-mode moves until after reload. */
+ if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
+ return false;
+
+ /* Otherwise check whether the constant can be loaded in a single
+ instruction. */
+ return !LUI_INT (op) && !SMALL_INT (op) && !SMALL_INT_UNSIGNED (op);
+})
+
+(define_predicate "move_operand"
+ ;; Allow HI and LO to be used as the source of a MIPS16 move.
+ (ior (match_operand 0 "general_operand")
+ (match_operand 0 "hilo_operand"))
+{
+ enum mips_symbol_type symbol_type;
+
+ /* The thinking here is as follows:
+
+ (1) The move expanders should split complex load sequences into
+ individual instructions. Those individual instructions can
+ then be optimized by all rtl passes.
+
+ (2) The target of pre-reload load sequences should not be used
+ to store temporary results. If the target register is only
+ assigned one value, reload can rematerialize that value
+ on demand, rather than spill it to the stack.
+
+ (3) If we allowed pre-reload passes like combine and cse to recreate
+ complex load sequences, we would want to be able to split the
+ sequences before reload as well, so that the pre-reload scheduler
+ can see the individual instructions. This falls foul of (2);
+ the splitter would be forced to reuse the target register for
+ intermediate results.
+
+ (4) We want to define complex load splitters for combine. These
+ splitters can request a temporary scratch register, which avoids
+ the problem in (2). They allow things like:
+
+ (set (reg T1) (high SYM))
+ (set (reg T2) (low (reg T1) SYM))
+ (set (reg X) (plus (reg T2) (const_int OFFSET)))
+
+ to be combined into:
+
+ (set (reg T3) (high SYM+OFFSET))
+ (set (reg X) (lo_sum (reg T3) SYM+OFFSET))
+
+ if T2 is only used this once. */
+ switch (GET_CODE (op))
+ {
+ case CONST_INT:
+ return !splittable_const_int_operand (op, mode);
+
+ case CONST:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ if (CONST_GP_P (op))
+ return true;
+ return (mips_symbolic_constant_p (op, SYMBOL_CONTEXT_LEA, &symbol_type)
+ && !mips_split_p[symbol_type]);
+
+ case HIGH:
+ op = XEXP (op, 0);
+ return (mips_symbolic_constant_p (op, SYMBOL_CONTEXT_LEA, &symbol_type)
+ && !mips_split_hi_p[symbol_type]);
+
+ default:
+ return true;
+ }
+})
+
+(define_predicate "cprestore_save_slot_operand"
+ (and (match_code "mem")
+ (match_test "mips_cprestore_address_p (XEXP (op, 0), false)")))
+
+(define_predicate "cprestore_load_slot_operand"
+ (and (match_code "mem")
+ (match_test "mips_cprestore_address_p (XEXP (op, 0), true)")))
+
+(define_predicate "consttable_operand"
+ (match_test "CONSTANT_P (op)"))
+
+(define_predicate "symbolic_operand"
+ (match_code "const,symbol_ref,label_ref")
+{
+ enum mips_symbol_type type;
+ return mips_symbolic_constant_p (op, SYMBOL_CONTEXT_LEA, &type);
+})
+
+(define_predicate "absolute_symbolic_operand"
+ (match_code "const,symbol_ref,label_ref")
+{
+ enum mips_symbol_type type;
+ return (mips_symbolic_constant_p (op, SYMBOL_CONTEXT_LEA, &type)
+ && type == SYMBOL_ABSOLUTE);
+})
+
+(define_predicate "symbolic_operand_with_high"
+ (match_code "const,symbol_ref,label_ref")
+{
+ enum mips_symbol_type type;
+ return (mips_symbolic_constant_p (op, SYMBOL_CONTEXT_LEA, &type)
+ && mips_hi_relocs[(int) type]);
+})
+
+(define_predicate "force_to_mem_operand"
+ (match_code "const,symbol_ref,label_ref")
+{
+ enum mips_symbol_type symbol_type;
+ return (mips_symbolic_constant_p (op, SYMBOL_CONTEXT_LEA, &symbol_type)
+ && mips_use_pcrel_pool_p[(int) symbol_type]);
+})
+
+(define_predicate "got_disp_operand"
+ (match_code "const,symbol_ref,label_ref")
+{
+ enum mips_symbol_type type;
+ return (mips_symbolic_constant_p (op, SYMBOL_CONTEXT_LEA, &type)
+ && type == SYMBOL_GOT_DISP);
+})
+
+(define_predicate "got_page_ofst_operand"
+ (match_code "const,symbol_ref,label_ref")
+{
+ enum mips_symbol_type type;
+ return (mips_symbolic_constant_p (op, SYMBOL_CONTEXT_LEA, &type)
+ && type == SYMBOL_GOT_PAGE_OFST);
+})
+
+(define_predicate "tls_reloc_operand"
+ (match_code "const,symbol_ref,label_ref")
+{
+ enum mips_symbol_type type;
+ return (mips_symbolic_constant_p (op, SYMBOL_CONTEXT_LEA, &type)
+ && (type == SYMBOL_DTPREL || type == SYMBOL_TPREL));
+})
+
+(define_predicate "symbol_ref_operand"
+ (match_code "symbol_ref"))
+
+(define_predicate "stack_operand"
+ (and (match_code "mem")
+ (match_test "mips_stack_address_p (XEXP (op, 0), GET_MODE (op))")))
+
+(define_predicate "macc_msac_operand"
+ (ior (and (match_code "plus") (match_test "ISA_HAS_MACC"))
+ (and (match_code "minus") (match_test "ISA_HAS_MSAC")))
+{
+ rtx mult = XEXP (op, GET_CODE (op) == PLUS ? 0 : 1);
+ rtx accum = XEXP (op, GET_CODE (op) == PLUS ? 1 : 0);
+ return (GET_CODE (mult) == MULT
+ && REG_P (XEXP (mult, 0))
+ && REG_P (XEXP (mult, 1))
+ && REG_P (accum));
+})
+
+
+(define_predicate "equality_operator"
+ (match_code "eq,ne"))
+
+(define_predicate "extend_operator"
+ (match_code "zero_extend,sign_extend"))
+
+(define_predicate "trap_comparison_operator"
+ (match_code "eq,ne,lt,ltu,ge,geu"))
+
+(define_predicate "order_operator"
+ (match_code "lt,ltu,le,leu,ge,geu,gt,gtu"))
+
+;; For NE, cstore uses sltu instructions in which the first operand is $0.
+;; This isn't possible in mips16 code.
+
+(define_predicate "mips_cstore_operator"
+ (ior (match_code "eq,gt,gtu,ge,geu,lt,ltu,le,leu")
+ (and (match_code "ne") (not (match_test "TARGET_MIPS16")))))
+
+(define_predicate "small_data_pattern"
+ (and (match_code "set,parallel,unspec,unspec_volatile,prefetch")
+ (match_test "mips_small_data_pattern_p (op)")))
+
+(define_predicate "mem_noofs_operand"
+ (and (match_code "mem")
+ (match_code "reg" "0")))
+
+;; Return 1 if the operand is in non-volatile memory.
+(define_predicate "non_volatile_mem_operand"
+ (and (match_operand 0 "memory_operand")
+ (not (match_test "MEM_VOLATILE_P (op)"))))
diff --git a/gcc-4.9/gcc/config/mips/r3900.h b/gcc-4.9/gcc/config/mips/r3900.h
new file mode 100644
index 000000000..d5b191264
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/r3900.h
@@ -0,0 +1,39 @@
+/* Definitions of MIPS sub target machine for GNU compiler.
+ Toshiba r3900. You should include mips.h after this.
+
+ Copyright (C) 1989-2014 Free Software Foundation, Inc.
+ Contributed by Gavin Koch (gavin@cygnus.com).
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#undef MIPS_CPU_STRING_DEFAULT
+#define MIPS_CPU_STRING_DEFAULT "r3900"
+#define MIPS_ISA_DEFAULT 1
+
+#undef MULTILIB_DEFAULTS
+#define MULTILIB_DEFAULTS { MULTILIB_ENDIAN_DEFAULT, "msoft-float" }
+
+/* We use the MIPS EABI by default. */
+#undef MIPS_ABI_DEFAULT
+#define MIPS_ABI_DEFAULT ABI_EABI
+
+/* By default (if not mips-something-else) produce code for the r3900 */
+#undef SUBTARGET_CC1_SPEC
+#define SUBTARGET_CC1_SPEC "\
+%{mhard-float:%e-mhard-float not supported} \
+%{msingle-float:%{msoft-float: \
+ %e-msingle-float and -msoft-float cannot both be specified}}"
diff --git a/gcc-4.9/gcc/config/mips/rtems.h b/gcc-4.9/gcc/config/mips/rtems.h
new file mode 100644
index 000000000..09b46b875
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/rtems.h
@@ -0,0 +1,34 @@
+/* Definitions for rtems targeting a MIPS using ELF.
+ Copyright (C) 1996-2014 Free Software Foundation, Inc.
+ Contributed by Joel Sherrill (joel@OARcorp.com).
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* Specify predefined symbols in preprocessor. */
+
+#define TARGET_OS_CPP_BUILTINS() \
+do { \
+ builtin_define ("__rtems__"); \
+ builtin_define ("__USE_INIT_FINI__"); \
+ builtin_assert ("system=rtems"); \
+} while (0)
+
+/* No sdata.
+ * The RTEMS BSPs expect -G0
+ */
+#undef MIPS_DEFAULT_GVALUE
+#define MIPS_DEFAULT_GVALUE 0
diff --git a/gcc-4.9/gcc/config/mips/sb1.md b/gcc-4.9/gcc/config/mips/sb1.md
new file mode 100644
index 000000000..311300e50
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/sb1.md
@@ -0,0 +1,573 @@
+;; Copyright (C) 2004-2014 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+;;
+;; DFA-based pipeline description for Broadcom SB-1
+;;
+
+;; The Broadcom SB-1 core is 4-way superscalar, in-order. It has 2 load/store
+;; pipes (one of which can support some ALU operations), 2 alu pipes, 2 FP
+;; pipes, and 1 MDMX pipes. It can issue 2 ls insns and 2 exe/fpu/mdmx insns
+;; each cycle.
+
+;; We model the 4-way issue by ordering unit choices. The possible choices are
+;; {ex1,fp1}|{ex0,fp0}|ls1|ls0. Instructions issue to the first eligible unit
+;; in the list in most cases. Non-indexed load/stores issue to ls0 first.
+;; simple alu operations issue to ls1 if it is still available, and their
+;; operands are ready (no co-issue with loads), otherwise to the first
+;; available ex unit.
+
+;; When exceptions are enabled, can only issue FP insns to fp1. This is
+;; to ensure that instructions complete in order. The -mfp-exceptions option
+;; can be used to specify whether the system has FP exceptions enabled or not.
+
+;; In 32-bit mode, dependent FP can't co-issue with load, and only one FP exe
+;; insn can issue per cycle (fp1).
+
+;; The A1 MDMX pipe is separate from the FP pipes, but uses the same register
+;; file. As a result, once an MDMX insn is issued, no FP insns can be issued
+;; for 3 cycles. When an FP insn is issued, no MDMX insn can be issued for
+;; 5 cycles. This is currently not handled because there is no MDMX insn
+;; support as yet.
+
+;;
+;; We use two automata. sb1_cpu_div is for the integer divides, which are
+;; not pipelined. sb1_cpu is for everything else.
+;;
+(define_automaton "sb1_cpu, sb1_cpu_div")
+
+;; Load/store function units.
+(define_cpu_unit "sb1_ls0" "sb1_cpu")
+(define_cpu_unit "sb1_ls1" "sb1_cpu")
+
+;; CPU function units.
+(define_cpu_unit "sb1_ex0" "sb1_cpu")
+(define_cpu_unit "sb1_ex1" "sb1_cpu")
+
+;; The divide unit is not pipelined, and blocks hi/lo reads and writes.
+(define_cpu_unit "sb1_div" "sb1_cpu_div")
+;; DMULT block any multiply from issuing in the next cycle.
+(define_cpu_unit "sb1_mul" "sb1_cpu")
+
+;; Floating-point units.
+(define_cpu_unit "sb1_fp0" "sb1_cpu")
+(define_cpu_unit "sb1_fp1" "sb1_cpu")
+
+;; Can only issue to one of the ex and fp pipes at a time.
+(exclusion_set "sb1_ex0" "sb1_fp0")
+(exclusion_set "sb1_ex1" "sb1_fp1")
+
+;; Define an SB-1 specific attribute to simplify some FP descriptions.
+;; We can use 2 FP pipes only if we have 64-bit FP code, and exceptions are
+;; disabled.
+
+(define_attr "sb1_fp_pipes" "one,two"
+ (cond [(and (match_test "TARGET_FLOAT64")
+ (not (match_test "TARGET_FP_EXCEPTIONS")))
+ (const_string "two")]
+ (const_string "one")))
+
+;; Define reservations for common combinations.
+
+;; For long cycle operations, the FPU has a 4 cycle pipeline that repeats,
+;; effectively re-issuing the operation every 4 cycles. This means that we
+;; can have at most 4 long-cycle operations per pipe.
+
+;; ??? The fdiv operations should be e.g.
+;; sb1_fp1_4cycles*7" | "sb1_fp0_4cycle*7
+;; but the DFA is too large when we do that. Perhaps have to use scheduler
+;; hooks here.
+
+;; ??? Try limiting scheduler to 2 long latency operations, and see if this
+;; results in a usable DFA, and whether it helps code performance.
+
+;;(define_reservation "sb1_fp0_4cycles" "sb1_fp0, nothing*3")
+;;(define_reservation "sb1_fp1_4cycles" "sb1_fp1, nothing*3")
+
+;;
+;; The ordering of the instruction-execution-path/resource-usage
+;; descriptions (also known as reservation RTL) is roughly ordered
+;; based on the define attribute RTL for the "type" classification.
+;; When modifying, remember that the first test that matches is the
+;; reservation used!
+;;
+
+(define_insn_reservation "ir_sb1_unknown" 1
+ (and (eq_attr "cpu" "sb1,sb1a")
+ (eq_attr "type" "unknown,multi,atomic,syncloop"))
+ "sb1_ls0+sb1_ls1+sb1_ex0+sb1_ex1+sb1_fp0+sb1_fp1")
+
+;; predicted taken branch causes 2 cycle ifetch bubble. predicted not
+;; taken branch causes 0 cycle ifetch bubble. mispredicted branch causes 8
+;; cycle ifetch bubble. We assume all branches predicted not taken.
+
+;; ??? This assumption that branches are predicated not taken should be
+;; investigated. Maybe using 2 here will give better results.
+
+(define_insn_reservation "ir_sb1_branch" 0
+ (and (eq_attr "cpu" "sb1,sb1a")
+ (eq_attr "type" "branch,jump,call"))
+ "sb1_ex0")
+
+;; ??? This is 1 cycle for ldl/ldr to ldl/ldr when they use the same data
+;; register as destination.
+
+;; ??? SB-1 can co-issue a load with a dependent arith insn if it executes on
+;; an EX unit. Can not co-issue if the dependent insn executes on an LS unit.
+;; SB-1A can always co-issue here.
+
+;; A load normally has a latency of zero cycles. In some cases, dependent
+;; insns can be issued in the same cycle. However, a value of 1 gives
+;; better performance in empirical testing.
+
+(define_insn_reservation "ir_sb1_load" 1
+ (and (eq_attr "cpu" "sb1")
+ (eq_attr "type" "load,prefetch"))
+ "sb1_ls0 | sb1_ls1")
+
+(define_insn_reservation "ir_sb1a_load" 0
+ (and (eq_attr "cpu" "sb1a")
+ (eq_attr "type" "load,prefetch"))
+ "sb1_ls0 | sb1_ls1")
+
+;; Can not co-issue fpload with fp exe when in 32-bit mode.
+
+(define_insn_reservation "ir_sb1_fpload" 0
+ (and (eq_attr "cpu" "sb1,sb1a")
+ (and (eq_attr "type" "fpload")
+ (match_test "TARGET_FLOAT64")))
+ "sb1_ls0 | sb1_ls1")
+
+(define_insn_reservation "ir_sb1_fpload_32bitfp" 1
+ (and (eq_attr "cpu" "sb1,sb1a")
+ (and (eq_attr "type" "fpload")
+ (not (match_test "TARGET_FLOAT64"))))
+ "sb1_ls0 | sb1_ls1")
+
+;; Indexed loads can only execute on LS1 pipe.
+
+(define_insn_reservation "ir_sb1_fpidxload" 0
+ (and (eq_attr "cpu" "sb1,sb1a")
+ (and (eq_attr "type" "fpidxload")
+ (match_test "TARGET_FLOAT64")))
+ "sb1_ls1")
+
+(define_insn_reservation "ir_sb1_fpidxload_32bitfp" 1
+ (and (eq_attr "cpu" "sb1,sb1a")
+ (and (eq_attr "type" "fpidxload")
+ (not (match_test "TARGET_FLOAT64"))))
+ "sb1_ls1")
+
+;; prefx can only execute on the ls1 pipe.
+
+(define_insn_reservation "ir_sb1_prefetchx" 0
+ (and (eq_attr "cpu" "sb1,sb1a")
+ (eq_attr "type" "prefetchx"))
+ "sb1_ls1")
+
+;; ??? There is a 4.5 cycle latency if a store is followed by a load, and
+;; there is a RAW dependency.
+
+(define_insn_reservation "ir_sb1_store" 1
+ (and (eq_attr "cpu" "sb1,sb1a")
+ (eq_attr "type" "store"))
+ "sb1_ls0+sb1_ex1 | sb1_ls0+sb1_ex0 | sb1_ls1+sb1_ex1 | sb1_ls1+sb1_ex0")
+
+(define_insn_reservation "ir_sb1_fpstore" 1
+ (and (eq_attr "cpu" "sb1,sb1a")
+ (eq_attr "type" "fpstore"))
+ "sb1_ls0+sb1_fp1 | sb1_ls0+sb1_fp0 | sb1_ls1+sb1_fp1 | sb1_ls1+sb1_fp0")
+
+;; Indexed stores can only execute on LS1 pipe.
+
+(define_insn_reservation "ir_sb1_fpidxstore" 1
+ (and (eq_attr "cpu" "sb1,sb1a")
+ (eq_attr "type" "fpidxstore"))
+ "sb1_ls1+sb1_fp1 | sb1_ls1+sb1_fp0")
+
+;; Load latencies are 3 cycles for one load to another load or store (address
+;; only). This is 0 cycles for one load to a store using it as the data
+;; written.
+
+;; This assumes that if a load is dependent on a previous insn, then it must
+;; be an address dependence.
+
+(define_bypass 3
+ "ir_sb1_load,ir_sb1a_load,ir_sb1_fpload,ir_sb1_fpload_32bitfp,
+ ir_sb1_fpidxload,ir_sb1_fpidxload_32bitfp"
+ "ir_sb1_load,ir_sb1a_load,ir_sb1_fpload,ir_sb1_fpload_32bitfp,
+ ir_sb1_fpidxload,ir_sb1_fpidxload_32bitfp,ir_sb1_prefetchx")
+
+(define_bypass 3
+ "ir_sb1_load,ir_sb1a_load,ir_sb1_fpload,ir_sb1_fpload_32bitfp,
+ ir_sb1_fpidxload,ir_sb1_fpidxload_32bitfp"
+ "ir_sb1_store,ir_sb1_fpstore,ir_sb1_fpidxstore"
+ "mips_store_data_bypass_p")
+
+;; On SB-1, simple alu instructions can execute on the LS1 unit.
+
+;; ??? A simple alu insn issued on an LS unit has 0 cycle latency to an EX
+;; insn, to a store (for data), and to an xfer insn. It has 1 cycle latency to
+;; another LS insn (excluding store data). A simple alu insn issued on an EX
+;; unit has a latency of 5 cycles when the results goes to a LS unit (excluding
+;; store data), otherwise a latency of 1 cycle.
+
+;; ??? We cannot handle latencies properly for simple alu instructions
+;; within the DFA pipeline model. Latencies can be defined only from one
+;; insn reservation to another. We can't make them depend on which function
+;; unit was used. This isn't a DFA flaw. There is a conflict here, as we
+;; need to know the latency before we can determine which unit will be
+;; available, but we need to know which unit it is issued to before we can
+;; compute the latency. Perhaps this can be handled via scheduler hooks.
+;; This needs to be investigated.
+
+;; ??? Optimal scheduling taking the LS units into account seems to require
+;; a pre-scheduling pass. We need to determine which instructions feed results
+;; into store/load addresses, and thus benefit most from being issued to the
+;; LS unit. Also, we need to prune the list to ensure we don't overschedule
+;; insns to the LS unit, and that we don't conflict with insns that need LS1
+;; such as indexed loads. We then need to emit nops to ensure that simple
+;; alu instructions that are not supposed to be scheduled to LS1 don't
+;; accidentally end up there because LS1 is free when they are issued. This
+;; will be a lot of work, and it isn't clear how useful it will be.
+
+;; Empirical testing shows that 2 gives the best result.
+
+(define_insn_reservation "ir_sb1_simple_alu" 2
+ (and (eq_attr "cpu" "sb1")
+ (eq_attr "type" "const,arith,logical,move,signext"))
+ "sb1_ls1 | sb1_ex1 | sb1_ex0")
+
+;; On SB-1A, simple alu instructions can not execute on the LS1 unit, and we
+;; have none of the above problems.
+
+(define_insn_reservation "ir_sb1a_simple_alu" 1
+ (and (eq_attr "cpu" "sb1a")
+ (eq_attr "type" "const,arith,logical,move,signext"))
+ "sb1_ex1 | sb1_ex0")
+
+;; ??? condmove also includes some FP instructions that execute on the FP
+;; units. This needs to be clarified.
+
+(define_insn_reservation "ir_sb1_alu" 1
+ (and (eq_attr "cpu" "sb1,sb1a")
+ (eq_attr "type" "condmove,nop,shift"))
+ "sb1_ex1 | sb1_ex0")
+
+;; These are type arith/darith that only execute on the EX0 unit.
+
+(define_insn_reservation "ir_sb1_alu_0" 1
+ (and (eq_attr "cpu" "sb1,sb1a")
+ (eq_attr "type" "slt,clz,trap"))
+ "sb1_ex0")
+
+;; An alu insn issued on an EX unit has a latency of 5 cycles when the
+;; result goes to a LS unit (excluding store data).
+
+;; This assumes that if a load is dependent on a previous insn, then it must
+;; be an address dependence.
+
+(define_bypass 5
+ "ir_sb1a_simple_alu,ir_sb1_alu,ir_sb1_alu_0,ir_sb1_mfhi,ir_sb1_mflo"
+ "ir_sb1_load,ir_sb1a_load,ir_sb1_fpload,ir_sb1_fpload_32bitfp,
+ ir_sb1_fpidxload,ir_sb1_fpidxload_32bitfp,ir_sb1_prefetchx")
+
+(define_bypass 5
+ "ir_sb1a_simple_alu,ir_sb1_alu,ir_sb1_alu_0,ir_sb1_mfhi,ir_sb1_mflo"
+ "ir_sb1_store,ir_sb1_fpstore,ir_sb1_fpidxstore"
+ "mips_store_data_bypass_p")
+
+;; mf{hi,lo} is 1 cycle.
+
+(define_insn_reservation "ir_sb1_mfhi" 1
+ (and (eq_attr "cpu" "sb1,sb1a")
+ (eq_attr "type" "mfhi"))
+ "sb1_ex1")
+
+(define_insn_reservation "ir_sb1_mflo" 1
+ (and (eq_attr "cpu" "sb1,sb1a")
+ (eq_attr "type" "mflo"))
+ "sb1_ex1")
+
+;; mt{hi,lo} to mul/div is 4 cycles.
+
+(define_insn_reservation "ir_sb1_mthilo" 4
+ (and (eq_attr "cpu" "sb1,sb1a")
+ (eq_attr "type" "mthi,mtlo"))
+ "sb1_ex1")
+
+;; mt{hi,lo} to mf{hi,lo} is 3 cycles.
+
+(define_bypass 3 "ir_sb1_mthilo" "ir_sb1_mfhi,ir_sb1_mflo")
+
+;; multiply latency to an EX operation is 3 cycles.
+
+;; ??? Should check whether we need to make multiply conflict with moves
+;; to/from hilo registers.
+
+(define_insn_reservation "ir_sb1_mulsi" 3
+ (and (eq_attr "cpu" "sb1,sb1a")
+ (and (eq_attr "type" "imul,imul3,imadd")
+ (eq_attr "mode" "SI")))
+ "sb1_ex1+sb1_mul")
+
+;; muldi to mfhi is 4 cycles.
+;; Blocks any other multiply insn issue for 1 cycle.
+
+(define_insn_reservation "ir_sb1_muldi" 4
+ (and (eq_attr "cpu" "sb1,sb1a")
+ (and (eq_attr "type" "imul,imul3")
+ (eq_attr "mode" "DI")))
+ "sb1_ex1+sb1_mul, sb1_mul")
+
+;; muldi to mflo is 3 cycles.
+
+(define_bypass 3 "ir_sb1_muldi" "ir_sb1_mflo")
+
+;; mul latency is 7 cycles if the result is used by any LS insn.
+
+;; This assumes that if a load is dependent on a previous insn, then it must
+;; be an address dependence.
+
+(define_bypass 7
+ "ir_sb1_mulsi,ir_sb1_muldi"
+ "ir_sb1_load,ir_sb1a_load,ir_sb1_fpload,ir_sb1_fpload_32bitfp,
+ ir_sb1_fpidxload,ir_sb1_fpidxload_32bitfp,ir_sb1_prefetchx")
+
+(define_bypass 7
+ "ir_sb1_mulsi,ir_sb1_muldi"
+ "ir_sb1_store,ir_sb1_fpstore,ir_sb1_fpidxstore"
+ "mips_store_data_bypass_p")
+
+;; The divide unit is not pipelined. Divide busy is asserted in the 4th
+;; cycle, and then deasserted on the latency cycle. So only one divide at
+;; a time, but the first/last 4 cycles can overlap.
+
+;; ??? All divides block writes to hi/lo regs. hi/lo regs are written 4 cycles
+;; after the latency cycle for divides (e.g. 40/72). dmult writes lo in
+;; cycle 7, and hi in cycle 8. All other insns write hi/lo regs in cycle 7.
+;; Default for output dependencies is the difference in latencies, which is
+;; only 1 cycle off here, e.g. div to mtlo stalls for 32 cycles, but should
+;; stall for 33 cycles. This does not seem significant enough to worry about.
+
+(define_insn_reservation "ir_sb1_divsi" 36
+ (and (eq_attr "cpu" "sb1,sb1a")
+ (and (eq_attr "type" "idiv")
+ (eq_attr "mode" "SI")))
+ "sb1_ex1, nothing*3, sb1_div*32")
+
+(define_insn_reservation "ir_sb1_divdi" 68
+ (and (eq_attr "cpu" "sb1,sb1a")
+ (and (eq_attr "type" "idiv")
+ (eq_attr "mode" "DI")))
+ "sb1_ex1, nothing*3, sb1_div*64")
+
+(define_insn_reservation "ir_sb1_fpu_2pipes" 4
+ (and (eq_attr "cpu" "sb1,sb1a")
+ (and (eq_attr "type" "fmove,fadd,fmul,fabs,fneg,fcvt,frdiv1,frsqrt1")
+ (eq_attr "sb1_fp_pipes" "two")))
+ "sb1_fp1 | sb1_fp0")
+
+(define_insn_reservation "ir_sb1_fpu_1pipe" 4
+ (and (eq_attr "cpu" "sb1,sb1a")
+ (and (eq_attr "type" "fmove,fadd,fmul,fabs,fneg,fcvt,frdiv1,frsqrt1")
+ (eq_attr "sb1_fp_pipes" "one")))
+ "sb1_fp1")
+
+(define_insn_reservation "ir_sb1_fpu_step2_2pipes" 8
+ (and (eq_attr "cpu" "sb1,sb1a")
+ (and (eq_attr "type" "frdiv2,frsqrt2")
+ (eq_attr "sb1_fp_pipes" "two")))
+ "sb1_fp1 | sb1_fp0")
+
+(define_insn_reservation "ir_sb1_fpu_step2_1pipe" 8
+ (and (eq_attr "cpu" "sb1,sb1a")
+ (and (eq_attr "type" "frdiv2,frsqrt2")
+ (eq_attr "sb1_fp_pipes" "one")))
+ "sb1_fp1")
+
+;; ??? madd/msub 4-cycle latency to itself (same fr?), but 8 cycle latency
+;; otherwise.
+
+;; ??? Blocks issue of another non-madd/msub after 4 cycles.
+
+(define_insn_reservation "ir_sb1_fmadd_2pipes" 8
+ (and (eq_attr "cpu" "sb1,sb1a")
+ (and (eq_attr "type" "fmadd")
+ (eq_attr "sb1_fp_pipes" "two")))
+ "sb1_fp1 | sb1_fp0")
+
+(define_insn_reservation "ir_sb1_fmadd_1pipe" 8
+ (and (eq_attr "cpu" "sb1,sb1a")
+ (and (eq_attr "type" "fmadd")
+ (eq_attr "sb1_fp_pipes" "one")))
+ "sb1_fp1")
+
+(define_insn_reservation "ir_sb1_fcmp" 4
+ (and (eq_attr "cpu" "sb1,sb1a")
+ (eq_attr "type" "fcmp"))
+ "sb1_fp1")
+
+;; mtc1 latency 5 cycles.
+
+(define_insn_reservation "ir_sb1_mtxfer" 5
+ (and (eq_attr "cpu" "sb1,sb1a")
+ (eq_attr "type" "mtc"))
+ "sb1_fp0")
+
+;; mfc1 latency 1 cycle.
+
+(define_insn_reservation "ir_sb1_mfxfer" 1
+ (and (eq_attr "cpu" "sb1,sb1a")
+ (eq_attr "type" "mfc"))
+ "sb1_fp0")
+
+;; ??? Can deliver at most 1 result per every 6 cycles because of issue
+;; restrictions.
+
+(define_insn_reservation "ir_sb1_divsf_2pipes" 24
+ (and (eq_attr "cpu" "sb1,sb1a")
+ (and (eq_attr "type" "fdiv")
+ (and (eq_attr "mode" "SF")
+ (eq_attr "sb1_fp_pipes" "two"))))
+ "sb1_fp1 | sb1_fp0")
+
+(define_insn_reservation "ir_sb1_divsf_1pipe" 24
+ (and (eq_attr "cpu" "sb1,sb1a")
+ (and (eq_attr "type" "fdiv")
+ (and (eq_attr "mode" "SF")
+ (eq_attr "sb1_fp_pipes" "one"))))
+ "sb1_fp1")
+
+;; ??? Can deliver at most 1 result per every 8 cycles because of issue
+;; restrictions.
+
+(define_insn_reservation "ir_sb1_divdf_2pipes" 32
+ (and (eq_attr "cpu" "sb1,sb1a")
+ (and (eq_attr "type" "fdiv")
+ (and (eq_attr "mode" "DF")
+ (eq_attr "sb1_fp_pipes" "two"))))
+ "sb1_fp1 | sb1_fp0")
+
+(define_insn_reservation "ir_sb1_divdf_1pipe" 32
+ (and (eq_attr "cpu" "sb1,sb1a")
+ (and (eq_attr "type" "fdiv")
+ (and (eq_attr "mode" "DF")
+ (eq_attr "sb1_fp_pipes" "one"))))
+ "sb1_fp1")
+
+;; ??? Can deliver at most 1 result per every 3 cycles because of issue
+;; restrictions.
+
+(define_insn_reservation "ir_sb1_recipsf_2pipes" 12
+ (and (eq_attr "cpu" "sb1,sb1a")
+ (and (eq_attr "type" "frdiv")
+ (and (eq_attr "mode" "SF")
+ (eq_attr "sb1_fp_pipes" "two"))))
+ "sb1_fp1 | sb1_fp0")
+
+(define_insn_reservation "ir_sb1_recipsf_1pipe" 12
+ (and (eq_attr "cpu" "sb1,sb1a")
+ (and (eq_attr "type" "frdiv")
+ (and (eq_attr "mode" "SF")
+ (eq_attr "sb1_fp_pipes" "one"))))
+ "sb1_fp1")
+
+;; ??? Can deliver at most 1 result per every 5 cycles because of issue
+;; restrictions.
+
+(define_insn_reservation "ir_sb1_recipdf_2pipes" 20
+ (and (eq_attr "cpu" "sb1,sb1a")
+ (and (eq_attr "type" "frdiv")
+ (and (eq_attr "mode" "DF")
+ (eq_attr "sb1_fp_pipes" "two"))))
+ "sb1_fp1 | sb1_fp0")
+
+(define_insn_reservation "ir_sb1_recipdf_1pipe" 20
+ (and (eq_attr "cpu" "sb1,sb1a")
+ (and (eq_attr "type" "frdiv")
+ (and (eq_attr "mode" "DF")
+ (eq_attr "sb1_fp_pipes" "one"))))
+ "sb1_fp1")
+
+;; ??? Can deliver at most 1 result per every 7 cycles because of issue
+;; restrictions.
+
+(define_insn_reservation "ir_sb1_sqrtsf_2pipes" 28
+ (and (eq_attr "cpu" "sb1,sb1a")
+ (and (eq_attr "type" "fsqrt")
+ (and (eq_attr "mode" "SF")
+ (eq_attr "sb1_fp_pipes" "two"))))
+ "sb1_fp1 | sb1_fp0")
+
+(define_insn_reservation "ir_sb1_sqrtsf_1pipe" 28
+ (and (eq_attr "cpu" "sb1,sb1a")
+ (and (eq_attr "type" "fsqrt")
+ (and (eq_attr "mode" "SF")
+ (eq_attr "sb1_fp_pipes" "one"))))
+ "sb1_fp1")
+
+;; ??? Can deliver at most 1 result per every 10 cycles because of issue
+;; restrictions.
+
+(define_insn_reservation "ir_sb1_sqrtdf_2pipes" 40
+ (and (eq_attr "cpu" "sb1,sb1a")
+ (and (eq_attr "type" "fsqrt")
+ (and (eq_attr "mode" "DF")
+ (eq_attr "sb1_fp_pipes" "two"))))
+ "sb1_fp1 | sb1_fp0")
+
+(define_insn_reservation "ir_sb1_sqrtdf_1pipe" 40
+ (and (eq_attr "cpu" "sb1,sb1a")
+ (and (eq_attr "type" "fsqrt")
+ (and (eq_attr "mode" "DF")
+ (eq_attr "sb1_fp_pipes" "one"))))
+ "sb1_fp1")
+
+;; ??? Can deliver at most 1 result per every 4 cycles because of issue
+;; restrictions.
+
+(define_insn_reservation "ir_sb1_rsqrtsf_2pipes" 16
+ (and (eq_attr "cpu" "sb1,sb1a")
+ (and (eq_attr "type" "frsqrt")
+ (and (eq_attr "mode" "SF")
+ (eq_attr "sb1_fp_pipes" "two"))))
+ "sb1_fp1 | sb1_fp0")
+
+(define_insn_reservation "ir_sb1_rsqrtsf_1pipe" 16
+ (and (eq_attr "cpu" "sb1,sb1a")
+ (and (eq_attr "type" "frsqrt")
+ (and (eq_attr "mode" "SF")
+ (eq_attr "sb1_fp_pipes" "one"))))
+ "sb1_fp1")
+
+;; ??? Can deliver at most 1 result per every 7 cycles because of issue
+;; restrictions.
+
+(define_insn_reservation "ir_sb1_rsqrtdf_2pipes" 28
+ (and (eq_attr "cpu" "sb1,sb1a")
+ (and (eq_attr "type" "frsqrt")
+ (and (eq_attr "mode" "DF")
+ (eq_attr "sb1_fp_pipes" "two"))))
+ "sb1_fp1 | sb1_fp0")
+
+(define_insn_reservation "ir_sb1_rsqrtdf_1pipe" 28
+ (and (eq_attr "cpu" "sb1,sb1a")
+ (and (eq_attr "type" "frsqrt")
+ (and (eq_attr "mode" "DF")
+ (eq_attr "sb1_fp_pipes" "one"))))
+ "sb1_fp1")
diff --git a/gcc-4.9/gcc/config/mips/sde.h b/gcc-4.9/gcc/config/mips/sde.h
new file mode 100644
index 000000000..c138949c2
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/sde.h
@@ -0,0 +1,103 @@
+/* Definitions of target machine for GNU compiler.
+ MIPS SDE version.
+ Copyright (C) 2003-2014 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#undef DRIVER_SELF_SPECS
+#define DRIVER_SELF_SPECS \
+ /* Make sure a -mips option is present. This helps us to pick \
+ the right multilib, and also makes the later specs easier \
+ to write. */ \
+ MIPS_ISA_LEVEL_SPEC, \
+ \
+ /* Infer the default float setting from -march. */ \
+ MIPS_ARCH_FLOAT_SPEC, \
+ \
+ /* If no ABI option is specified, infer one from the ISA level \
+ or -mgp setting. */ \
+ "%{!mabi=*: %{" MIPS_32BIT_OPTION_SPEC ": -mabi=32;: -mabi=n32}}", \
+ \
+ /* Remove a redundant -mfp64 for -mabi=n32; we want the !mfp64 \
+ multilibs. There's no need to check whether the architecture \
+ is 64-bit; cc1 will complain if it isn't. */ \
+ "%{mabi=n32: %<mfp64}", \
+ \
+ /* Make sure that an endian option is always present. This makes \
+ things like LINK_SPEC easier to write. */ \
+ "%{!EB:%{!EL:%(endian_spec)}}", \
+ \
+ /* Configuration-independent MIPS rules. */ \
+ BASE_DRIVER_SELF_SPECS
+
+/* Use trap rather than break for all but MIPS I ISA. Force -no-mips16,
+ so that MIPS16 assembler code requires an explicit ".set mips16".
+ Very little hand-written MIPS16 assembler exists, and some build
+ systems expect code to be assembled as non-MIPS16 even if the
+ prevailing compiler flags select -mips16. */
+#undef SUBTARGET_ASM_SPEC
+#define SUBTARGET_ASM_SPEC "\
+%{!mips1:--trap} \
+%{mips16:-no-mips16}"
+
+#undef LINK_SPEC
+#define LINK_SPEC "\
+%(endian_spec) \
+%{G*} %{mips1} %{mips2} %{mips3} %{mips4} %{mips32*} %{mips64*} \
+%{shared} \
+%{mabi=n32:-melf32%{EB:b}%{EL:l}tsmipn32} \
+%{mabi=64:-melf64%{EB:b}%{EL:l}tsmip} \
+%{mabi=32:-melf32%{EB:b}%{EL:l}tsmip}"
+
+#undef DEFAULT_SIGNED_CHAR
+#define DEFAULT_SIGNED_CHAR 0
+
+/* Describe how we implement __builtin_eh_return. */
+
+/* At the moment, nothing appears to use more than 2 EH data registers.
+ The chosen registers must not clash with the return register ($2),
+ EH_RETURN_STACKADJ ($3), or MIPS_EPILOGUE_TEMP ($5), and they must
+ be general MIPS16 registers. Pick $6 and $7. */
+#undef EH_RETURN_DATA_REGNO
+#define EH_RETURN_DATA_REGNO(N) \
+ ((N) < 2 ? 7 - (N) : INVALID_REGNUM)
+
+/* Use $5 as a temporary for both MIPS16 and non-MIPS16. */
+#undef MIPS_EPILOGUE_TEMP_REGNUM
+#define MIPS_EPILOGUE_TEMP_REGNUM \
+ (cfun->machine->interrupt_handler_p ? K0_REG_NUM : GP_REG_FIRST + 5)
+
+/* Using long will always be right for size_t and ptrdiff_t, since
+ sizeof(long) must equal sizeof(void *), following from the setting
+ of the -mlong64 option. */
+#undef SIZE_TYPE
+#define SIZE_TYPE "long unsigned int"
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE "long int"
+
+/* Force all .init and .fini entries to be 32-bit, not mips16, so that
+ in a mixed environment they are all the same mode. The crti.asm and
+ crtn.asm files will also be compiled as 32-bit due to the
+ -no-mips16 flag in SUBTARGET_ASM_SPEC above. */
+#undef CRT_CALL_STATIC_FUNCTION
+#define CRT_CALL_STATIC_FUNCTION(SECTION_OP, FUNC) \
+ asm (SECTION_OP "\n\
+ .set push\n\
+ .set nomips16\n\
+ jal " USER_LABEL_PREFIX #FUNC "\n\
+ .set pop\n\
+ " TEXT_SECTION_ASM_OP);
diff --git a/gcc-4.9/gcc/config/mips/sde.opt b/gcc-4.9/gcc/config/mips/sde.opt
new file mode 100644
index 000000000..4cf43b6cc
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/sde.opt
@@ -0,0 +1,28 @@
+; MIPS SDE options.
+;
+; Copyright (C) 2010-2014 Free Software Foundation, Inc.
+;
+; This file is part of GCC.
+;
+; GCC is free software; you can redistribute it and/or modify it under
+; the terms of the GNU General Public License as published by the Free
+; Software Foundation; either version 3, or (at your option) any later
+; version.
+;
+; GCC is distributed in the hope that it will be useful, but WITHOUT
+; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+; License for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with GCC; see the file COPYING3. If not see
+; <http://www.gnu.org/licenses/>.
+
+; -mcode-xonly is a traditional alias for -mcode-readable=pcrel and
+; -mno-data-in-code is a traditional alias for -mcode-readable=no.
+
+mno-data-in-code
+Target RejectNegative Alias(mcode-readable=, no)
+
+mcode-xonly
+Target RejectNegative Alias(mcode-readable=, pcrel)
diff --git a/gcc-4.9/gcc/config/mips/sdemtk.h b/gcc-4.9/gcc/config/mips/sdemtk.h
new file mode 100644
index 000000000..15dbf1653
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/sdemtk.h
@@ -0,0 +1,102 @@
+/* Definitions of target machine for GNU compiler.
+ MIPS SDE version, for use with the SDE C library rather than newlib.
+ Copyright (C) 2007-2014 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#define TARGET_OS_CPP_BUILTINS() \
+ do \
+ { \
+ builtin_assert ("system=sde"); \
+ builtin_assert ("system=posix"); \
+ builtin_define ("__SDE_MIPS__"); \
+ \
+ /* Deprecated: use __mips_isa_rev >= 2. */ \
+ if (ISA_MIPS32R2) \
+ builtin_define ("__mipsr2"); \
+ \
+ /* Deprecated: use __mips_fpr == 64. */ \
+ if (TARGET_FLOAT64) \
+ builtin_define ("__mipsfp64"); \
+ \
+ if (TARGET_NO_FLOAT) \
+ builtin_define ("__NO_FLOAT"); \
+ else if (TARGET_SOFT_FLOAT_ABI) \
+ builtin_define ("__SOFT_FLOAT"); \
+ else if (TARGET_SINGLE_FLOAT) \
+ builtin_define ("__SINGLE_FLOAT"); \
+ \
+ if (TARGET_BIG_ENDIAN) \
+ { \
+ builtin_assert ("endian=big"); \
+ builtin_assert ("cpu=mipseb"); \
+ } \
+ else \
+ { \
+ builtin_assert ("endian=little"); \
+ builtin_assert ("cpu=mipsel"); \
+ } \
+ } \
+ while (0)
+
+/* For __clear_cache in libgcc2.c. */
+#ifdef IN_LIBGCC2
+extern void mips_sync_icache (void *beg, unsigned long len);
+#undef CLEAR_INSN_CACHE
+#define CLEAR_INSN_CACHE(beg, end) \
+ mips_sync_icache (beg, end - beg)
+#endif
+
+/* For mips_cache_flush_func in mips.opt. */
+#undef CACHE_FLUSH_FUNC
+#define CACHE_FLUSH_FUNC "mips_sync_icache"
+
+/* For inline code which needs to sync the icache and dcache,
+ noting that the SDE library takes arguments (address, size). */
+#undef MIPS_ICACHE_SYNC
+#define MIPS_ICACHE_SYNC(ADDR, SIZE) \
+ emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mips_cache_flush_func), \
+ LCT_NORMAL, VOIDmode, 2, ADDR, Pmode, \
+ SIZE, TYPE_MODE (sizetype))
+
+/* This version of _mcount does not pop 2 words from the stack. */
+#undef FUNCTION_PROFILER
+#define FUNCTION_PROFILER(FILE, LABELNO) \
+ { \
+ mips_push_asm_switch (&mips_noat); \
+ /* _mcount treats $2 as the static chain register. */ \
+ if (cfun->static_chain_decl != NULL) \
+ fprintf (FILE, "\tmove\t%s,%s\n", reg_names[2], \
+ reg_names[STATIC_CHAIN_REGNUM]); \
+ /* MIPS16 code passes saved $ra in $v1 instead of $at. */ \
+ fprintf (FILE, "\tmove\t%s,%s\n", \
+ reg_names[GP_REG_FIRST + (TARGET_MIPS16 ? 3 : 1)], \
+ reg_names[RETURN_ADDR_REGNUM]); \
+ fprintf (FILE, "\tjal\t_mcount\n"); \
+ mips_pop_asm_switch (&mips_noat); \
+ /* _mcount treats $2 as the static chain register. */ \
+ if (cfun->static_chain_decl != NULL) \
+ fprintf (FILE, "\tmove\t%s,%s\n", reg_names[STATIC_CHAIN_REGNUM], \
+ reg_names[2]); \
+ }
+
+/* ...nor does the call sequence preserve $31. */
+#undef MIPS_SAVE_REG_FOR_PROFILING_P
+#define MIPS_SAVE_REG_FOR_PROFILING_P(REGNO) ((REGNO) == RETURN_ADDR_REGNUM)
+
+/* Compile in support for the -mno-float option. */
+#define TARGET_SUPPORTS_NO_FLOAT 1
diff --git a/gcc-4.9/gcc/config/mips/sr71k.md b/gcc-4.9/gcc/config/mips/sr71k.md
new file mode 100644
index 000000000..e66927655
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/sr71k.md
@@ -0,0 +1,337 @@
+;; Copyright (C) 2002-2014 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+;;
+;; .........................
+;;
+;; DFA-based pipeline description for Sandcraft SR3 (MIPS64 based)
+;;
+;; The SR3 is described as:
+;; - nine-stage pipeline, insn buffering with out-of-order issue to
+;; multiple function units, with an average dispatch rate of 2
+;; insn.s per cycle (max 6 insns: 2 fpu, 4 cpu).
+;;
+;; The details on this are scant except for a diagram in
+;; Chap. 6 of Rev. 1.0 SR3 Spec.
+;;
+;; The model employed below is designed to closely approximate the
+;; published latencies. Emulation of out-of-order issue and the insn
+;; buffering is done via a VLIW dispatch style (with a packing of 6 insns);
+;; the function unit reservations restrictions (define_*_set) are
+;; contrived to support published timings.
+;;
+;; Reference:
+;; "SR3 Microprocessor Specification, System development information,"
+;; Revision 1.0, 13 December 2000.
+;;
+;;
+;; Reservation model is based on:
+;; 1) Figure 6-1, from the 1.0 specification.
+;; 2) Chapter 19, from the 1.0 specification.
+;; 3) following questions(Red Hat)/answers(Sandcraft):
+;; RH> From Section 19.1
+;; RH> 1) In terms of figure 6-1, are all the instructions in
+;; RH> table 19-1 restricted
+;; RH> to ALUx? When ALUx is not in use for an instruction in table
+;; RH> 19-1 is
+;; RH> it fully compatible with all insns that issue to ALUy?
+;;
+;; Yes, all the instructions in Table 19-1 only go to ALUX, and all the
+;; instructions that can be issued to ALUY can also be issued to ALUX.
+;;
+;;
+;; RH> From Section 19.2
+;; RH> 2) Explain conditional moves execution path (in terms of
+;; RH> figure 6-1)
+;;
+;; Conditional move of integer registers (based on floating point condition
+;; codes or integer register value) go to ALUX or ALUY.
+;;
+;; RH> 3) Explain floating point store execution path (in terms of
+;; RH> figure 6-1)
+;;
+;; Floating point stores go to Ld/St and go to MOV in the floating point
+;; pipeline.
+;;
+;; Floating point loads go to Ld/St and go to LOAD in the floating point
+;; pipeline.
+;;
+;; RH> 4) Explain branch on floating condition (in terms of figure 6-1)
+;;
+;; Branch on floating condition go to BRU.
+;;
+;; RH> 5) Is the column for single RECIP instruction latency correct?
+;; RH> What about for RSQRT single and double?
+;;
+;; The latency/repeat for RECIP and RSQRT are correct.
+;;
+
+;;
+;; Use four automata to isolate long latency operations, and to
+;; reduce the complexity of cpu+fpu, reducing space.
+;;
+(define_automaton "sr71_cpu, sr71_cpu1, sr71_cp1, sr71_cp2, sr71_fextra, sr71_imacc")
+
+;; feeders for CPU function units and feeders for fpu (CP1 interface)
+(define_cpu_unit "sr_iss0,sr_iss1,sr_iss2,sr_iss3,sr_iss4,sr_iss5" "sr71_cpu")
+
+;; CPU function units
+(define_cpu_unit "ipu_bru" "sr71_cpu1")
+(define_cpu_unit "ipu_alux" "sr71_cpu1")
+(define_cpu_unit "ipu_aluy" "sr71_cpu1")
+(define_cpu_unit "ipu_ldst" "sr71_cpu1")
+(define_cpu_unit "ipu_macc_iter" "sr71_imacc")
+
+
+;; Floating-point unit (Co-processor interface 1).
+(define_cpu_unit "fpu_mov" "sr71_cp1")
+(define_cpu_unit "fpu_load" "sr71_cp1")
+(define_cpu_unit "fpu_fpu" "sr71_cp2")
+
+;; fictitious unit to track long float insns with separate automaton
+(define_cpu_unit "fpu_iter" "sr71_fextra")
+
+
+;;
+;; Define common execution path (reservation) combinations
+;;
+
+;;
+(define_reservation "cpu_iss" "sr_iss0|sr_iss1|sr_iss2|sr_iss3")
+
+;; two cycles are used for instruction using the fpu as it runs
+;; at half the clock speed of the cpu. By adding an extra cycle
+;; to the issue units, the default/minimum "repeat" dispatch delay is
+;; accounted for all insn.s
+(define_reservation "cp1_iss" "(sr_iss4*2)|(sr_iss5*2)")
+
+(define_reservation "serial_dispatch" "sr_iss0+sr_iss1+sr_iss2+sr_iss3+sr_iss4+sr_iss5")
+
+;; Simulate a 6 insn VLIW dispatch, 1 cycle in dispatch followed by
+;; reservation of function unit.
+(define_reservation "ri_insns" "cpu_iss,(ipu_alux|ipu_aluy)")
+(define_reservation "ri_mem" "cpu_iss,ipu_ldst")
+(define_reservation "ri_alux" "cpu_iss,ipu_alux")
+(define_reservation "ri_branch" "cpu_iss,ipu_bru")
+
+(define_reservation "rf_insn" "cp1_iss,fpu_fpu")
+(define_reservation "rf_ldmem" "cp1_iss,fpu_load")
+
+; simultaneous reservation of pseudo-unit keeps cp1 fpu tied
+; up until long cycle insn is finished...
+(define_reservation "rf_multi1" "rf_insn+fpu_iter")
+
+;;
+;; The ordering of the instruction-execution-path/resource-usage
+;; descriptions (also known as reservation RTL) is roughly ordered
+;; based on the define attribute RTL for the "type" classification.
+;; When modifying, remember that the first test that matches is the
+;; reservation used!
+;;
+
+
+(define_insn_reservation "ir_sr70_unknown" 1
+ (and (eq_attr "cpu" "sr71000")
+ (eq_attr "type" "unknown,atomic,syncloop"))
+ "serial_dispatch")
+
+
+;; Assume prediction fails.
+(define_insn_reservation "ir_sr70_branch" 6
+ (and (eq_attr "cpu" "sr71000")
+ (eq_attr "type" "branch,jump,call"))
+ "ri_branch")
+
+(define_insn_reservation "ir_sr70_load" 2
+ (and (eq_attr "cpu" "sr71000")
+ (eq_attr "type" "load"))
+ "ri_mem")
+
+(define_insn_reservation "ir_sr70_store" 1
+ (and (eq_attr "cpu" "sr71000")
+ (eq_attr "type" "store"))
+ "ri_mem")
+
+
+;;
+;; float loads/stores flow through both cpu and cp1...
+;;
+(define_insn_reservation "ir_sr70_fload" 9
+ (and (eq_attr "cpu" "sr71000")
+ (eq_attr "type" "fpload,fpidxload"))
+ "(cpu_iss+cp1_iss),(ri_mem+rf_ldmem)")
+
+(define_insn_reservation "ir_sr70_fstore" 1
+ (and (eq_attr "cpu" "sr71000")
+ (eq_attr "type" "fpstore,fpidxstore"))
+ "(cpu_iss+cp1_iss),(fpu_mov+ri_mem)")
+
+
+;; This reservation is for conditional move based on integer
+;; or floating point CC.
+(define_insn_reservation "ir_sr70_condmove" 4
+ (and (eq_attr "cpu" "sr71000")
+ (eq_attr "type" "condmove"))
+ "ri_insns")
+
+;; Try to discriminate move-from-cp1 versus move-to-cp1 as latencies
+;; are different. Like float load/store, these insns use multiple
+;; resources simultaneously
+(define_insn_reservation "ir_sr70_xfer_from" 6
+ (and (eq_attr "cpu" "sr71000")
+ (eq_attr "type" "mfc"))
+ "(cpu_iss+cp1_iss),(fpu_mov+ri_mem)")
+
+(define_insn_reservation "ir_sr70_xfer_to" 9
+ (and (eq_attr "cpu" "sr71000")
+ (eq_attr "type" "mtc"))
+ "(cpu_iss+cp1_iss),(ri_mem+rf_ldmem)")
+
+(define_insn_reservation "ir_sr70_hilo" 1
+ (and (eq_attr "cpu" "sr71000")
+ (eq_attr "type" "mthi,mtlo,mfhi,mflo"))
+ "ri_insns")
+
+(define_insn_reservation "ir_sr70_arith" 1
+ (and (eq_attr "cpu" "sr71000")
+ (eq_attr "type" "arith,shift,signext,slt,clz,const,logical,move,trap"))
+ "ri_insns")
+
+;; emulate repeat (dispatch stall) by spending extra cycle(s) in
+;; in iter unit
+(define_insn_reservation "ir_sr70_imul_si" 4
+ (and (eq_attr "cpu" "sr71000")
+ (and (eq_attr "type" "imul,imul3,imadd")
+ (eq_attr "mode" "SI")))
+ "ri_alux,ipu_alux,ipu_macc_iter")
+
+(define_insn_reservation "ir_sr70_imul_di" 6
+ (and (eq_attr "cpu" "sr71000")
+ (and (eq_attr "type" "imul,imul3,imadd")
+ (eq_attr "mode" "DI")))
+ "ri_alux,ipu_alux,(ipu_macc_iter*3)")
+
+;; Divide algorithm is early out with best latency of 7 pcycles.
+;; Use worst case for scheduling purposes.
+(define_insn_reservation "ir_sr70_idiv_si" 41
+ (and (eq_attr "cpu" "sr71000")
+ (and (eq_attr "type" "idiv")
+ (eq_attr "mode" "SI")))
+ "ri_alux,ipu_alux,(ipu_macc_iter*38)")
+
+(define_insn_reservation "ir_sr70_idiv_di" 73
+ (and (eq_attr "cpu" "sr71000")
+ (and (eq_attr "type" "idiv")
+ (eq_attr "mode" "DI")))
+ "ri_alux,ipu_alux,(ipu_macc_iter*70)")
+
+;; extra reservations of fpu_fpu are for repeat latency
+(define_insn_reservation "ir_sr70_fadd_sf" 8
+ (and (eq_attr "cpu" "sr71000")
+ (and (eq_attr "type" "fadd")
+ (eq_attr "mode" "SF")))
+ "rf_insn,fpu_fpu")
+
+(define_insn_reservation "ir_sr70_fadd_df" 10
+ (and (eq_attr "cpu" "sr71000")
+ (and (eq_attr "type" "fadd")
+ (eq_attr "mode" "DF")))
+ "rf_insn,fpu_fpu")
+
+;; Latencies for MADD,MSUB, NMADD, NMSUB assume the Multiply is fused
+;; with the sub or add.
+(define_insn_reservation "ir_sr70_fmul_sf" 8
+ (and (eq_attr "cpu" "sr71000")
+ (and (eq_attr "type" "fmul,fmadd")
+ (eq_attr "mode" "SF")))
+ "rf_insn,fpu_fpu")
+
+;; tie up the fpu unit to emulate the balance for the "repeat
+;; rate" of 8 (2 are spent in the iss unit)
+(define_insn_reservation "ir_sr70_fmul_df" 16
+ (and (eq_attr "cpu" "sr71000")
+ (and (eq_attr "type" "fmul,fmadd")
+ (eq_attr "mode" "DF")))
+ "rf_insn,fpu_fpu*6")
+
+
+;; RECIP insn uses same type attr as div, and for SR3, has same
+;; timings for double. However, single RECIP has a latency of
+;; 28 -- only way to fix this is to introduce new insn attrs.
+;; cycles spent in iter unit are designed to satisfy balance
+;; of "repeat" latency after insn uses up rf_multi1 reservation
+(define_insn_reservation "ir_sr70_fdiv_sf" 60
+ (and (eq_attr "cpu" "sr71000")
+ (and (eq_attr "type" "fdiv,frdiv")
+ (eq_attr "mode" "SF")))
+ "rf_multi1+(fpu_iter*51)")
+
+(define_insn_reservation "ir_sr70_fdiv_df" 120
+ (and (eq_attr "cpu" "sr71000")
+ (and (eq_attr "type" "fdiv,frdiv")
+ (eq_attr "mode" "DF")))
+ "rf_multi1+(fpu_iter*109)")
+
+(define_insn_reservation "ir_sr70_fabs" 4
+ (and (eq_attr "cpu" "sr71000")
+ (eq_attr "type" "fabs,fneg,fmove"))
+ "rf_insn,fpu_fpu")
+
+(define_insn_reservation "ir_sr70_fcmp" 10
+ (and (eq_attr "cpu" "sr71000")
+ (eq_attr "type" "fcmp"))
+ "rf_insn,fpu_fpu")
+
+;; "fcvt" type attribute covers a number of diff insns, most have the same
+;; latency descriptions, a few vary. We use the
+;; most common timing (which is also worst case).
+(define_insn_reservation "ir_sr70_fcvt" 12
+ (and (eq_attr "cpu" "sr71000")
+ (eq_attr "type" "fcvt"))
+ "rf_insn,fpu_fpu*4")
+
+(define_insn_reservation "ir_sr70_fsqrt_sf" 62
+ (and (eq_attr "cpu" "sr71000")
+ (and (eq_attr "type" "fsqrt")
+ (eq_attr "mode" "SF")))
+ "rf_multi1+(fpu_iter*53)")
+
+(define_insn_reservation "ir_sr70_fsqrt_df" 122
+ (and (eq_attr "cpu" "sr71000")
+ (and (eq_attr "type" "fsqrt")
+ (eq_attr "mode" "DF")))
+ "rf_multi1+(fpu_iter*111)")
+
+(define_insn_reservation "ir_sr70_frsqrt_sf" 48
+ (and (eq_attr "cpu" "sr71000")
+ (and (eq_attr "type" "frsqrt")
+ (eq_attr "mode" "SF")))
+ "rf_multi1+(fpu_iter*39)")
+
+(define_insn_reservation "ir_sr70_frsqrt_df" 240
+ (and (eq_attr "cpu" "sr71000")
+ (and (eq_attr "type" "frsqrt")
+ (eq_attr "mode" "DF")))
+ "rf_multi1+(fpu_iter*229)")
+
+(define_insn_reservation "ir_sr70_multi" 1
+ (and (eq_attr "cpu" "sr71000")
+ (eq_attr "type" "multi"))
+ "serial_dispatch")
+
+(define_insn_reservation "ir_sr70_nop" 1
+ (and (eq_attr "cpu" "sr71000")
+ (eq_attr "type" "nop"))
+ "ri_insns")
diff --git a/gcc-4.9/gcc/config/mips/st.h b/gcc-4.9/gcc/config/mips/st.h
new file mode 100644
index 000000000..6e99074d0
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/st.h
@@ -0,0 +1,30 @@
+/* ST 2e / 2f GNU/Linux Configuration.
+ Copyright (C) 2008-2014 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* The various C libraries each have their own subdirectory. */
+#undef SYSROOT_SUFFIX_SPEC
+#define SYSROOT_SUFFIX_SPEC \
+ "%{march=loongson2e:/2e ; \
+ march=loongson2f:/2f}"
+
+#undef STARTFILE_PREFIX_SPEC
+#define STARTFILE_PREFIX_SPEC \
+ "%{mabi=32: /usr/local/lib/ /lib/ /usr/lib/} \
+ %{mabi=n32: /usr/local/lib32/ /lib32/ /usr/lib32/} \
+ %{mabi=64: /usr/local/lib64/ /lib64/ /usr/lib64/}"
diff --git a/gcc-4.9/gcc/config/mips/sync.md b/gcc-4.9/gcc/config/mips/sync.md
new file mode 100644
index 000000000..cf6c05be2
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/sync.md
@@ -0,0 +1,716 @@
+;; Machine Description for MIPS based processor synchronization
+;; instructions.
+;; Copyright (C) 2007-2014 Free Software Foundation, Inc.
+
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+(define_c_enum "unspec" [
+ UNSPEC_COMPARE_AND_SWAP
+ UNSPEC_COMPARE_AND_SWAP_12
+ UNSPEC_SYNC_OLD_OP
+ UNSPEC_SYNC_NEW_OP
+ UNSPEC_SYNC_NEW_OP_12
+ UNSPEC_SYNC_OLD_OP_12
+ UNSPEC_SYNC_EXCHANGE
+ UNSPEC_SYNC_EXCHANGE_12
+ UNSPEC_MEMORY_BARRIER
+ UNSPEC_ATOMIC_COMPARE_AND_SWAP
+ UNSPEC_ATOMIC_EXCHANGE
+ UNSPEC_ATOMIC_FETCH_OP
+])
+
+;; Atomic fetch bitwise operations.
+(define_code_iterator fetchop_bit [ior xor and])
+
+;; Atomic HI and QI operations
+(define_code_iterator atomic_hiqi_op [plus minus ior xor and])
+
+;; Atomic memory operations.
+
+(define_expand "memory_barrier"
+ [(set (match_dup 0)
+ (unspec:BLK [(match_dup 0)] UNSPEC_MEMORY_BARRIER))]
+ "GENERATE_SYNC"
+{
+ operands[0] = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
+ MEM_VOLATILE_P (operands[0]) = 1;
+})
+
+(define_insn "*memory_barrier"
+ [(set (match_operand:BLK 0 "" "")
+ (unspec:BLK [(match_dup 0)] UNSPEC_MEMORY_BARRIER))]
+ "GENERATE_SYNC"
+ { return mips_output_sync (); })
+
+;; Can be removed in favor of atomic_compare_and_swap below.
+(define_insn "sync_compare_and_swap<mode>"
+ [(set (match_operand:GPR 0 "register_operand" "=&d,&d")
+ (match_operand:GPR 1 "memory_operand" "+ZR,ZR"))
+ (set (match_dup 1)
+ (unspec_volatile:GPR [(match_operand:GPR 2 "reg_or_0_operand" "dJ,dJ")
+ (match_operand:GPR 3 "arith_operand" "I,d")]
+ UNSPEC_COMPARE_AND_SWAP))]
+ "GENERATE_LL_SC"
+ { return mips_output_sync_loop (insn, operands); }
+ [(set_attr "sync_insn1" "li,move")
+ (set_attr "sync_oldval" "0")
+ (set_attr "sync_mem" "1")
+ (set_attr "sync_required_oldval" "2")
+ (set_attr "sync_insn1_op2" "3")])
+
+(define_expand "sync_compare_and_swap<mode>"
+ [(match_operand:SHORT 0 "register_operand")
+ (match_operand:SHORT 1 "memory_operand")
+ (match_operand:SHORT 2 "general_operand")
+ (match_operand:SHORT 3 "general_operand")]
+ "GENERATE_LL_SC"
+{
+ union mips_gen_fn_ptrs generator;
+ generator.fn_6 = gen_compare_and_swap_12;
+ mips_expand_atomic_qihi (generator,
+ operands[0], operands[1], operands[2], operands[3]);
+ DONE;
+})
+
+;; Helper insn for mips_expand_atomic_qihi.
+(define_insn "compare_and_swap_12"
+ [(set (match_operand:SI 0 "register_operand" "=&d,&d")
+ (match_operand:SI 1 "memory_operand" "+ZR,ZR"))
+ (set (match_dup 1)
+ (unspec_volatile:SI [(match_operand:SI 2 "register_operand" "d,d")
+ (match_operand:SI 3 "register_operand" "d,d")
+ (match_operand:SI 4 "reg_or_0_operand" "dJ,dJ")
+ (match_operand:SI 5 "reg_or_0_operand" "d,J")]
+ UNSPEC_COMPARE_AND_SWAP_12))]
+ "GENERATE_LL_SC"
+ { return mips_output_sync_loop (insn, operands); }
+ [(set_attr "sync_oldval" "0")
+ (set_attr "sync_mem" "1")
+ (set_attr "sync_inclusive_mask" "2")
+ (set_attr "sync_exclusive_mask" "3")
+ (set_attr "sync_required_oldval" "4")
+ (set_attr "sync_insn1_op2" "5")])
+
+(define_insn "sync_add<mode>"
+ [(set (match_operand:GPR 0 "memory_operand" "+ZR,ZR")
+ (unspec_volatile:GPR
+ [(plus:GPR (match_dup 0)
+ (match_operand:GPR 1 "arith_operand" "I,d"))]
+ UNSPEC_SYNC_OLD_OP))]
+ "GENERATE_LL_SC"
+ { return mips_output_sync_loop (insn, operands); }
+ [(set_attr "sync_insn1" "addiu,addu")
+ (set_attr "sync_mem" "0")
+ (set_attr "sync_insn1_op2" "1")])
+
+(define_expand "sync_<optab><mode>"
+ [(set (match_operand:SHORT 0 "memory_operand")
+ (unspec_volatile:SHORT
+ [(atomic_hiqi_op:SHORT (match_dup 0)
+ (match_operand:SHORT 1 "general_operand"))]
+ UNSPEC_SYNC_OLD_OP))]
+ "GENERATE_LL_SC"
+{
+ union mips_gen_fn_ptrs generator;
+ generator.fn_4 = gen_sync_<optab>_12;
+ mips_expand_atomic_qihi (generator,
+ NULL, operands[0], operands[1], NULL);
+ DONE;
+})
+
+;; Helper insn for sync_<optab><mode>
+(define_insn "sync_<optab>_12"
+ [(set (match_operand:SI 0 "memory_operand" "+ZR")
+ (unspec_volatile:SI
+ [(match_operand:SI 1 "register_operand" "d")
+ (match_operand:SI 2 "register_operand" "d")
+ (atomic_hiqi_op:SI (match_dup 0)
+ (match_operand:SI 3 "reg_or_0_operand" "dJ"))]
+ UNSPEC_SYNC_OLD_OP_12))
+ (clobber (match_scratch:SI 4 "=&d"))]
+ "GENERATE_LL_SC"
+ { return mips_output_sync_loop (insn, operands); }
+ [(set_attr "sync_insn1" "<insn>")
+ (set_attr "sync_insn2" "and")
+ (set_attr "sync_mem" "0")
+ (set_attr "sync_inclusive_mask" "1")
+ (set_attr "sync_exclusive_mask" "2")
+ (set_attr "sync_insn1_op2" "3")
+ (set_attr "sync_oldval" "4")
+ (set_attr "sync_newval" "4")])
+
+(define_expand "sync_old_<optab><mode>"
+ [(parallel [
+ (set (match_operand:SHORT 0 "register_operand")
+ (match_operand:SHORT 1 "memory_operand"))
+ (set (match_dup 1)
+ (unspec_volatile:SHORT [(atomic_hiqi_op:SHORT
+ (match_dup 1)
+ (match_operand:SHORT 2 "general_operand"))]
+ UNSPEC_SYNC_OLD_OP))])]
+ "GENERATE_LL_SC"
+{
+ union mips_gen_fn_ptrs generator;
+ generator.fn_5 = gen_sync_old_<optab>_12;
+ mips_expand_atomic_qihi (generator,
+ operands[0], operands[1], operands[2], NULL);
+ DONE;
+})
+
+;; Helper insn for sync_old_<optab><mode>
+(define_insn "sync_old_<optab>_12"
+ [(set (match_operand:SI 0 "register_operand" "=&d")
+ (match_operand:SI 1 "memory_operand" "+ZR"))
+ (set (match_dup 1)
+ (unspec_volatile:SI
+ [(match_operand:SI 2 "register_operand" "d")
+ (match_operand:SI 3 "register_operand" "d")
+ (atomic_hiqi_op:SI (match_dup 0)
+ (match_operand:SI 4 "reg_or_0_operand" "dJ"))]
+ UNSPEC_SYNC_OLD_OP_12))
+ (clobber (match_scratch:SI 5 "=&d"))]
+ "GENERATE_LL_SC"
+ { return mips_output_sync_loop (insn, operands); }
+ [(set_attr "sync_insn1" "<insn>")
+ (set_attr "sync_insn2" "and")
+ (set_attr "sync_oldval" "0")
+ (set_attr "sync_mem" "1")
+ (set_attr "sync_inclusive_mask" "2")
+ (set_attr "sync_exclusive_mask" "3")
+ (set_attr "sync_insn1_op2" "4")
+ (set_attr "sync_newval" "5")])
+
+(define_expand "sync_new_<optab><mode>"
+ [(parallel [
+ (set (match_operand:SHORT 0 "register_operand")
+ (unspec_volatile:SHORT [(atomic_hiqi_op:SHORT
+ (match_operand:SHORT 1 "memory_operand")
+ (match_operand:SHORT 2 "general_operand"))]
+ UNSPEC_SYNC_NEW_OP))
+ (set (match_dup 1)
+ (unspec_volatile:SHORT [(match_dup 1) (match_dup 2)]
+ UNSPEC_SYNC_NEW_OP))])]
+ "GENERATE_LL_SC"
+{
+ union mips_gen_fn_ptrs generator;
+ generator.fn_5 = gen_sync_new_<optab>_12;
+ mips_expand_atomic_qihi (generator,
+ operands[0], operands[1], operands[2], NULL);
+ DONE;
+})
+
+;; Helper insn for sync_new_<optab><mode>
+(define_insn "sync_new_<optab>_12"
+ [(set (match_operand:SI 0 "register_operand" "=&d")
+ (unspec_volatile:SI
+ [(match_operand:SI 1 "memory_operand" "+ZR")
+ (match_operand:SI 2 "register_operand" "d")
+ (match_operand:SI 3 "register_operand" "d")
+ (atomic_hiqi_op:SI (match_dup 0)
+ (match_operand:SI 4 "reg_or_0_operand" "dJ"))]
+ UNSPEC_SYNC_NEW_OP_12))
+ (set (match_dup 1)
+ (unspec_volatile:SI
+ [(match_dup 1)
+ (match_dup 2)
+ (match_dup 3)
+ (match_dup 4)] UNSPEC_SYNC_NEW_OP_12))]
+ "GENERATE_LL_SC"
+ { return mips_output_sync_loop (insn, operands); }
+ [(set_attr "sync_insn1" "<insn>")
+ (set_attr "sync_insn2" "and")
+ (set_attr "sync_oldval" "0")
+ (set_attr "sync_newval" "0")
+ (set_attr "sync_mem" "1")
+ (set_attr "sync_inclusive_mask" "2")
+ (set_attr "sync_exclusive_mask" "3")
+ (set_attr "sync_insn1_op2" "4")])
+
+(define_expand "sync_nand<mode>"
+ [(set (match_operand:SHORT 0 "memory_operand")
+ (unspec_volatile:SHORT
+ [(match_dup 0)
+ (match_operand:SHORT 1 "general_operand")]
+ UNSPEC_SYNC_OLD_OP))]
+ "GENERATE_LL_SC"
+{
+ union mips_gen_fn_ptrs generator;
+ generator.fn_4 = gen_sync_nand_12;
+ mips_expand_atomic_qihi (generator,
+ NULL, operands[0], operands[1], NULL);
+ DONE;
+})
+
+;; Helper insn for sync_nand<mode>
+(define_insn "sync_nand_12"
+ [(set (match_operand:SI 0 "memory_operand" "+ZR")
+ (unspec_volatile:SI
+ [(match_operand:SI 1 "register_operand" "d")
+ (match_operand:SI 2 "register_operand" "d")
+ (match_dup 0)
+ (match_operand:SI 3 "reg_or_0_operand" "dJ")]
+ UNSPEC_SYNC_OLD_OP_12))
+ (clobber (match_scratch:SI 4 "=&d"))]
+ "GENERATE_LL_SC"
+ { return mips_output_sync_loop (insn, operands); }
+ [(set_attr "sync_insn1" "and")
+ (set_attr "sync_insn2" "xor")
+ (set_attr "sync_mem" "0")
+ (set_attr "sync_inclusive_mask" "1")
+ (set_attr "sync_exclusive_mask" "2")
+ (set_attr "sync_insn1_op2" "3")
+ (set_attr "sync_oldval" "4")
+ (set_attr "sync_newval" "4")])
+
+(define_expand "sync_old_nand<mode>"
+ [(parallel [
+ (set (match_operand:SHORT 0 "register_operand")
+ (match_operand:SHORT 1 "memory_operand"))
+ (set (match_dup 1)
+ (unspec_volatile:SHORT [(match_dup 1)
+ (match_operand:SHORT 2 "general_operand")]
+ UNSPEC_SYNC_OLD_OP))])]
+ "GENERATE_LL_SC"
+{
+ union mips_gen_fn_ptrs generator;
+ generator.fn_5 = gen_sync_old_nand_12;
+ mips_expand_atomic_qihi (generator,
+ operands[0], operands[1], operands[2], NULL);
+ DONE;
+})
+
+;; Helper insn for sync_old_nand<mode>
+(define_insn "sync_old_nand_12"
+ [(set (match_operand:SI 0 "register_operand" "=&d")
+ (match_operand:SI 1 "memory_operand" "+ZR"))
+ (set (match_dup 1)
+ (unspec_volatile:SI
+ [(match_operand:SI 2 "register_operand" "d")
+ (match_operand:SI 3 "register_operand" "d")
+ (match_operand:SI 4 "reg_or_0_operand" "dJ")]
+ UNSPEC_SYNC_OLD_OP_12))
+ (clobber (match_scratch:SI 5 "=&d"))]
+ "GENERATE_LL_SC"
+ { return mips_output_sync_loop (insn, operands); }
+ [(set_attr "sync_insn1" "and")
+ (set_attr "sync_insn2" "xor")
+ (set_attr "sync_oldval" "0")
+ (set_attr "sync_mem" "1")
+ (set_attr "sync_inclusive_mask" "2")
+ (set_attr "sync_exclusive_mask" "3")
+ (set_attr "sync_insn1_op2" "4")
+ (set_attr "sync_newval" "5")])
+
+(define_expand "sync_new_nand<mode>"
+ [(parallel [
+ (set (match_operand:SHORT 0 "register_operand")
+ (unspec_volatile:SHORT [(match_operand:SHORT 1 "memory_operand")
+ (match_operand:SHORT 2 "general_operand")]
+ UNSPEC_SYNC_NEW_OP))
+ (set (match_dup 1)
+ (unspec_volatile:SHORT [(match_dup 1) (match_dup 2)]
+ UNSPEC_SYNC_NEW_OP))])]
+ "GENERATE_LL_SC"
+{
+ union mips_gen_fn_ptrs generator;
+ generator.fn_5 = gen_sync_new_nand_12;
+ mips_expand_atomic_qihi (generator,
+ operands[0], operands[1], operands[2], NULL);
+ DONE;
+})
+
+;; Helper insn for sync_new_nand<mode>
+(define_insn "sync_new_nand_12"
+ [(set (match_operand:SI 0 "register_operand" "=&d")
+ (unspec_volatile:SI
+ [(match_operand:SI 1 "memory_operand" "+ZR")
+ (match_operand:SI 2 "register_operand" "d")
+ (match_operand:SI 3 "register_operand" "d")
+ (match_operand:SI 4 "reg_or_0_operand" "dJ")]
+ UNSPEC_SYNC_NEW_OP_12))
+ (set (match_dup 1)
+ (unspec_volatile:SI
+ [(match_dup 1)
+ (match_dup 2)
+ (match_dup 3)
+ (match_dup 4)] UNSPEC_SYNC_NEW_OP_12))]
+ "GENERATE_LL_SC"
+ { return mips_output_sync_loop (insn, operands); }
+ [(set_attr "sync_insn1" "and")
+ (set_attr "sync_insn2" "xor")
+ (set_attr "sync_oldval" "0")
+ (set_attr "sync_newval" "0")
+ (set_attr "sync_mem" "1")
+ (set_attr "sync_inclusive_mask" "2")
+ (set_attr "sync_exclusive_mask" "3")
+ (set_attr "sync_insn1_op2" "4")])
+
+(define_insn "sync_sub<mode>"
+ [(set (match_operand:GPR 0 "memory_operand" "+ZR")
+ (unspec_volatile:GPR
+ [(minus:GPR (match_dup 0)
+ (match_operand:GPR 1 "register_operand" "d"))]
+ UNSPEC_SYNC_OLD_OP))]
+ "GENERATE_LL_SC"
+ { return mips_output_sync_loop (insn, operands); }
+ [(set_attr "sync_insn1" "subu")
+ (set_attr "sync_mem" "0")
+ (set_attr "sync_insn1_op2" "1")])
+
+;; Can be removed in favor of atomic_fetch_add below.
+(define_insn "sync_old_add<mode>"
+ [(set (match_operand:GPR 0 "register_operand" "=&d,&d")
+ (match_operand:GPR 1 "memory_operand" "+ZR,ZR"))
+ (set (match_dup 1)
+ (unspec_volatile:GPR
+ [(plus:GPR (match_dup 1)
+ (match_operand:GPR 2 "arith_operand" "I,d"))]
+ UNSPEC_SYNC_OLD_OP))]
+ "GENERATE_LL_SC"
+ { return mips_output_sync_loop (insn, operands); }
+ [(set_attr "sync_insn1" "addiu,addu")
+ (set_attr "sync_oldval" "0")
+ (set_attr "sync_mem" "1")
+ (set_attr "sync_insn1_op2" "2")])
+
+(define_insn "sync_old_sub<mode>"
+ [(set (match_operand:GPR 0 "register_operand" "=&d")
+ (match_operand:GPR 1 "memory_operand" "+ZR"))
+ (set (match_dup 1)
+ (unspec_volatile:GPR
+ [(minus:GPR (match_dup 1)
+ (match_operand:GPR 2 "register_operand" "d"))]
+ UNSPEC_SYNC_OLD_OP))]
+ "GENERATE_LL_SC"
+ { return mips_output_sync_loop (insn, operands); }
+ [(set_attr "sync_insn1" "subu")
+ (set_attr "sync_oldval" "0")
+ (set_attr "sync_mem" "1")
+ (set_attr "sync_insn1_op2" "2")])
+
+(define_insn "sync_new_add<mode>"
+ [(set (match_operand:GPR 0 "register_operand" "=&d,&d")
+ (plus:GPR (match_operand:GPR 1 "memory_operand" "+ZR,ZR")
+ (match_operand:GPR 2 "arith_operand" "I,d")))
+ (set (match_dup 1)
+ (unspec_volatile:GPR
+ [(plus:GPR (match_dup 1) (match_dup 2))]
+ UNSPEC_SYNC_NEW_OP))]
+ "GENERATE_LL_SC"
+ { return mips_output_sync_loop (insn, operands); }
+ [(set_attr "sync_insn1" "addiu,addu")
+ (set_attr "sync_oldval" "0")
+ (set_attr "sync_newval" "0")
+ (set_attr "sync_mem" "1")
+ (set_attr "sync_insn1_op2" "2")])
+
+(define_insn "sync_new_sub<mode>"
+ [(set (match_operand:GPR 0 "register_operand" "=&d")
+ (minus:GPR (match_operand:GPR 1 "memory_operand" "+ZR")
+ (match_operand:GPR 2 "register_operand" "d")))
+ (set (match_dup 1)
+ (unspec_volatile:GPR
+ [(minus:GPR (match_dup 1) (match_dup 2))]
+ UNSPEC_SYNC_NEW_OP))]
+ "GENERATE_LL_SC"
+ { return mips_output_sync_loop (insn, operands); }
+ [(set_attr "sync_insn1" "subu")
+ (set_attr "sync_oldval" "0")
+ (set_attr "sync_newval" "0")
+ (set_attr "sync_mem" "1")
+ (set_attr "sync_insn1_op2" "2")])
+
+(define_insn "sync_<optab><mode>"
+ [(set (match_operand:GPR 0 "memory_operand" "+ZR,ZR")
+ (unspec_volatile:GPR
+ [(fetchop_bit:GPR (match_operand:GPR 1 "uns_arith_operand" "K,d")
+ (match_dup 0))]
+ UNSPEC_SYNC_OLD_OP))]
+ "GENERATE_LL_SC"
+ { return mips_output_sync_loop (insn, operands); }
+ [(set_attr "sync_insn1" "<immediate_insn>,<insn>")
+ (set_attr "sync_mem" "0")
+ (set_attr "sync_insn1_op2" "1")])
+
+(define_insn "sync_old_<optab><mode>"
+ [(set (match_operand:GPR 0 "register_operand" "=&d,&d")
+ (match_operand:GPR 1 "memory_operand" "+ZR,ZR"))
+ (set (match_dup 1)
+ (unspec_volatile:GPR
+ [(fetchop_bit:GPR (match_operand:GPR 2 "uns_arith_operand" "K,d")
+ (match_dup 1))]
+ UNSPEC_SYNC_OLD_OP))]
+ "GENERATE_LL_SC"
+ { return mips_output_sync_loop (insn, operands); }
+ [(set_attr "sync_insn1" "<immediate_insn>,<insn>")
+ (set_attr "sync_oldval" "0")
+ (set_attr "sync_mem" "1")
+ (set_attr "sync_insn1_op2" "2")])
+
+(define_insn "sync_new_<optab><mode>"
+ [(set (match_operand:GPR 0 "register_operand" "=&d,&d")
+ (match_operand:GPR 1 "memory_operand" "+ZR,ZR"))
+ (set (match_dup 1)
+ (unspec_volatile:GPR
+ [(fetchop_bit:GPR (match_operand:GPR 2 "uns_arith_operand" "K,d")
+ (match_dup 1))]
+ UNSPEC_SYNC_NEW_OP))]
+ "GENERATE_LL_SC"
+ { return mips_output_sync_loop (insn, operands); }
+ [(set_attr "sync_insn1" "<immediate_insn>,<insn>")
+ (set_attr "sync_oldval" "0")
+ (set_attr "sync_newval" "0")
+ (set_attr "sync_mem" "1")
+ (set_attr "sync_insn1_op2" "2")])
+
+(define_insn "sync_nand<mode>"
+ [(set (match_operand:GPR 0 "memory_operand" "+ZR,ZR")
+ (unspec_volatile:GPR [(match_operand:GPR 1 "uns_arith_operand" "K,d")]
+ UNSPEC_SYNC_OLD_OP))]
+ "GENERATE_LL_SC"
+ { return mips_output_sync_loop (insn, operands); }
+ [(set_attr "sync_insn1" "andi,and")
+ (set_attr "sync_insn2" "not")
+ (set_attr "sync_mem" "0")
+ (set_attr "sync_insn1_op2" "1")])
+
+(define_insn "sync_old_nand<mode>"
+ [(set (match_operand:GPR 0 "register_operand" "=&d,&d")
+ (match_operand:GPR 1 "memory_operand" "+ZR,ZR"))
+ (set (match_dup 1)
+ (unspec_volatile:GPR [(match_operand:GPR 2 "uns_arith_operand" "K,d")]
+ UNSPEC_SYNC_OLD_OP))]
+ "GENERATE_LL_SC"
+ { return mips_output_sync_loop (insn, operands); }
+ [(set_attr "sync_insn1" "andi,and")
+ (set_attr "sync_insn2" "not")
+ (set_attr "sync_oldval" "0")
+ (set_attr "sync_mem" "1")
+ (set_attr "sync_insn1_op2" "2")])
+
+(define_insn "sync_new_nand<mode>"
+ [(set (match_operand:GPR 0 "register_operand" "=&d,&d")
+ (match_operand:GPR 1 "memory_operand" "+ZR,ZR"))
+ (set (match_dup 1)
+ (unspec_volatile:GPR [(match_operand:GPR 2 "uns_arith_operand" "K,d")]
+ UNSPEC_SYNC_NEW_OP))]
+ "GENERATE_LL_SC"
+ { return mips_output_sync_loop (insn, operands); }
+ [(set_attr "sync_insn1" "andi,and")
+ (set_attr "sync_insn2" "not")
+ (set_attr "sync_oldval" "0")
+ (set_attr "sync_newval" "0")
+ (set_attr "sync_mem" "1")
+ (set_attr "sync_insn1_op2" "2")])
+
+(define_insn "sync_lock_test_and_set<mode>"
+ [(set (match_operand:GPR 0 "register_operand" "=&d,&d")
+ (match_operand:GPR 1 "memory_operand" "+ZR,ZR"))
+ (set (match_dup 1)
+ (unspec_volatile:GPR [(match_operand:GPR 2 "arith_operand" "I,d")]
+ UNSPEC_SYNC_EXCHANGE))]
+ "GENERATE_LL_SC"
+ { return mips_output_sync_loop (insn, operands); }
+ [(set_attr "sync_memmodel" "11")
+ (set_attr "sync_insn1" "li,move")
+ (set_attr "sync_oldval" "0")
+ (set_attr "sync_mem" "1")
+ (set_attr "sync_insn1_op2" "2")])
+
+(define_expand "sync_lock_test_and_set<mode>"
+ [(match_operand:SHORT 0 "register_operand")
+ (match_operand:SHORT 1 "memory_operand")
+ (match_operand:SHORT 2 "general_operand")]
+ "GENERATE_LL_SC"
+{
+ union mips_gen_fn_ptrs generator;
+ generator.fn_5 = gen_test_and_set_12;
+ mips_expand_atomic_qihi (generator,
+ operands[0], operands[1], operands[2], NULL);
+ DONE;
+})
+
+(define_insn "test_and_set_12"
+ [(set (match_operand:SI 0 "register_operand" "=&d")
+ (match_operand:SI 1 "memory_operand" "+ZR"))
+ (set (match_dup 1)
+ (unspec_volatile:SI [(match_operand:SI 2 "register_operand" "d")
+ (match_operand:SI 3 "register_operand" "d")
+ (match_operand:SI 4 "reg_or_0_operand" "dJ")]
+ UNSPEC_SYNC_EXCHANGE_12))]
+ "GENERATE_LL_SC"
+ { return mips_output_sync_loop (insn, operands); }
+ [(set_attr "sync_memmodel" "11")
+ (set_attr "sync_oldval" "0")
+ (set_attr "sync_mem" "1")
+ ;; Unused, but needed to give the number of operands expected by
+ ;; the expander.
+ (set_attr "sync_inclusive_mask" "2")
+ (set_attr "sync_exclusive_mask" "3")
+ (set_attr "sync_insn1_op2" "4")])
+
+(define_insn "atomic_compare_and_swap<mode>"
+ [(set (match_operand:GPR 0 "register_operand" "=&d,&d")
+ ;; Logically this unspec is an "eq" operator, but we need to obscure
+ ;; reads and writes from/to memory with an unspec to prevent
+ ;; optimizations on shared memory locations. Otherwise, comparison in
+ ;; { mem = 2; if (atomic_cmp_swap(mem,...) == 2) ...; }
+ ;; would be optimized away. In addition to that we need to use
+ ;; unspec_volatile, not just plain unspec -- for the sake of other
+ ;; threads -- to make sure we don't remove the entirety of the pattern
+ ;; just because current thread doesn't observe any effect from it.
+ ;; TODO: the obscuring unspec can be relaxed for permissive memory
+ ;; models.
+ ;; Same applies to other atomic_* patterns.
+ (unspec_volatile:GPR [(match_operand:GPR 2 "memory_operand" "+ZR,ZR")
+ (match_operand:GPR 3 "reg_or_0_operand" "dJ,dJ")]
+ UNSPEC_ATOMIC_COMPARE_AND_SWAP))
+ (set (match_operand:GPR 1 "register_operand" "=&d,&d")
+ (unspec_volatile:GPR [(match_dup 2)]
+ UNSPEC_ATOMIC_COMPARE_AND_SWAP))
+ (set (match_dup 2)
+ (unspec_volatile:GPR [(match_dup 2)
+ (match_dup 3)
+ (match_operand:GPR 4 "arith_operand" "I,d")]
+ UNSPEC_ATOMIC_COMPARE_AND_SWAP))
+ (unspec_volatile:GPR [(match_operand:SI 5 "const_int_operand")
+ (match_operand:SI 6 "const_int_operand")
+ (match_operand:SI 7 "const_int_operand")]
+ UNSPEC_ATOMIC_COMPARE_AND_SWAP)]
+ "GENERATE_LL_SC"
+ { return mips_output_sync_loop (insn, operands); }
+ [(set_attr "sync_insn1" "li,move")
+ (set_attr "sync_oldval" "1")
+ (set_attr "sync_cmp" "0")
+ (set_attr "sync_mem" "2")
+ (set_attr "sync_required_oldval" "3")
+ (set_attr "sync_insn1_op2" "4")
+ (set_attr "sync_memmodel" "6")])
+
+(define_expand "atomic_exchange<mode>"
+ [(match_operand:GPR 0 "register_operand")
+ (match_operand:GPR 1 "memory_operand")
+ (match_operand:GPR 2 "arith_operand")
+ (match_operand:SI 3 "const_int_operand")]
+ "GENERATE_LL_SC || ISA_HAS_SWAP"
+{
+ if (ISA_HAS_SWAP)
+ {
+ if (!mem_noofs_operand (operands[1], <MODE>mode))
+ {
+ rtx addr;
+
+ addr = force_reg (Pmode, XEXP (operands[1], 0));
+ operands[1] = replace_equiv_address (operands[1], addr);
+ }
+ operands[2] = force_reg (<MODE>mode, operands[2]);
+ emit_insn (gen_atomic_exchange<mode>_swap (operands[0], operands[1],
+ operands[2]));
+ }
+ else
+ emit_insn (gen_atomic_exchange<mode>_llsc (operands[0], operands[1],
+ operands[2], operands[3]));
+ DONE;
+})
+
+(define_insn "atomic_exchange<mode>_llsc"
+ [(set (match_operand:GPR 0 "register_operand" "=&d,&d")
+ (unspec_volatile:GPR [(match_operand:GPR 1 "memory_operand" "+ZR,ZR")]
+ UNSPEC_ATOMIC_EXCHANGE))
+ (set (match_dup 1)
+ (unspec_volatile:GPR [(match_operand:GPR 2 "arith_operand" "I,d")]
+ UNSPEC_ATOMIC_EXCHANGE))
+ (unspec_volatile:GPR [(match_operand:SI 3 "const_int_operand")]
+ UNSPEC_ATOMIC_EXCHANGE)]
+ "GENERATE_LL_SC && !ISA_HAS_SWAP"
+ { return mips_output_sync_loop (insn, operands); }
+ [(set_attr "sync_insn1" "li,move")
+ (set_attr "sync_oldval" "0")
+ (set_attr "sync_mem" "1")
+ (set_attr "sync_insn1_op2" "2")
+ (set_attr "sync_memmodel" "3")])
+
+;; XLP issues implicit sync for SWAP/LDADD, so no need for an explicit one.
+(define_insn "atomic_exchange<mode>_swap"
+ [(set (match_operand:GPR 0 "register_operand" "=d")
+ (unspec_volatile:GPR [(match_operand:GPR 1 "mem_noofs_operand" "+ZR")]
+ UNSPEC_ATOMIC_EXCHANGE))
+ (set (match_dup 1)
+ (unspec_volatile:GPR [(match_operand:GPR 2 "register_operand" "0")]
+ UNSPEC_ATOMIC_EXCHANGE))]
+ "ISA_HAS_SWAP"
+ "swap<size>\t%0,%b1"
+ [(set_attr "type" "atomic")])
+
+(define_expand "atomic_fetch_add<mode>"
+ [(match_operand:GPR 0 "register_operand")
+ (match_operand:GPR 1 "memory_operand")
+ (match_operand:GPR 2 "arith_operand")
+ (match_operand:SI 3 "const_int_operand")]
+ "GENERATE_LL_SC || ISA_HAS_LDADD"
+{
+ if (ISA_HAS_LDADD)
+ {
+ if (!mem_noofs_operand (operands[1], <MODE>mode))
+ {
+ rtx addr;
+
+ addr = force_reg (Pmode, XEXP (operands[1], 0));
+ operands[1] = replace_equiv_address (operands[1], addr);
+ }
+ operands[2] = force_reg (<MODE>mode, operands[2]);
+ emit_insn (gen_atomic_fetch_add<mode>_ldadd (operands[0], operands[1],
+ operands[2]));
+ }
+ else
+ emit_insn (gen_atomic_fetch_add<mode>_llsc (operands[0], operands[1],
+ operands[2], operands[3]));
+ DONE;
+})
+
+(define_insn "atomic_fetch_add<mode>_llsc"
+ [(set (match_operand:GPR 0 "register_operand" "=&d,&d")
+ (unspec_volatile:GPR [(match_operand:GPR 1 "memory_operand" "+ZR,ZR")]
+ UNSPEC_ATOMIC_FETCH_OP))
+ (set (match_dup 1)
+ (unspec_volatile:GPR
+ [(plus:GPR (match_dup 1)
+ (match_operand:GPR 2 "arith_operand" "I,d"))]
+ UNSPEC_ATOMIC_FETCH_OP))
+ (unspec_volatile:GPR [(match_operand:SI 3 "const_int_operand")]
+ UNSPEC_ATOMIC_FETCH_OP)]
+ "GENERATE_LL_SC && !ISA_HAS_LDADD"
+ { return mips_output_sync_loop (insn, operands); }
+ [(set_attr "sync_insn1" "addiu,addu")
+ (set_attr "sync_oldval" "0")
+ (set_attr "sync_mem" "1")
+ (set_attr "sync_insn1_op2" "2")
+ (set_attr "sync_memmodel" "3")])
+
+;; XLP issues implicit sync for SWAP/LDADD, so no need for an explicit one.
+(define_insn "atomic_fetch_add<mode>_ldadd"
+ [(set (match_operand:GPR 0 "register_operand" "=d")
+ (unspec_volatile:GPR [(match_operand:GPR 1 "mem_noofs_operand" "+ZR")]
+ UNSPEC_ATOMIC_FETCH_OP))
+ (set (match_dup 1)
+ (unspec_volatile:GPR
+ [(plus:GPR (match_dup 1)
+ (match_operand:GPR 2 "register_operand" "0"))]
+ UNSPEC_ATOMIC_FETCH_OP))]
+ "ISA_HAS_LDADD"
+ "ldadd<size>\t%0,%b1"
+ [(set_attr "type" "atomic")])
diff --git a/gcc-4.9/gcc/config/mips/t-elf b/gcc-4.9/gcc/config/mips/t-elf
new file mode 100644
index 000000000..4e44a9f93
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/t-elf
@@ -0,0 +1,23 @@
+# Copyright (C) 1999-2014 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+# Build the libraries for both hard and soft floating point
+
+MULTILIB_OPTIONS = msoft-float EL/EB
+MULTILIB_DIRNAMES = soft-float el eb
+MULTILIB_MATCHES = EL=mel EB=meb msingle-float=m4650
diff --git a/gcc-4.9/gcc/config/mips/t-irix6 b/gcc-4.9/gcc/config/mips/t-irix6
new file mode 100644
index 000000000..6c76a4ad9
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/t-irix6
@@ -0,0 +1,4 @@
+MULTILIB_OPTIONS=mabi=n32/mabi=64
+MULTILIB_DIRNAMES=n32 64
+MULTILIB_MATCHES=
+MULTILIB_OSDIRNAMES=../lib32 ../lib64
diff --git a/gcc-4.9/gcc/config/mips/t-isa3264 b/gcc-4.9/gcc/config/mips/t-isa3264
new file mode 100644
index 000000000..a5e001ef7
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/t-isa3264
@@ -0,0 +1,33 @@
+# Copyright (C) 2001-2014 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+# Build the libraries for both hard and soft floating point
+
+ifneq ($(filter MIPS_ABI_DEFAULT=ABI_EABI,$(tm_defines)),)
+MULTILIB_OPTIONS = msoft-float EL/EB mips32/mips32r2/mips64/mips64r2
+MULTILIB_DIRNAMES = soft-float el eb mips32 mips32r2 mips64 mips64r2
+else
+MULTILIB_OPTIONS = msoft-float/mfp64 EL/EB mips32/mips32r2/mips64/mips64r2
+MULTILIB_DIRNAMES = soft-float fp64 el eb mips32 mips32r2 mips64 mips64r2
+ifneq ($(filter MIPS_ISA_DEFAULT=33,$(tm_defines)),)
+MULTILIB_EXCLUSIONS = mips32/mfp64 mips64/mfp64 mips64r2/mfp64
+else
+MULTILIB_EXCLUSIONS = !mips32r2/mfp64
+endif
+endif
+MULTILIB_MATCHES = EL=mel EB=meb
diff --git a/gcc-4.9/gcc/config/mips/t-linux64 b/gcc-4.9/gcc/config/mips/t-linux64
new file mode 100644
index 000000000..4b28ad03d
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/t-linux64
@@ -0,0 +1,26 @@
+# Copyright (C) 2003-2014 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+MULTILIB_OPTIONS = mabi=n32/mabi=32/mabi=64
+MULTILIB_DIRNAMES = n32 32 64
+MIPS_EL = $(if $(filter %el, $(firstword $(subst -, ,$(target)))),el)
+MIPS_SOFT = $(if $(strip $(filter MASK_SOFT_FLOAT_ABI, $(target_cpu_default)) $(filter soft, $(with_float))),soft)
+MULTILIB_OSDIRNAMES = \
+ ../lib32$(call if_multiarch,:mips64$(MIPS_EL)-linux-gnuabin32$(MIPS_SOFT)) \
+ ../lib$(call if_multiarch,:mips$(MIPS_EL)-linux-gnu$(MIPS_SOFT)) \
+ ../lib64$(call if_multiarch,:mips64$(MIPS_EL)-linux-gnuabi64$(MIPS_SOFT))
diff --git a/gcc-4.9/gcc/config/mips/t-mips b/gcc-4.9/gcc/config/mips/t-mips
new file mode 100644
index 000000000..6e048f951
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/t-mips
@@ -0,0 +1,22 @@
+# Copyright (C) 2002-2014 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+$(srcdir)/config/mips/mips-tables.opt: $(srcdir)/config/mips/genopt.sh \
+ $(srcdir)/config/mips/mips-cpus.def
+ $(SHELL) $(srcdir)/config/mips/genopt.sh $(srcdir)/config/mips > \
+ $(srcdir)/config/mips/mips-tables.opt
diff --git a/gcc-4.9/gcc/config/mips/t-mti-elf b/gcc-4.9/gcc/config/mips/t-mti-elf
new file mode 100644
index 000000000..1109ea716
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/t-mti-elf
@@ -0,0 +1,50 @@
+# Copyright (C) 2012-2014 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+# The default build is mips32r2, hard-float big-endian. Add mips32,
+# soft-float, and little-endian variations.
+
+MULTILIB_OPTIONS = mips32/mips64/mips64r2 mips16/mmicromips mabi=64 EL msoft-float/mfp64 mnan=2008
+MULTILIB_DIRNAMES = mips32 mips64 mips64r2 mips16 micromips 64 el sof fp64 nan2008
+MULTILIB_MATCHES = EL=mel EB=meb
+
+# The 64 bit ABI is not supported on the mips32 architecture.
+MULTILIB_EXCEPTIONS += *mips32*/*mabi=64*
+
+# The 64 bit ABI is not supported on the mips32r2 architecture.
+# Because mips32r2 is the default we can't use that flag to trigger
+# the exception so we check for mabi=64 with no specific mips
+# architecture flag instead.
+MULTILIB_EXCEPTIONS += mabi=64*
+
+# We do not want to build mips16 versions of mips64* architectures.
+MULTILIB_EXCEPTIONS += *mips64*/*mips16*
+MULTILIB_EXCEPTIONS += *mips16/mabi=64*
+
+# We only want micromips for mips32r2 architecture.
+MULTILIB_EXCEPTIONS += *mips32/mmicromips*
+MULTILIB_EXCEPTIONS += *mips64*/mmicromips*
+MULTILIB_EXCEPTIONS += *mmicromips/mabi=64*
+
+# We do not want nan2008 libraries for soft-float.
+MULTILIB_EXCEPTIONS += *msoft-float*/*mnan=2008*
+
+# -mfp64 libraries are only built for mips32r2 and not in mips16 mode.
+MULTILIB_EXCEPTIONS += *mips32/*mfp64*
+MULTILIB_EXCEPTIONS += *mips64*/*mfp64*
+MULTILIB_EXCEPTIONS += *mips16*/*mfp64*
diff --git a/gcc-4.9/gcc/config/mips/t-mti-linux b/gcc-4.9/gcc/config/mips/t-mti-linux
new file mode 100644
index 000000000..1109ea716
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/t-mti-linux
@@ -0,0 +1,50 @@
+# Copyright (C) 2012-2014 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+# The default build is mips32r2, hard-float big-endian. Add mips32,
+# soft-float, and little-endian variations.
+
+MULTILIB_OPTIONS = mips32/mips64/mips64r2 mips16/mmicromips mabi=64 EL msoft-float/mfp64 mnan=2008
+MULTILIB_DIRNAMES = mips32 mips64 mips64r2 mips16 micromips 64 el sof fp64 nan2008
+MULTILIB_MATCHES = EL=mel EB=meb
+
+# The 64 bit ABI is not supported on the mips32 architecture.
+MULTILIB_EXCEPTIONS += *mips32*/*mabi=64*
+
+# The 64 bit ABI is not supported on the mips32r2 architecture.
+# Because mips32r2 is the default we can't use that flag to trigger
+# the exception so we check for mabi=64 with no specific mips
+# architecture flag instead.
+MULTILIB_EXCEPTIONS += mabi=64*
+
+# We do not want to build mips16 versions of mips64* architectures.
+MULTILIB_EXCEPTIONS += *mips64*/*mips16*
+MULTILIB_EXCEPTIONS += *mips16/mabi=64*
+
+# We only want micromips for mips32r2 architecture.
+MULTILIB_EXCEPTIONS += *mips32/mmicromips*
+MULTILIB_EXCEPTIONS += *mips64*/mmicromips*
+MULTILIB_EXCEPTIONS += *mmicromips/mabi=64*
+
+# We do not want nan2008 libraries for soft-float.
+MULTILIB_EXCEPTIONS += *msoft-float*/*mnan=2008*
+
+# -mfp64 libraries are only built for mips32r2 and not in mips16 mode.
+MULTILIB_EXCEPTIONS += *mips32/*mfp64*
+MULTILIB_EXCEPTIONS += *mips64*/*mfp64*
+MULTILIB_EXCEPTIONS += *mips16*/*mfp64*
diff --git a/gcc-4.9/gcc/config/mips/t-r3900 b/gcc-4.9/gcc/config/mips/t-r3900
new file mode 100644
index 000000000..fcd9c25b5
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/t-r3900
@@ -0,0 +1,23 @@
+# Copyright (C) 1998-2014 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+# Build the libraries for both hard and soft floating point
+
+MULTILIB_OPTIONS = msoft-float EL/EB
+MULTILIB_DIRNAMES = soft-float el eb
+MULTILIB_MATCHES = EL=mel EB=meb
diff --git a/gcc-4.9/gcc/config/mips/t-rtems b/gcc-4.9/gcc/config/mips/t-rtems
new file mode 100644
index 000000000..4019f946b
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/t-rtems
@@ -0,0 +1,34 @@
+# Custom multilibs for RTEMS
+#
+# Copyright (C) 2003-2014 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+# default is mips1 EB hard-float
+MULTILIB_OPTIONS = mips1/mips3/mips32 EB/EL msoft-float
+MULTILIB_DIRNAMES = mips1 mips3 mips32 eb el soft-float
+MULTILIB_MATCHES = EL=mel EB=meb
+
+MULTILIB_EXCEPTIONS =
+
+# Big endian only
+MULTILIB_EXCEPTIONS += EL*
+MULTILIB_EXCEPTIONS += mips32/EL*
+
+# Little endian only
+MULTILIB_EXCEPTIONS += mips3
+MULTILIB_EXCEPTIONS += mips3/msoft-float
diff --git a/gcc-4.9/gcc/config/mips/t-sb1 b/gcc-4.9/gcc/config/mips/t-sb1
new file mode 100644
index 000000000..99214be4f
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/t-sb1
@@ -0,0 +1,62 @@
+# Copyright (C) 2006-2014 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+# GP-rel: G0 only
+#
+# Endianness: EB or EL
+#
+# ABIs: mabi=32
+# mabi=o64
+# mabi=o64/mlong64
+#
+# FPU: (default mhard-float)
+# msoft-float (only for mabi=32)
+#
+
+MULTILIB_EXTRA_OPTS = G0
+
+MULTILIB_OPTIONS = \
+ EB/EL \
+ mabi=32/mabi=o64 \
+ mips32/mips64 \
+ mlong64/msoft-float \
+
+MULTILIB_DIRNAMES = \
+ eb el \
+ o32 o64 \
+ mips32 mips64 \
+ long64 soft-float \
+
+MULTILIB_MATCHES = \
+ EB=meb EL=mel \
+
+MULTILIB_EXCEPTIONS = \
+ *mabi=32/*mlong64* \
+
+MULTILIB_EXCLUSIONS = \
+ mips32/!mabi=32 \
+ mabi=32/!mips32 \
+ msoft-float/!mabi=32 \
+
+# Small multilib list for quick builds and tests.
+# Must either comment out everything above these lines, or everything below
+# these lines.
+
+#MULTILIB_OPTIONS = EB/EL msoft-float
+#MULTILIB_DIRNAMES = eb el soft-float
+#MULTILIB_MATCHES = EB=meb EL=mel
diff --git a/gcc-4.9/gcc/config/mips/t-sde b/gcc-4.9/gcc/config/mips/t-sde
new file mode 100644
index 000000000..229e3d644
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/t-sde
@@ -0,0 +1,37 @@
+# Copyright (C) 2007-2014 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+MULTILIB_OPTIONS = EL/EB mips32/mips32r2/mips64/mips64r2 mips16/mmicromips msoft-float/mfp64 mcode-readable=no
+MULTILIB_DIRNAMES = el eb mips32 mips32r2 mips64 mips64r2 mips16 micromips sof f64 spram
+MULTILIB_MATCHES = EL=mel EB=meb
+
+# The -mfp64 option is only valid in conjunction with -mips32r2.
+ifneq ($(filter MIPS_ISA_DEFAULT=33,$(tm_defines)),)
+MULTILIB_EXCLUSIONS := mips32/mfp64 mips64/mfp64 mips64r2/mfp64
+else
+MULTILIB_EXCLUSIONS := !mips32r2/mfp64
+endif
+
+# Don't build 64-bit MIPS16 and microMIPS multilibs.
+ifneq ($(filter MIPS_ISA_DEFAULT=6%,$(tm_defines)),)
+MULTILIB_EXCLUSIONS += !mips32/!mips32r2/mips16
+MULTILIB_EXCLUSIONS += !mips32/!mips32r2/mmicromips
+else
+MULTILIB_EXCLUSIONS += mips64/mips16 mips64r2/mips16
+MULTILIB_EXCLUSIONS += mips64/mmicromips mips64r2/mmicromips
+endif
diff --git a/gcc-4.9/gcc/config/mips/t-sdemtk b/gcc-4.9/gcc/config/mips/t-sdemtk
new file mode 100644
index 000000000..820faa305
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/t-sdemtk
@@ -0,0 +1,40 @@
+# Copyright (C) 2007-2014 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+# Override newlib settings in t-sde and set up for building
+# against SDE header files and libraries.
+
+MULTILIB_OPTIONS = EL/EB mips32/mips32r2/mips64/mips64r2 mips16 msoft-float/mno-float/mfp64
+MULTILIB_DIRNAMES = el eb mips32 mips32r2 mips64 mips64r2 mips16 sof nof f64
+
+# Remove stdarg.h and stddef.h from USER_H.
+USER_H = $(srcdir)/ginclude/float.h \
+ $(srcdir)/ginclude/iso646.h \
+ $(srcdir)/ginclude/stdbool.h \
+ $(srcdir)/ginclude/varargs.h \
+ $(EXTRA_HEADERS)
+
+# Don't run fixinclude
+STMP_FIXINC = stmp-sdefixinc
+stmp-sdefixinc: gsyslimits.h
+ rm -rf include; mkdir include
+ chmod a+rx include
+ rm -f include/syslimits.h
+ cp $(srcdir)/gsyslimits.h include/syslimits.h
+ chmod a+r include/syslimits.h
+ $(STAMP) stmp-sdefixinc
diff --git a/gcc-4.9/gcc/config/mips/t-sr71k b/gcc-4.9/gcc/config/mips/t-sr71k
new file mode 100644
index 000000000..3346c914c
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/t-sr71k
@@ -0,0 +1,21 @@
+# Copyright (C) 2002-2014 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+# Build the libraries for both hard and soft floating point
+MULTILIB_OPTIONS = EL/EB msoft-float mips2
+MULTILIB_DIRNAMES = el eb soft-float mips2
diff --git a/gcc-4.9/gcc/config/mips/t-st b/gcc-4.9/gcc/config/mips/t-st
new file mode 100644
index 000000000..57daa9f0c
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/t-st
@@ -0,0 +1,30 @@
+# Copyright (C) 2008-2014 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+MULTILIB_OPTIONS = march=loongson2e/march=loongson2f mabi=n32/mabi=32/mabi=64
+MULTILIB_DIRNAMES = 2e 2f lib32 lib lib64
+
+MULTILIB_OSDIRNAMES = march.loongson2e/mabi.n32=../lib32/2e
+MULTILIB_OSDIRNAMES += march.loongson2e/mabi.32=../lib/2e
+MULTILIB_OSDIRNAMES += march.loongson2e/mabi.64=../lib64/2e
+MULTILIB_OSDIRNAMES += march.loongson2f/mabi.n32=../lib32/2f
+MULTILIB_OSDIRNAMES += march.loongson2f/mabi.32=../lib/2f
+MULTILIB_OSDIRNAMES += march.loongson2f/mabi.64=../lib64/2f
+MULTILIB_OSDIRNAMES += mabi.n32=../lib32
+MULTILIB_OSDIRNAMES += mabi.32=../lib
+MULTILIB_OSDIRNAMES += mabi.64=../lib64
diff --git a/gcc-4.9/gcc/config/mips/t-vr b/gcc-4.9/gcc/config/mips/t-vr
new file mode 100644
index 000000000..ccc5d24e2
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/t-vr
@@ -0,0 +1,106 @@
+# Copyright (C) 2002-2014 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+# Main multilibs
+# --------------
+#
+# Endianness: EB or EL
+#
+# ABIs: mabi=32
+# mabi=o64
+# mabi=eabi
+# mabi=eabi/mlong32
+# mabi=eabi/mgp32
+# mabi=eabi/mgp32/mlong64
+#
+# Architecture: march=vr4120 with -mfix-vr4120
+# march=vr4130 with -mfix-vr4130 (default)
+# march=vr5000
+# march=vr5400
+# march=vr5500
+#
+# Total: 2 * 6 * 5 = 60 multilibs.
+#
+#
+# Extra vr4300 multilibs
+# ----------------------
+#
+# Endianness: EB or EL
+#
+# ABI: o64
+#
+# Architecture: vr4300.
+#
+# Total: 2 * 1 * 1 = 2 multilibs.
+#
+#
+# Extra MIPS16 multilibs
+# ----------------------
+#
+# Endianness: EB or EL
+#
+# ABIs: mabi=o64
+# mabi=eabi/mlong32
+# mabi=eabi/mgp32
+#
+# Architecture: march=vr4120 with -mfix-vr4120
+# march=vr4130 with -mfix-vr4130 (default)
+#
+# Total: 2 * 3 * 2 = 12 multilibs.
+MULTILIB_OPTIONS = \
+ EL/EB \
+ mabi=32/mabi=o64/mabi=eabi \
+ mgp32 \
+ mlong64 \
+ mips16 \
+ mfix-vr4120/mfix-vr4130/march=vr4300/march=vr5000/march=vr5400/march=vr5500
+
+MULTILIB_DIRNAMES = \
+ el eb \
+ o32 o64 eabi \
+ gp32 \
+ long64 \
+ mips16 \
+ vr4120 vr4130 vr4300 vr5000 vr5400 vr5500
+
+MULTILIB_MATCHES = EL=mel EB=meb mfix-vr4120=march?vr4120 \
+ mfix-vr4130=march?vr4130
+
+# Assume a 41xx-series is the default: we'd need a *mips16 entry if
+# the default processor didn't support mips16. Also assume the
+# default ABI is EABI64 -mlong32.
+MULTILIB_EXCEPTIONS = \
+ *mabi=32/mlong64* \
+ *mabi=32/mgp32* \
+ *mabi=o64/mgp32* \
+ *mabi=o64/mlong64* \
+ *mips16/march=vr5* \
+ *mips16/march=vr4300 \
+ $(MIPS16_EXCEPTIONS) \
+ $(VR4300_EXCEPTIONS)
+
+MIPS16_EXCEPTIONS = \
+ *mabi=32*mips16* \
+ *mlong64*mips16*
+
+VR4300_EXCEPTIONS = \
+ *mabi=32*march=vr4300 \
+ *mgp32*march=vr4300 \
+ *mlong64*march=vr4300 \
+ march=vr4300 \
+ E[LB]/march=vr4300
diff --git a/gcc-4.9/gcc/config/mips/t-vxworks b/gcc-4.9/gcc/config/mips/t-vxworks
new file mode 100644
index 000000000..97c1e782b
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/t-vxworks
@@ -0,0 +1,35 @@
+# Copyright (C) 2003-2014 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+# Multilibs for VxWorks. We want these 8 architecture combinations:
+#
+# {-mips2,-mips3} x {-EB,-EL} x {-mhard-float,-msoft-float}
+#
+# where the first option in each group is the default. The -mips2
+# multilibs use o32 and the -mips3 multilibs use o64.
+#
+# We want three multilibs for each architecture combination:
+# default (kernel mode), -mrtp and -mrtp/-fPIC.
+MULTILIB_OPTIONS = mabi=o64 mips3 EL msoft-float mrtp fPIC
+MULTILIB_DIRNAMES = o64 mips3 EL msoft-float mrtp pic
+MULTILIB_MATCHES = EL=mel fPIC=fpic
+MULTILIB_EXCEPTIONS = mips3* mabi=o64 fPIC \
+ $(addprefix mabi=o64/, EL* msoft-float* mrtp* fPIC*) \
+ $(addsuffix /fPIC, *mabi=o64 *mips3 *EL *msoft-float)
+
+MULTILIB_EXTRA_OPTS = -G 0 -mno-branch-likely
diff --git a/gcc-4.9/gcc/config/mips/vr.h b/gcc-4.9/gcc/config/mips/vr.h
new file mode 100644
index 000000000..bbbefd64d
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/vr.h
@@ -0,0 +1,58 @@
+/* Definitions of target machine for GNU compiler.
+ NEC VR Series Processors
+ Copyright (C) 2002-2014 Free Software Foundation, Inc.
+ Contributed by Red Hat, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#define DEFAULT_VR_ARCH "mfix-vr4130"
+#undef MULTILIB_DEFAULTS
+#define MULTILIB_DEFAULTS \
+ { MULTILIB_ENDIAN_DEFAULT, \
+ MULTILIB_ABI_DEFAULT, \
+ DEFAULT_VR_ARCH }
+
+#undef DRIVER_SELF_SPECS
+#define DRIVER_SELF_SPECS \
+ /* Enforce the default architecture. This is mostly for \
+ the assembler's benefit. */ \
+ "%{!march=*:%{!mfix-vr4120:%{!mfix-vr4130:" \
+ "-" DEFAULT_VR_ARCH "}}}", \
+ \
+ /* Make -mfix-vr4120 imply -march=vr4120. This cuts down \
+ on command-line tautology and makes it easier for t-vr to \
+ provide a -mfix-vr4120 multilib. */ \
+ "%{mfix-vr4120:%{!march=*:-march=vr4120}}", \
+ \
+ /* Same idea for -mfix-vr4130. */ \
+ "%{mfix-vr4130:%{!march=*:-march=vr4130}}", \
+ \
+ /* Infer the default float setting from -march. */ \
+ MIPS_ARCH_FLOAT_SPEC, \
+ \
+ /* Make -mabi=eabi -mlong32 the default. */ \
+ "%{!mabi=*:-mabi=eabi %{!mlong*:-mlong32}}", \
+ \
+ /* Make sure -mlong64 multilibs are chosen when 64-bit longs \
+ are needed. */ \
+ "%{mabi=eabi:%{!mlong*:%{!mgp32:-mlong64}}}", \
+ \
+ /* Remove -mgp32 if it is redundant. */ \
+ "%{mabi=32:%<mgp32}", \
+ \
+ /* Configuration-independent MIPS rules. */ \
+ BASE_DRIVER_SELF_SPECS
diff --git a/gcc-4.9/gcc/config/mips/vxworks.h b/gcc-4.9/gcc/config/mips/vxworks.h
new file mode 100644
index 000000000..7771076d0
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/vxworks.h
@@ -0,0 +1,78 @@
+/* Copyright (C) 1999-2014 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#undef ASM_SPEC
+#define ASM_SPEC "\
+%{!G:-G 0} %{G*} %(endian_spec) %{mips1} %{mips2} %{mips3} %{mips4} \
+%{mips32} %{mips32r2} %{mips64} \
+%{mips16:%{!mno-mips16:-mips16}} %{mno-mips16:-no-mips16} \
+%(subtarget_asm_optimizing_spec) \
+%(subtarget_asm_debugging_spec) \
+%{mabi=*} %{!mabi*: %(asm_abi_default_spec)} \
+%{mgp32} %{mgp64} %{march=*} %{mxgot:-xgot} \
+%{mtune=*} \
+%(subtarget_asm_spec)"
+
+#undef LINK_SPEC
+#define LINK_SPEC "\
+%(endian_spec) \
+%{!G:-G 0} %{G*} %{mips1} %{mips2} %{mips3} %{mips4} %{mips32} %{mips64} " \
+VXWORKS_LINK_SPEC
+
+#undef LIB_SPEC
+#define LIB_SPEC VXWORKS_LIB_SPEC
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC VXWORKS_STARTFILE_SPEC
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC VXWORKS_ENDFILE_SPEC
+
+#define TARGET_OS_CPP_BUILTINS() \
+ do \
+ { \
+ if (TARGET_64BIT) \
+ builtin_define ("CPU=MIPS64"); \
+ else \
+ builtin_define ("CPU=MIPS32"); \
+ if (TARGET_BIG_ENDIAN) \
+ builtin_define ("MIPSEB"); \
+ else \
+ builtin_define ("MIPSEL"); \
+ if (TARGET_SOFT_FLOAT) \
+ builtin_define ("SOFT_FLOAT"); \
+ VXWORKS_OS_CPP_BUILTINS (); \
+ } \
+ while (0)
+
+#undef SUBTARGET_CPP_SPEC
+#define SUBTARGET_CPP_SPEC VXWORKS_ADDITIONAL_CPP_SPEC
+
+/* No sdata. */
+#undef MIPS_DEFAULT_GVALUE
+#define MIPS_DEFAULT_GVALUE 0
+
+/* No _mcount profiling on VxWorks. */
+#undef FUNCTION_PROFILER
+#define FUNCTION_PROFILER VXWORKS_FUNCTION_PROFILER
+
+#undef SUBTARGET_ASM_SPEC
+#define SUBTARGET_ASM_SPEC "%{mrtp:%{fPIC|fpic:-mvxworks-pic}}"
+
+#undef SUBTARGET_OVERRIDE_OPTIONS
+#define SUBTARGET_OVERRIDE_OPTIONS VXWORKS_OVERRIDE_OPTIONS
+
+#undef DBX_REGISTER_NUMBER
diff --git a/gcc-4.9/gcc/config/mips/x-native b/gcc-4.9/gcc/config/mips/x-native
new file mode 100644
index 000000000..5e31121ed
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/x-native
@@ -0,0 +1,3 @@
+driver-native.o : $(srcdir)/config/mips/driver-native.c \
+ $(CONFIG_H) $(SYSTEM_H)
+ $(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $<
diff --git a/gcc-4.9/gcc/config/mips/xlp.md b/gcc-4.9/gcc/config/mips/xlp.md
new file mode 100644
index 000000000..905808dd7
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/xlp.md
@@ -0,0 +1,213 @@
+;; DFA-based pipeline description for the XLP.
+;; Copyright (C) 2012-2014 Free Software Foundation, Inc.
+;;
+;; xlp.md Machine Description for the Broadcom XLP Microprocessor
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 3, or (at your
+;; option) any later version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+(define_automaton "xlp_cpu")
+
+;; CPU function units.
+(define_cpu_unit "xlp_ex0" "xlp_cpu")
+(define_cpu_unit "xlp_ex1" "xlp_cpu")
+(define_cpu_unit "xlp_ex2" "xlp_cpu")
+(define_cpu_unit "xlp_ex3" "xlp_cpu")
+
+;; Integer Multiply Unit
+(define_cpu_unit "xlp_div" "xlp_cpu")
+
+;; ALU2 completion port.
+(define_cpu_unit "xlp_ex2_wrb" "xlp_cpu")
+
+(define_automaton "xlp_fpu")
+
+;; Floating-point units.
+(define_cpu_unit "xlp_fp" "xlp_fpu")
+
+;; Floating Point Sqrt/Divide
+(define_cpu_unit "xlp_divsq" "xlp_fpu")
+
+;; FPU completion port.
+(define_cpu_unit "xlp_fp_wrb" "xlp_fpu")
+
+;; Define reservations for common combinations.
+
+;;
+;; The ordering of the instruction-execution-path/resource-usage
+;; descriptions (also known as reservation RTL) is roughly ordered
+;; based on the define attribute RTL for the "type" classification.
+;; When modifying, remember that the first test that matches is the
+;; reservation used!
+;;
+(define_insn_reservation "ir_xlp_unknown" 1
+ (and (eq_attr "cpu" "xlp")
+ (eq_attr "type" "unknown,multi"))
+ "xlp_ex0+xlp_ex1+xlp_ex2+xlp_ex3")
+
+(define_insn_reservation "ir_xlp_branch" 1
+ (and (eq_attr "cpu" "xlp")
+ (eq_attr "type" "branch,jump,call"))
+ "xlp_ex3")
+
+(define_insn_reservation "ir_xlp_prefetch" 1
+ (and (eq_attr "cpu" "xlp")
+ (eq_attr "type" "prefetch,prefetchx"))
+ "xlp_ex0|xlp_ex1")
+
+(define_insn_reservation "ir_xlp_load" 4
+ (and (eq_attr "cpu" "xlp")
+ (eq_attr "type" "load"))
+ "xlp_ex0|xlp_ex1")
+
+(define_insn_reservation "ir_xlp_fpload" 5
+ (and (eq_attr "cpu" "xlp")
+ (eq_attr "type" "fpload,fpidxload"))
+ "xlp_ex0|xlp_ex1")
+
+(define_insn_reservation "ir_xlp_alu" 1
+ (and (eq_attr "cpu" "xlp")
+ (eq_attr "type" "const,arith,shift,slt,clz,signext,logical,move,trap,nop"))
+ "xlp_ex0|xlp_ex1|(xlp_ex2,xlp_ex2_wrb)|xlp_ex3")
+
+(define_insn_reservation "ir_xlp_condmov" 1
+ (and (eq_attr "cpu" "xlp")
+ (eq_attr "type" "condmove")
+ (eq_attr "mode" "SI,DI"))
+ "xlp_ex2,xlp_ex2_wrb")
+
+(define_insn_reservation "ir_xlp_mul" 5
+ (and (eq_attr "cpu" "xlp")
+ (eq_attr "type" "imul,imadd"))
+ "xlp_ex2,nothing*4,xlp_ex2_wrb")
+
+(define_insn_reservation "ir_xlp_mul3" 3
+ (and (eq_attr "cpu" "xlp")
+ (eq_attr "type" "imul3"))
+ "xlp_ex2,nothing*2,xlp_ex2_wrb")
+
+(define_insn_reservation "ir_xlp_div" 24
+ (and (eq_attr "cpu" "xlp")
+ (eq_attr "mode" "SI")
+ (eq_attr "type" "idiv"))
+ "xlp_ex2+xlp_div,xlp_div*23,xlp_ex2_wrb")
+
+(define_insn_reservation "ir_xlp_ddiv" 48
+ (and (eq_attr "cpu" "xlp")
+ (eq_attr "mode" "DI")
+ (eq_attr "type" "idiv"))
+ "xlp_ex2+xlp_div,xlp_div*47,xlp_ex2_wrb")
+
+(define_insn_reservation "ir_xlp_store" 1
+ (and (eq_attr "cpu" "xlp")
+ (eq_attr "type" "store,fpstore,fpidxstore"))
+ "xlp_ex0|xlp_ex1")
+
+(define_insn_reservation "ir_xlp_fpmove" 2
+ (and (eq_attr "cpu" "xlp")
+ (eq_attr "type" "mfc"))
+ "xlp_ex3,xlp_fp,xlp_fp_wrb")
+
+(define_insn_reservation "ir_xlp_mfhi" 1
+ (and (eq_attr "cpu" "xlp")
+ (eq_attr "type" "mfhi"))
+ "xlp_ex2,xlp_ex2_wrb")
+
+(define_insn_reservation "ir_xlp_mflo" 1
+ (and (eq_attr "cpu" "xlp")
+ (eq_attr "type" "mflo"))
+ "xlp_ex2,xlp_ex2_wrb")
+
+(define_insn_reservation "ir_xlp_mthi" 1
+ (and (eq_attr "cpu" "xlp")
+ (eq_attr "type" "mthi"))
+ "xlp_ex2,xlp_ex2_wrb")
+
+(define_insn_reservation "ir_xlp_mtlo" 3
+ (and (eq_attr "cpu" "xlp")
+ (eq_attr "type" "mtlo"))
+ "xlp_ex2,nothing*2,xlp_ex2_wrb")
+
+(define_insn_reservation "ir_xlp_fp2" 2
+ (and (eq_attr "cpu" "xlp")
+ (eq_attr "type" "fmove,fneg,fabs,condmove"))
+ "xlp_fp,nothing,xlp_fp_wrb")
+
+(define_insn_reservation "ir_xlp_fp3" 3
+ (and (eq_attr "cpu" "xlp")
+ (eq_attr "type" "fcmp"))
+ "xlp_fp,nothing*2,xlp_fp_wrb")
+
+(define_insn_reservation "ir_xlp_fp4" 4
+ (and (eq_attr "cpu" "xlp")
+ (eq_attr "type" "fcvt"))
+ "xlp_fp,nothing*3,xlp_fp_wrb")
+
+(define_insn_reservation "ir_xlp_fp5" 5
+ (and (eq_attr "cpu" "xlp")
+ (eq_attr "mode" "SF")
+ (eq_attr "type" "fadd,fmul"))
+ "xlp_fp,nothing*4,xlp_fp_wrb")
+
+(define_insn_reservation "ir_xlp_fp6" 6
+ (and (eq_attr "cpu" "xlp")
+ (eq_attr "mode" "DF")
+ (eq_attr "type" "fadd,fmul"))
+ "xlp_fp,nothing*5,xlp_fp_wrb")
+
+(define_insn_reservation "ir_xlp_fp9" 9
+ (and (eq_attr "cpu" "xlp")
+ (eq_attr "mode" "SF")
+ (eq_attr "type" "fmadd"))
+ "xlp_fp,nothing*3,xlp_fp,nothing*3,xlp_fp_wrb")
+
+(define_insn_reservation "ir_xlp_fp11" 11
+ (and (eq_attr "cpu" "xlp")
+ (eq_attr "mode" "DF")
+ (eq_attr "type" "fmadd"))
+ "xlp_fp,nothing*4,xlp_fp,nothing*4,xlp_fp_wrb")
+
+(define_insn_reservation "ir_xlp_fpcomplex_s" 23
+ (and (eq_attr "cpu" "xlp")
+ (eq_attr "mode" "SF")
+ (eq_attr "type" "fdiv,frdiv,frdiv1,frdiv2,fsqrt,frsqrt,frsqrt1,frsqrt2"))
+ "xlp_fp+xlp_divsq,xlp_divsq*22,xlp_fp_wrb")
+
+(define_insn_reservation "ir_xlp_fpcomplex_d" 38
+ (and (eq_attr "cpu" "xlp")
+ (eq_attr "mode" "DF")
+ (eq_attr "type" "fdiv,frdiv,frdiv1,frdiv2,fsqrt,frsqrt,frsqrt1,frsqrt2"))
+ "xlp_fp+xlp_divsq,xlp_divsq*37,xlp_fp_wrb")
+
+(define_bypass 3 "ir_xlp_mul" "ir_xlp_mfhi")
+
+(define_insn_reservation "ir_xlp_atomic" 15
+ (and (eq_attr "cpu" "xlp")
+ (eq_attr "type" "atomic"))
+ "xlp_ex0|xlp_ex1")
+
+;; Sync loop consists of (in order)
+;; (1) optional sync,
+;; (2) LL instruction,
+;; (3) branch and 1-2 ALU instructions,
+;; (4) SC instruction,
+;; (5) optional sync,
+;; (6) branch and ALU instruction.
+;; The net result of this reservation is a big delay with flush of
+;; ALU pipeline and outgoing reservations discouraging use of EX3.
+(define_insn_reservation "ir_xlp_sync_loop" 40
+ (and (eq_attr "cpu" "xlp")
+ (eq_attr "type" "syncloop"))
+ "(xlp_ex0+xlp_ex1+xlp_ex2+xlp_ex3)*39,xlp_ex3+(xlp_ex0|xlp_ex1|(xlp_ex2,xlp_ex2_wrb))")
diff --git a/gcc-4.9/gcc/config/mips/xlr.md b/gcc-4.9/gcc/config/mips/xlr.md
new file mode 100644
index 000000000..a8d1a12c1
--- /dev/null
+++ b/gcc-4.9/gcc/config/mips/xlr.md
@@ -0,0 +1,94 @@
+;; DFA-based pipeline description for the XLR.
+;; Copyright (C) 2008-2014 Free Software Foundation, Inc.
+;;
+;; xlr.md Machine Description for the RMI XLR Microprocessor
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 3, or (at your
+;; option) any later version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+(define_automaton "xlr_main,xlr_muldiv")
+
+;; Definitions for xlr_main automaton.
+(define_cpu_unit "xlr_main_pipe" "xlr_main")
+
+(define_insn_reservation "ir_xlr_alu_slt" 2
+ (and (eq_attr "cpu" "xlr")
+ (eq_attr "type" "slt"))
+ "xlr_main_pipe")
+
+(define_insn_reservation "ir_xlr_alu_clz" 2
+ (and (eq_attr "cpu" "xlr")
+ (eq_attr "type" "clz"))
+ "xlr_main_pipe")
+
+;; Integer arithmetic instructions.
+(define_insn_reservation "ir_xlr_alu" 1
+ (and (eq_attr "cpu" "xlr")
+ (eq_attr "type" "move,arith,shift,logical,signext,const,unknown,multi,nop,trap,atomic,syncloop"))
+ "xlr_main_pipe")
+
+;; Integer arithmetic instructions.
+(define_insn_reservation "ir_xlr_condmove" 2
+ (and (eq_attr "cpu" "xlr")
+ (eq_attr "type" "condmove"))
+ "xlr_main_pipe")
+
+;; Load/store instructions.
+(define_insn_reservation "ir_xlr_load" 4
+ (and (eq_attr "cpu" "xlr")
+ (eq_attr "type" "load"))
+ "xlr_main_pipe")
+
+(define_insn_reservation "ir_xlr_store" 1
+ (and (eq_attr "cpu" "xlr")
+ (eq_attr "type" "store"))
+ "xlr_main_pipe")
+
+(define_insn_reservation "ir_xlr_prefetch_x" 1
+ (and (eq_attr "cpu" "xlr")
+ (eq_attr "type" "prefetch,prefetchx"))
+ "xlr_main_pipe")
+
+;; Branch instructions - use branch misprediction latency.
+(define_insn_reservation "ir_xlr_branch" 1
+ (and (eq_attr "cpu" "xlr")
+ (eq_attr "type" "branch,jump,call"))
+ "xlr_main_pipe")
+
+;; Coprocessor move instructions.
+(define_insn_reservation "ir_xlr_xfer" 2
+ (and (eq_attr "cpu" "xlr")
+ (eq_attr "type" "mtc,mfc"))
+ "xlr_main_pipe")
+
+(define_bypass 5 "ir_xlr_xfer" "ir_xlr_xfer")
+
+;; Definitions for the xlr_muldiv automaton.
+(define_cpu_unit "xlr_imuldiv_nopipe" "xlr_muldiv")
+
+(define_insn_reservation "ir_xlr_imul" 8
+ (and (eq_attr "cpu" "xlr")
+ (eq_attr "type" "imul,imul3,imadd"))
+ "xlr_main_pipe,xlr_imuldiv_nopipe*6")
+
+(define_insn_reservation "ir_xlr_div" 68
+ (and (eq_attr "cpu" "xlr")
+ (eq_attr "type" "idiv"))
+ "xlr_main_pipe,xlr_imuldiv_nopipe*67")
+
+(define_insn_reservation "xlr_hilo" 2
+ (and (eq_attr "cpu" "xlr")
+ (eq_attr "type" "mfhi,mflo,mthi,mtlo"))
+ "xlr_imuldiv_nopipe")