Diffstat (limited to 'gcc-4.9/gcc/config/fr30/fr30.md')
-rw-r--r--  gcc-4.9/gcc/config/fr30/fr30.md  1267
1 files changed, 1267 insertions, 0 deletions
diff --git a/gcc-4.9/gcc/config/fr30/fr30.md b/gcc-4.9/gcc/config/fr30/fr30.md
new file mode 100644
index 000000000..8bbd77c69
--- /dev/null
+++ b/gcc-4.9/gcc/config/fr30/fr30.md
@@ -0,0 +1,1267 @@
+;; FR30 machine description.
+;; Copyright (C) 1998-2014 Free Software Foundation, Inc.
+;; Contributed by Cygnus Solutions.
+
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+;;- See file "rtl.def" for documentation on define_insn, match_*, et al.
+
+;;{{{ Attributes
+
+(define_attr "length" "" (const_int 2))
+
+;; Used to distinguish between small memory model targets and big memory model targets.
+
+(define_attr "size" "small,big"
+ (const (if_then_else (symbol_ref "TARGET_SMALL_MODEL")
+ (const_string "small")
+ (const_string "big"))))
+
+
+;; Define an attribute to be used by the delay slot code.
+;; An instruction by default is considered to be 'delayable';
+;; that is, it can be placed into a delay slot, but it is not
+;; itself a delayed branch type instruction. An instruction
+;; whose type is 'delayed' is one which has a delay slot, and
+;; an instruction whose delay_type is 'other' is one which does
+;; not have a delay slot, nor can it be placed into a delay slot.
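+;; For example, the call and branch patterns below are marked 'delayed',
+;; so the define_delay below allows any 2-byte 'delayable' insn to fill
+;; their delay slot, while the ldm/stm peepholes are marked 'other' and
+;; are never placed in a slot.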
+
+(define_attr "delay_type" "delayable,delayed,other" (const_string "delayable"))
+
+;;}}}
+;;{{{ Delay Slot Specifications
+
+(define_delay (eq_attr "delay_type" "delayed")
+ [(and (eq_attr "delay_type" "delayable")
+ (eq_attr "length" "2"))
+ (nil)
+ (nil)]
+)
+
+(include "predicates.md")
+(include "constraints.md")
+
+;;}}}
+;;{{{ Moves
+
+;;{{{ Comment
+
+;; Wrap moves in define_expand to prevent memory->memory moves from being
+;; generated at the RTL level; this produces better code on most machines,
+;; which cannot do mem->mem moves directly.
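+
+;; For instance (roughly): with the expanders below, a QImode copy from
+;; one memory location to another is emitted as a load into a pseudo
+;; register followed by a store (ldub then stb), since the FR30 has no
+;; mem->mem move instruction.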
+
+;; If operand 0 is a `subreg' with mode M of a register whose own mode is wider
+;; than M, the effect of this instruction is to store the specified value in
+;; the part of the register that corresponds to mode M. The effect on the rest
+;; of the register is undefined.
+
+;; This class of patterns is special in several ways. First of all, each of
+;; these names *must* be defined, because there is no other way to copy a datum
+;; from one place to another.
+
+;; Second, these patterns are not used solely in the RTL generation pass. Even
+;; the reload pass can generate move insns to copy values from stack slots into
+;; temporary registers. When it does so, one of the operands is a hard
+;; register and the other is an operand that may need to be reloaded into a
+;; register.
+
+;; Therefore, when given such a pair of operands, the pattern must
+;; generate RTL which needs no reloading and needs no temporary
+;; registers--no registers other than the operands. For example, if
+;; you support the pattern with a `define_expand', then in such a
+;; case the `define_expand' mustn't call `force_reg' or any other such
+;; function which might generate new pseudo registers.
+
+;; This requirement exists even for subword modes on a RISC machine
+;; where fetching those modes from memory normally requires several
+;; insns and some temporary registers. Look in `spur.md' to see how
+;; the requirement can be satisfied.
+
+;; During reload a memory reference with an invalid address may be passed as an
+;; operand. Such an address will be replaced with a valid address later in the
+;; reload pass. In this case, nothing may be done with the address except to
+;; use it as it stands. If it is copied, it will not be replaced with a valid
+;; address. No attempt should be made to make such an address into a valid
+;; address and no routine (such as `change_address') that will do so may be
+;; called. Note that `general_operand' will fail when applied to such an
+;; address.
+;;
+;; The global variable `reload_in_progress' (which must be explicitly declared
+;; if required) can be used to determine whether such special handling is
+;; required.
+;;
+;; The variety of operands that have reloads depends on the rest of
+;; the machine description, but typically on a RISC machine these can
+;; only be pseudo registers that did not get hard registers, while on
+;; other machines explicit memory references will get optional
+;; reloads.
+;;
+;; If a scratch register is required to move an object to or from memory, it
+;; can be allocated using `gen_reg_rtx' prior to reload. But this is
+;; impossible during and after reload. If there are cases needing scratch
+;; registers after reload, you must define `SECONDARY_INPUT_RELOAD_CLASS' and
+;; perhaps also `SECONDARY_OUTPUT_RELOAD_CLASS' to detect them, and provide
+;; patterns `reload_inM' or `reload_outM' to handle them.
+
+;; The constraints on a `movM' must permit moving any hard register to any
+;; other hard register provided that `HARD_REGNO_MODE_OK' permits mode M in
+;; both registers and `REGISTER_MOVE_COST' applied to their classes returns a
+;; value of 2.
+
+;; It is obligatory to support floating point `movM' instructions
+;; into and out of any registers that can hold fixed point values,
+;; because unions and structures (which have modes `SImode' or
+;; `DImode') can be in those registers and they may have floating
+;; point members.
+
+;; There may also be a need to support fixed point `movM' instructions in and
+;; out of floating point registers. Unfortunately, I have forgotten why this
+;; was so, and I don't know whether it is still true. If `HARD_REGNO_MODE_OK'
+;; rejects fixed point values in floating point registers, then the constraints
+;; of the fixed point `movM' instructions must be designed to avoid ever
+;; trying to reload into a floating point register.
+
+;;}}}
+;;{{{ Push and Pop
+
+;; Push a register onto the stack
+(define_insn "movsi_push"
+ [(set (mem:SI (pre_dec:SI (reg:SI 15)))
+ (match_operand:SI 0 "register_operand" "a"))]
+ ""
+ "st %0, @-r15"
+)
+
+;; Pop a register off the stack
+(define_insn "movsi_pop"
+ [(set (match_operand:SI 0 "register_operand" "=a")
+ (mem:SI (post_inc:SI (reg:SI 15))))]
+ ""
+ "ld @r15+, %0"
+)
+
+;;}}}
+;;{{{ 1 Byte Moves
+
+(define_expand "movqi"
+ [(set (match_operand:QI 0 "general_operand" "")
+ (match_operand:QI 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (!reload_in_progress
+ && !reload_completed
+ && GET_CODE (operands[0]) == MEM
+ && (GET_CODE (operands[1]) == MEM
+ || immediate_operand (operands[1], QImode)))
+ operands[1] = copy_to_mode_reg (QImode, operands[1]);
+}")
+
+(define_insn "movqi_unsigned_register_load"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (zero_extend:SI (match_operand:QI 1 "memory_operand" "m")))]
+ ""
+ "ldub %1, %0"
+)
+
+(define_expand "movqi_signed_register_load"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (sign_extend:SI (match_operand:QI 1 "memory_operand" "")))]
+ ""
+ "
+ emit_insn (gen_movqi_unsigned_register_load (operands[0], operands[1]));
+ emit_insn (gen_extendqisi2 (operands[0], operands[0]));
+ DONE;
+ "
+)
+
+(define_insn "*movqi_internal"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=r,red,m,r")
+ (match_operand:QI 1 "general_operand" "i,red,r,rm"))]
+ ""
+ "@
+ ldi:8\\t#%A1, %0
+ mov \\t%1, %0
+ stb \\t%1, %0
+ ldub \\t%1, %0"
+)
+
+;;}}}
+;;{{{ 2 Byte Moves
+
+(define_expand "movhi"
+ [(set (match_operand:HI 0 "general_operand" "")
+ (match_operand:HI 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (!reload_in_progress
+ && !reload_completed
+ && GET_CODE (operands[0]) == MEM
+ && (GET_CODE (operands[1]) == MEM
+ || immediate_operand (operands[1], HImode)))
+ operands[1] = copy_to_mode_reg (HImode, operands[1]);
+}")
+
+(define_insn "movhi_unsigned_register_load"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (zero_extend:SI (match_operand:HI 1 "memory_operand" "m")))]
+ ""
+ "lduh %1, %0"
+)
+
+(define_expand "movhi_signed_register_load"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (sign_extend:SI (match_operand:HI 1 "memory_operand" "")))]
+ ""
+ "
+ emit_insn (gen_movhi_unsigned_register_load (operands[0], operands[1]));
+ emit_insn (gen_extendhisi2 (operands[0], operands[0]));
+ DONE;
+ "
+)
+
+(define_insn "*movhi_internal"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=r,r,r,red,m,r")
+ (match_operand:HI 1 "general_operand" "L,M,n,red,r,rm"))]
+ ""
+ "@
+ ldi:8 \\t#%1, %0
+ ldi:20\\t#%1, %0
+ ldi:32\\t#%1, %0
+ mov \\t%1, %0
+ sth \\t%1, %0
+ lduh \\t%1, %0"
+ [(set_attr "length" "*,4,6,*,*,*")]
+)
+
+;;}}}
+;;{{{ 4 Byte Moves
+
+;; If the destination is a MEM and the source is a
+;; MEM or a CONST_INT, move the source into a register.
+(define_expand "movsi"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "")
+ (match_operand:SI 1 "general_operand" ""))]
+ ""
+ "{
+ if (!reload_in_progress
+ && !reload_completed
+ && GET_CODE(operands[0]) == MEM
+ && (GET_CODE (operands[1]) == MEM
+ || immediate_operand (operands[1], SImode)))
+ operands[1] = copy_to_mode_reg (SImode, operands[1]);
+ }"
+)
+
+;; We can do some clever tricks when loading certain immediate
+;; values. We implement these tricks as define_splits, rather
+;; than putting the code into the define_expand "movsi" above,
+;; because if we put them there, they will be evaluated at RTL
+;; generation time and then the combiner pass will come along
+;; and replace the multiple insns that have been generated with
+;; the original, slower, load insns. (The combiner pass only
+;; cares about reducing the number of instructions; it does not
+;; care about instruction lengths or speeds.)  Splits are
+;; evaluated after the combine pass and before the scheduling
+;; passes, so they are the perfect place to put this
+;; intelligence.
+;;
+;; XXX we probably ought to implement these for QI and HI mode
+;; loads as well.
+
+;; If we are loading a small negative constant, we can save space
+;; and time by loading the positive value and then sign extending it.
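+;; For example (roughly): loading -5 is done by loading the byte value
+;; 0xfb (251) with ldi:8 and then sign extending it with extsb; two
+;; 2-byte insns instead of a single 6-byte ldi:32.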
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_operand:SI 1 "const_int_operand" ""))]
+ "INTVAL (operands[1]) <= -1 && INTVAL (operands[1]) >= -128
+ && (GET_CODE (operands[0]) != SUBREG
+ || SCALAR_INT_MODE_P (GET_MODE (XEXP (operands[0], 0))))"
+ [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 0) (sign_extend:SI (match_dup 2)))]
+ "{
+ operands[1] = GEN_INT (INTVAL (operands[1]) & 0xff);
+ operands[2] = gen_lowpart (QImode, operands[0]);
+ }"
+)
+
+;; If we are loading a large negative constant, one which does
+;; not have any of its bottom 24 bits set, then we can save time
+;; and space by loading the byte value and shifting it into place.
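+;; For example (roughly): 0x80000000 is loaded as the value -128 (its top
+;; byte, sign extended) and then shifted left by 24 places.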
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_operand:SI 1 "const_int_operand" ""))]
+ "(INTVAL (operands[1]) < 0) && ((INTVAL (operands[1]) & 0x00ffffff) == 0)"
+ [(set (match_dup 0) (match_dup 2))
+ (parallel [(set (match_dup 0) (ashift:SI (match_dup 0) (const_int 24)))
+ (clobber (reg:CC 16))])]
+ "{
+ HOST_WIDE_INT val = INTVAL (operands[1]);
+ operands[2] = GEN_INT (val >> 24);
+ }"
+)
+
+;; If we are loading a large positive constant, one which has bits
+;; in the top byte set, but whose set bits all lie within an 8-bit
+;; range, then we can save time and space by loading the byte value
+;; and shifting it into place.
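+;; For example (roughly): 0x0a000000 has its lowest set bit at position 25,
+;; so it is loaded as the value 5 and then shifted left by 25 places; two
+;; 2-byte insns instead of a single 6-byte ldi:32.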
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_operand:SI 1 "const_int_operand" ""))]
+ "(INTVAL (operands[1]) > 0x00ffffff)
+ && ((INTVAL (operands[1]) >> exact_log2 (INTVAL (operands[1]) & (- INTVAL (operands[1])))) < 0x100)"
+ [(set (match_dup 0) (match_dup 2))
+ (parallel [(set (match_dup 0) (ashift:SI (match_dup 0) (match_dup 3)))
+ (clobber (reg:CC 16))])]
+ "{
+ HOST_WIDE_INT val = INTVAL (operands[1]);
+ int shift = exact_log2 (val & ( - val));
+ operands[2] = GEN_INT (val >> shift);
+ operands[3] = GEN_INT (shift);
+ }"
+)
+
+;; When TARGET_SMALL_MODEL is defined we assume that all symbolic
+;; values are addresses which will fit in 20 bits.
+
+(define_insn "movsi_internal"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,r,r,red,V,r,m")
+ (match_operand:SI 1 "general_operand" "L,M,n,i,rde,r,rm,r"))]
+ ""
+ "*
+ {
+ switch (which_alternative)
+ {
+ case 0: return \"ldi:8 \\t#%1, %0\";
+ case 1: return \"ldi:20\\t#%1, %0\";
+ case 2: return \"ldi:32\\t#%1, %0\";
+ case 3: if (TARGET_SMALL_MODEL)
+ return \"ldi:20\\t%1, %0\";
+ else
+ return \"ldi:32\\t%1, %0\";
+ case 4: return \"mov \\t%1, %0\";
+ case 5: return \"st \\t%1, %0\";
+ case 6: return \"ld \\t%1, %0\";
+ case 7: return \"st \\t%1, %0\";
+ default: gcc_unreachable ();
+ }
+ }"
+ [(set (attr "length") (cond [(eq_attr "alternative" "1") (const_int 4)
+ (eq_attr "alternative" "2") (const_int 6)
+ (eq_attr "alternative" "3")
+ (if_then_else (eq_attr "size" "small")
+ (const_int 4)
+ (const_int 6))]
+ (const_int 2)))]
+)
+
+;;}}}
+;;{{{ 8 Byte Moves
+
+;; Note - the FR30 does not have an 8-byte load/store instruction,
+;; but we have to support this pattern because some other patterns
+;; (e.g. mulsidi3) can produce a DImode result.
+;; (This code is stolen from the M32R port.)
+
+(define_expand "movdi"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "")
+ (match_operand:DI 1 "general_operand" ""))]
+ ""
+ "
+ /* Everything except mem = const or mem = mem can be done easily. */
+
+ if (GET_CODE (operands[0]) == MEM)
+ operands[1] = force_reg (DImode, operands[1]);
+ "
+)
+
+;; We use an insn and a split so that we can generate
+;; RTL rather than text from fr30_move_double().
+
+(define_insn "*movdi_insn"
+ [(set (match_operand:DI 0 "nonimmediate_di_operand" "=r,r,m,r")
+ (match_operand:DI 1 "di_operand" "r,m,r,nF"))]
+ "register_operand (operands[0], DImode) || register_operand (operands[1], DImode)"
+ "#"
+ [(set_attr "length" "4,8,12,12")]
+)
+
+(define_split
+ [(set (match_operand:DI 0 "nonimmediate_di_operand" "")
+ (match_operand:DI 1 "di_operand" ""))]
+ "reload_completed"
+ [(match_dup 2)]
+ "operands[2] = fr30_move_double (operands);"
+)
+
+;;}}}
+;;{{{ Load & Store Multiple Registers
+
+;; The load multiple and store multiple patterns are implemented
+;; as peepholes because the only time they are expected to occur
+;; is during function prologues and epilogues.
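+;; For example (roughly): four consecutive pushes of high registers such as
+;; r8-r11 emitted by the prologue are combined into a single stm1
+;; instruction, and the matching pops in the epilogue into a single ldm1.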
+
+(define_peephole
+ [(set (mem:SI (pre_dec:SI (reg:SI 15)))
+ (match_operand:SI 0 "high_register_operand" "h"))
+ (set (mem:SI (pre_dec:SI (reg:SI 15)))
+ (match_operand:SI 1 "high_register_operand" "h"))
+ (set (mem:SI (pre_dec:SI (reg:SI 15)))
+ (match_operand:SI 2 "high_register_operand" "h"))
+ (set (mem:SI (pre_dec:SI (reg:SI 15)))
+ (match_operand:SI 3 "high_register_operand" "h"))]
+ "fr30_check_multiple_regs (operands, 4, 1)"
+ "stm1 (%0, %1, %2, %3)"
+ [(set_attr "delay_type" "other")]
+)
+
+(define_peephole
+ [(set (mem:SI (pre_dec:SI (reg:SI 15)))
+ (match_operand:SI 0 "high_register_operand" "h"))
+ (set (mem:SI (pre_dec:SI (reg:SI 15)))
+ (match_operand:SI 1 "high_register_operand" "h"))
+ (set (mem:SI (pre_dec:SI (reg:SI 15)))
+ (match_operand:SI 2 "high_register_operand" "h"))]
+ "fr30_check_multiple_regs (operands, 3, 1)"
+ "stm1 (%0, %1, %2)"
+ [(set_attr "delay_type" "other")]
+)
+
+(define_peephole
+ [(set (mem:SI (pre_dec:SI (reg:SI 15)))
+ (match_operand:SI 0 "high_register_operand" "h"))
+ (set (mem:SI (pre_dec:SI (reg:SI 15)))
+ (match_operand:SI 1 "high_register_operand" "h"))]
+ "fr30_check_multiple_regs (operands, 2, 1)"
+ "stm1 (%0, %1)"
+ [(set_attr "delay_type" "other")]
+)
+
+(define_peephole
+ [(set (match_operand:SI 0 "high_register_operand" "h")
+ (mem:SI (post_inc:SI (reg:SI 15))))
+ (set (match_operand:SI 1 "high_register_operand" "h")
+ (mem:SI (post_inc:SI (reg:SI 15))))
+ (set (match_operand:SI 2 "high_register_operand" "h")
+ (mem:SI (post_inc:SI (reg:SI 15))))
+ (set (match_operand:SI 3 "high_register_operand" "h")
+ (mem:SI (post_inc:SI (reg:SI 15))))]
+ "fr30_check_multiple_regs (operands, 4, 0)"
+ "ldm1 (%0, %1, %2, %3)"
+ [(set_attr "delay_type" "other")]
+)
+
+(define_peephole
+ [(set (match_operand:SI 0 "high_register_operand" "h")
+ (mem:SI (post_inc:SI (reg:SI 15))))
+ (set (match_operand:SI 1 "high_register_operand" "h")
+ (mem:SI (post_inc:SI (reg:SI 15))))
+ (set (match_operand:SI 2 "high_register_operand" "h")
+ (mem:SI (post_inc:SI (reg:SI 15))))]
+ "fr30_check_multiple_regs (operands, 3, 0)"
+ "ldm1 (%0, %1, %2)"
+ [(set_attr "delay_type" "other")]
+)
+
+(define_peephole
+ [(set (match_operand:SI 0 "high_register_operand" "h")
+ (mem:SI (post_inc:SI (reg:SI 15))))
+ (set (match_operand:SI 1 "high_register_operand" "h")
+ (mem:SI (post_inc:SI (reg:SI 15))))]
+ "fr30_check_multiple_regs (operands, 2, 0)"
+ "ldm1 (%0, %1)"
+ [(set_attr "delay_type" "other")]
+)
+
+(define_peephole
+ [(set (mem:SI (pre_dec:SI (reg:SI 15)))
+ (match_operand:SI 0 "low_register_operand" "l"))
+ (set (mem:SI (pre_dec:SI (reg:SI 15)))
+ (match_operand:SI 1 "low_register_operand" "l"))
+ (set (mem:SI (pre_dec:SI (reg:SI 15)))
+ (match_operand:SI 2 "low_register_operand" "l"))
+ (set (mem:SI (pre_dec:SI (reg:SI 15)))
+ (match_operand:SI 3 "low_register_operand" "l"))]
+ "fr30_check_multiple_regs (operands, 4, 1)"
+ "stm0 (%0, %1, %2, %3)"
+ [(set_attr "delay_type" "other")]
+)
+
+(define_peephole
+ [(set (mem:SI (pre_dec:SI (reg:SI 15)))
+ (match_operand:SI 0 "low_register_operand" "l"))
+ (set (mem:SI (pre_dec:SI (reg:SI 15)))
+ (match_operand:SI 1 "low_register_operand" "l"))
+ (set (mem:SI (pre_dec:SI (reg:SI 15)))
+ (match_operand:SI 2 "low_register_operand" "l"))]
+ "fr30_check_multiple_regs (operands, 3, 1)"
+ "stm0 (%0, %1, %2)"
+ [(set_attr "delay_type" "other")]
+)
+
+(define_peephole
+ [(set (mem:SI (pre_dec:SI (reg:SI 15)))
+ (match_operand:SI 0 "low_register_operand" "l"))
+ (set (mem:SI (pre_dec:SI (reg:SI 15)))
+ (match_operand:SI 1 "low_register_operand" "l"))]
+ "fr30_check_multiple_regs (operands, 2, 1)"
+ "stm0 (%0, %1)"
+ [(set_attr "delay_type" "other")]
+)
+
+;;}}}
+;;{{{ Floating Point Moves
+
+;; Note - Patterns for SF mode moves are compulsory, but
+;; patterns for DF are optional, as GCC can synthesize them.
+
+(define_expand "movsf"
+ [(set (match_operand:SF 0 "general_operand" "")
+ (match_operand:SF 1 "general_operand" ""))]
+ ""
+ "{
+ if (!reload_in_progress && !reload_completed
+ && memory_operand (operands[0], SFmode)
+ && memory_operand (operands[1], SFmode))
+ operands[1] = copy_to_mode_reg (SFmode, operands[1]);
+ }"
+)
+
+(define_insn "*movsf_internal"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=r,r,red,m,r")
+ (match_operand:SF 1 "general_operand" "Fn,i,rde,r,rm"))]
+ ""
+ "*
+ {
+ switch (which_alternative)
+ {
+ case 0: return \"ldi:32\\t%1, %0\";
+ case 1: if (TARGET_SMALL_MODEL)
+ return \"ldi:20\\t%1, %0\";
+ else
+ return \"ldi:32\\t%1, %0\";
+ case 2: return \"mov \\t%1, %0\";
+ case 3: return \"st \\t%1, %0\";
+ case 4: return \"ld \\t%1, %0\";
+ default: gcc_unreachable ();
+ }
+ }"
+ [(set (attr "length") (cond [(eq_attr "alternative" "0") (const_int 6)
+ (eq_attr "alternative" "1")
+ (if_then_else (eq_attr "size" "small")
+ (const_int 4)
+ (const_int 6))]
+ (const_int 2)))]
+)
+
+(define_insn "*movsf_constant_store"
+ [(set (match_operand:SF 0 "memory_operand" "=m")
+ (match_operand:SF 1 "immediate_operand" "F"))]
+ ""
+ "*
+ {
+ const char * ldi_instr;
+ const char * tmp_reg;
+ static char buffer[100];
+
+ ldi_instr = fr30_const_double_is_zero (operands[1]) ? \"ldi:8\" : \"ldi:32\";
+
+ tmp_reg = reg_names [COMPILER_SCRATCH_REGISTER];
+
+ sprintf (buffer, \"%s\\t#%%1, %s\\t;\\n\\tst\\t%s, %%0\\t; Created by movsf_constant_store\",
+ ldi_instr, tmp_reg, tmp_reg);
+
+ return buffer;
+ }"
+ [(set_attr "length" "8")]
+)
+
+;;}}}
+
+;;}}}
+;;{{{ Conversions
+
+;; Signed conversions from a smaller integer to a larger integer
+
+(define_insn "extendqisi2"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (sign_extend:SI (match_operand:QI 1 "register_operand" "0")))]
+ ""
+ "extsb %0"
+)
+
+(define_insn "extendhisi2"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (sign_extend:SI (match_operand:HI 1 "register_operand" "0")))]
+ ""
+ "extsh %0"
+)
+
+;; Unsigned conversions from a smaller integer to a larger integer
+
+(define_insn "zero_extendqisi2"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (zero_extend:SI (match_operand:QI 1 "register_operand" "0")))]
+ ""
+ "extub %0"
+)
+
+(define_insn "zero_extendhisi2"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (zero_extend:SI (match_operand:HI 1 "register_operand" "0")))]
+ ""
+ "extuh %0"
+)
+
+;;}}}
+;;{{{ Arithmetic
+
+;;{{{ Addition
+
+;; This is a special pattern just for adjusting the stack size.
+(define_insn "add_to_stack"
+ [(set (reg:SI 15)
+ (plus:SI (reg:SI 15)
+ (match_operand:SI 0 "stack_add_operand" "i")))]
+ ""
+ "addsp %0"
+)
+
+;; We need some trickery to be able to handle the addition of
+;; large (i.e. outside the range -16 to +15) constants.  We need to be
+;; able to handle this because reload assumes that it can generate add
+;; instructions with arbitrarily sized constants.
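+;; For example (roughly): "r4 = r5 + 1000" cannot use addn or addn2, since
+;; 1000 does not fit their immediate range, so addsi_big_int first loads
+;; #1000 into r4 and then emits "addn r5, r4".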
+(define_expand "addsi3"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (plus:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "nonmemory_operand" "")))]
+ ""
+ "{
+ if ( GET_CODE (operands[2]) == REG
+ || GET_CODE (operands[2]) == SUBREG)
+ emit_insn (gen_addsi_regs (operands[0], operands[1], operands[2]));
+ else if (GET_CODE (operands[2]) != CONST_INT)
+ emit_insn (gen_addsi_big_int (operands[0], operands[1], operands[2]));
+ else if (INTVAL (operands[2]) >= -16
+ && INTVAL (operands[2]) <= 15
+ && (!REG_P (operands[1])
+ || !REGNO_PTR_FRAME_P (REGNO (operands[1]))
+ || REGNO (operands[1]) == STACK_POINTER_REGNUM))
+ emit_insn (gen_addsi_small_int (operands[0], operands[1], operands[2]));
+ else
+ emit_insn (gen_addsi_big_int (operands[0], operands[1], operands[2]));
+ DONE;
+ }"
+)
+
+(define_insn "addsi_regs"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (plus:SI (match_operand:SI 1 "register_operand" "%0")
+ (match_operand:SI 2 "register_operand" "r")))]
+ ""
+ "addn %2, %0"
+)
+
+;; Do not allow an eliminable register as the source register.  It
+;; might be eliminated in favor of the stack pointer, probably
+;; increasing the offset, and so rendering the instruction illegal.
+(define_insn "addsi_small_int"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (plus:SI (match_operand:SI 1 "register_operand" "0,0")
+ (match_operand:SI 2 "add_immediate_operand" "I,J")))]
+ "!REG_P (operands[1])
+ || !REGNO_PTR_FRAME_P (REGNO (operands[1]))
+ || REGNO (operands[1]) == STACK_POINTER_REGNUM"
+ "@
+ addn %2, %0
+ addn2 %2, %0"
+)
+
+(define_expand "addsi_big_int"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (plus:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "immediate_operand" "")))]
+ ""
+ "{
+ /* Cope with the possibility that ops 0 and 1 are the same register. */
+ if (rtx_equal_p (operands[0], operands[1]))
+ {
+ if (reload_in_progress || reload_completed)
+ {
+ rtx reg = gen_rtx_REG (SImode, 0/*COMPILER_SCRATCH_REGISTER*/);
+
+ emit_insn (gen_movsi (reg, operands[2]));
+ emit_insn (gen_addsi_regs (operands[0], operands[0], reg));
+ }
+ else
+ {
+ operands[2] = force_reg (SImode, operands[2]);
+ emit_insn (gen_addsi_regs (operands[0], operands[0], operands[2]));
+ }
+ }
+ else
+ {
+ emit_insn (gen_movsi (operands[0], operands[2]));
+ emit_insn (gen_addsi_regs (operands[0], operands[0], operands[1]));
+ }
+ DONE;
+ }"
+)
+
+(define_insn "*addsi_for_reload"
+ [(set (match_operand:SI 0 "register_operand" "=&r,r,r")
+ (plus:SI (match_operand:SI 1 "register_operand" "r,r,r")
+ (match_operand:SI 2 "immediate_operand" "L,M,n")))]
+ "reload_in_progress || reload_completed"
+ "@
+ ldi:8\\t#%2, %0 \\n\\taddn\\t%1, %0
+ ldi:20\\t#%2, %0 \\n\\taddn\\t%1, %0
+ ldi:32\\t#%2, %0 \\n\\taddn\\t%1, %0"
+ [(set_attr "length" "4,6,8")]
+)
+
+;;}}}
+;;{{{ Subtraction
+
+(define_insn "subsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (minus:SI (match_operand:SI 1 "register_operand" "0")
+ (match_operand:SI 2 "register_operand" "r")))]
+ ""
+ "subn %2, %0"
+)
+
+;;}}}
+;;{{{ Multiplication
+
+;; Signed multiplication producing 64-bit results from 32-bit inputs
+(define_insn "mulsidi3"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "%r"))
+ (sign_extend:DI (match_operand:SI 2 "register_operand" "r"))))
+ (clobber (reg:CC 16))]
+ ""
+ "mul %2, %1\\n\\tmov\\tmdh, %0\\n\\tmov\\tmdl, %p0"
+ [(set_attr "length" "6")]
+)
+
+;; Unsigned multiplication producing 64-bit results from 32-bit inputs
+(define_insn "umulsidi3"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "%r"))
+ (zero_extend:DI (match_operand:SI 2 "register_operand" "r"))))
+ (clobber (reg:CC 16))]
+ ""
+ "mulu %2, %1\\n\\tmov\\tmdh, %0\\n\\tmov\\tmdl, %p0"
+ [(set_attr "length" "6")]
+)
+
+;; Signed multiplication producing 32-bit result from 16-bit inputs
+(define_insn "mulhisi3"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (mult:SI (sign_extend:SI (match_operand:HI 1 "register_operand" "%r"))
+ (sign_extend:SI (match_operand:HI 2 "register_operand" "r"))))
+ (clobber (reg:CC 16))]
+ ""
+ "mulh %2, %1\\n\\tmov\\tmdl, %0"
+ [(set_attr "length" "4")]
+)
+
+;; Unsigned multiplication producing 32-bit result from 16-bit inputs
+(define_insn "umulhisi3"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (mult:SI (zero_extend:SI (match_operand:HI 1 "register_operand" "%r"))
+ (zero_extend:SI (match_operand:HI 2 "register_operand" "r"))))
+ (clobber (reg:CC 16))]
+ ""
+ "muluh %2, %1\\n\\tmov\\tmdl, %0"
+ [(set_attr "length" "4")]
+)
+
+;; Signed multiplication producing 32-bit result from 32-bit inputs
+(define_insn "mulsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (mult:SI (match_operand:SI 1 "register_operand" "%r")
+ (match_operand:SI 2 "register_operand" "r")))
+ (clobber (reg:CC 16))]
+ ""
+ "mul %2, %1\\n\\tmov\\tmdl, %0"
+ [(set_attr "length" "4")]
+)
+
+;;}}}
+;;}}}
+;;{{{ Shifts
+
+;; Arithmetic Shift Left
+(define_insn "ashlsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r")
+ (ashift:SI (match_operand:SI 1 "register_operand" "0,0,0")
+ (match_operand:SI 2 "nonmemory_operand" "r,I,K")))
+ (clobber (reg:CC 16))]
+ ""
+ "@
+ lsl %2, %0
+ lsl %2, %0
+ lsl2 %x2, %0"
+)
+
+;; Arithmetic Shift Right
+(define_insn "ashrsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r")
+ (ashiftrt:SI (match_operand:SI 1 "register_operand" "0,0,0")
+ (match_operand:SI 2 "nonmemory_operand" "r,I,K")))
+ (clobber (reg:CC 16))]
+ ""
+ "@
+ asr %2, %0
+ asr %2, %0
+ asr2 %x2, %0"
+)
+
+;; Logical Shift Right
+(define_insn "lshrsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r")
+ (lshiftrt:SI (match_operand:SI 1 "register_operand" "0,0,0")
+ (match_operand:SI 2 "nonmemory_operand" "r,I,K")))
+ (clobber (reg:CC 16))]
+ ""
+ "@
+ lsr %2, %0
+ lsr %2, %0
+ lsr2 %x2, %0"
+)
+
+;;}}}
+;;{{{ Logical Operations
+
+;; Logical AND, 32-bit integers
+(define_insn "andsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (and:SI (match_operand:SI 1 "register_operand" "%r")
+ (match_operand:SI 2 "register_operand" "0")))
+ (clobber (reg:CC 16))]
+ ""
+ "and %1, %0"
+)
+
+;; Inclusive OR, 32-bit integers
+(define_insn "iorsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (ior:SI (match_operand:SI 1 "register_operand" "%r")
+ (match_operand:SI 2 "register_operand" "0")))
+ (clobber (reg:CC 16))]
+ ""
+ "or %1, %0"
+)
+
+;; Exclusive OR, 32-bit integers
+(define_insn "xorsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (xor:SI (match_operand:SI 1 "register_operand" "%r")
+ (match_operand:SI 2 "register_operand" "0")))
+ (clobber (reg:CC 16))]
+ ""
+ "eor %1, %0"
+)
+
+;; One's complement, 32-bit integers
+(define_expand "one_cmplsi2"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (not:SI (match_operand:SI 1 "register_operand" "")))]
+ ""
+ "{
+ if (rtx_equal_p (operands[0], operands[1]))
+ {
+ if (reload_in_progress || reload_completed)
+ {
+ rtx reg = gen_rtx_REG (SImode, 0/*COMPILER_SCRATCH_REGISTER*/);
+
+ emit_insn (gen_movsi (reg, constm1_rtx));
+ emit_insn (gen_xorsi3 (operands[0], operands[0], reg));
+ }
+ else
+ {
+ rtx reg = gen_reg_rtx (SImode);
+
+ emit_insn (gen_movsi (reg, constm1_rtx));
+ emit_insn (gen_xorsi3 (operands[0], operands[0], reg));
+ }
+ }
+ else
+ {
+ emit_insn (gen_movsi_internal (operands[0], constm1_rtx));
+ emit_insn (gen_xorsi3 (operands[0], operands[1], operands[0]));
+ }
+ DONE;
+ }"
+)
+
+;;}}}
+;;{{{ Comparisons
+
+;; The actual comparisons, generated by the cbranch and/or cstore expanders
+
+(define_insn "*cmpsi_internal"
+ [(set (reg:CC 16)
+ (compare:CC (match_operand:SI 0 "register_operand" "r,r,r")
+ (match_operand:SI 1 "nonmemory_operand" "r,I,J")))]
+ ""
+ "@
+ cmp %1, %0
+ cmp %1, %0
+ cmp2 %1, %0"
+)
+
+;;}}}
+;;{{{ Branches
+
+;; Define_expand called by the machine-independent part of the compiler
+;; to emit a comparison of two operands followed by a conditional branch.
+
+(define_expand "cbranchsi4"
+ [(set (reg:CC 16)
+ (compare:CC (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "nonmemory_operand" "")))
+ (set (pc)
+ (if_then_else (match_operator 0 "ordered_comparison_operator"
+ [(reg:CC 16) (const_int 0)])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))]
+ ""
+ ""
+)
+
+
+;; Actual branches. We must allow for the (label_ref) and the (pc) to be
+;; swapped. If they are swapped, it reverses the sense of the branch.
+
+;; This pattern matches the (branch-if-true) branches generated above.
+;; It generates two different instruction sequences depending upon how
+;; far away the destination is.
+
+;; The calculation for the instruction length is derived as follows:
+;; The branch instruction has a 9-bit signed displacement so we have
+;; this inequality for the displacement:
+;;
+;; -256 <= pc < 256
+;; or
+;; -256 + 256 <= pc + 256 < 256 + 256
+;; i.e.
+;; 0 <= pc + 256 < 512
+;;
+;; if we consider the displacement as an unsigned value, then negative
+;; displacements become very large positive displacements, and the
+;; inequality becomes:
+;;
+;; pc + 256 < 512
+;;
+;; In order to allow for the fact that the real branch instruction works
+;; from pc + 2, we increase the offset to 258.
+;;
+;; Note - we do not have to worry about whether the branch is delayed or
+;; not, as branch shortening happens after delay slot reorganization.
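+;;
+;; As a rough worked example against the length test below (offset 254,
+;; bound 506): a target 100 bytes ahead gives 100 + 254 = 354 < 506, so
+;; the 2-byte short branch is used; a target 300 bytes ahead gives 554,
+;; and a target 300 bytes behind wraps to a large unsigned value, so both
+;; use the longer branch-over-ldi/jmp sequence instead.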
+
+(define_insn "*branch_true"
+ [(set (pc)
+ (if_then_else (match_operator 0 "comparison_operator"
+ [(reg:CC 16)
+ (const_int 0)])
+ (label_ref (match_operand 1 "" ""))
+ (pc)))]
+ ""
+ "*
+ {
+ if (get_attr_length (insn) == 2)
+ return \"b%b0%#\\t%l1\";
+ else
+ {
+ static char buffer [100];
+ const char * tmp_reg;
+ const char * ldi_insn;
+
+ tmp_reg = reg_names [COMPILER_SCRATCH_REGISTER];
+
+ ldi_insn = TARGET_SMALL_MODEL ? \"ldi:20\" : \"ldi:32\";
+
+ /* The code produced here is, for say the EQ case:
+
+ Bne 1f
+ LDI <label>, r0
+ JMP r0
+ 1: */
+
+ sprintf (buffer,
+ \"b%%B0\\t1f\\t;\\n\\t%s\\t%%l1, %s\\t;\\n\\tjmp%%#\\t@%s\\t;\\n1:\",
+ ldi_insn, tmp_reg, tmp_reg);
+
+ return buffer;
+ }
+ }"
+ [(set (attr "length") (if_then_else
+ (ltu
+ (plus
+ (minus
+ (match_dup 1)
+ (pc))
+ (const_int 254))
+ (const_int 506))
+ (const_int 2)
+ (if_then_else (eq_attr "size" "small")
+ (const_int 8)
+ (const_int 10))))
+ (set_attr "delay_type" "delayed")]
+)
+
+
+;; This pattern is a duplicate of the previous one, except that the
+;; branch occurs if the test is false, so the %B operator is used.
+(define_insn "*branch_false"
+ [(set (pc)
+ (if_then_else (match_operator 0 "comparison_operator"
+ [(reg:CC 16)
+ (const_int 0)])
+ (pc)
+ (label_ref (match_operand 1 "" ""))))]
+ ""
+ "*
+ {
+ if (get_attr_length (insn) == 2)
+ return \"b%B0%#\\t%l1 \";
+ else
+ {
+ static char buffer [100];
+ const char * tmp_reg;
+ const char * ldi_insn;
+
+ tmp_reg = reg_names [COMPILER_SCRATCH_REGISTER];
+
+ ldi_insn = TARGET_SMALL_MODEL ? \"ldi:20\" : \"ldi:32\";
+
+ sprintf (buffer,
+ \"b%%b0\\t1f\\t;\\n\\t%s\\t%%l1, %s\\t;\\n\\tjmp%%#\\t@%s\\t;\\n1:\",
+ ldi_insn, tmp_reg, tmp_reg);
+
+ return buffer;
+ }
+ }"
+ [(set (attr "length") (if_then_else (ltu (plus (minus (match_dup 1) (pc))
+ (const_int 254))
+ (const_int 506))
+ (const_int 2)
+ (if_then_else (eq_attr "size" "small")
+ (const_int 8)
+ (const_int 10))))
+ (set_attr "delay_type" "delayed")]
+)
+
+;;}}}
+;;{{{ Calls & Jumps
+
+;; Subroutine call instruction returning no value. Operand 0 is the function
+;; to call; operand 1 is the number of bytes of arguments pushed (in mode
+;; `SImode', except it is normally a `const_int'); operand 2 is the number of
+;; registers used as operands.
+
+(define_insn "call"
+ [(call (match_operand 0 "call_operand" "Qm")
+ (match_operand 1 "" "g"))
+ (clobber (reg:SI 17))]
+ ""
+ "call%#\\t%0"
+ [(set_attr "delay_type" "delayed")]
+)
+
+;; Subroutine call instruction returning a value. Operand 0 is the hard
+;; register in which the value is returned. There are three more operands, the
+;; same as the three operands of the `call' instruction (but with numbers
+;; increased by one).
+
+;; Subroutines that return `BLKmode' objects use the `call' insn.
+
+(define_insn "call_value"
+ [(set (match_operand 0 "register_operand" "=r")
+ (call (match_operand 1 "call_operand" "Qm")
+ (match_operand 2 "" "g")))
+ (clobber (reg:SI 17))]
+ ""
+ "call%#\\t%1"
+ [(set_attr "delay_type" "delayed")]
+)
+
+;; Normal unconditional jump.
+;; For a description of the computation of the length
+;; attribute see the branch patterns above.
+;;
+;; Although this instruction really clobbers r0, flow
+;; relies on jump being simplejump_p in several places
+;; and, as r0 is fixed, this doesn't change anything.
+(define_insn "jump"
+ [(set (pc) (label_ref (match_operand 0 "" "")))]
+ ""
+ "*
+ {
+ if (get_attr_length (insn) == 2)
+ return \"bra%#\\t%0\";
+ else
+ {
+ static char buffer [100];
+ const char * tmp_reg;
+ const char * ldi_insn;
+
+ tmp_reg = reg_names [COMPILER_SCRATCH_REGISTER];
+
+ ldi_insn = TARGET_SMALL_MODEL ? \"ldi:20\" : \"ldi:32\";
+
+ sprintf (buffer, \"%s\\t%%0, %s\\t;\\n\\tjmp%%#\\t@%s\\t;\",
+ ldi_insn, tmp_reg, tmp_reg);
+
+ return buffer;
+ }
+ }"
+ [(set (attr "length") (if_then_else (ltu (plus (minus (match_dup 0) (pc))
+ (const_int 254))
+ (const_int 506))
+ (const_int 2)
+ (if_then_else (eq_attr "size" "small")
+ (const_int 6)
+ (const_int 8))))
+ (set_attr "delay_type" "delayed")]
+)
+
+;; Indirect jump through a register
+(define_insn "indirect_jump"
+ [(set (pc) (match_operand:SI 0 "nonimmediate_operand" "r"))]
+ "GET_CODE (operands[0]) != MEM || GET_CODE (XEXP (operands[0], 0)) != PLUS"
+ "jmp%#\\t@%0"
+ [(set_attr "delay_type" "delayed")]
+)
+
+(define_insn "tablejump"
+ [(set (pc) (match_operand:SI 0 "register_operand" "r"))
+ (use (label_ref (match_operand 1 "" "")))]
+ ""
+ "jmp%#\\t@%0"
+ [(set_attr "delay_type" "delayed")]
+)
+
+;;}}}
+;;{{{ Function Prologues and Epilogues
+
+;; Called after register allocation to add any instructions needed for the
+;; prologue. Using a prologue insn is favored compared to putting all of the
+;; instructions in output_function_prologue(), since it allows the scheduler
+;; to intermix instructions with the saves of the callee saved registers.  In
+;; some cases, it might be necessary to emit a barrier instruction as the last
+;; insn to prevent such scheduling.
+(define_expand "prologue"
+ [(clobber (const_int 0))]
+ ""
+ "{
+ fr30_expand_prologue ();
+ DONE;
+ }"
+)
+
+;; Called after register allocation to add any instructions needed for the
+;; epilogue. Using an epilogue insn is favored compared to putting all of the
+;; instructions in output_function_epilogue(), since it allows the scheduler
+;; to intermix instructions with the restores of the callee saved registers.
+;; In some cases, it might be necessary to emit a barrier instruction as the
+;; first insn to prevent such scheduling.
+(define_expand "epilogue"
+ [(return)]
+ ""
+ "{
+ fr30_expand_epilogue ();
+ DONE;
+ }"
+)
+
+(define_insn "return_from_func"
+ [(return)
+ (use (reg:SI 17))]
+ "reload_completed"
+ "ret%#"
+ [(set_attr "delay_type" "delayed")]
+)
+
+(define_insn "leave_func"
+ [(set (reg:SI 15) (plus:SI (reg:SI 14) (const_int 4)))
+ (set (reg:SI 14) (mem:SI (minus:SI (reg:SI 15) (const_int 4))))]
+ "reload_completed"
+ "leave"
+)
+
+(define_expand "enter_func"
+ [(parallel
+ [(set (mem:SI (minus:SI (match_dup 1)
+ (const_int 4)))
+ (match_dup 2))
+ (set (match_dup 2)
+ (minus:SI (match_dup 1)
+ (const_int 4)))
+ (set (match_dup 1)
+ (minus:SI (match_dup 1)
+ (match_operand:SI 0 "immediate_operand")))]
+ )]
+ ""
+{
+ operands[1] = stack_pointer_rtx;
+ operands[2] = hard_frame_pointer_rtx;
+})
+
+(define_insn "*enter_func"
+ [(set (mem:SI (minus:SI (reg:SI 15)
+ (const_int 4)))
+ (reg:SI 14))
+ (set (reg:SI 14)
+ (minus:SI (reg:SI 15)
+ (const_int 4)))
+ (set (reg:SI 15)
+ (minus:SI (reg:SI 15)
+ (match_operand 0 "immediate_operand" "i")))]
+ "reload_completed"
+ "enter #%0"
+ [(set_attr "delay_type" "other")]
+)
+
+;;}}}
+;;{{{ Miscellaneous
+
+;; No operation, needed in case the user uses -g but not -O.
+(define_insn "nop"
+ [(const_int 0)]
+ ""
+ "nop"
+)
+
+;; Pseudo instruction that prevents the scheduler from moving code above this
+;; point.
+(define_insn "blockage"
+ [(unspec_volatile [(const_int 0)] 0)]
+ ""
+ ""
+ [(set_attr "length" "0")]
+)
+;;}}}
+
+;; Local Variables:
+;; mode: md
+;; folded-file: t
+;; End: